Diffstat (limited to 'arch')
877 files changed, 20738 insertions, 15754 deletions
diff --git a/arch/Kconfig b/arch/Kconfig index 75dd23acf133..8a7f7e1f2ca7 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -278,9 +278,6 @@ config HAVE_CLK The <linux/clk.h> calls support software clock gating and thus are a key power management tool on many systems. -config HAVE_DMA_API_DEBUG - bool - config HAVE_HW_BREAKPOINT bool depends on PERF_EVENTS @@ -600,21 +597,6 @@ config CC_STACKPROTECTOR_AUTO endchoice -config LD_DEAD_CODE_DATA_ELIMINATION - bool - help - Select this if the architecture wants to do dead code and - data elimination with the linker by compiling with - -ffunction-sections -fdata-sections and linking with - --gc-sections. - - This requires that the arch annotates or otherwise protects - its external entry points from being discarded. Linker scripts - must also merge .text.*, .data.*, and .bss.* correctly into - output sections. Care must be taken not to pull in unrelated - sections (e.g., '.text.init'). Typically '.' in section names - is used to distinguish them from label names / C identifiers. - config HAVE_ARCH_WITHIN_STACK_FRAMES bool help @@ -690,12 +672,6 @@ config MODULES_USE_ELF_REL Modules only use ELF REL relocations. Modules with ELF RELA relocations will give an error. -config HAVE_UNDERSCORE_SYMBOL_PREFIX - bool - help - Some architectures generate an _ in front of C symbols; things like - module loading and assembly files need to know about this. - config HAVE_IRQ_EXIT_ON_IRQ_STACK bool help @@ -874,6 +850,21 @@ config OLD_SIGACTION config COMPAT_OLD_SIGACTION bool +config 64BIT_TIME + def_bool ARCH_HAS_64BIT_TIME + help + This should be selected by all architectures that need to support + new system calls with a 64-bit time_t. This is relevant on all 32-bit + architectures, and 64-bit architectures as part of compat syscall + handling. + +config COMPAT_32BIT_TIME + def_bool (!64BIT && 64BIT_TIME) || COMPAT + help + This enables 32 bit time_t support in addition to 64 bit time_t support. + This is relevant on all 32-bit architectures, and 64-bit architectures + as part of compat syscall handling. + config ARCH_NO_COHERENT_DMA_MMAP bool diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index f19dc31288c8..0c4805a572c8 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -10,6 +10,8 @@ config ALPHA select HAVE_OPROFILE select HAVE_PCSPKR_PLATFORM select HAVE_PERF_EVENTS + select NEED_DMA_MAP_STATE + select NEED_SG_DMA_LENGTH select VIRT_TO_BUS select GENERIC_IRQ_PROBE select AUTO_IRQ_AFFINITY if SMP @@ -64,15 +66,6 @@ config ZONE_DMA bool default y -config ARCH_DMA_ADDR_T_64BIT - def_bool y - -config NEED_DMA_MAP_STATE - def_bool y - -config NEED_SG_DMA_LENGTH - def_bool y - config GENERIC_ISA_DMA bool default y @@ -346,9 +339,6 @@ config PCI_DOMAINS config PCI_SYSCALL def_bool PCI -config IOMMU_HELPER - def_bool PCI - config ALPHA_NONAME bool depends on ALPHA_BOOK1 || ALPHA_NONAME_CH @@ -586,7 +576,7 @@ config ARCH_DISCONTIGMEM_ENABLE Say Y to support efficient handling of discontiguous physical memory, for architectures which are either NUMA (Non-Uniform Memory Access) or have huge holes in the physical address space for other reasons. - See <file:Documentation/vm/numa> for more. + See <file:Documentation/vm/numa.rst> for more. 
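Earlier in this arch/Kconfig hunk, the new 64BIT_TIME and COMPAT_32BIT_TIME symbols exist because a signed 32-bit time_t runs out in January 2038. A minimal standalone sketch of that wraparound (plain userspace C, not part of this patch; shown only to illustrate the motivation):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Legacy 32-bit ABIs carry seconds-since-epoch in a signed
		 * 32-bit value; the largest representable instant is
		 * 2038-01-19 03:14:07 UTC. */
		int32_t t = INT32_MAX;

		/* Advance one second via unsigned arithmetic to avoid signed
		 * overflow; on the two's-complement targets Linux supports
		 * the value wraps to INT32_MIN, i.e. back to December 1901. */
		int32_t wrapped = (int32_t)((uint32_t)t + 1u);

		printf("last 32-bit second: %d\n", t);
		printf("one second later:   %d\n", wrapped);
		return 0;
	}

Hence the split: COMPAT_32BIT_TIME keeps the old 32-bit ABI working while 64BIT_TIME marks architectures that grow 64-bit time_t syscalls.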
source "mm/Kconfig" diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile index 2cc3cc519c54..c5ec8c09c0c6 100644 --- a/arch/alpha/Makefile +++ b/arch/alpha/Makefile @@ -11,7 +11,7 @@ NM := $(NM) -B LDFLAGS_vmlinux := -static -N #-relax -CHECKFLAGS += -D__alpha__ -m64 +CHECKFLAGS += -D__alpha__ cflags-y := -pipe -mno-fp-regs -ffixed-8 cflags-y += $(call cc-option, -fno-jump-tables) diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild index 9b68790013e2..0580cb8c84b2 100644 --- a/arch/alpha/include/asm/Kbuild +++ b/arch/alpha/include/asm/Kbuild @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 +generic-y += compat.h generic-y += exec.h generic-y += export.h generic-y += fb.h diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h index b9ec55351924..cf6bc1e64d66 100644 --- a/arch/alpha/include/asm/pci.h +++ b/arch/alpha/include/asm/pci.h @@ -56,11 +56,6 @@ struct pci_controller { /* IOMMU controls. */ -/* The PCI address space does not equal the physical memory address space. - The networking and block device layers use this boolean for bounce buffer - decisions. */ -#define PCI_DMA_BUS_IS_PHYS 0 - /* TODO: integrate with include/asm-generic/pci.h ? */ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) { diff --git a/arch/alpha/include/uapi/asm/Kbuild b/arch/alpha/include/uapi/asm/Kbuild index 9afaba5e5503..1a5b75310cf4 100644 --- a/arch/alpha/include/uapi/asm/Kbuild +++ b/arch/alpha/include/uapi/asm/Kbuild @@ -2,4 +2,8 @@ include include/uapi/asm-generic/Kbuild.asm generic-y += bpf_perf_event.h +generic-y += ipcbuf.h +generic-y += msgbuf.h generic-y += poll.h +generic-y += sembuf.h +generic-y += shmbuf.h diff --git a/arch/alpha/include/uapi/asm/ipcbuf.h b/arch/alpha/include/uapi/asm/ipcbuf.h deleted file mode 100644 index 90d6445a14df..000000000000 --- a/arch/alpha/include/uapi/asm/ipcbuf.h +++ /dev/null @@ -1,2 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#include <asm-generic/ipcbuf.h> diff --git a/arch/alpha/include/uapi/asm/msgbuf.h b/arch/alpha/include/uapi/asm/msgbuf.h deleted file mode 100644 index 8c5d4d8c1b16..000000000000 --- a/arch/alpha/include/uapi/asm/msgbuf.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _ALPHA_MSGBUF_H -#define _ALPHA_MSGBUF_H - -/* - * The msqid64_ds structure for alpha architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. 
- * - * Pad space is left for: - * - 2 miscellaneous 64-bit values - */ - -struct msqid64_ds { - struct ipc64_perm msg_perm; - __kernel_time_t msg_stime; /* last msgsnd time */ - __kernel_time_t msg_rtime; /* last msgrcv time */ - __kernel_time_t msg_ctime; /* last change time */ - unsigned long msg_cbytes; /* current number of bytes on queue */ - unsigned long msg_qnum; /* number of messages in queue */ - unsigned long msg_qbytes; /* max number of bytes on queue */ - __kernel_pid_t msg_lspid; /* pid of last msgsnd */ - __kernel_pid_t msg_lrpid; /* last receive pid */ - unsigned long __unused1; - unsigned long __unused2; -}; - -#endif /* _ALPHA_MSGBUF_H */ diff --git a/arch/alpha/include/uapi/asm/sembuf.h b/arch/alpha/include/uapi/asm/sembuf.h deleted file mode 100644 index f28ffa668b2f..000000000000 --- a/arch/alpha/include/uapi/asm/sembuf.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _ALPHA_SEMBUF_H -#define _ALPHA_SEMBUF_H - -/* - * The semid64_ds structure for alpha architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 2 miscellaneous 64-bit values - */ - -struct semid64_ds { - struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ - __kernel_time_t sem_otime; /* last semop time */ - __kernel_time_t sem_ctime; /* last change time */ - unsigned long sem_nsems; /* no. of semaphores in array */ - unsigned long __unused1; - unsigned long __unused2; -}; - -#endif /* _ALPHA_SEMBUF_H */ diff --git a/arch/alpha/include/uapi/asm/shmbuf.h b/arch/alpha/include/uapi/asm/shmbuf.h deleted file mode 100644 index 7e041ca2eb40..000000000000 --- a/arch/alpha/include/uapi/asm/shmbuf.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _ALPHA_SHMBUF_H -#define _ALPHA_SHMBUF_H - -/* - * The shmid64_ds structure for alpha architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 2 miscellaneous 64-bit values - */ - -struct shmid64_ds { - struct ipc64_perm shm_perm; /* operation perms */ - size_t shm_segsz; /* size of segment (bytes) */ - __kernel_time_t shm_atime; /* last attach time */ - __kernel_time_t shm_dtime; /* last detach time */ - __kernel_time_t shm_ctime; /* last change time */ - __kernel_pid_t shm_cpid; /* pid of creator */ - __kernel_pid_t shm_lpid; /* pid of last operator */ - unsigned long shm_nattch; /* no. 
of current attaches */ - unsigned long __unused1; - unsigned long __unused2; -}; - -struct shminfo64 { - unsigned long shmmax; - unsigned long shmmin; - unsigned long shmmni; - unsigned long shmseg; - unsigned long shmall; - unsigned long __unused1; - unsigned long __unused2; - unsigned long __unused3; - unsigned long __unused4; -}; - -#endif /* _ALPHA_SHMBUF_H */ diff --git a/arch/alpha/include/uapi/asm/siginfo.h b/arch/alpha/include/uapi/asm/siginfo.h index 0cf3b527b274..db3f0138536f 100644 --- a/arch/alpha/include/uapi/asm/siginfo.h +++ b/arch/alpha/include/uapi/asm/siginfo.h @@ -7,18 +7,4 @@ #include <asm-generic/siginfo.h> -/* - * SIGFPE si_codes - */ -#ifdef __KERNEL__ -#define FPE_FIXME 0 /* Broken dup of SI_USER */ -#endif /* __KERNEL__ */ - -/* - * SIGTRAP si_codes - */ -#ifdef __KERNEL__ -#define TRAP_FIXME 0 /* Broken dup of SI_USER */ -#endif /* __KERNEL__ */ - #endif diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 89faa6f4de47..6e921754c8fc 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -871,8 +871,7 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer, send a signal. Old exceptions are not signaled. */ fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr; if (fex) { - siginfo_t info; - int si_code = 0; + int si_code = FPE_FLTUNK; if (fex & IEEE_TRAP_ENABLE_DNO) si_code = FPE_FLTUND; if (fex & IEEE_TRAP_ENABLE_INE) si_code = FPE_FLTRES; @@ -881,11 +880,9 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer, if (fex & IEEE_TRAP_ENABLE_DZE) si_code = FPE_FLTDIV; if (fex & IEEE_TRAP_ENABLE_INV) si_code = FPE_FLTINV; - info.si_signo = SIGFPE; - info.si_errno = 0; - info.si_code = si_code; - info.si_addr = NULL; /* FIXME */ - send_sig_info(SIGFPE, &info, current); + send_sig_fault(SIGFPE, si_code, + (void __user *)NULL, /* FIXME */ + 0, current); } return 0; } diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c index 9ebb3bcbc626..8c0c4ee0be6e 100644 --- a/arch/alpha/kernel/signal.c +++ b/arch/alpha/kernel/signal.c @@ -219,14 +219,8 @@ do_sigreturn(struct sigcontext __user *sc) /* Send SIGTRAP if we're single-stepping: */ if (ptrace_cancel_bpt (current)) { - siginfo_t info; - - info.si_signo = SIGTRAP; - info.si_errno = 0; - info.si_code = TRAP_BRKPT; - info.si_addr = (void __user *) regs->pc; - info.si_trapno = 0; - send_sig_info(SIGTRAP, &info, current); + send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *) regs->pc, 0, + current); } return; @@ -253,14 +247,8 @@ do_rt_sigreturn(struct rt_sigframe __user *frame) /* Send SIGTRAP if we're single-stepping: */ if (ptrace_cancel_bpt (current)) { - siginfo_t info; - - info.si_signo = SIGTRAP; - info.si_errno = 0; - info.si_code = TRAP_BRKPT; - info.si_addr = (void __user *) regs->pc; - info.si_trapno = 0; - send_sig_info(SIGTRAP, &info, current); + send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *) regs->pc, 0, + current); } return; diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index f43bd05dede2..bc9627698796 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -213,7 +213,6 @@ do_entArith(unsigned long summary, unsigned long write_mask, struct pt_regs *regs) { long si_code = FPE_FLTINV; - siginfo_t info; if (summary & 1) { /* Software-completion summary bit is set, so try to @@ -228,17 +227,12 @@ do_entArith(unsigned long summary, unsigned long write_mask, } die_if_kernel("Arithmetic fault", regs, 0, NULL); - info.si_signo = SIGFPE; - info.si_errno = 0; - info.si_code = 
si_code; - info.si_addr = (void __user *) regs->pc; - send_sig_info(SIGFPE, &info, current); + send_sig_fault(SIGFPE, si_code, (void __user *) regs->pc, 0, current); } asmlinkage void do_entIF(unsigned long type, struct pt_regs *regs) { - siginfo_t info; int signo, code; if ((regs->ps & ~IPL_MAX) == 0) { @@ -270,31 +264,20 @@ do_entIF(unsigned long type, struct pt_regs *regs) switch (type) { case 0: /* breakpoint */ - info.si_signo = SIGTRAP; - info.si_errno = 0; - info.si_code = TRAP_BRKPT; - info.si_trapno = 0; - info.si_addr = (void __user *) regs->pc; - if (ptrace_cancel_bpt(current)) { regs->pc -= 4; /* make pc point to former bpt */ } - send_sig_info(SIGTRAP, &info, current); + send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc, 0, + current); return; case 1: /* bugcheck */ - info.si_signo = SIGTRAP; - info.si_errno = 0; - info.si_code = TRAP_FIXME; - info.si_addr = (void __user *) regs->pc; - info.si_trapno = 0; - send_sig_info(SIGTRAP, &info, current); + send_sig_fault(SIGTRAP, TRAP_UNK, (void __user *) regs->pc, 0, + current); return; case 2: /* gentrap */ - info.si_addr = (void __user *) regs->pc; - info.si_trapno = regs->r16; switch ((long) regs->r16) { case GEN_INTOVF: signo = SIGFPE; @@ -326,7 +309,7 @@ do_entIF(unsigned long type, struct pt_regs *regs) break; case GEN_ROPRAND: signo = SIGFPE; - code = FPE_FIXME; + code = FPE_FLTUNK; break; case GEN_DECOVF: @@ -348,15 +331,12 @@ do_entIF(unsigned long type, struct pt_regs *regs) case GEN_SUBRNG7: default: signo = SIGTRAP; - code = TRAP_FIXME; + code = TRAP_UNK; break; } - info.si_signo = signo; - info.si_errno = 0; - info.si_code = code; - info.si_addr = (void __user *) regs->pc; - send_sig_info(signo, &info, current); + send_sig_fault(signo, code, (void __user *) regs->pc, regs->r16, + current); return; case 4: /* opDEC */ @@ -380,11 +360,9 @@ do_entIF(unsigned long type, struct pt_regs *regs) if (si_code == 0) return; if (si_code > 0) { - info.si_signo = SIGFPE; - info.si_errno = 0; - info.si_code = si_code; - info.si_addr = (void __user *) regs->pc; - send_sig_info(SIGFPE, &info, current); + send_sig_fault(SIGFPE, si_code, + (void __user *) regs->pc, 0, + current); return; } } @@ -409,11 +387,7 @@ do_entIF(unsigned long type, struct pt_regs *regs) ; } - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_ILLOPC; - info.si_addr = (void __user *) regs->pc; - send_sig_info(SIGILL, &info, current); + send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, 0, current); } /* There is an ifdef in the PALcode in MILO that enables a @@ -426,15 +400,9 @@ do_entIF(unsigned long type, struct pt_regs *regs) asmlinkage void do_entDbg(struct pt_regs *regs) { - siginfo_t info; - die_if_kernel("Instruction fault", regs, 0, NULL); - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_ILLOPC; - info.si_addr = (void __user *) regs->pc; - force_sig_info(SIGILL, &info, current); + force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, 0, current); } @@ -758,7 +726,7 @@ do_entUnaUser(void __user * va, unsigned long opcode, unsigned long tmp1, tmp2, tmp3, tmp4; unsigned long fake_reg, *reg_addr = &fake_reg; - siginfo_t info; + int si_code; long error; /* Check the UAC bits to decide what the user wants us to do @@ -981,34 +949,27 @@ do_entUnaUser(void __user * va, unsigned long opcode, give_sigsegv: regs->pc -= 4; /* make pc point to faulting insn */ - info.si_signo = SIGSEGV; - info.si_errno = 0; /* We need to replicate some of the logic in mm/fault.c, since we don't have access to the fault code in 
the exception handling return path. */ if ((unsigned long)va >= TASK_SIZE) - info.si_code = SEGV_ACCERR; + si_code = SEGV_ACCERR; else { struct mm_struct *mm = current->mm; down_read(&mm->mmap_sem); if (find_vma(mm, (unsigned long)va)) - info.si_code = SEGV_ACCERR; + si_code = SEGV_ACCERR; else - info.si_code = SEGV_MAPERR; + si_code = SEGV_MAPERR; up_read(&mm->mmap_sem); } - info.si_addr = va; - send_sig_info(SIGSEGV, &info, current); + send_sig_fault(SIGSEGV, si_code, va, 0, current); return; give_sigbus: regs->pc -= 4; - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRALN; - info.si_addr = va; - send_sig_info(SIGBUS, &info, current); + send_sig_fault(SIGBUS, BUS_ADRALN, va, 0, current); return; } diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index cd3c572ee912..de2bd217adad 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -88,7 +88,6 @@ do_page_fault(unsigned long address, unsigned long mmcsr, struct mm_struct *mm = current->mm; const struct exception_table_entry *fixup; int fault, si_code = SEGV_MAPERR; - siginfo_t info; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; /* As of EV6, a load into $31/$f31 is a prefetch, and never faults @@ -221,21 +220,13 @@ retry: up_read(&mm->mmap_sem); /* Send a sigbus, regardless of whether we were in kernel or user mode. */ - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRERR; - info.si_addr = (void __user *) address; - force_sig_info(SIGBUS, &info, current); + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address, 0, current); if (!user_mode(regs)) goto no_context; return; do_sigsegv: - info.si_signo = SIGSEGV; - info.si_errno = 0; - info.si_code = si_code; - info.si_addr = (void __user *) address; - force_sig_info(SIGSEGV, &info, current); + force_sig_fault(SIGSEGV, si_code, (void __user *) address, 0, current); return; #ifdef CONFIG_ALPHA_LARGE_VMALLOC diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index d76bf4a83740..e81bcd271be7 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -9,11 +9,15 @@ config ARC def_bool y select ARC_TIMERS + select ARCH_HAS_SYNC_DMA_FOR_CPU + select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_SG_CHAIN select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select COMMON_CLK + select DMA_NONCOHERENT_OPS + select DMA_NONCOHERENT_MMAP select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC) select GENERIC_CLOCKEVENTS select GENERIC_FIND_FIRST_BIT @@ -44,6 +48,7 @@ config ARC select HAVE_GENERIC_DMA_COHERENT select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZMA + select ARCH_HAS_PTE_SPECIAL config MIGHT_HAVE_PCI bool @@ -453,16 +458,11 @@ config ARC_HAS_PAE40 default n depends on ISA_ARCV2 select HIGHMEM + select PHYS_ADDR_T_64BIT help Enable access to physical memory beyond 4G, only supported on ARC cores with 40 bit Physical Addressing support -config ARCH_PHYS_ADDR_T_64BIT - def_bool ARC_HAS_PAE40 - -config ARCH_DMA_ADDR_T_64BIT - bool - config ARC_KVADDR_SIZE int "Kernel Virtual Address Space size (MB)" range 0 512 diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index 4bd5d4369e05..feed50ce89fa 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -1,7 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 generic-y += bugs.h +generic-y += compat.h generic-y += device.h generic-y += div64.h +generic-y += dma-mapping.h generic-y += emergency-restart.h generic-y += extable.h generic-y += fb.h diff --git 
a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h deleted file mode 100644 index 7a16824bfe98..000000000000 --- a/arch/arc/include/asm/dma-mapping.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * DMA Mapping glue for ARC - * - * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef ASM_ARC_DMA_MAPPING_H -#define ASM_ARC_DMA_MAPPING_H - -extern const struct dma_map_ops arc_dma_ops; - -static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) -{ - return &arc_dma_ops; -} - -#endif diff --git a/arch/arc/include/asm/pci.h b/arch/arc/include/asm/pci.h index ba56c23c1b20..4ff53c041c64 100644 --- a/arch/arc/include/asm/pci.h +++ b/arch/arc/include/asm/pci.h @@ -16,12 +16,6 @@ #define PCIBIOS_MIN_MEM 0x100000 #define pcibios_assign_all_busses() 1 -/* - * The PCI address space does equal the physical memory address space. - * The networking and block device layers use this boolean for bounce - * buffer decisions. - */ -#define PCI_DMA_BUS_IS_PHYS 1 #endif /* __KERNEL__ */ diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index 08fe33830d4b..8ec5599a0957 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h @@ -320,8 +320,6 @@ PTE_BIT_FUNC(mkexec, |= (_PAGE_EXECUTE)); PTE_BIT_FUNC(mkspecial, |= (_PAGE_SPECIAL)); PTE_BIT_FUNC(mkhuge, |= (_PAGE_HW_SZ)); -#define __HAVE_ARCH_PTE_SPECIAL - static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index 1dcc404b5aec..8c1071840979 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -16,13 +16,12 @@ * The default DMA address == Phy address which is 0x8000_0000 based. */ -#include <linux/dma-mapping.h> +#include <linux/dma-noncoherent.h> #include <asm/cache.h> #include <asm/cacheflush.h> - -static void *arc_dma_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) +void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs) { unsigned long order = get_order(size); struct page *page; @@ -89,7 +88,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size, return kvaddr; } -static void arc_dma_free(struct device *dev, size_t size, void *vaddr, +void arch_dma_free(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { phys_addr_t paddr = dma_handle; @@ -105,9 +104,9 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr, __free_pages(page, get_order(size)); } -static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs) +int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs) { unsigned long user_count = vma_pages(vma); unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; @@ -130,149 +129,14 @@ static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma, return ret; } -/* - * streaming DMA Mapping API... 
- * CPU accesses page via normal paddr, thus needs to explicitly made - * consistent before each use - */ -static void _dma_cache_sync(phys_addr_t paddr, size_t size, - enum dma_data_direction dir) +void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) { - switch (dir) { - case DMA_FROM_DEVICE: - dma_cache_inv(paddr, size); - break; - case DMA_TO_DEVICE: - dma_cache_wback(paddr, size); - break; - case DMA_BIDIRECTIONAL: - dma_cache_wback_inv(paddr, size); - break; - default: - pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr); - } + dma_cache_wback(paddr, size); } -/* - * arc_dma_map_page - map a portion of a page for streaming DMA - * - * Ensure that any data held in the cache is appropriately discarded - * or written back. - * - * The device owns this memory once this call has completed. The CPU - * can regain ownership by calling dma_unmap_page(). - * - * Note: while it takes struct page as arg, caller can "abuse" it to pass - * a region larger than PAGE_SIZE, provided it is physically contiguous - * and this still works correctly - */ -static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - unsigned long attrs) +void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) { - phys_addr_t paddr = page_to_phys(page) + offset; - - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - _dma_cache_sync(paddr, size, dir); - - return paddr; -} - -/* - * arc_dma_unmap_page - unmap a buffer previously mapped through dma_map_page() - * - * After this call, reads by the CPU to the buffer are guaranteed to see - * whatever the device wrote there. - * - * Note: historically this routine was not implemented for ARC - */ -static void arc_dma_unmap_page(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - phys_addr_t paddr = handle; - - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - _dma_cache_sync(paddr, size, dir); + dma_cache_inv(paddr, size); } - -static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, unsigned long attrs) -{ - struct scatterlist *s; - int i; - - for_each_sg(sg, s, nents, i) - s->dma_address = dma_map_page(dev, sg_page(s), s->offset, - s->length, dir); - - return nents; -} - -static void arc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, - unsigned long attrs) -{ - struct scatterlist *s; - int i; - - for_each_sg(sg, s, nents, i) - arc_dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, - attrs); -} - -static void arc_dma_sync_single_for_cpu(struct device *dev, - dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) -{ - _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE); -} - -static void arc_dma_sync_single_for_device(struct device *dev, - dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) -{ - _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE); -} - -static void arc_dma_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sglist, int nelems, - enum dma_data_direction dir) -{ - int i; - struct scatterlist *sg; - - for_each_sg(sglist, sg, nelems, i) - _dma_cache_sync(sg_phys(sg), sg->length, dir); -} - -static void arc_dma_sync_sg_for_device(struct device *dev, - struct scatterlist *sglist, int nelems, - enum dma_data_direction dir) -{ - int i; - struct scatterlist *sg; - - for_each_sg(sglist, sg, 
nelems, i) - _dma_cache_sync(sg_phys(sg), sg->length, dir); -} - -static int arc_dma_supported(struct device *dev, u64 dma_mask) -{ - /* Support 32 bit DMA mask exclusively */ - return dma_mask == DMA_BIT_MASK(32); -} - -const struct dma_map_ops arc_dma_ops = { - .alloc = arc_dma_alloc, - .free = arc_dma_free, - .mmap = arc_dma_mmap, - .map_page = arc_dma_map_page, - .unmap_page = arc_dma_unmap_page, - .map_sg = arc_dma_map_sg, - .unmap_sg = arc_dma_unmap_sg, - .sync_single_for_device = arc_dma_sync_single_for_device, - .sync_single_for_cpu = arc_dma_sync_single_for_cpu, - .sync_sg_for_cpu = arc_dma_sync_sg_for_cpu, - .sync_sg_for_device = arc_dma_sync_sg_for_device, - .dma_supported = arc_dma_supported, -}; -EXPORT_SYMBOL(arc_dma_ops); diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index a0b7bd6d030d..b884bbd6f354 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -70,6 +70,8 @@ void do_page_fault(unsigned long address, struct pt_regs *regs) int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + clear_siginfo(&info); + /* * We fault-in kernel-space virtual memory on-demand. The * 'reference' page table is init_mm.pgd. diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index a7f8e7f4b88f..534563ac7f5f 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -8,6 +8,7 @@ config ARM select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FORTIFY_SOURCE + select ARCH_HAS_PTE_SPECIAL if ARM_LPAE select ARCH_HAS_SET_MEMORY select ARCH_HAS_PHYS_TO_DMA select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL @@ -60,7 +61,6 @@ config ARM select HAVE_CONTEXT_TRACKING select HAVE_C_RECORDMCOUNT select HAVE_DEBUG_KMEMLEAK - select HAVE_DMA_API_DEBUG select HAVE_DMA_CONTIGUOUS if MMU select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE @@ -96,6 +96,7 @@ config ARM select HAVE_VIRT_CPU_ACCOUNTING_GEN select IRQ_FORCED_THREADING select MODULES_USE_ELF_REL + select NEED_DMA_MAP_STATE select NO_BOOTMEM select OF_EARLY_FLATTREE if OF select OF_RESERVED_MEM if OF @@ -119,9 +120,6 @@ config ARM_HAS_SG_CHAIN select ARCH_HAS_SG_CHAIN bool -config NEED_SG_DMA_LENGTH - bool - config ARM_DMA_USE_IOMMU bool select ARM_HAS_SG_CHAIN @@ -224,9 +222,6 @@ config ARCH_MAY_HAVE_PC_FDC config ZONE_DMA bool -config NEED_DMA_MAP_STATE - def_bool y - config ARCH_SUPPORTS_UPROBES def_bool y @@ -1704,6 +1699,7 @@ config ARCH_WANT_GENERAL_HUGETLB config ARM_MODULE_PLTS bool "Use PLTs to allow module memory to spill over into vmalloc area" depends on MODULES + default y help Allocate PLTs when loading modules so that jumps and calls whose targets are too far away for their relative offsets to be encoded @@ -1714,7 +1710,8 @@ config ARM_MODULE_PLTS rounding up to page size, the actual memory footprint is usually the same. - Say y if you are getting out of memory errors while loading modules + Disabling this is usually safe for small single-platform + configurations. If unsure, say y. source "mm/Kconfig" @@ -1778,12 +1775,6 @@ config SECCOMP and the task is only allowed to execute a few safe syscalls defined by each seccomp mode. 
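Stepping back to the arch/arc/mm/dma.c hunks above: the conversion from the private arc_dma_ops to the generic noncoherent helpers leaves the streaming DMA API a driver sees unchanged. A minimal hypothetical driver fragment (example_tx, dev, buf and len are illustrative names, not from this patch) showing the ownership handoff that those two cache operations implement:

	#include <linux/dma-mapping.h>

	/* Hypothetical fragment: hand "buf" to a device for transmit. */
	static int example_tx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		/* CPU -> device handoff: on ARC this now reaches
		 * arch_sync_dma_for_device(), which writes back the cache
		 * lines covering buf so the device reads current data. */
		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* ... program the device with "handle" and wait ... */

		/* Device -> CPU handoff; for DMA_FROM_DEVICE buffers this
		 * is where arch_sync_dma_for_cpu() invalidates stale lines. */
		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}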
-config SWIOTLB - def_bool y - -config IOMMU_HELPER - def_bool SWIOTLB - config PARAVIRT bool "Enable paravirtualization code" help @@ -1815,6 +1806,7 @@ config XEN depends on MMU select ARCH_DMA_ADDR_T_64BIT select ARM_PSCI + select SWIOTLB select SWIOTLB_XEN select PARAVIRT help diff --git a/arch/arm/Makefile b/arch/arm/Makefile index e4e537f27339..1dc4045e1af6 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -106,7 +106,7 @@ tune-$(CONFIG_CPU_V6K) =$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) tune-y := $(tune-y) ifeq ($(CONFIG_AEABI),y) -CFLAGS_ABI :=-mabi=aapcs-linux -mno-thumb-interwork -mfpu=vfp +CFLAGS_ABI :=-mabi=aapcs-linux -mfpu=vfp else CFLAGS_ABI :=$(call cc-option,-mapcs-32,-mabi=apcs-gnu) $(call cc-option,-mno-thumb-interwork,) endif @@ -135,7 +135,7 @@ endif KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float -CHECKFLAGS += -D__arm__ -m32 +CHECKFLAGS += -D__arm__ #Default value head-y := arch/arm/kernel/head$(MMUEXT).o diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 6a4e7341ecd3..a3c5fbcad4ab 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -113,7 +113,7 @@ CFLAGS_fdt_ro.o := $(nossp_flags) CFLAGS_fdt_rw.o := $(nossp_flags) CFLAGS_fdt_wip.o := $(nossp_flags) -ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj) +ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj) asflags-y := -DZIMAGE # Supply kernel BSS size to the decompressor via a linker symbol. diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi index 4cc09e43eea2..6916d7532ad2 100644 --- a/arch/arm/boot/dts/armada-38x.dtsi +++ b/arch/arm/boot/dts/armada-38x.dtsi @@ -163,7 +163,7 @@ }; uart0: serial@12000 { - compatible = "snps,dw-apb-uart"; + compatible = "marvell,armada-38x-uart"; reg = <0x12000 0x100>; reg-shift = <2>; interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; @@ -173,7 +173,7 @@ }; uart1: serial@12100 { - compatible = "snps,dw-apb-uart"; + compatible = "marvell,armada-38x-uart"; reg = <0x12100 0x100>; reg-shift = <2>; interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts index 2459d133f1be..f50b19447de6 100644 --- a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts +++ b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts @@ -161,7 +161,7 @@ }; at24@50 { - compatible = "at24,24c01"; + compatible = "atmel,24c01"; pagesize = <8>; reg = <0x50>; }; @@ -213,7 +213,7 @@ #size-cells = <0>; reg = <6>; eeprom@51 { - compatible = "at,24c01"; + compatible = "atmel,24c01"; pagesize = <8>; reg = <0x51>; }; @@ -224,7 +224,7 @@ #size-cells = <0>; reg = <7>; eeprom@51 { - compatible = "at,24c01"; + compatible = "atmel,24c01"; pagesize = <8>; reg = <0x51>; }; diff --git a/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi index c0743305f31b..eb96ac3e6c1d 100644 --- a/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi +++ b/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi @@ -12,6 +12,8 @@ #size-cells = <1>; compatible = "st,stm32mp157-pinctrl"; ranges = <0 0x50002000 0xa400>; + interrupt-parent = <&exti>; + st,syscfg = <&exti 0x60 0xff>; pins-are-numbered; gpioa: gpio@50002000 { @@ -166,6 +168,8 @@ compatible = 
"st,stm32mp157-z-pinctrl"; ranges = <0 0x54004000 0x400>; pins-are-numbered; + interrupt-parent = <&exti>; + st,syscfg = <&exti 0x60 0xff>; status = "disabled"; gpioz: gpio@54004000 { diff --git a/arch/arm/boot/dts/stm32mp157c.dtsi b/arch/arm/boot/dts/stm32mp157c.dtsi index 9e17e42b02b2..4fa0df853c8a 100644 --- a/arch/arm/boot/dts/stm32mp157c.dtsi +++ b/arch/arm/boot/dts/stm32mp157c.dtsi @@ -183,6 +183,13 @@ status = "disabled"; }; + exti: interrupt-controller@5000d000 { + compatible = "st,stm32mp1-exti", "syscon"; + interrupt-controller; + #interrupt-cells = <2>; + reg = <0x5000d000 0x400>; + }; + usart1: serial@5c000000 { compatible = "st,stm32h7-uart"; reg = <0x5c000000 0x400>; diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c index 2b913f17d50f..ad574d20415c 100644 --- a/arch/arm/common/mcpm_entry.c +++ b/arch/arm/common/mcpm_entry.c @@ -9,6 +9,7 @@ * published by the Free Software Foundation. */ +#include <linux/export.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/irqflags.h> @@ -174,6 +175,7 @@ bool mcpm_is_available(void) { return (platform_ops) ? true : false; } +EXPORT_SYMBOL_GPL(mcpm_is_available); /* * We can't use regular spinlocks. In the switcher case, it is possible diff --git a/arch/arm/crypto/sha1-armv4-large.S b/arch/arm/crypto/sha1-armv4-large.S index 99207c45ec10..f82cd8cf5a09 100644 --- a/arch/arm/crypto/sha1-armv4-large.S +++ b/arch/arm/crypto/sha1-armv4-large.S @@ -1,4 +1,14 @@ #define __ARM_ARCH__ __LINUX_ARM_ARCH__ +@ SPDX-License-Identifier: GPL-2.0 + +@ This code is taken from the OpenSSL project but the author (Andy Polyakov) +@ has relicensed it under the GPLv2. Therefore this program is free software; +@ you can redistribute it and/or modify it under the terms of the GNU General +@ Public License version 2 as published by the Free Software Foundation. +@ +@ The original headers, including the original license headers, are +@ included below for completeness. + @ ==================================================================== @ Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl index fac0533ea633..b9ec44060ed3 100644 --- a/arch/arm/crypto/sha256-armv4.pl +++ b/arch/arm/crypto/sha256-armv4.pl @@ -1,12 +1,19 @@ #!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 + +# This code is taken from the OpenSSL project but the author (Andy Polyakov) +# has relicensed it under the GPLv2. Therefore this program is free software; +# you can redistribute it and/or modify it under the terms of the GNU General +# Public License version 2 as published by the Free Software Foundation. +# +# The original headers, including the original license headers, are +# included below for completeness. # ==================================================================== # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL # project. The module is, however, dual licensed under OpenSSL and # CRYPTOGAMS licenses depending on where you obtain it. For further # details see http://www.openssl.org/~appro/cryptogams/. -# -# Permission to use under GPL terms is granted. # ==================================================================== # SHA256 block procedure for ARMv4. May 2007. 
diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped index 555a1a8eec90..3b58300d611c 100644 --- a/arch/arm/crypto/sha256-core.S_shipped +++ b/arch/arm/crypto/sha256-core.S_shipped @@ -1,11 +1,18 @@ +@ SPDX-License-Identifier: GPL-2.0 + +@ This code is taken from the OpenSSL project but the author (Andy Polyakov) +@ has relicensed it under the GPLv2. Therefore this program is free software; +@ you can redistribute it and/or modify it under the terms of the GNU General +@ Public License version 2 as published by the Free Software Foundation. +@ +@ The original headers, including the original license headers, are +@ included below for completeness. @ ==================================================================== @ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and @ CRYPTOGAMS licenses depending on where you obtain it. For further @ details see http://www.openssl.org/~appro/cryptogams/. -@ -@ Permission to use under GPL terms is granted. @ ==================================================================== @ SHA256 block procedure for ARMv4. May 2007. diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl index a2b11a844357..fb5d15048c0b 100644 --- a/arch/arm/crypto/sha512-armv4.pl +++ b/arch/arm/crypto/sha512-armv4.pl @@ -1,12 +1,19 @@ #!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 + +# This code is taken from the OpenSSL project but the author (Andy Polyakov) +# has relicensed it under the GPLv2. Therefore this program is free software; +# you can redistribute it and/or modify it under the terms of the GNU General +# Public License version 2 as published by the Free Software Foundation. +# +# The original headers, including the original license headers, are +# included below for completeness. # ==================================================================== # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL # project. The module is, however, dual licensed under OpenSSL and # CRYPTOGAMS licenses depending on where you obtain it. For further # details see http://www.openssl.org/~appro/cryptogams/. -# -# Permission to use under GPL terms is granted. # ==================================================================== # SHA512 block procedure for ARMv4. September 2007. diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped index 3694c4d4ca2b..b1c334a49cda 100644 --- a/arch/arm/crypto/sha512-core.S_shipped +++ b/arch/arm/crypto/sha512-core.S_shipped @@ -1,11 +1,18 @@ +@ SPDX-License-Identifier: GPL-2.0 + +@ This code is taken from the OpenSSL project but the author (Andy Polyakov) +@ has relicensed it under the GPLv2. Therefore this program is free software; +@ you can redistribute it and/or modify it under the terms of the GNU General +@ Public License version 2 as published by the Free Software Foundation. +@ +@ The original headers, including the original license headers, are +@ included below for completeness. @ ==================================================================== @ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and @ CRYPTOGAMS licenses depending on where you obtain it. For further @ details see http://www.openssl.org/~appro/cryptogams/. -@ -@ Permission to use under GPL terms is granted. 
@ ==================================================================== @ SHA512 block procedure for ARMv4. September 2007. diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 873e3c189279..1d66db9c9db5 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -1,3 +1,4 @@ +generic-y += compat.h generic-y += current.h generic-y += early_ioremap.h generic-y += emergency-restart.h diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 9342904cccca..0cd4dccbae78 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -447,6 +447,14 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) .size \name , . - \name .endm + .macro csdb +#ifdef CONFIG_THUMB2_KERNEL + .inst.w 0xf3af8014 +#else + .inst 0xe320f014 +#endif + .endm + .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req #ifndef CONFIG_CPU_USE_DOMAINS adds \tmp, \addr, #\size - 1 diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 40f5c410fd8c..69772e742a0a 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -17,6 +17,12 @@ #define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory") #define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory") #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory") +#ifdef CONFIG_THUMB2_KERNEL +#define CSDB ".inst.w 0xf3af8014" +#else +#define CSDB ".inst 0xe320f014" +#endif +#define csdb() __asm__ __volatile__(CSDB : : : "memory") #elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6 #define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ : : "r" (0) : "memory") @@ -37,6 +43,13 @@ #define dmb(x) __asm__ __volatile__ ("" : : : "memory") #endif +#ifndef CSDB +#define CSDB +#endif +#ifndef csdb +#define csdb() +#endif + #ifdef CONFIG_ARM_HEAVY_MB extern void (*soc_mb)(void); extern void arm_heavy_mb(void); @@ -63,6 +76,25 @@ extern void arm_heavy_mb(void); #define __smp_rmb() __smp_mb() #define __smp_wmb() dmb(ishst) +#ifdef CONFIG_CPU_SPECTRE +static inline unsigned long array_index_mask_nospec(unsigned long idx, + unsigned long sz) +{ + unsigned long mask; + + asm volatile( + "cmp %1, %2\n" + " sbc %0, %1, %1\n" + CSDB + : "=r" (mask) + : "r" (idx), "Ir" (sz) + : "cc"); + + return mask; +} +#define array_index_mask_nospec array_index_mask_nospec +#endif + #include <asm-generic/barrier.h> #endif /* !__ASSEMBLY__ */ diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h index a97f1ea708d1..73a99c72a930 100644 --- a/arch/arm/include/asm/bugs.h +++ b/arch/arm/include/asm/bugs.h @@ -10,12 +10,14 @@ #ifndef __ASM_BUGS_H #define __ASM_BUGS_H -#ifdef CONFIG_MMU extern void check_writebuffer_bugs(void); -#define check_bugs() check_writebuffer_bugs() +#ifdef CONFIG_MMU +extern void check_bugs(void); +extern void check_other_bugs(void); #else #define check_bugs() do { } while (0) +#define check_other_bugs() do { } while (0) #endif #endif diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h index 4c9fa72b59f5..07e27f212dc7 100644 --- a/arch/arm/include/asm/cp15.h +++ b/arch/arm/include/asm/cp15.h @@ -65,6 +65,9 @@ #define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v))) #define write_sysreg(v, ...) 
__write_sysreg(v, __VA_ARGS__) +#define BPIALL __ACCESS_CP15(c7, 0, c5, 6) +#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0) + extern unsigned long cr_alignment; /* defined in entry-armv.S */ static inline unsigned long get_cr(void) diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index cb546425da8a..26021980504d 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -77,8 +77,16 @@ #define ARM_CPU_PART_CORTEX_A12 0x4100c0d0 #define ARM_CPU_PART_CORTEX_A17 0x4100c0e0 #define ARM_CPU_PART_CORTEX_A15 0x4100c0f0 +#define ARM_CPU_PART_CORTEX_A53 0x4100d030 +#define ARM_CPU_PART_CORTEX_A57 0x4100d070 +#define ARM_CPU_PART_CORTEX_A72 0x4100d080 +#define ARM_CPU_PART_CORTEX_A73 0x4100d090 +#define ARM_CPU_PART_CORTEX_A75 0x4100d0a0 #define ARM_CPU_PART_MASK 0xff00fff0 +/* Broadcom cores */ +#define ARM_CPU_PART_BRAHMA_B15 0x420000f0 + /* DEC implemented cores */ #define ARM_CPU_PART_SA1100 0x4400a110 diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h index 3b73fdcf3627..8de1100d1067 100644 --- a/arch/arm/include/asm/kgdb.h +++ b/arch/arm/include/asm/kgdb.h @@ -77,7 +77,7 @@ extern int kgdb_fault_expected; #define KGDB_MAX_NO_CPUS 1 #define BUFMAX 400 -#define NUMREGBYTES (DBG_MAX_REG_NUM << 2) +#define NUMREGBYTES (GDB_MAX_REGS << 2) #define NUMCRITREGBYTES (32 << 2) #define _R0 0 diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 5a953ecb0d78..231e87ad45d5 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -61,8 +61,6 @@ struct kvm_vcpu; extern char __kvm_hyp_init[]; extern char __kvm_hyp_init_end[]; -extern char __kvm_hyp_vector[]; - extern void __kvm_flush_vm_context(void); extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); extern void __kvm_tlb_flush_vmid(struct kvm *kvm); diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index c7c28c885a19..2d75e77bf7bb 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -21,6 +21,7 @@ #include <linux/types.h> #include <linux/kvm_types.h> +#include <asm/cputype.h> #include <asm/kvm.h> #include <asm/kvm_asm.h> #include <asm/kvm_mmio.h> @@ -311,8 +312,29 @@ static inline void kvm_arm_vhe_guest_exit(void) {} static inline bool kvm_arm_harden_branch_predictor(void) { + switch(read_cpuid_part()) { +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR + case ARM_CPU_PART_BRAHMA_B15: + case ARM_CPU_PART_CORTEX_A12: + case ARM_CPU_PART_CORTEX_A15: + case ARM_CPU_PART_CORTEX_A17: + return true; +#endif + default: + return false; + } +} + +#define KVM_SSBD_UNKNOWN -1 +#define KVM_SSBD_FORCE_DISABLE 0 +#define KVM_SSBD_KERNEL 1 +#define KVM_SSBD_FORCE_ENABLE 2 +#define KVM_SSBD_MITIGATED 3 + +static inline int kvm_arm_have_ssbd(void) +{ /* No way to detect it yet, pretend it is not there. 
*/ - return false; + return KVM_SSBD_UNKNOWN; } static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {} diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index f675162663f0..8553d68b7c8a 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -327,7 +327,28 @@ static inline int kvm_read_guest_lock(struct kvm *kvm, static inline void *kvm_get_hyp_vector(void) { - return kvm_ksym_ref(__kvm_hyp_vector); + switch(read_cpuid_part()) { +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR + case ARM_CPU_PART_CORTEX_A12: + case ARM_CPU_PART_CORTEX_A17: + { + extern char __kvm_hyp_vector_bp_inv[]; + return kvm_ksym_ref(__kvm_hyp_vector_bp_inv); + } + + case ARM_CPU_PART_BRAHMA_B15: + case ARM_CPU_PART_CORTEX_A15: + { + extern char __kvm_hyp_vector_ic_inv[]; + return kvm_ksym_ref(__kvm_hyp_vector_ic_inv); + } +#endif + default: + { + extern char __kvm_hyp_vector[]; + return kvm_ksym_ref(__kvm_hyp_vector); + } + } } static inline int kvm_map_vectors(void) @@ -335,6 +356,11 @@ static inline int kvm_map_vectors(void) return 0; } +static inline int hyp_map_aux_data(void) +{ + return 0; +} + #define kvm_phys_to_vttbr(addr) (addr) #endif /* !__ASSEMBLY__ */ diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h index 6d1491c8ee22..5e088c83d3d8 100644 --- a/arch/arm/include/asm/mpu.h +++ b/arch/arm/include/asm/mpu.h @@ -12,60 +12,101 @@ /* ID_MMFR0 data relevant to MPU */ #define MMFR0_PMSA (0xF << 4) #define MMFR0_PMSAv7 (3 << 4) +#define MMFR0_PMSAv8 (4 << 4) /* MPU D/I Size Register fields */ -#define MPU_RSR_SZ 1 -#define MPU_RSR_EN 0 -#define MPU_RSR_SD 8 +#define PMSAv7_RSR_SZ 1 +#define PMSAv7_RSR_EN 0 +#define PMSAv7_RSR_SD 8 /* Number of subregions (SD) */ -#define MPU_NR_SUBREGS 8 -#define MPU_MIN_SUBREG_SIZE 256 +#define PMSAv7_NR_SUBREGS 8 +#define PMSAv7_MIN_SUBREG_SIZE 256 /* The D/I RSR value for an enabled region spanning the whole of memory */ -#define MPU_RSR_ALL_MEM 63 +#define PMSAv7_RSR_ALL_MEM 63 /* Individual bits in the DR/IR ACR */ -#define MPU_ACR_XN (1 << 12) -#define MPU_ACR_SHARED (1 << 2) +#define PMSAv7_ACR_XN (1 << 12) +#define PMSAv7_ACR_SHARED (1 << 2) /* C, B and TEX[2:0] bits only have semantic meanings when grouped */ -#define MPU_RGN_CACHEABLE 0xB -#define MPU_RGN_SHARED_CACHEABLE (MPU_RGN_CACHEABLE | MPU_ACR_SHARED) -#define MPU_RGN_STRONGLY_ORDERED 0 +#define PMSAv7_RGN_CACHEABLE 0xB +#define PMSAv7_RGN_SHARED_CACHEABLE (PMSAv7_RGN_CACHEABLE | PMSAv7_ACR_SHARED) +#define PMSAv7_RGN_STRONGLY_ORDERED 0 /* Main region should only be shared for SMP */ #ifdef CONFIG_SMP -#define MPU_RGN_NORMAL (MPU_RGN_CACHEABLE | MPU_ACR_SHARED) +#define PMSAv7_RGN_NORMAL (PMSAv7_RGN_CACHEABLE | PMSAv7_ACR_SHARED) #else -#define MPU_RGN_NORMAL MPU_RGN_CACHEABLE +#define PMSAv7_RGN_NORMAL PMSAv7_RGN_CACHEABLE #endif /* Access permission bits of ACR (only define those that we use)*/ -#define MPU_AP_PL1RO_PL0NA (0x5 << 8) -#define MPU_AP_PL1RW_PL0RW (0x3 << 8) -#define MPU_AP_PL1RW_PL0R0 (0x2 << 8) -#define MPU_AP_PL1RW_PL0NA (0x1 << 8) +#define PMSAv7_AP_PL1RO_PL0NA (0x5 << 8) +#define PMSAv7_AP_PL1RW_PL0RW (0x3 << 8) +#define PMSAv7_AP_PL1RW_PL0R0 (0x2 << 8) +#define PMSAv7_AP_PL1RW_PL0NA (0x1 << 8) + +#define PMSAv8_BAR_XN 1 + +#define PMSAv8_LAR_EN 1 +#define PMSAv8_LAR_IDX(n) (((n) & 0x7) << 1) + + +#define PMSAv8_AP_PL1RW_PL0NA (0 << 1) +#define PMSAv8_AP_PL1RW_PL0RW (1 << 1) +#define PMSAv8_AP_PL1RO_PL0RO (3 << 1) + +#ifdef CONFIG_SMP +#define PMSAv8_RGN_SHARED (3 << 3) // inner sharable +#else +#define 
PMSAv8_RGN_SHARED (0 << 3) +#endif + +#define PMSAv8_RGN_DEVICE_nGnRnE 0 +#define PMSAv8_RGN_NORMAL 1 + +#define PMSAv8_MAIR(attr, mt) ((attr) << ((mt) * 8)) + +#ifdef CONFIG_CPU_V7M +#define PMSAv8_MINALIGN 32 +#else +#define PMSAv8_MINALIGN 64 +#endif /* For minimal static MPU region configurations */ -#define MPU_PROBE_REGION 0 -#define MPU_BG_REGION 1 -#define MPU_RAM_REGION 2 -#define MPU_ROM_REGION 3 +#define PMSAv7_PROBE_REGION 0 +#define PMSAv7_BG_REGION 1 +#define PMSAv7_RAM_REGION 2 +#define PMSAv7_ROM_REGION 3 + +/* Fixed for PMSAv8 only */ +#define PMSAv8_XIP_REGION 0 +#define PMSAv8_KERNEL_REGION 1 /* Maximum number of regions Linux is interested in */ -#define MPU_MAX_REGIONS 16 +#define MPU_MAX_REGIONS 16 -#define MPU_DATA_SIDE 0 -#define MPU_INSTR_SIDE 1 +#define PMSAv7_DATA_SIDE 0 +#define PMSAv7_INSTR_SIDE 1 #ifndef __ASSEMBLY__ struct mpu_rgn { /* Assume same attributes for d/i-side */ - u32 drbar; - u32 drsr; - u32 dracr; + union { + u32 drbar; /* PMSAv7 */ + u32 prbar; /* PMSAv8 */ + }; + union { + u32 drsr; /* PMSAv7 */ + u32 prlar; /* PMSAv8 */ + }; + union { + u32 dracr; /* PMSAv7 */ + u32 unused; /* not used in PMSAv8 */ + }; }; struct mpu_rgn_info { @@ -75,16 +116,17 @@ struct mpu_rgn_info { extern struct mpu_rgn_info mpu_rgn_info; #ifdef CONFIG_ARM_MPU +extern void __init pmsav7_adjust_lowmem_bounds(void); +extern void __init pmsav8_adjust_lowmem_bounds(void); -extern void __init adjust_lowmem_bounds_mpu(void); -extern void __init mpu_setup(void); - +extern void __init pmsav7_setup(void); +extern void __init pmsav8_setup(void); #else - -static inline void adjust_lowmem_bounds_mpu(void) {} -static inline void mpu_setup(void) {} - -#endif /* !CONFIG_ARM_MPU */ +static inline void pmsav7_adjust_lowmem_bounds(void) {}; +static inline void pmsav8_adjust_lowmem_bounds(void) {}; +static inline void pmsav7_setup(void) {}; +static inline void pmsav8_setup(void) {}; +#endif #endif /* __ASSEMBLY__ */ diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h index 1f0de808d111..0abd389cf0ec 100644 --- a/arch/arm/include/asm/pci.h +++ b/arch/arm/include/asm/pci.h @@ -19,13 +19,6 @@ static inline int pci_proc_domain(struct pci_bus *bus) } #endif /* CONFIG_PCI_DOMAINS */ -/* - * The PCI address space does equal the physical memory address space. - * The networking and block device layers use this boolean for bounce - * buffer decisions. 
- */ -#define PCI_DMA_BUS_IS_PHYS (1) - #define HAVE_PCI_MMAP #define ARCH_GENERIC_PCI_MMAP_RESOURCE diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 2a4836087358..6d50a11d7793 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -219,7 +219,6 @@ static inline pte_t pte_mkspecial(pte_t pte) pte_val(pte) |= L_PTE_SPECIAL; return pte; } -#define __HAVE_ARCH_PTE_SPECIAL #define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY)) #define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY)) diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index f2e1af45bd6f..e25f4392e1b2 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -37,6 +37,10 @@ extern struct processor { */ void (*_proc_init)(void); /* + * Check for processor bugs + */ + void (*check_bugs)(void); + /* * Disable any processor specifics */ void (*_proc_fin)(void); diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h index 78f6db114faf..8e76db83c498 100644 --- a/arch/arm/include/asm/system_misc.h +++ b/arch/arm/include/asm/system_misc.h @@ -8,6 +8,7 @@ #include <linux/linkage.h> #include <linux/irqflags.h> #include <linux/reboot.h> +#include <linux/percpu.h> extern void cpu_init(void); @@ -15,6 +16,20 @@ void soft_restart(unsigned long); extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); extern void (*arm_pm_idle)(void); +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR +typedef void (*harden_branch_predictor_fn_t)(void); +DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn); +static inline void harden_branch_predictor(void) +{ + harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn, + smp_processor_id()); + if (fn) + fn(); +} +#else +#define harden_branch_predictor() do { } while (0) +#endif + #define UDBG_UNDEFINED (1 << 0) #define UDBG_SYSCALL (1 << 1) #define UDBG_BADABORT (1 << 2) diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 0bf2347495f1..3d614e90c19f 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -152,7 +152,7 @@ extern int __get_user_64t_4(void *); #define __get_user_check(x, p) \ ({ \ unsigned long __limit = current_thread_info()->addr_limit - 1; \ - register const typeof(*(p)) __user *__p asm("r0") = (p);\ + register typeof(*(p)) __user *__p asm("r0") = (p); \ register typeof(x) __r2 asm("r2"); \ register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h index 634e77107425..187ccf6496ad 100644 --- a/arch/arm/include/asm/v7m.h +++ b/arch/arm/include/asm/v7m.h @@ -64,9 +64,17 @@ #define MPU_CTRL_ENABLE 1 #define MPU_CTRL_PRIVDEFENA (1 << 2) -#define MPU_RNR 0x98 -#define MPU_RBAR 0x9c -#define MPU_RASR 0xa0 +#define PMSAv7_RNR 0x98 +#define PMSAv7_RBAR 0x9c +#define PMSAv7_RASR 0xa0 + +#define PMSAv8_RNR 0x98 +#define PMSAv8_RBAR 0x9c +#define PMSAv8_RLAR 0xa0 +#define PMSAv8_RBAR_A(n) (PMSAv8_RBAR + 8*(n)) +#define PMSAv8_RLAR_A(n) (PMSAv8_RLAR + 8*(n)) +#define PMSAv8_MAIR0 0xc0 +#define PMSAv8_MAIR1 0xc4 /* Cache opeartions */ #define V7M_SCB_ICIALLU 0x250 /* I-cache invalidate all to PoU */ diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index b59ac4bf82b8..8cad59465af3 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -31,6 +31,7 @@ else obj-y += entry-armv.o endif +obj-$(CONFIG_MMU) += 
bugs.o obj-$(CONFIG_CPU_IDLE) += cpuidle.o obj-$(CONFIG_ISA_DMA_API) += dma.o obj-$(CONFIG_FIQ) += fiq.o fiqasm.o diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index f369ece99958..27c5381518d8 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -194,9 +194,11 @@ int main(void) DEFINE(MPU_RNG_INFO_USED, offsetof(struct mpu_rgn_info, used)); DEFINE(MPU_RNG_SIZE, sizeof(struct mpu_rgn)); - DEFINE(MPU_RGN_DRBAR, offsetof(struct mpu_rgn, drbar)); - DEFINE(MPU_RGN_DRSR, offsetof(struct mpu_rgn, drsr)); - DEFINE(MPU_RGN_DRACR, offsetof(struct mpu_rgn, dracr)); + DEFINE(MPU_RGN_DRBAR, offsetof(struct mpu_rgn, drbar)); + DEFINE(MPU_RGN_DRSR, offsetof(struct mpu_rgn, drsr)); + DEFINE(MPU_RGN_DRACR, offsetof(struct mpu_rgn, dracr)); + DEFINE(MPU_RGN_PRBAR, offsetof(struct mpu_rgn, prbar)); + DEFINE(MPU_RGN_PRLAR, offsetof(struct mpu_rgn, prlar)); #endif return 0; } diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c new file mode 100644 index 000000000000..7be511310191 --- /dev/null +++ b/arch/arm/kernel/bugs.c @@ -0,0 +1,18 @@ +// SPDX-Identifier: GPL-2.0 +#include <linux/init.h> +#include <asm/bugs.h> +#include <asm/proc-fns.h> + +void check_other_bugs(void) +{ +#ifdef MULTI_CPU + if (processor.check_bugs) + processor.check_bugs(); +#endif +} + +void __init check_bugs(void) +{ + check_writebuffer_bugs(); + check_other_bugs(); +} diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c index e651c4d0a0d9..6739d37c2bc5 100644 --- a/arch/arm/kernel/dma.c +++ b/arch/arm/kernel/dma.c @@ -276,21 +276,9 @@ static int proc_dma_show(struct seq_file *m, void *v) return 0; } -static int proc_dma_open(struct inode *inode, struct file *file) -{ - return single_open(file, proc_dma_show, NULL); -} - -static const struct file_operations proc_dma_operations = { - .open = proc_dma_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static int __init proc_dma_init(void) { - proc_create("dma", 0, NULL, &proc_dma_operations); + proc_create_single("dma", 0, NULL, proc_dma_show); return 0; } diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 3c4f88701f22..20df608bf343 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -242,9 +242,7 @@ local_restart: tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls? bne __sys_trace - cmp scno, #NR_syscalls @ check upper syscall limit - badr lr, ret_fast_syscall @ return address - ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine + invoke_syscall tbl, scno, r10, ret_fast_syscall add r1, sp, #S_OFF 2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE) @@ -278,14 +276,8 @@ __sys_trace: mov r1, scno add r0, sp, #S_OFF bl syscall_trace_enter - - badr lr, __sys_trace_return @ return address - mov scno, r0 @ syscall number (possibly new) - add r1, sp, #S_R0 + S_OFF @ pointer to regs - cmp scno, #NR_syscalls @ check upper syscall limit - ldmccia r1, {r0 - r6} @ have to reload r0 - r6 - stmccia sp, {r4, r5} @ and update the stack args - ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine + mov scno, r0 + invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1 cmp scno, #-1 @ skip the syscall? 
bne 2b add sp, sp, #S_OFF @ restore stack @@ -363,6 +355,10 @@ sys_syscall: bic scno, r0, #__NR_OABI_SYSCALL_BASE cmp scno, #__NR_syscall - __NR_SYSCALL_BASE cmpne scno, #NR_syscalls @ check range +#ifdef CONFIG_CPU_SPECTRE + movhs scno, #0 + csdb +#endif stmloia sp, {r5, r6} @ shuffle args movlo r0, r1 movlo r1, r2 diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 0f07579af472..773424843d6e 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -378,6 +378,31 @@ #endif .endm + .macro invoke_syscall, table, nr, tmp, ret, reload=0 +#ifdef CONFIG_CPU_SPECTRE + mov \tmp, \nr + cmp \tmp, #NR_syscalls @ check upper syscall limit + movcs \tmp, #0 + csdb + badr lr, \ret @ return address + .if \reload + add r1, sp, #S_R0 + S_OFF @ pointer to regs + ldmccia r1, {r0 - r6} @ reload r0-r6 + stmccia sp, {r4, r5} @ update stack arguments + .endif + ldrcc pc, [\table, \tmp, lsl #2] @ call sys_* routine +#else + cmp \nr, #NR_syscalls @ check upper syscall limit + badr lr, \ret @ return address + .if \reload + add r1, sp, #S_R0 + S_OFF @ pointer to regs + ldmccia r1, {r0 - r6} @ reload r0-r6 + stmccia sp, {r4, r5} @ update stack arguments + .endif + ldrcc pc, [\table, \nr, lsl #2] @ call sys_* routine +#endif + .endm + /* * These are the registers used in the syscall handler, and allow us to * have in theory up to 7 arguments to a function - r0 to r6. diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 2e38f85b757a..dd546d65a383 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -68,14 +68,6 @@ ENTRY(stext) beq __error_p @ yes, error 'p' #ifdef CONFIG_ARM_MPU - /* Calculate the size of a region covering just the kernel */ - ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET - ldr r6, =(_end) @ Cover whole kernel - sub r6, r6, r5 @ Minimum size of region to map - clz r6, r6 @ Region size must be 2^N... - rsb r6, r6, #31 @ ...so round up region size - lsl r6, r6, #MPU_RSR_SZ @ Put size in right field - orr r6, r6, #(1 << MPU_RSR_EN) @ Set region enabled bit bl __setup_mpu #endif @@ -83,8 +75,8 @@ ENTRY(stext) ldr r12, [r10, #PROCINFO_INITFUNC] add r12, r12, r10 ret r12 -1: bl __after_proc_init - b __mmap_switched +1: ldr lr, =__mmap_switched + b __after_proc_init ENDPROC(stext) #ifdef CONFIG_SMP @@ -110,8 +102,6 @@ ENTRY(secondary_startup) ldr r7, __secondary_data #ifdef CONFIG_ARM_MPU - /* Use MPU region info supplied by __cpu_up */ - ldr r6, [r7] @ get secondary_data.mpu_rgn_info bl __secondary_setup_mpu @ Initialize the MPU #endif @@ -133,12 +123,45 @@ __secondary_data: /* * Set the Control Register and Read the process ID. 
*/ + .text __after_proc_init: +#ifdef CONFIG_ARM_MPU +M_CLASS(movw r12, #:lower16:BASEADDR_V7M_SCB) +M_CLASS(movt r12, #:upper16:BASEADDR_V7M_SCB) +M_CLASS(ldr r3, [r12, 0x50]) +AR_CLASS(mrc p15, 0, r3, c0, c1, 4) @ Read ID_MMFR0 + and r3, r3, #(MMFR0_PMSA) @ PMSA field + teq r3, #(MMFR0_PMSAv7) @ PMSA v7 + beq 1f + teq r3, #(MMFR0_PMSAv8) @ PMSA v8 + /* + * Memory region attributes for PMSAv8: + * + * n = AttrIndx[2:0] + * n MAIR + * DEVICE_nGnRnE 000 00000000 + * NORMAL 001 11111111 + */ + ldreq r3, =PMSAv8_MAIR(0x00, PMSAv8_RGN_DEVICE_nGnRnE) | \ + PMSAv8_MAIR(0xff, PMSAv8_RGN_NORMAL) +AR_CLASS(mcreq p15, 0, r3, c10, c2, 0) @ MAIR 0 +M_CLASS(streq r3, [r12, #PMSAv8_MAIR0]) + moveq r3, #0 +AR_CLASS(mcreq p15, 0, r3, c10, c2, 1) @ MAIR 1 +M_CLASS(streq r3, [r12, #PMSAv8_MAIR1]) + +1: +#endif #ifdef CONFIG_CPU_CP15 /* * CP15 system control register value returned in r0 from * the CPU init function. */ + +#ifdef CONFIG_ARM_MPU + biceq r0, r0, #CR_BR @ Disable the 'default mem-map' + orreq r0, r0, #CR_M @ Set SCTRL.M (MPU on) +#endif #if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6 orr r0, r0, #CR_A #else @@ -154,7 +177,15 @@ __after_proc_init: bic r0, r0, #CR_I #endif mcr p15, 0, r0, c1, c0, 0 @ write control reg + isb #elif defined (CONFIG_CPU_V7M) +#ifdef CONFIG_ARM_MPU + ldreq r3, [r12, MPU_CTRL] + biceq r3, #MPU_CTRL_PRIVDEFENA + orreq r3, #MPU_CTRL_ENABLE + streq r3, [r12, MPU_CTRL] + isb +#endif /* For V7M systems we want to modify the CCR similarly to the SCTLR */ #ifdef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #V7M_SCB_CCR_DC @@ -165,9 +196,7 @@ __after_proc_init: #ifdef CONFIG_CPU_ICACHE_DISABLE bic r0, r0, #V7M_SCB_CCR_IC #endif - movw r3, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR) - movt r3, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR) - str r0, [r3] + str r0, [r12, V7M_SCB_CCR] #endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */ ret lr ENDPROC(__after_proc_init) @@ -184,7 +213,7 @@ ENDPROC(__after_proc_init) .endm /* Setup a single MPU region, either D or I side (D-side for unified) */ -.macro setup_region bar, acr, sr, side = MPU_DATA_SIDE, unused +.macro setup_region bar, acr, sr, side = PMSAv7_DATA_SIDE, unused mcr p15, 0, \bar, c6, c1, (0 + \side) @ I/DRBAR mcr p15, 0, \acr, c6, c1, (4 + \side) @ I/DRACR mcr p15, 0, \sr, c6, c1, (2 + \side) @ I/DRSR @@ -192,14 +221,14 @@ ENDPROC(__after_proc_init) #else .macro set_region_nr tmp, rgnr, base mov \tmp, \rgnr - str \tmp, [\base, #MPU_RNR] + str \tmp, [\base, #PMSAv7_RNR] .endm .macro setup_region bar, acr, sr, unused, base lsl \acr, \acr, #16 orr \acr, \acr, \sr - str \bar, [\base, #MPU_RBAR] - str \acr, [\base, #MPU_RASR] + str \bar, [\base, #PMSAv7_RBAR] + str \acr, [\base, #PMSAv7_RASR] .endm #endif @@ -210,8 +239,9 @@ ENDPROC(__after_proc_init) * Region 2: Normal, Shared, cacheable for RAM. 
From PHYS_OFFSET, size from r6 * Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page * - * r6: Value to be written to DRSR (and IRSR if required) for MPU_RAM_REGION + * r6: Value to be written to DRSR (and IRSR if required) for PMSAv7_RAM_REGION */ + __HEAD ENTRY(__setup_mpu) @@ -223,7 +253,22 @@ AR_CLASS(mrc p15, 0, r0, c0, c1, 4) @ Read ID_MMFR0 M_CLASS(ldr r0, [r12, 0x50]) and r0, r0, #(MMFR0_PMSA) @ PMSA field teq r0, #(MMFR0_PMSAv7) @ PMSA v7 - bxne lr + beq __setup_pmsa_v7 + teq r0, #(MMFR0_PMSAv8) @ PMSA v8 + beq __setup_pmsa_v8 + + ret lr +ENDPROC(__setup_mpu) + +ENTRY(__setup_pmsa_v7) + /* Calculate the size of a region covering just the kernel */ + ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET + ldr r6, =(_end) @ Cover whole kernel + sub r6, r6, r5 @ Minimum size of region to map + clz r6, r6 @ Region size must be 2^N... + rsb r6, r6, #31 @ ...so round up region size + lsl r6, r6, #PMSAv7_RSR_SZ @ Put size in right field + orr r6, r6, #(1 << PMSAv7_RSR_EN) @ Set region enabled bit /* Determine whether the D/I-side memory map is unified. We set the * flags here and continue to use them for the rest of this function */ @@ -234,77 +279,189 @@ M_CLASS(ldr r0, [r12, #MPU_TYPE]) tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified /* Setup second region first to free up r6 */ - set_region_nr r0, #MPU_RAM_REGION, r12 + set_region_nr r0, #PMSAv7_RAM_REGION, r12 isb /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */ ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET - ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL) + ldr r5,=(PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL) - setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ PHYS_OFFSET, shared, enabled + setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ PHYS_OFFSET, shared, enabled beq 1f @ Memory-map not unified - setup_region r0, r5, r6, MPU_INSTR_SIDE, r12 @ PHYS_OFFSET, shared, enabled + setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ PHYS_OFFSET, shared, enabled 1: isb /* First/background region */ - set_region_nr r0, #MPU_BG_REGION, r12 + set_region_nr r0, #PMSAv7_BG_REGION, r12 isb /* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */ mov r0, #0 @ BG region starts at 0x0 - ldr r5,=(MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA) - mov r6, #MPU_RSR_ALL_MEM @ 4GB region, enabled + ldr r5,=(PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0NA) + mov r6, #PMSAv7_RSR_ALL_MEM @ 4GB region, enabled - setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ 0x0, BG region, enabled + setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ 0x0, BG region, enabled beq 2f @ Memory-map not unified - setup_region r0, r5, r6, MPU_INSTR_SIDE r12 @ 0x0, BG region, enabled + setup_region r0, r5, r6, PMSAv7_INSTR_SIDE r12 @ 0x0, BG region, enabled 2: isb #ifdef CONFIG_XIP_KERNEL - set_region_nr r0, #MPU_ROM_REGION, r12 + set_region_nr r0, #PMSAv7_ROM_REGION, r12 isb - ldr r5,=(MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL) + ldr r5,=(PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL) ldr r0, =CONFIG_XIP_PHYS_ADDR @ ROM start ldr r6, =(_exiprom) @ ROM end sub r6, r6, r0 @ Minimum size of region to map clz r6, r6 @ Region size must be 2^N... 
rsb r6, r6, #31 @ ...so round up region size - lsl r6, r6, #MPU_RSR_SZ @ Put size in right field - orr r6, r6, #(1 << MPU_RSR_EN) @ Set region enabled bit + lsl r6, r6, #PMSAv7_RSR_SZ @ Put size in right field + orr r6, r6, #(1 << PMSAv7_RSR_EN) @ Set region enabled bit - setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled + setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled beq 3f @ Memory-map not unified - setup_region r0, r5, r6, MPU_INSTR_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled + setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled 3: isb #endif + ret lr +ENDPROC(__setup_pmsa_v7) + +ENTRY(__setup_pmsa_v8) + mov r0, #0 +AR_CLASS(mcr p15, 0, r0, c6, c2, 1) @ PRSEL +M_CLASS(str r0, [r12, #PMSAv8_RNR]) + isb + +#ifdef CONFIG_XIP_KERNEL + ldr r5, =CONFIG_XIP_PHYS_ADDR @ ROM start + ldr r6, =(_exiprom) @ ROM end + sub r6, r6, #1 + bic r6, r6, #(PMSAv8_MINALIGN - 1) + + orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED) + orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN) + +AR_CLASS(mcr p15, 0, r5, c6, c8, 0) @ PRBAR0 +AR_CLASS(mcr p15, 0, r6, c6, c8, 1) @ PRLAR0 +M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(0)]) +M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(0)]) +#endif + + ldr r5, =KERNEL_START + ldr r6, =KERNEL_END + sub r6, r6, #1 + bic r6, r6, #(PMSAv8_MINALIGN - 1) + + orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED) + orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN) + +AR_CLASS(mcr p15, 0, r5, c6, c8, 4) @ PRBAR1 +AR_CLASS(mcr p15, 0, r6, c6, c8, 5) @ PRLAR1 +M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(1)]) +M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(1)]) + + /* Setup Background: 0x0 - min(KERNEL_START, XIP_PHYS_ADDR) */ +#ifdef CONFIG_XIP_KERNEL + ldr r6, =KERNEL_START + ldr r5, =CONFIG_XIP_PHYS_ADDR + cmp r6, r5 + movcs r6, r5 +#else + ldr r6, =KERNEL_START +#endif + cmp r6, #0 + beq 1f + + mov r5, #0 + sub r6, r6, #1 + bic r6, r6, #(PMSAv8_MINALIGN - 1) + + orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN) + orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN) + +AR_CLASS(mcr p15, 0, r5, c6, c9, 0) @ PRBAR2 +AR_CLASS(mcr p15, 0, r6, c6, c9, 1) @ PRLAR2 +M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(2)]) +M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(2)]) + +1: + /* Setup Background: max(KERNEL_END, _exiprom) - 0xffffffff */ +#ifdef CONFIG_XIP_KERNEL + ldr r5, =KERNEL_END + ldr r6, =(_exiprom) + cmp r5, r6 + movcc r5, r6 +#else + ldr r5, =KERNEL_END +#endif + mov r6, #0xffffffff + bic r6, r6, #(PMSAv8_MINALIGN - 1) + + orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN) + orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN) - /* Enable the MPU */ -AR_CLASS(mrc p15, 0, r0, c1, c0, 0) @ Read SCTLR -AR_CLASS(bic r0, r0, #CR_BR) @ Disable the 'default mem-map' -AR_CLASS(orr r0, r0, #CR_M) @ Set SCTRL.M (MPU on) -AR_CLASS(mcr p15, 0, r0, c1, c0, 0) @ Enable MPU +AR_CLASS(mcr p15, 0, r5, c6, c9, 4) @ PRBAR3 +AR_CLASS(mcr p15, 0, r6, c6, c9, 5) @ PRLAR3 +M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(3)]) +M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(3)]) -M_CLASS(ldr r0, [r12, #MPU_CTRL]) -M_CLASS(bic r0, #MPU_CTRL_PRIVDEFENA) -M_CLASS(orr r0, #MPU_CTRL_ENABLE) -M_CLASS(str r0, [r12, #MPU_CTRL]) +#ifdef CONFIG_XIP_KERNEL + /* Setup Background: min(_exiprom, KERNEL_END) - max(KERNEL_START, XIP_PHYS_ADDR) */ + ldr r5, =(_exiprom) + ldr r6, =KERNEL_END + cmp r5, r6 + movcs r5, r6 + + ldr r6, =KERNEL_START + ldr r0, =CONFIG_XIP_PHYS_ADDR + cmp r6, 
r0 + movcc r6, r0 + + sub r6, r6, #1 + bic r6, r6, #(PMSAv8_MINALIGN - 1) + + orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN) + orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN) + +#ifdef CONFIG_CPU_V7M + /* There is no alias for n == 4 */ + mov r0, #4 + str r0, [r12, #PMSAv8_RNR] @ PRSEL isb + str r5, [r12, #PMSAv8_RBAR_A(0)] + str r6, [r12, #PMSAv8_RLAR_A(0)] +#else + mcr p15, 0, r5, c6, c10, 1 @ PRBAR4 + mcr p15, 0, r6, c6, c10, 2 @ PRLAR4 +#endif +#endif ret lr -ENDPROC(__setup_mpu) +ENDPROC(__setup_pmsa_v8) #ifdef CONFIG_SMP /* * r6: pointer at mpu_rgn_info */ + .text ENTRY(__secondary_setup_mpu) + /* Use MPU region info supplied by __cpu_up */ + ldr r6, [r7] @ get secondary_data.mpu_rgn_info + /* Probe for v7 PMSA compliance */ mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0 and r0, r0, #(MMFR0_PMSA) @ PMSA field teq r0, #(MMFR0_PMSAv7) @ PMSA v7 - bne __error_p + beq __secondary_setup_pmsa_v7 + teq r0, #(MMFR0_PMSAv8) @ PMSA v8 + beq __secondary_setup_pmsa_v8 + b __error_p +ENDPROC(__secondary_setup_mpu) +/* + * r6: pointer at mpu_rgn_info + */ +ENTRY(__secondary_setup_pmsa_v7) /* Determine whether the D/I-side memory map is unified. We set the * flags here and continue to use them for the rest of this function */ mrc p15, 0, r0, c0, c0, 4 @ MPUIR @@ -328,25 +485,45 @@ ENTRY(__secondary_setup_mpu) ldr r6, [r3, #MPU_RGN_DRSR] ldr r5, [r3, #MPU_RGN_DRACR] - setup_region r0, r5, r6, MPU_DATA_SIDE + setup_region r0, r5, r6, PMSAv7_DATA_SIDE beq 2f - setup_region r0, r5, r6, MPU_INSTR_SIDE + setup_region r0, r5, r6, PMSAv7_INSTR_SIDE 2: isb mrc p15, 0, r0, c0, c0, 4 @ Reevaluate the MPUIR cmp r4, #0 bgt 1b - /* Enable the MPU */ - mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR - bic r0, r0, #CR_BR @ Disable the 'default mem-map' - orr r0, r0, #CR_M @ Set SCTRL.M (MPU on) - mcr p15, 0, r0, c1, c0, 0 @ Enable MPU + ret lr +ENDPROC(__secondary_setup_pmsa_v7) + +ENTRY(__secondary_setup_pmsa_v8) + ldr r4, [r6, #MPU_RNG_INFO_USED] +#ifndef CONFIG_XIP_KERNEL + add r4, r4, #1 +#endif + mov r5, #MPU_RNG_SIZE + add r3, r6, #MPU_RNG_INFO_RNGS + mla r3, r4, r5, r3 + +1: + sub r3, r3, #MPU_RNG_SIZE + sub r4, r4, #1 + + mcr p15, 0, r4, c6, c2, 1 @ PRSEL isb - ret lr -ENDPROC(__secondary_setup_mpu) + ldr r5, [r3, #MPU_RGN_PRBAR] + ldr r6, [r3, #MPU_RGN_PRLAR] + mcr p15, 0, r5, c6, c3, 0 @ PRBAR + mcr p15, 0, r6, c6, c3, 1 @ PRLAR + + cmp r4, #0 + bgt 1b + + ret lr +ENDPROC(__secondary_setup_pmsa_v8) #endif /* CONFIG_SMP */ #endif /* CONFIG_ARM_MPU */ #include "head-common.S" diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index 1d7061a38922..be42c4f66a40 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c @@ -303,12 +303,10 @@ static void armv6pmu_enable_event(struct perf_event *event) } static irqreturn_t -armv6pmu_handle_irq(int irq_num, - void *dev) +armv6pmu_handle_irq(struct arm_pmu *cpu_pmu) { unsigned long pmcr = armv6_pmcr_read(); struct perf_sample_data data; - struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); struct pt_regs *regs; int idx; diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 870b66c1e4ef..57f01e059f39 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -946,11 +946,10 @@ static void armv7pmu_disable_event(struct perf_event *event) raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } -static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) +static 
irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu) { u32 pmnc; struct perf_sample_data data; - struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); struct pt_regs *regs; int idx; diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index fcf218da660e..88d1a76f5367 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c @@ -142,11 +142,10 @@ xscale1_pmnc_counter_has_overflowed(unsigned long pmnc, } static irqreturn_t -xscale1pmu_handle_irq(int irq_num, void *dev) +xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu) { unsigned long pmnc; struct perf_sample_data data; - struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); struct pt_regs *regs; int idx; @@ -489,11 +488,10 @@ xscale2_pmnc_counter_has_overflowed(unsigned long of_flags, } static irqreturn_t -xscale2pmu_handle_irq(int irq_num, void *dev) +xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu) { unsigned long pmnc, of_flags; struct perf_sample_data data; - struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); struct pt_regs *regs; int idx; diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 7724b0f661b3..36718a424358 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -205,6 +205,7 @@ void ptrace_break(struct task_struct *tsk, struct pt_regs *regs) { siginfo_t info; + clear_siginfo(&info); info.si_signo = SIGTRAP; info.si_errno = 0; info.si_code = TRAP_BRKPT; diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index fc40a2b40595..35ca494c028c 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -754,7 +754,7 @@ int __init arm_add_memory(u64 start, u64 size) else size -= aligned_start - start; -#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT +#ifndef CONFIG_PHYS_ADDR_T_64BIT if (aligned_start > ULONG_MAX) { pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n", (long long)start); diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 2da087926ebe..0978282d5fc2 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -31,6 +31,7 @@ #include <linux/irq_work.h> #include <linux/atomic.h> +#include <asm/bugs.h> #include <asm/smp.h> #include <asm/cacheflush.h> #include <asm/cpu.h> @@ -236,8 +237,6 @@ int __cpu_disable(void) flush_cache_louis(); local_flush_tlb_all(); - clear_tasks_mm_cpumask(cpu); - return 0; } @@ -255,6 +254,7 @@ void __cpu_die(unsigned int cpu) } pr_debug("CPU%u: shutdown\n", cpu); + clear_tasks_mm_cpumask(cpu); /* * platform_cpu_kill() is generally expected to do the powering off * and/or cutting of clocks to the dying CPU. Optionally, this may @@ -405,6 +405,9 @@ asmlinkage void secondary_start_kernel(void) * before we continue - which happens after __cpu_up returns. 
*/ set_cpu_online(cpu, true); + + check_other_bugs(); + complete(&cpu_running); local_irq_enable(); diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c index a40ebb7c0896..d08099269e35 100644 --- a/arch/arm/kernel/suspend.c +++ b/arch/arm/kernel/suspend.c @@ -3,6 +3,7 @@ #include <linux/slab.h> #include <linux/mm_types.h> +#include <asm/bugs.h> #include <asm/cacheflush.h> #include <asm/idmap.h> #include <asm/pgalloc.h> @@ -36,6 +37,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) cpu_switch_mm(mm->pgd, mm); local_flush_bp_all(); local_flush_tlb_all(); + check_other_bugs(); } return ret; diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index 3bda08bee674..80517f293eb9 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -91,18 +91,6 @@ static int proc_status_show(struct seq_file *m, void *v) seq_printf(m, "Last process:\t\t%d\n", previous_pid); return 0; } - -static int proc_status_open(struct inode *inode, struct file *file) -{ - return single_open(file, proc_status_show, PDE_DATA(inode)); -} - -static const struct file_operations proc_status_fops = { - .open = proc_status_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; #endif /* @@ -112,6 +100,7 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr) { siginfo_t info; + clear_siginfo(&info); down_read(&current->mm->mmap_sem); if (find_vma(current->mm, addr) == NULL) info.si_code = SEGV_MAPERR; @@ -260,7 +249,8 @@ static int __init swp_emulation_init(void) return 0; #ifdef CONFIG_PROC_FS - if (!proc_create("cpu/swp_emulation", S_IRUGO, NULL, &proc_status_fops)) + if (!proc_create_single("cpu/swp_emulation", S_IRUGO, NULL, + proc_status_show)) return -ENOMEM; #endif /* CONFIG_PROC_FS */ diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 2fe87109ae46..badf02ca3693 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -441,6 +441,7 @@ asmlinkage void do_undefinstr(struct pt_regs *regs) siginfo_t info; void __user *pc; + clear_siginfo(&info); pc = (void __user *)instruction_pointer(regs); if (processor_mode(regs) == SVC_MODE) { @@ -540,6 +541,7 @@ static int bad_syscall(int n, struct pt_regs *regs) { siginfo_t info; + clear_siginfo(&info); if ((current->personality & PER_MASK) != PER_LINUX) { send_sig(SIGSEGV, current, 1); return regs->ARM_r0; @@ -607,6 +609,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) { siginfo_t info; + clear_siginfo(&info); if ((no >> 16) != (__ARM_NR_BASE>> 16)) return bad_syscall(no, regs); @@ -743,6 +746,8 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs) unsigned long addr = instruction_pointer(regs); siginfo_t info; + clear_siginfo(&info); + #ifdef CONFIG_DEBUG_USER if (user_debug & UDBG_BADABORT) { pr_err("[%d] %s: bad data abort: code %d instr 0x%08lx\n", diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S index d32f5d35f602..3593d5c1acd2 100644 --- a/arch/arm/kernel/vmlinux-xip.lds.S +++ b/arch/arm/kernel/vmlinux-xip.lds.S @@ -13,6 +13,7 @@ #include <asm/cache.h> #include <asm/thread_info.h> #include <asm/memory.h> +#include <asm/mpu.h> #include <asm/page.h> #include "vmlinux.lds.h" @@ -148,6 +149,9 @@ SECTIONS __init_end = .; BSS_SECTION(0, 0, 8) +#ifdef CONFIG_ARM_MPU + . 
= ALIGN(PMSAv8_MINALIGN); +#endif _end = .; STABS_DEBUG diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index b77dc675ae55..23150c0f0f4d 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -12,6 +12,7 @@ #include <asm/cache.h> #include <asm/thread_info.h> #include <asm/memory.h> +#include <asm/mpu.h> #include <asm/page.h> #include <asm/pgtable.h> @@ -54,6 +55,9 @@ SECTIONS . = ALIGN(1<<SECTION_SHIFT); #endif +#ifdef CONFIG_ARM_MPU + . = ALIGN(PMSAv8_MINALIGN); +#endif .text : { /* Real text segment */ _stext = .; /* Text and read-only data */ ARM_TEXT @@ -143,6 +147,9 @@ SECTIONS _edata = .; BSS_SECTION(0, 0, 0) +#ifdef CONFIG_ARM_MPU + . = ALIGN(PMSAv8_MINALIGN); +#endif _end = .; STABS_DEBUG diff --git a/arch/arm/kernel/vmlinux.lds.h b/arch/arm/kernel/vmlinux.lds.h index 71281e08e1d4..ae5fdff18406 100644 --- a/arch/arm/kernel/vmlinux.lds.h +++ b/arch/arm/kernel/vmlinux.lds.h @@ -27,24 +27,24 @@ #define PROC_INFO \ . = ALIGN(4); \ - VMLINUX_SYMBOL(__proc_info_begin) = .; \ + __proc_info_begin = .; \ *(.proc.info.init) \ - VMLINUX_SYMBOL(__proc_info_end) = .; + __proc_info_end = .; #define HYPERVISOR_TEXT \ - VMLINUX_SYMBOL(__hyp_text_start) = .; \ + __hyp_text_start = .; \ *(.hyp.text) \ - VMLINUX_SYMBOL(__hyp_text_end) = .; + __hyp_text_end = .; #define IDMAP_TEXT \ ALIGN_FUNCTION(); \ - VMLINUX_SYMBOL(__idmap_text_start) = .; \ + __idmap_text_start = .; \ *(.idmap.text) \ - VMLINUX_SYMBOL(__idmap_text_end) = .; \ + __idmap_text_end = .; \ . = ALIGN(PAGE_SIZE); \ - VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ + __hyp_idmap_text_start = .; \ *(.hyp.idmap.text) \ - VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; + __hyp_idmap_text_end = .; #define ARM_DISCARD \ *(.ARM.exidx.exit.text) \ diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S index 95a2faefc070..aa3f9a9837ac 100644 --- a/arch/arm/kvm/hyp/hyp-entry.S +++ b/arch/arm/kvm/hyp/hyp-entry.S @@ -16,6 +16,7 @@ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ +#include <linux/arm-smccc.h> #include <linux/linkage.h> #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> @@ -71,6 +72,90 @@ __kvm_hyp_vector: W(b) hyp_irq W(b) hyp_fiq +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR + .align 5 +__kvm_hyp_vector_ic_inv: + .global __kvm_hyp_vector_ic_inv + + /* + * We encode the exception entry in the bottom 3 bits of + * SP, and we have to guarantee to be 8 bytes aligned. + */ + W(add) sp, sp, #1 /* Reset 7 */ + W(add) sp, sp, #1 /* Undef 6 */ + W(add) sp, sp, #1 /* Syscall 5 */ + W(add) sp, sp, #1 /* Prefetch abort 4 */ + W(add) sp, sp, #1 /* Data abort 3 */ + W(add) sp, sp, #1 /* HVC 2 */ + W(add) sp, sp, #1 /* IRQ 1 */ + W(nop) /* FIQ 0 */ + + mcr p15, 0, r0, c7, c5, 0 /* ICIALLU */ + isb + + b decode_vectors + + .align 5 +__kvm_hyp_vector_bp_inv: + .global __kvm_hyp_vector_bp_inv + + /* + * We encode the exception entry in the bottom 3 bits of + * SP, and we have to guarantee to be 8 bytes aligned. + */ + W(add) sp, sp, #1 /* Reset 7 */ + W(add) sp, sp, #1 /* Undef 6 */ + W(add) sp, sp, #1 /* Syscall 5 */ + W(add) sp, sp, #1 /* Prefetch abort 4 */ + W(add) sp, sp, #1 /* Data abort 3 */ + W(add) sp, sp, #1 /* HVC 2 */ + W(add) sp, sp, #1 /* IRQ 1 */ + W(nop) /* FIQ 0 */ + + mcr p15, 0, r0, c7, c5, 6 /* BPIALL */ + isb + +decode_vectors: + +#ifdef CONFIG_THUMB2_KERNEL + /* + * Yet another silly hack: Use VPIDR as a temp register. + * Thumb2 is really a pain, as SP cannot be used with most + * of the bitwise instructions. 
The vect_br macro ensures things get cleaned up. */ + mcr p15, 4, r0, c0, c0, 0 /* VPIDR */ + mov r0, sp + and r0, r0, #7 + sub sp, sp, r0 + push {r1, r2} + mov r1, r0 + mrc p15, 4, r0, c0, c0, 0 /* VPIDR */ + mrc p15, 0, r2, c0, c0, 0 /* MIDR */ + mcr p15, 4, r2, c0, c0, 0 /* VPIDR */ +#endif + +.macro vect_br val, targ +ARM( eor sp, sp, #\val ) +ARM( tst sp, #7 ) +ARM( eorne sp, sp, #\val ) + +THUMB( cmp r1, #\val ) +THUMB( popeq {r1, r2} ) + + beq \targ +.endm + + vect_br 0, hyp_fiq + vect_br 1, hyp_irq + vect_br 2, hyp_hvc + vect_br 3, hyp_dabt + vect_br 4, hyp_pabt + vect_br 5, hyp_svc + vect_br 6, hyp_undef + vect_br 7, hyp_reset +#endif + .macro invalid_vector label, cause .align \label: mov r0, #\cause @@ -118,7 +203,7 @@ hyp_hvc: lsr r2, r2, #16 and r2, r2, #0xff cmp r2, #0 - bne guest_trap @ Guest called HVC + bne guest_hvc_trap @ Guest called HVC /* * Getting here means host called HVC, we shift parameters and branch @@ -149,7 +234,14 @@ hyp_hvc: bx ip 1: - push {lr} + /* + * Pushing r2 here is just a way of keeping the stack aligned to + * 8 bytes on any path that can trigger a HYP exception. Here, + * we may well be about to jump into the guest, and the guest + * exit would otherwise be badly decoded by our fancy + * "decode-exception-without-a-branch" code... + */ + push {r2, lr} mov lr, r0 mov r0, r1 @@ -159,7 +251,21 @@ hyp_hvc: THUMB( orr lr, #1) blx lr @ Call the HYP function - pop {lr} + pop {r2, lr} + eret + +guest_hvc_trap: + movw r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1 + movt r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1 + ldr r0, [sp] @ Guest's r0 + teq r0, r2 + bne guest_trap + add sp, sp, #12 + @ Returns: + @ r0 = 0 + @ r1 = HSR value (perfectly predictable) + @ r2 = ARM_SMCCC_ARCH_WORKAROUND_1 + mov r0, #0 eret guest_trap: diff --git a/arch/arm/mach-axxia/Kconfig b/arch/arm/mach-axxia/Kconfig index bb2ce1c63fd9..d3eae6037913 100644 --- a/arch/arm/mach-axxia/Kconfig +++ b/arch/arm/mach-axxia/Kconfig @@ -2,7 +2,6 @@ config ARCH_AXXIA bool "LSI Axxia platforms" depends on ARCH_MULTI_V7 && ARM_LPAE - select ARCH_DMA_ADDR_T_64BIT select ARM_AMBA select ARM_GIC select ARM_TIMER_SP804 diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig index c2f3b0d216a4..c46a728df44e 100644 --- a/arch/arm/mach-bcm/Kconfig +++ b/arch/arm/mach-bcm/Kconfig @@ -211,7 +211,6 @@ config ARCH_BRCMSTB select BRCMSTB_L2_IRQ select BCM7120_L2_IRQ select ARCH_HAS_HOLES_MEMORYMODEL - select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE select ZONE_DMA if ARM_LPAE select SOC_BRCMSTB select SOC_BUS diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index 0581ffbedddd..faf48a3b1fea 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c @@ -635,6 +635,7 @@ EXPORT_SYMBOL(ep93xx_keypad_release_gpio); *************************************************************************/ static struct resource ep93xx_i2s_resource[] = { DEFINE_RES_MEM(EP93XX_I2S_PHYS_BASE, 0x100), + DEFINE_RES_IRQ(IRQ_EP93XX_SAI), }; static struct platform_device ep93xx_i2s_device = { diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index 647c319f9f5f..2ca405816846 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -112,7 +112,6 @@ config SOC_EXYNOS5440 bool "SAMSUNG EXYNOS5440" default y depends on ARCH_EXYNOS5 - select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE select HAVE_ARM_ARCH_TIMER select AUTO_ZRELADDR select PINCTRL_EXYNOS5440
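The guest_hvc_trap fast path added to hyp-entry.S above lets a guest kernel issue the Spectre-v2 firmware workaround as a cheap hypercall: the stub recognises ARM_SMCCC_ARCH_WORKAROUND_1 in r0, the branch predictor has already been invalidated on vector entry, and it returns r0 = 0 without a full guest exit. On the caller's side this is an ordinary SMCCC 1.1 invocation; a minimal sketch (mirroring call_hvc_arch_workaround_1() in proc-v7-bugs.c later in this series; the function name is illustrative):

#include <linux/arm-smccc.h>

static void invalidate_branch_predictor_via_hyp(void)
{
	/* HVC conduit; the hyp stub above returns almost immediately. */
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}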
diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig index 81110ec34226..5552968f07f8 100644 --- a/arch/arm/mach-highbank/Kconfig +++ b/arch/arm/mach-highbank/Kconfig @@ -1,7 +1,6 @@ config ARCH_HIGHBANK bool "Calxeda ECX-1000/2000 (Highbank/Midway)" depends on ARCH_MULTI_V7 - select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE select ARCH_HAS_HOLES_MEMORYMODEL select ARCH_SUPPORTS_BIG_ENDIAN select ARM_AMBA diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig index a4065966881a..fafd3d7f9f8c 100644 --- a/arch/arm/mach-rockchip/Kconfig +++ b/arch/arm/mach-rockchip/Kconfig @@ -3,7 +3,6 @@ config ARCH_ROCKCHIP depends on ARCH_MULTI_V7 select PINCTRL select PINCTRL_ROCKCHIP - select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE select ARCH_HAS_RESET_CONTROLLER select ARM_AMBA select ARM_GIC diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c index bdb5ec1cf560..39aef4876ed4 100644 --- a/arch/arm/mach-rpc/ecard.c +++ b/arch/arm/mach-rpc/ecard.c @@ -657,25 +657,13 @@ static int ecard_devices_proc_show(struct seq_file *m, void *v) return 0; } -static int ecard_devices_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, ecard_devices_proc_show, NULL); -} - -static const struct file_operations bus_ecard_proc_fops = { - .owner = THIS_MODULE, - .open = ecard_devices_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static struct proc_dir_entry *proc_bus_ecard_dir = NULL; static void ecard_proc_init(void) { proc_bus_ecard_dir = proc_mkdir("bus/ecard", NULL); - proc_create("devices", 0, proc_bus_ecard_dir, &bus_ecard_proc_fops); + proc_create_single("devices", 0, proc_bus_ecard_dir, + ecard_devices_proc_show); } #define ec_set_resource(ec,nr,st,sz) \ diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c index f00988705408..5aa472892465 100644 --- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c +++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c @@ -9,6 +9,7 @@ #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/spi/spi.h> +#include <linux/gpio/machine.h> #include <linux/mfd/wm831x/irq.h> #include <linux/mfd/wm831x/gpio.h> @@ -206,9 +207,6 @@ static const struct i2c_board_info wm1277_devs[] = { }; static struct arizona_pdata wm5102_reva_pdata = { - .ldo1 = { - .ldoena = S3C64XX_GPN(7), - }, .gpio_base = CODEC_GPIO_BASE, .irq_flags = IRQF_TRIGGER_HIGH, .micd_pol_gpio = CODEC_GPIO_BASE + 4, @@ -237,10 +235,16 @@ static struct spi_board_info wm5102_reva_spi_devs[] = { }, }; -static struct arizona_pdata wm5102_pdata = { - .ldo1 = { - .ldoena = S3C64XX_GPN(7), +static struct gpiod_lookup_table wm5102_reva_gpiod_table = { + .dev_id = "spi0.1", /* SPI device name */ + .table = { + GPIO_LOOKUP("GPION", 7, + "wlf,ldoena", GPIO_ACTIVE_HIGH), + { }, }, +}; + +static struct arizona_pdata wm5102_pdata = { .gpio_base = CODEC_GPIO_BASE, .irq_flags = IRQF_TRIGGER_HIGH, .micd_pol_gpio = CODEC_GPIO_BASE + 2, @@ -264,6 +268,15 @@ static struct spi_board_info wm5102_spi_devs[] = { }, }; +static struct gpiod_lookup_table wm5102_gpiod_table = { + .dev_id = "spi0.1", /* SPI device name */ + .table = { + GPIO_LOOKUP("GPION", 7, + "wlf,ldo1ena", GPIO_ACTIVE_HIGH), + { }, + }, +}; + static struct spi_board_info wm5110_spi_devs[] = { [0] = { .modalias = "wm5110", @@ -366,6 +379,9 @@ static int wlf_gf_module_probe(struct i2c_client *i2c, rev == gf_mods[i].rev)) break; + gpiod_add_lookup_table(&wm5102_reva_gpiod_table); + gpiod_add_lookup_table(&wm5102_gpiod_table); + if (i < ARRAY_SIZE(gf_mods)) { dev_info(&i2c->dev, "%s revision %d\n", gf_mods[i].name, rev + 1);
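The Cragganmore change above drops the hard-coded S3C64XX_GPN(7) LDO-enable GPIO from arizona_pdata in favour of gpiod lookup tables keyed on the SPI device name ("spi0.1"). A consumer bound to that device can then resolve the descriptor by con_id alone, without knowing the controller or pin number; a minimal consumer-side sketch (hypothetical driver code, not part of this patch):

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_codec_probe(struct device *dev)
{
	struct gpio_desc *ldoena;

	/* Resolved via GPIO_LOOKUP("GPION", 7, "wlf,ldoena", ...) above. */
	ldoena = devm_gpiod_get(dev, "wlf,ldoena", GPIOD_OUT_LOW);
	if (IS_ERR(ldoena))
		return PTR_ERR(ldoena);

	gpiod_set_value_cansleep(ldoena, 1);	/* enable the LDO */
	return 0;
}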
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig index 280e7312a9e1..fe60cd09a5ca 100644 --- a/arch/arm/mach-shmobile/Kconfig +++ b/arch/arm/mach-shmobile/Kconfig @@ -29,7 +29,6 @@ config ARCH_RMOBILE menuconfig ARCH_RENESAS bool "Renesas ARM SoCs" depends on ARCH_MULTI_V7 && MMU - select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE select ARCH_SHMOBILE select ARM_GIC select GPIOLIB diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig index 1e0aeb47bac6..7f3b83e0d324 100644 --- a/arch/arm/mach-tegra/Kconfig +++ b/arch/arm/mach-tegra/Kconfig @@ -15,6 +15,5 @@ menuconfig ARCH_TEGRA select RESET_CONTROLLER select SOC_BUS select ZONE_DMA if ARM_LPAE - select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE help This enables support for NVIDIA Tegra based systems. diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 7f14acf67caf..96a7b6cf459b 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -415,6 +415,7 @@ config CPU_V7 select CPU_CP15_MPU if !MMU select CPU_HAS_ASID if MMU select CPU_PABRT_V7 + select CPU_SPECTRE if MMU select CPU_THUMB_CAPABLE select CPU_TLB_V7 if MMU @@ -661,6 +662,7 @@ config ARM_LPAE bool "Support for the Large Physical Address Extension" depends on MMU && CPU_32v7 && !CPU_32v6 && !CPU_32v5 && \ !CPU_32v4 && !CPU_32v3 + select PHYS_ADDR_T_64BIT help Say Y if you have an ARMv7 processor supporting the LPAE page table format and you would like to access memory beyond the @@ -673,12 +675,6 @@ config ARM_PV_FIXUP def_bool y depends on ARM_LPAE && ARM_PATCH_PHYS_VIRT && ARCH_KEYSTONE -config ARCH_PHYS_ADDR_T_64BIT - def_bool ARM_LPAE - -config ARCH_DMA_ADDR_T_64BIT - bool - config ARM_THUMB bool "Support Thumb user binaries" if !CPU_THUMBONLY && EXPERT depends on CPU_THUMB_CAPABLE @@ -826,6 +822,28 @@ config CPU_BPREDICT_DISABLE help Say Y here to disable branch prediction. If unsure, say N. +config CPU_SPECTRE + bool + +config HARDEN_BRANCH_PREDICTOR + bool "Harden the branch predictor against aliasing attacks" if EXPERT + depends on CPU_SPECTRE + default y + help + Speculation attacks against some high-performance processors rely + on being able to manipulate the branch predictor for a victim + context by executing aliasing branches in the attacker context. + Such attacks can be partially mitigated by clearing + internal branch predictor state and limiting the prediction + logic in some situations. + + This config option will take CPU-specific actions to harden + the branch predictor against aliasing attacks and may rely on + specific instruction sequences or control bits being set by + the system firmware. + + If unsure, say Y. 
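To make the mechanism behind this option concrete: the series installs a per-CPU function pointer (see harden_branch_predictor() in asm/system_misc.h above) and invokes it from the fault paths whenever a user fault reports an address beyond TASK_SIZE, a pattern typical of branch-predictor mistraining rather than an ordinary bad pointer. A condensed view of the call site added to __do_user_fault() in arch/arm/mm/fault.c below:

	if (addr > TASK_SIZE)
		harden_branch_predictor();	/* per-CPU BPIALL/ICIALLU/firmware call */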
+ config TLS_REG_EMUL bool select NEED_KUSER_HELPERS diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 9dbb84923e12..7cb1699fbfc4 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -10,7 +10,7 @@ obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \ ifneq ($(CONFIG_MMU),y) obj-y += nommu.o -obj-$(CONFIG_ARM_MPU) += pmsa-v7.o +obj-$(CONFIG_ARM_MPU) += pmsa-v7.o pmsa-v8.o endif obj-$(CONFIG_ARM_PTDUMP_CORE) += dump.o @@ -97,7 +97,7 @@ obj-$(CONFIG_CPU_MOHAWK) += proc-mohawk.o obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o obj-$(CONFIG_CPU_V6) += proc-v6.o obj-$(CONFIG_CPU_V6K) += proc-v6.o -obj-$(CONFIG_CPU_V7) += proc-v7.o +obj-$(CONFIG_CPU_V7) += proc-v7.o proc-v7-bugs.o obj-$(CONFIG_CPU_V7M) += proc-v7m.o AFLAGS_proc-v6.o :=-Wa,-march=armv6 diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 2c96190e018b..bd2c739d8083 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -950,6 +950,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (ai_usermode & UM_SIGNAL) { siginfo_t si; + clear_siginfo(&si); si.si_signo = SIGBUS; si.si_errno = 0; si.si_code = BUS_ADRALN; diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c index 619f24a42d09..f448a0663b10 100644 --- a/arch/arm/mm/dma-mapping-nommu.c +++ b/arch/arm/mm/dma-mapping-nommu.c @@ -241,12 +241,3 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_teardown_dma_ops(struct device *dev) { } - -#define PREALLOC_DMA_DEBUG_ENTRIES 4096 - -static int __init dma_debug_do_init(void) -{ - dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); - return 0; -} -core_initcall(dma_debug_do_init); diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index ada8eb206a90..af27f1c22d93 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -831,7 +831,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, unsigned long attrs) { int ret; - unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + unsigned long nr_vma_pages = vma_pages(vma); unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long pfn = dma_to_pfn(dev, dma_addr); unsigned long off = vma->vm_pgoff; @@ -1151,15 +1151,6 @@ int arm_dma_supported(struct device *dev, u64 mask) return __dma_supported(dev, mask, false); } -#define PREALLOC_DMA_DEBUG_ENTRIES 4096 - -static int __init dma_debug_do_init(void) -{ - dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); - return 0; -} -core_initcall(dma_debug_do_init); - #ifdef CONFIG_ARM_DMA_USE_IOMMU static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs) diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index b75eada23d0a..84becc911ee3 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -163,6 +163,11 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr, { struct siginfo si; + if (addr > TASK_SIZE) + harden_branch_predictor(); + + clear_siginfo(&si); + #ifdef CONFIG_DEBUG_USER if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) || ((user_debug & UDBG_BUS) && (sig == SIGBUS))) { @@ -557,6 +562,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) inf->name, fsr, addr); show_pte(current->mm, addr); + clear_siginfo(&info); info.si_signo = inf->sig; info.si_errno = 0; info.si_code = inf->code; @@ -589,6 +595,7 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n", inf->name, ifsr, addr); + 
clear_siginfo(&info); info.si_signo = inf->sig; info.si_errno = 0; info.si_code = inf->code; diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 7c087961b7ce..5dd6c58d653b 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -99,6 +99,38 @@ void __init arm_mm_memblock_reserve(void) memblock_reserve(0, 1); } +static void __init adjust_lowmem_bounds_mpu(void) +{ + unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA; + + switch (pmsa) { + case MMFR0_PMSAv7: + pmsav7_adjust_lowmem_bounds(); + break; + case MMFR0_PMSAv8: + pmsav8_adjust_lowmem_bounds(); + break; + default: + break; + } +} + +static void __init mpu_setup(void) +{ + unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA; + + switch (pmsa) { + case MMFR0_PMSAv7: + pmsav7_setup(); + break; + case MMFR0_PMSAv8: + pmsav8_setup(); + break; + default: + break; + } +} + void __init adjust_lowmem_bounds(void) { phys_addr_t end; diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c index e2853bfff74e..699fa2e88725 100644 --- a/arch/arm/mm/pmsa-v7.c +++ b/arch/arm/mm/pmsa-v7.c @@ -102,7 +102,7 @@ static inline u32 irbar_read(void) static inline void rgnr_write(u32 v) { - writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RNR); + writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RNR); } /* Data-side / unified region attributes */ @@ -110,28 +110,28 @@ static inline void rgnr_write(u32 v) /* Region access control register */ static inline void dracr_write(u32 v) { - u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(15, 0); + u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(15, 0); - writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + MPU_RASR); + writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + PMSAv7_RASR); } /* Region size register */ static inline void drsr_write(u32 v) { - u32 racr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(31, 16); + u32 racr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(31, 16); - writel_relaxed(v | racr, BASEADDR_V7M_SCB + MPU_RASR); + writel_relaxed(v | racr, BASEADDR_V7M_SCB + PMSAv7_RASR); } /* Region base address register */ static inline void drbar_write(u32 v) { - writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RBAR); + writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RBAR); } static inline u32 drbar_read(void) { - return readl_relaxed(BASEADDR_V7M_SCB + MPU_RBAR); + return readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RBAR); } /* ARMv7-M only supports a unified MPU, so I-side operations are nop */ @@ -143,11 +143,6 @@ static inline unsigned long irbar_read(void) {return 0;} #endif -static int __init mpu_present(void) -{ - return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7); -} - static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region) { unsigned long subreg, bslots, sslots; @@ -161,7 +156,7 @@ static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct r bdiff = base - abase; sdiff = p2size - asize; - subreg = p2size / MPU_NR_SUBREGS; + subreg = p2size / PMSAv7_NR_SUBREGS; if ((bdiff % subreg) || (sdiff % subreg)) return false; @@ -172,17 +167,17 @@ static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct r if (bslots || sslots) { int i; - if (subreg < MPU_MIN_SUBREG_SIZE) + if (subreg < PMSAv7_MIN_SUBREG_SIZE) return false; - if (bslots + sslots > MPU_NR_SUBREGS) + if (bslots + sslots > PMSAv7_NR_SUBREGS) return false; for (i = 0; i < bslots; i++) _set_bit(i, &region->subreg); for (i = 1; i <= sslots; i++) - _set_bit(MPU_NR_SUBREGS - i, &region->subreg); + _set_bit(PMSAv7_NR_SUBREGS - i, &region->subreg); } region->base = abase; @@ -233,7 +228,7 @@ static int __init allocate_region(phys_addr_t base, phys_addr_t size, } /* MPU initialisation functions */ -void __init adjust_lowmem_bounds_mpu(void) +void __init pmsav7_adjust_lowmem_bounds(void) { phys_addr_t specified_mem_size = 0, total_mem_size = 0; struct memblock_region *reg; @@ -243,10 +238,7 @@ void __init adjust_lowmem_bounds_mpu(void) unsigned int mem_max_regions; int num, i; - if (!mpu_present()) - return; - - /* Free-up MPU_PROBE_REGION */ + /* Free-up PMSAv7_PROBE_REGION */ mpu_min_region_order = __mpu_min_region_order(); /* How many regions are supported */ @@ -301,12 +293,12 @@ void __init adjust_lowmem_bounds_mpu(void) num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem); for (i = 0; i < num; i++) { - unsigned long subreg = mem[i].size / MPU_NR_SUBREGS; + unsigned long subreg = mem[i].size / PMSAv7_NR_SUBREGS; total_mem_size += mem[i].size - subreg * hweight_long(mem[i].subreg); pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n", - &mem[i].base, &mem[i].size, MPU_NR_SUBREGS, &mem[i].subreg); + &mem[i].base, &mem[i].size, PMSAv7_NR_SUBREGS, &mem[i].subreg); } if (total_mem_size != specified_mem_size) { @@ -349,7 +341,7 @@ static int __init __mpu_min_region_order(void) u32 drbar_result, irbar_result; /* We've kept a region free for this probing */ - rgnr_write(MPU_PROBE_REGION); + rgnr_write(PMSAv7_PROBE_REGION); isb(); /* * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum @@ -388,8 +380,8 @@ static int __init mpu_setup_region(unsigned int number, phys_addr_t start, return -ENOMEM; /* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^N+1 */ - size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN; - size_data |= subregions << MPU_RSR_SD; + size_data = ((size_order - 1) << PMSAv7_RSR_SZ) | 1 << PMSAv7_RSR_EN; + size_data |= subregions << PMSAv7_RSR_SD; if (need_flush) flush_cache_all(); @@ -424,18 +416,15 @@ static int __init mpu_setup_region(unsigned int number, phys_addr_t start, /* * Set up default MPU regions, doing nothing if there is no MPU */ -void __init mpu_setup(void) +void __init pmsav7_setup(void) { int i, region = 0, err = 0; - if (!mpu_present()) - return; - /* Setup MPU (order is important) */ /* Background */ err |= mpu_setup_region(region++, 0, 32, - MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0RW, + PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0RW, 0, false); #ifdef CONFIG_XIP_KERNEL @@ -448,13 +437,13 @@ void __init mpu_setup(void) * with BG region (which is uncachable), thus we need * to clean and invalidate cache. 
*/ - bool need_flush = region == MPU_RAM_REGION; + bool need_flush = region == PMSAv7_RAM_REGION; if (!xip[i].size) continue; err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size), - MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL, + PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL, xip[i].subreg, need_flush); } #endif @@ -465,14 +454,14 @@ void __init mpu_setup(void) continue; err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size), - MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL, + PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL, mem[i].subreg, false); } /* Vectors */ #ifndef CONFIG_CPU_V7M err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE), - MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL, + PMSAv7_AP_PL1RW_PL0NA | PMSAv7_RGN_NORMAL, 0, false); #endif if (err) { diff --git a/arch/arm/mm/pmsa-v8.c b/arch/arm/mm/pmsa-v8.c new file mode 100644 index 000000000000..617a83def88a --- /dev/null +++ b/arch/arm/mm/pmsa-v8.c @@ -0,0 +1,307 @@ +/* + * Based on linux/arch/arm/pmsa-v7.c + * + * ARM PMSAv8 supporting functions. + */ + +#include <linux/memblock.h> +#include <linux/range.h> + +#include <asm/cp15.h> +#include <asm/cputype.h> +#include <asm/mpu.h> + +#include <asm/memory.h> +#include <asm/sections.h> + +#include "mm.h" + +#ifndef CONFIG_CPU_V7M + +#define PRSEL __ACCESS_CP15(c6, 0, c2, 1) +#define PRBAR __ACCESS_CP15(c6, 0, c3, 0) +#define PRLAR __ACCESS_CP15(c6, 0, c3, 1) + +static inline u32 prlar_read(void) +{ + return read_sysreg(PRLAR); +} + +static inline u32 prbar_read(void) +{ + return read_sysreg(PRBAR); +} + +static inline void prsel_write(u32 v) +{ + write_sysreg(v, PRSEL); +} + +static inline void prbar_write(u32 v) +{ + write_sysreg(v, PRBAR); +} + +static inline void prlar_write(u32 v) +{ + write_sysreg(v, PRLAR); +} +#else + +static inline u32 prlar_read(void) +{ + return readl_relaxed(BASEADDR_V7M_SCB + PMSAv8_RLAR); +} + +static inline u32 prbar_read(void) +{ + return readl_relaxed(BASEADDR_V7M_SCB + PMSAv8_RBAR); +} + +static inline void prsel_write(u32 v) +{ + writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RNR); +} + +static inline void prbar_write(u32 v) +{ + writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RBAR); +} + +static inline void prlar_write(u32 v) +{ + writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RLAR); +} + +#endif + +static struct range __initdata io[MPU_MAX_REGIONS]; +static struct range __initdata mem[MPU_MAX_REGIONS]; + +static unsigned int __initdata mpu_max_regions; + +static __init bool is_region_fixed(int number) +{ + switch (number) { + case PMSAv8_XIP_REGION: + case PMSAv8_KERNEL_REGION: + return true; + default: + return false; + } +} + +void __init pmsav8_adjust_lowmem_bounds(void) +{ + phys_addr_t mem_end; + struct memblock_region *reg; + bool first = true; + + for_each_memblock(memory, reg) { + if (first) { + phys_addr_t phys_offset = PHYS_OFFSET; + + /* + * Initially only use memory contiguous from + * PHYS_OFFSET */ + if (reg->base != phys_offset) + panic("First memory bank must be contiguous from PHYS_OFFSET"); + mem_end = reg->base + reg->size; + first = false; + } else { + /* + * memblock auto merges contiguous blocks, remove + * all blocks afterwards in one go (we can't remove + * blocks separately while iterating) + */ + pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n", + &mem_end, &reg->base); + memblock_remove(reg->base, 0 - reg->base); + break; + } + } +} +
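/*
 * Note on the memblock_remove() above: the size argument "0 - reg->base"
 * wraps around phys_addr_t, i.e. it covers everything from reg->base to
 * the top of the physical address space, so all banks after the first
 * are dropped with a single call.
 */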
+static int __init __mpu_max_regions(void) +{ + static int max_regions; + u32 mpuir; + + if (max_regions) + return max_regions; + + mpuir = read_cpuid_mputype(); + + max_regions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION; + + return max_regions; +} + +static int __init __pmsav8_setup_region(unsigned int number, u32 bar, u32 lar) +{ + if (number > mpu_max_regions + || number >= MPU_MAX_REGIONS) + return -ENOENT; + + dsb(); + prsel_write(number); + isb(); + prbar_write(bar); + prlar_write(lar); + + mpu_rgn_info.rgns[number].prbar = bar; + mpu_rgn_info.rgns[number].prlar = lar; + + mpu_rgn_info.used++; + + return 0; +} + +static int __init pmsav8_setup_ram(unsigned int number, phys_addr_t start,phys_addr_t end) +{ + u32 bar, lar; + + if (is_region_fixed(number)) + return -EINVAL; + + bar = start; + lar = (end - 1) & ~(PMSAv8_MINALIGN - 1); + + bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED; + lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN; + + return __pmsav8_setup_region(number, bar, lar); +} + +static int __init pmsav8_setup_io(unsigned int number, phys_addr_t start,phys_addr_t end) +{ + u32 bar, lar; + + if (is_region_fixed(number)) + return -EINVAL; + + bar = start; + lar = (end - 1) & ~(PMSAv8_MINALIGN - 1); + + bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN; + lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN; + + return __pmsav8_setup_region(number, bar, lar); +} + +static int __init pmsav8_setup_fixed(unsigned int number, phys_addr_t start,phys_addr_t end) +{ + u32 bar, lar; + + if (!is_region_fixed(number)) + return -EINVAL; + + bar = start; + lar = (end - 1) & ~(PMSAv8_MINALIGN - 1); + + bar |= PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED; + lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN; + + prsel_write(number); + isb(); + + if (prbar_read() != bar || prlar_read() != lar) + return -EINVAL; + + /* Reserved region was set up early, we just need a record for secondaries */ + mpu_rgn_info.rgns[number].prbar = bar; + mpu_rgn_info.rgns[number].prlar = lar; + + mpu_rgn_info.used++; + + return 0; +} + +#ifndef CONFIG_CPU_V7M +static int __init pmsav8_setup_vector(unsigned int number, phys_addr_t start,phys_addr_t end) +{ + u32 bar, lar; + + if (number == PMSAv8_KERNEL_REGION) + return -EINVAL; + + bar = start; + lar = (end - 1) & ~(PMSAv8_MINALIGN - 1); + + bar |= PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED; + lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN; + + return __pmsav8_setup_region(number, bar, lar); +} +#endif +
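/*
 * Overview of pmsav8_setup() below: "mem" starts out as the first
 * memblock bank and "io" as the full 4GiB range; the kernel image, the
 * XIP window and the vectors page are subtracted from both, and all RAM
 * is subtracted from "io". The fixed XIP/kernel regions programmed in
 * head-nommu.S are then re-validated, the remaining "io" ranges become
 * Device-nGnRnE regions, the remaining "mem" ranges become Normal
 * memory, and the vectors page gets its own region last.
 */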
+void __init pmsav8_setup(void) +{ + int i, err = 0; + int region = PMSAv8_KERNEL_REGION; + + /* How many regions are supported? */ + mpu_max_regions = __mpu_max_regions(); + + /* RAM: single chunk of memory */ + add_range(mem, ARRAY_SIZE(mem), 0, memblock.memory.regions[0].base, + memblock.memory.regions[0].base + memblock.memory.regions[0].size); + + /* IO: cover full 4G range */ + add_range(io, ARRAY_SIZE(io), 0, 0, 0xffffffff); + + /* RAM and IO: exclude kernel */ + subtract_range(mem, ARRAY_SIZE(mem), __pa(KERNEL_START), __pa(KERNEL_END)); + subtract_range(io, ARRAY_SIZE(io), __pa(KERNEL_START), __pa(KERNEL_END)); + +#ifdef CONFIG_XIP_KERNEL + /* RAM and IO: exclude xip */ + subtract_range(mem, ARRAY_SIZE(mem), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom)); + subtract_range(io, ARRAY_SIZE(io), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom)); +#endif + +#ifndef CONFIG_CPU_V7M + /* RAM and IO: exclude vectors */ + subtract_range(mem, ARRAY_SIZE(mem), vectors_base, vectors_base + 2 * PAGE_SIZE); + subtract_range(io, ARRAY_SIZE(io), vectors_base, vectors_base + 2 * PAGE_SIZE); +#endif + /* IO: exclude RAM */ + for (i = 0; i < ARRAY_SIZE(mem); i++) + subtract_range(io, ARRAY_SIZE(io), mem[i].start, mem[i].end); + + /* Now program MPU */ + +#ifdef CONFIG_XIP_KERNEL + /* ROM */ + err |= pmsav8_setup_fixed(PMSAv8_XIP_REGION, CONFIG_XIP_PHYS_ADDR, __pa(_exiprom)); +#endif + /* Kernel */ + err |= pmsav8_setup_fixed(region++, __pa(KERNEL_START), __pa(KERNEL_END)); + + + /* IO */ + for (i = 0; i < ARRAY_SIZE(io); i++) { + if (!io[i].end) + continue; + + err |= pmsav8_setup_io(region++, io[i].start, io[i].end); + } + + /* RAM */ + for (i = 0; i < ARRAY_SIZE(mem); i++) { + if (!mem[i].end) + continue; + + err |= pmsav8_setup_ram(region++, mem[i].start, mem[i].end); + } + + /* Vectors */ +#ifndef CONFIG_CPU_V7M + err |= pmsav8_setup_vector(region++, vectors_base, vectors_base + 2 * PAGE_SIZE); +#endif + if (err) + pr_warn("MPU region initialization failure! %d\n", err); + else + pr_info("Using ARM PMSAv8 Compliant MPU. Used %d of %d regions\n", + mpu_rgn_info.used, mpu_max_regions); +} diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index f10e31d0730a..81d0efb055c6 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -273,13 +273,14 @@ mcr p15, 0, ip, c7, c10, 4 @ data write barrier .endm -.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0 +.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0 .type \name\()_processor_functions, #object .align 2 ENTRY(\name\()_processor_functions) .word \dabort .word \pabort .word cpu_\name\()_proc_init + .word \bugs .word cpu_\name\()_proc_fin .word cpu_\name\()_reset .word cpu_\name\()_do_idle diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S index c6141a5435c3..f8d45ad2a515 100644 --- a/arch/arm/mm/proc-v7-2level.S +++ b/arch/arm/mm/proc-v7-2level.S @@ -41,11 +41,6 @@ * even on Cortex-A8 revisions not affected by 430973. * If IBE is not set, the flush BTAC/BTB won't do anything. 
*/ -ENTRY(cpu_ca8_switch_mm) -#ifdef CONFIG_MMU - mov r2, #0 - mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB -#endif ENTRY(cpu_v7_switch_mm) #ifdef CONFIG_MMU mmid r1, r1 @ get mm->context.id @@ -66,7 +61,6 @@ ENTRY(cpu_v7_switch_mm) #endif bx lr ENDPROC(cpu_v7_switch_mm) -ENDPROC(cpu_ca8_switch_mm) /* * cpu_v7_set_pte_ext(ptep, pte) diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c new file mode 100644 index 000000000000..5544b82a2e7a --- /dev/null +++ b/arch/arm/mm/proc-v7-bugs.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/arm-smccc.h> +#include <linux/kernel.h> +#include <linux/psci.h> +#include <linux/smp.h> + +#include <asm/cp15.h> +#include <asm/cputype.h> +#include <asm/proc-fns.h> +#include <asm/system_misc.h> + +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR +DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn); + +extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); +extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); +extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); +extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); + +static void harden_branch_predictor_bpiall(void) +{ + write_sysreg(0, BPIALL); +} + +static void harden_branch_predictor_iciallu(void) +{ + write_sysreg(0, ICIALLU); +} + +static void __maybe_unused call_smc_arch_workaround_1(void) +{ + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); +} + +static void __maybe_unused call_hvc_arch_workaround_1(void) +{ + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); +} + +static void cpu_v7_spectre_init(void) +{ + const char *spectre_v2_method = NULL; + int cpu = smp_processor_id(); + + if (per_cpu(harden_branch_predictor_fn, cpu)) + return; + + switch (read_cpuid_part()) { + case ARM_CPU_PART_CORTEX_A8: + case ARM_CPU_PART_CORTEX_A9: + case ARM_CPU_PART_CORTEX_A12: + case ARM_CPU_PART_CORTEX_A17: + case ARM_CPU_PART_CORTEX_A73: + case ARM_CPU_PART_CORTEX_A75: + if (processor.switch_mm != cpu_v7_bpiall_switch_mm) + goto bl_error; + per_cpu(harden_branch_predictor_fn, cpu) = + harden_branch_predictor_bpiall; + spectre_v2_method = "BPIALL"; + break; + + case ARM_CPU_PART_CORTEX_A15: + case ARM_CPU_PART_BRAHMA_B15: + if (processor.switch_mm != cpu_v7_iciallu_switch_mm) + goto bl_error; + per_cpu(harden_branch_predictor_fn, cpu) = + harden_branch_predictor_iciallu; + spectre_v2_method = "ICIALLU"; + break; + +#ifdef CONFIG_ARM_PSCI + default: + /* Other ARM CPUs require no workaround */ + if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) + break; + /* fallthrough */ + /* Cortex A57/A72 require firmware workaround */ + case ARM_CPU_PART_CORTEX_A57: + case ARM_CPU_PART_CORTEX_A72: { + struct arm_smccc_res res; + + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) + break; + + switch (psci_ops.conduit) { + case PSCI_CONDUIT_HVC: + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_1, &res); + if ((int)res.a0 != 0) + break; + if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu) + goto bl_error; + per_cpu(harden_branch_predictor_fn, cpu) = + call_hvc_arch_workaround_1; + processor.switch_mm = cpu_v7_hvc_switch_mm; + spectre_v2_method = "hypervisor"; + break; + + case PSCI_CONDUIT_SMC: + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_1, &res); + if ((int)res.a0 != 0) + break; + if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu) + goto bl_error; + per_cpu(harden_branch_predictor_fn, cpu) = + 
call_smc_arch_workaround_1; + processor.switch_mm = cpu_v7_smc_switch_mm; + spectre_v2_method = "firmware"; + break; + + default: + break; + } + } +#endif + } + + if (spectre_v2_method) + pr_info("CPU%u: Spectre v2: using %s workaround\n", + smp_processor_id(), spectre_v2_method); + return; + +bl_error: + pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n", + cpu); +} +#else +static void cpu_v7_spectre_init(void) +{ +} +#endif + +static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned, + u32 mask, const char *msg) +{ + u32 aux_cr; + + asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr)); + + if ((aux_cr & mask) != mask) { + if (!*warned) + pr_err("CPU%u: %s", smp_processor_id(), msg); + *warned = true; + return false; + } + return true; +} + +static DEFINE_PER_CPU(bool, spectre_warned); + +static bool check_spectre_auxcr(bool *warned, u32 bit) +{ + return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) && + cpu_v7_check_auxcr_set(warned, bit, + "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n"); +} + +void cpu_v7_ca8_ibe(void) +{ + if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6))) + cpu_v7_spectre_init(); +} + +void cpu_v7_ca15_ibe(void) +{ + if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0))) + cpu_v7_spectre_init(); +} + +void cpu_v7_bugs_init(void) +{ + cpu_v7_spectre_init(); +} diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index b528a15f460d..6fe52819e014 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -9,6 +9,7 @@ * * This is the "shell" of the ARMv7 processor support. */ +#include <linux/arm-smccc.h> #include <linux/init.h> #include <linux/linkage.h> #include <asm/assembler.h> @@ -93,6 +94,37 @@ ENTRY(cpu_v7_dcache_clean_area) ret lr ENDPROC(cpu_v7_dcache_clean_area) +#ifdef CONFIG_ARM_PSCI + .arch_extension sec +ENTRY(cpu_v7_smc_switch_mm) + stmfd sp!, {r0 - r3} + movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1 + movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1 + smc #0 + ldmfd sp!, {r0 - r3} + b cpu_v7_switch_mm +ENDPROC(cpu_v7_smc_switch_mm) + .arch_extension virt +ENTRY(cpu_v7_hvc_switch_mm) + stmfd sp!, {r0 - r3} + movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1 + movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1 + hvc #0 + ldmfd sp!, {r0 - r3} + b cpu_v7_switch_mm +ENDPROC(cpu_v7_hvc_switch_mm) +#endif +ENTRY(cpu_v7_iciallu_switch_mm) + mov r3, #0 + mcr p15, 0, r3, c7, c5, 0 @ ICIALLU + b cpu_v7_switch_mm +ENDPROC(cpu_v7_iciallu_switch_mm) +ENTRY(cpu_v7_bpiall_switch_mm) + mov r3, #0 + mcr p15, 0, r3, c7, c5, 6 @ flush BTAC/BTB + b cpu_v7_switch_mm +ENDPROC(cpu_v7_bpiall_switch_mm) + string cpu_v7_name, "ARMv7 Processor" .align @@ -158,31 +190,6 @@ ENTRY(cpu_v7_do_resume) ENDPROC(cpu_v7_do_resume) #endif -/* - * Cortex-A8 - */ - globl_equ cpu_ca8_proc_init, cpu_v7_proc_init - globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin - globl_equ cpu_ca8_reset, cpu_v7_reset - globl_equ cpu_ca8_do_idle, cpu_v7_do_idle - globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area - globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext - globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size -#ifdef CONFIG_ARM_CPU_SUSPEND - globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend - globl_equ cpu_ca8_do_resume, cpu_v7_do_resume -#endif - -/* - * Cortex-A9 processor functions - */ - globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init - globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin - globl_equ cpu_ca9mp_reset, cpu_v7_reset - globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle - globl_equ
cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area - globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm - globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext .globl cpu_ca9mp_suspend_size .equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2 #ifdef CONFIG_ARM_CPU_SUSPEND @@ -547,12 +554,79 @@ __v7_setup_stack: __INITDATA + .weak cpu_v7_bugs_init + @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) - define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 + define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init + +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR + @ generic v7 bpiall on context switch + globl_equ cpu_v7_bpiall_proc_init, cpu_v7_proc_init + globl_equ cpu_v7_bpiall_proc_fin, cpu_v7_proc_fin + globl_equ cpu_v7_bpiall_reset, cpu_v7_reset + globl_equ cpu_v7_bpiall_do_idle, cpu_v7_do_idle + globl_equ cpu_v7_bpiall_dcache_clean_area, cpu_v7_dcache_clean_area + globl_equ cpu_v7_bpiall_set_pte_ext, cpu_v7_set_pte_ext + globl_equ cpu_v7_bpiall_suspend_size, cpu_v7_suspend_size +#ifdef CONFIG_ARM_CPU_SUSPEND + globl_equ cpu_v7_bpiall_do_suspend, cpu_v7_do_suspend + globl_equ cpu_v7_bpiall_do_resume, cpu_v7_do_resume +#endif + define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init + +#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_bpiall_processor_functions +#else +#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_processor_functions +#endif + #ifndef CONFIG_ARM_LPAE - define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 - define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 + @ Cortex-A8 - always needs bpiall switch_mm implementation + globl_equ cpu_ca8_proc_init, cpu_v7_proc_init + globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin + globl_equ cpu_ca8_reset, cpu_v7_reset + globl_equ cpu_ca8_do_idle, cpu_v7_do_idle + globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area + globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext + globl_equ cpu_ca8_switch_mm, cpu_v7_bpiall_switch_mm + globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size +#ifdef CONFIG_ARM_CPU_SUSPEND + globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend + globl_equ cpu_ca8_do_resume, cpu_v7_do_resume +#endif + define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca8_ibe + + @ Cortex-A9 - needs more registers preserved across suspend/resume + @ and bpiall switch_mm for hardening + globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init + globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin + globl_equ cpu_ca9mp_reset, cpu_v7_reset + globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle + globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR + globl_equ cpu_ca9mp_switch_mm, cpu_v7_bpiall_switch_mm +#else + globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm +#endif + globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext + define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init #endif + + @ Cortex-A15 - needs iciallu switch_mm for hardening + globl_equ cpu_ca15_proc_init, cpu_v7_proc_init + globl_equ cpu_ca15_proc_fin, cpu_v7_proc_fin + globl_equ cpu_ca15_reset, cpu_v7_reset + globl_equ cpu_ca15_do_idle, cpu_v7_do_idle + globl_equ cpu_ca15_dcache_clean_area, cpu_v7_dcache_clean_area +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR + globl_equ cpu_ca15_switch_mm, cpu_v7_iciallu_switch_mm +#else + globl_equ cpu_ca15_switch_mm, 
cpu_v7_switch_mm +#endif + globl_equ cpu_ca15_set_pte_ext, cpu_v7_set_pte_ext + globl_equ cpu_ca15_suspend_size, cpu_v7_suspend_size + globl_equ cpu_ca15_do_suspend, cpu_v7_do_suspend + globl_equ cpu_ca15_do_resume, cpu_v7_do_resume + define_processor_functions ca15, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca15_ibe #ifdef CONFIG_CPU_PJ4B define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 #endif @@ -669,7 +743,7 @@ __v7_ca7mp_proc_info: __v7_ca12mp_proc_info: .long 0x410fc0d0 .long 0xff0ffff0 - __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup + __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS .size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info /* @@ -679,7 +753,7 @@ __v7_ca12mp_proc_info: __v7_ca15mp_proc_info: .long 0x410fc0f0 .long 0xff0ffff0 - __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup + __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup, proc_fns = ca15_processor_functions .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info /* @@ -689,7 +763,7 @@ __v7_ca15mp_proc_info: __v7_b15mp_proc_info: .long 0x420f00f0 .long 0xff0ffff0 - __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, cache_fns = b15_cache_fns + __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, proc_fns = ca15_processor_functions, cache_fns = b15_cache_fns .size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info /* @@ -699,9 +773,25 @@ __v7_b15mp_proc_info: __v7_ca17mp_proc_info: .long 0x410fc0e0 .long 0xff0ffff0 - __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup + __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS .size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info + /* ARM Ltd. Cortex A73 processor */ + .type __v7_ca73_proc_info, #object +__v7_ca73_proc_info: + .long 0x410fd090 + .long 0xff0ffff0 + __v7_proc __v7_ca73_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS + .size __v7_ca73_proc_info, . - __v7_ca73_proc_info + + /* ARM Ltd. Cortex A75 processor */ + .type __v7_ca75_proc_info, #object +__v7_ca75_proc_info: + .long 0x410fd0a0 + .long 0xff0ffff0 + __v7_proc __v7_ca75_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS + .size __v7_ca75_proc_info, . - __v7_ca75_proc_info + /* * Qualcomm Inc. Krait processors. */ diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index b5030e1a41d8..6e8b71613039 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -84,7 +84,7 @@ * * 1. First argument is passed using the arm 32bit registers and rest of the * arguments are passed on stack scratch space. - * 2. First callee-saved arugument is mapped to arm 32 bit registers and rest + * 2. First callee-saved argument is mapped to arm 32 bit registers and rest * arguments are mapped to scratch space on stack. * 3. We need two 64 bit temp registers to do complex operations on eBPF * registers. @@ -234,18 +234,11 @@ static void jit_fill_hole(void *area, unsigned int size) #define SCRATCH_SIZE 80 /* total stack size used in JITed code */ -#define _STACK_SIZE \ - (ctx->prog->aux->stack_depth + \ - + SCRATCH_SIZE + \ - + 4 /* extra for skb_copy_bits buffer */) - -#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT) +#define _STACK_SIZE (ctx->prog->aux->stack_depth + SCRATCH_SIZE) +#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT) /* Get the offset of eBPF REGISTERs stored on scratch space. 
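Slots are addressed relative to SP and count down from the top of the frame: slot off lives at SP + STACK_SIZE - off.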
*/ -#define STACK_VAR(off) (STACK_SIZE-off-4) - -/* Offset of skb_copy_bits buffer */ -#define SKB_BUFFER STACK_VAR(SCRATCH_SIZE) +#define STACK_VAR(off) (STACK_SIZE - off) #if __LINUX_ARM_ARCH__ < 7 @@ -708,7 +701,7 @@ static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk, } /* dst = dst >> src */ -static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk, +static inline void emit_a32_rsh_r64(const u8 dst[], const u8 src[], bool dstk, bool sstk, struct jit_ctx *ctx) { const u8 *tmp = bpf2a32[TMP_REG_1]; const u8 *tmp2 = bpf2a32[TMP_REG_2]; @@ -724,7 +717,7 @@ static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk, emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); } - /* Do LSH operation */ + /* Do RSH operation */ emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); @@ -774,7 +767,7 @@ static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk, } /* dst = dst >> val */ -static inline void emit_a32_lsr_i64(const u8 dst[], bool dstk, +static inline void emit_a32_rsh_i64(const u8 dst[], bool dstk, const u32 val, struct jit_ctx *ctx) { const u8 *tmp = bpf2a32[TMP_REG_1]; const u8 *tmp2 = bpf2a32[TMP_REG_2]; @@ -1199,8 +1192,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) s32 jmp_offset; #define check_imm(bits, imm) do { \ - if ((((imm) > 0) && ((imm) >> (bits))) || \ - (((imm) < 0) && (~(imm) >> (bits)))) { \ + if ((imm) >= (1 << ((bits) - 1)) || \ + (imm) < -(1 << ((bits) - 1))) { \ pr_info("[%2d] imm=%d(0x%x) out of range\n", \ i, imm, imm); \ return -EINVAL; \ @@ -1330,7 +1323,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case BPF_ALU64 | BPF_RSH | BPF_K: if (unlikely(imm > 63)) return -EINVAL; - emit_a32_lsr_i64(dst, dstk, imm, ctx); + emit_a32_rsh_i64(dst, dstk, imm, ctx); break; /* dst = dst << src */ case BPF_ALU64 | BPF_LSH | BPF_X: @@ -1338,7 +1331,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) break; /* dst = dst >> src */ case BPF_ALU64 | BPF_RSH | BPF_X: - emit_a32_lsr_r64(dst, src, dstk, sstk, ctx); + emit_a32_rsh_r64(dst, src, dstk, sstk, ctx); break; /* dst = dst >> src (signed) */ case BPF_ALU64 | BPF_ARSH | BPF_X: @@ -1452,83 +1445,6 @@ exit: emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code)); break; - /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ - case BPF_LD | BPF_ABS | BPF_W: - case BPF_LD | BPF_ABS | BPF_H: - case BPF_LD | BPF_ABS | BPF_B: - /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */ - case BPF_LD | BPF_IND | BPF_W: - case BPF_LD | BPF_IND | BPF_H: - case BPF_LD | BPF_IND | BPF_B: - { - const u8 r4 = bpf2a32[BPF_REG_6][1]; /* r4 = ptr to sk_buff */ - const u8 r0 = bpf2a32[BPF_REG_0][1]; /*r0: struct sk_buff *skb*/ - /* rtn value */ - const u8 r1 = bpf2a32[BPF_REG_0][0]; /* r1: int k */ - const u8 r2 = bpf2a32[BPF_REG_1][1]; /* r2: unsigned int size */ - const u8 r3 = bpf2a32[BPF_REG_1][0]; /* r3: void *buffer */ - const u8 r6 = bpf2a32[TMP_REG_1][1]; /* r6: void *(*func)(..) 
*/ - int size; - - /* Setting up first argument */ - emit(ARM_MOV_R(r0, r4), ctx); - - /* Setting up second argument */ - emit_a32_mov_i(r1, imm, false, ctx); - if (BPF_MODE(code) == BPF_IND) - emit_a32_alu_r(r1, src_lo, false, sstk, ctx, - false, false, BPF_ADD); - - /* Setting up third argument */ - switch (BPF_SIZE(code)) { - case BPF_W: - size = 4; - break; - case BPF_H: - size = 2; - break; - case BPF_B: - size = 1; - break; - default: - return -EINVAL; - } - emit_a32_mov_i(r2, size, false, ctx); - - /* Setting up fourth argument */ - emit(ARM_ADD_I(r3, ARM_SP, imm8m(SKB_BUFFER)), ctx); - - /* Setting up function pointer to call */ - emit_a32_mov_i(r6, (unsigned int)bpf_load_pointer, false, ctx); - emit_blx_r(r6, ctx); - - emit(ARM_EOR_R(r1, r1, r1), ctx); - /* Check if return address is NULL or not. - * if NULL then jump to epilogue - * else continue to load the value from retn address - */ - emit(ARM_CMP_I(r0, 0), ctx); - jmp_offset = epilogue_offset(ctx); - check_imm24(jmp_offset); - _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx); - - /* Load value from the address */ - switch (BPF_SIZE(code)) { - case BPF_W: - emit(ARM_LDR_I(r0, r0, 0), ctx); - emit_rev32(r0, r0, ctx); - break; - case BPF_H: - emit(ARM_LDRH_I(r0, r0, 0), ctx); - emit_rev16(r0, r0, ctx); - break; - case BPF_B: - emit(ARM_LDRB_I(r0, r0, 0), ctx); - /* No need to reverse */ - break; - } - break; - } /* ST: *(size *)(dst + off) = imm */ case BPF_ST | BPF_MEM | BPF_W: case BPF_ST | BPF_MEM | BPF_H: diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index af4ee2cef2f9..35d0f823e823 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -218,8 +218,7 @@ static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) { siginfo_t info; - memset(&info, 0, sizeof(info)); - + clear_siginfo(&info); info.si_signo = SIGFPE; info.si_code = sicode; info.si_addr = (void __user *)(instruction_pointer(regs) - 4); diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index ba7f4c8f5c3e..8073625371f5 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -89,6 +89,17 @@ int xen_unmap_domain_gfn_range(struct vm_area_struct *vma, } EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range); +/* Not used by XENFEAT_auto_translated guests. 
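Arm guests are always autotranslated, so this is a stub that only returns -ENOSYS.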
*/ +int xen_remap_domain_mfn_array(struct vm_area_struct *vma, + unsigned long addr, + xen_pfn_t *mfn, int nr, + int *err_ptr, pgprot_t prot, + unsigned int domid, struct page **pages) +{ + return -ENOSYS; +} +EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array); + static void xen_read_wallclock(struct timespec64 *ts) { u32 version; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index eb2cf4938f6d..9795b59aa28a 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -7,16 +7,19 @@ config ARM64 select ACPI_REDUCED_HARDWARE_ONLY if ACPI select ACPI_MCFG if ACPI select ACPI_SPCR_TABLE if ACPI + select ACPI_PPTT if ACPI select ARCH_CLOCKSOURCE_DATA select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI select ARCH_HAS_ELF_RANDOMIZE + select ARCH_HAS_FAST_MULTIPLIER select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA select ARCH_HAS_KCOV select ARCH_HAS_MEMBARRIER_SYNC_CORE + select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_MEMORY select ARCH_HAS_SG_CHAIN select ARCH_HAS_STRICT_KERNEL_RWX @@ -105,7 +108,6 @@ config ARM64 select HAVE_CONTEXT_TRACKING select HAVE_DEBUG_BUGVERBOSE select HAVE_DEBUG_KMEMLEAK - select HAVE_DMA_API_DEBUG select HAVE_DMA_CONTIGUOUS select HAVE_DYNAMIC_FTRACE select HAVE_EFFICIENT_UNALIGNED_ACCESS @@ -133,6 +135,8 @@ config ARM64 select IRQ_FORCED_THREADING select MODULES_USE_ELF_RELA select MULTI_IRQ_HANDLER + select NEED_DMA_MAP_STATE + select NEED_SG_DMA_LENGTH select NO_BOOTMEM select OF select OF_EARLY_FLATTREE @@ -142,6 +146,7 @@ config ARM64 select POWER_SUPPLY select REFCOUNT_FULL select SPARSE_IRQ + select SWIOTLB select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK help @@ -150,9 +155,6 @@ config ARM64 config 64BIT def_bool y -config ARCH_PHYS_ADDR_T_64BIT - def_bool y - config MMU def_bool y @@ -237,24 +239,9 @@ config ZONE_DMA32 config HAVE_GENERIC_GUP def_bool y -config ARCH_DMA_ADDR_T_64BIT - def_bool y - -config NEED_DMA_MAP_STATE - def_bool y - -config NEED_SG_DMA_LENGTH - def_bool y - config SMP def_bool y -config SWIOTLB - def_bool y - -config IOMMU_HELPER - def_bool SWIOTLB - config KERNEL_MODE_NEON def_bool y @@ -938,6 +925,15 @@ config HARDEN_EL2_VECTORS If unsure, say Y. +config ARM64_SSBD + bool "Speculative Store Bypass Disable" if EXPERT + default y + help + This enables mitigation of the bypassing of previous stores + by speculative loads. + + If unsure, say Y. + menuconfig ARMV8_DEPRECATED bool "Emulate deprecated/obsolete ARMv8 instructions" depends on COMPAT @@ -1049,6 +1045,7 @@ config ARM64_PAN config ARM64_LSE_ATOMICS bool "Atomic instructions" + default y help As part of the Large System Extensions, ARMv8.1 introduces new atomic instructions that are designed specifically to scale in @@ -1057,7 +1054,8 @@ config ARM64_LSE_ATOMICS Say Y here to make use of these instructions for the in-kernel atomic routines. This incurs a small overhead on CPUs that do not support these instructions and requires the kernel to be - built with binutils >= 2.25. + built with binutils >= 2.25 in order for the new instructions + to be used. 
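As a minimal user-space sketch of what this option changes at the instruction level (an illustration, not part of this patch; the function name and build flags are assumptions):

/* Build once with -march=armv8-a and once with -march=armv8.1-a: the
 * former lowers the builtin to an LDXR/STXR retry loop, while the
 * latter emits a single LDADDAL instruction. The runtime patching that
 * this option enables gives the kernel the same effect on LSE-capable
 * CPUs. */
#include <stdint.h>

int64_t counter;

int64_t add_and_fetch(int64_t delta)
{
	return __atomic_add_fetch(&counter, delta, __ATOMIC_SEQ_CST);
}

On cores without LSE the kernel keeps its LL/SC sequences, which is the small overhead the help text above mentions.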
config ARM64_VHE bool "Enable support for Virtualization Host Extensions (VHE)" diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 87f7d2f9f17c..3c353b4715dc 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -78,7 +78,7 @@ LDFLAGS += -maarch64linux UTS_MACHINE := aarch64 endif -CHECKFLAGS += -D__aarch64__ -m64 +CHECKFLAGS += -D__aarch64__ ifeq ($(CONFIG_ARM64_MODULE_PLTS),y) KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi index c89d0c307f8d..e6b059378dc0 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi @@ -17,6 +17,7 @@ /dts-v1/; #include <dt-bindings/reset/altr,rst-mgr-s10.h> #include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/clock/stratix10-clock.h> / { compatible = "altr,socfpga-stratix10"; @@ -92,9 +93,32 @@ interrupt-parent = <&intc>; ranges = <0 0 0 0xffffffff>; - clkmgr@ffd1000 { - compatible = "altr,clk-mgr"; + clkmgr: clock-controller@ffd10000 { + compatible = "intel,stratix10-clkmgr"; reg = <0xffd10000 0x1000>; + #clock-cells = <1>; + }; + + clocks { + cb_intosc_hs_div2_clk: cb-intosc-hs-div2-clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + }; + + cb_intosc_ls_clk: cb-intosc-ls-clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + }; + + f2s_free_clk: f2s-free-clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + }; + + osc1: osc1 { + #clock-cells = <0>; + compatible = "fixed-clock"; + }; }; gmac0: ethernet@ff800000 { @@ -105,6 +129,8 @@ mac-address = [00 00 00 00 00 00]; resets = <&rst EMAC0_RESET>; reset-names = "stmmaceth"; + clocks = <&clkmgr STRATIX10_EMAC0_CLK>; + clock-names = "stmmaceth"; status = "disabled"; }; @@ -116,6 +142,8 @@ mac-address = [00 00 00 00 00 00]; resets = <&rst EMAC1_RESET>; reset-names = "stmmaceth"; + clocks = <&clkmgr STRATIX10_EMAC1_CLK>; + clock-names = "stmmaceth"; status = "disabled"; }; @@ -127,6 +155,8 @@ mac-address = [00 00 00 00 00 00]; resets = <&rst EMAC2_RESET>; reset-names = "stmmaceth"; + clocks = <&clkmgr STRATIX10_EMAC2_CLK>; + clock-names = "stmmaceth"; status = "disabled"; }; @@ -177,6 +207,7 @@ reg = <0xffc02800 0x100>; interrupts = <0 103 4>; resets = <&rst I2C0_RESET>; + clocks = <&clkmgr STRATIX10_L4_SP_CLK>; status = "disabled"; }; @@ -187,6 +218,7 @@ reg = <0xffc02900 0x100>; interrupts = <0 104 4>; resets = <&rst I2C1_RESET>; + clocks = <&clkmgr STRATIX10_L4_SP_CLK>; status = "disabled"; }; @@ -197,6 +229,7 @@ reg = <0xffc02a00 0x100>; interrupts = <0 105 4>; resets = <&rst I2C2_RESET>; + clocks = <&clkmgr STRATIX10_L4_SP_CLK>; status = "disabled"; }; @@ -207,6 +240,7 @@ reg = <0xffc02b00 0x100>; interrupts = <0 106 4>; resets = <&rst I2C3_RESET>; + clocks = <&clkmgr STRATIX10_L4_SP_CLK>; status = "disabled"; }; @@ -217,6 +251,7 @@ reg = <0xffc02c00 0x100>; interrupts = <0 107 4>; resets = <&rst I2C4_RESET>; + clocks = <&clkmgr STRATIX10_L4_SP_CLK>; status = "disabled"; }; @@ -229,6 +264,9 @@ fifo-depth = <0x400>; resets = <&rst SDMMC_RESET>; reset-names = "reset"; + clocks = <&clkmgr STRATIX10_L4_MP_CLK>, + <&clkmgr STRATIX10_SDMMC_CLK>; + clock-names = "biu", "ciu"; status = "disabled"; }; @@ -237,6 +275,25 @@ reg = <0xffe00000 0x100000>; }; + pdma: pdma@ffda0000 { + compatible = "arm,pl330", "arm,primecell"; + reg = <0xffda0000 0x1000>; + interrupts = <0 81 4>, + <0 82 4>, + <0 83 4>, + <0 84 4>, + <0 85 4>, + <0 86 4>, + <0 87 4>, + <0 88 4>, + <0 89 4>; + #dma-cells = 
<1>; + #dma-channels = <8>; + #dma-requests = <32>; + clocks = <&clkmgr STRATIX10_L4_MAIN_CLK>; + clock-names = "apb_pclk"; + }; + rst: rstmgr@ffd11000 { #reset-cells = <1>; compatible = "altr,rst-mgr"; @@ -288,24 +345,32 @@ compatible = "snps,dw-apb-timer"; interrupts = <0 113 4>; reg = <0xffc03000 0x100>; + clocks = <&clkmgr STRATIX10_L4_SP_CLK>; + clock-names = "timer"; }; timer1: timer1@ffc03100 { compatible = "snps,dw-apb-timer"; interrupts = <0 114 4>; reg = <0xffc03100 0x100>; + clocks = <&clkmgr STRATIX10_L4_SP_CLK>; + clock-names = "timer"; }; timer2: timer2@ffd00000 { compatible = "snps,dw-apb-timer"; interrupts = <0 115 4>; reg = <0xffd00000 0x100>; + clocks = <&clkmgr STRATIX10_L4_SP_CLK>; + clock-names = "timer"; }; timer3: timer3@ffd00100 { compatible = "snps,dw-apb-timer"; interrupts = <0 116 4>; reg = <0xffd00100 0x100>; + clocks = <&clkmgr STRATIX10_L4_SP_CLK>; + clock-names = "timer"; }; uart0: serial0@ffc02000 { @@ -315,6 +380,7 @@ reg-shift = <2>; reg-io-width = <4>; resets = <&rst UART0_RESET>; + clocks = <&clkmgr STRATIX10_L4_SP_CLK>; status = "disabled"; }; @@ -325,6 +391,7 @@ reg-shift = <2>; reg-io-width = <4>; resets = <&rst UART1_RESET>; + clocks = <&clkmgr STRATIX10_L4_SP_CLK>; status = "disabled"; }; @@ -387,5 +454,17 @@ resets = <&rst WATCHDOG3_RESET>; status = "disabled"; }; + + eccmgr { + compatible = "altr,socfpga-s10-ecc-manager"; + interrupts = <0 15 4>, <0 95 4>; + interrupt-controller; + #interrupt-cells = <2>; + + sdramedac { + compatible = "altr,sdram-edac-s10"; + interrupts = <16 4>, <48 4>; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts index eaf13fe29287..f9b1ef12db48 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts @@ -50,6 +50,21 @@ /* We expect the bootloader to fill in the reg */ reg = <0 0 0 0>; }; + + ref_033v: 033-v-ref { + compatible = "regulator-fixed"; + regulator-name = "0.33V"; + regulator-min-microvolt = <330000>; + regulator-max-microvolt = <330000>; + }; + + soc { + clocks { + osc1 { + clock-frequency = <25000000>; + }; + }; + }; }; &gpio1 { @@ -79,7 +94,7 @@ rxd2-skew-ps = <420>; /* 0ps */ rxd3-skew-ps = <420>; /* 0ps */ txen-skew-ps = <0>; /* -420ps */ - txc-skew-ps = <1860>; /* 960ps */ + txc-skew-ps = <900>; /* 0ps */ rxdv-skew-ps = <420>; /* 0ps */ rxc-skew-ps = <1680>; /* 780ps */ }; @@ -105,3 +120,30 @@ &watchdog0 { status = "okay"; }; + +&i2c1 { + status = "okay"; + clock-frequency = <100000>; + + adc@14 { + compatible = "lltc,ltc2497"; + reg = <0x14>; + vref-supply = <&ref_033v>; + }; + + temp@4c { + compatible = "maxim,max1619"; + reg = <0x4c>; + }; + + eeprom@51 { + compatible = "atmel,24c32"; + reg = <0x51>; + pagesize = <32>; + }; + + rtc@68 { + compatible = "dallas,ds1339"; + reg = <0x68>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c-pins.dtsi index 24552f19b3fa..6a573875d45a 100644 --- a/arch/arm64/boot/dts/qcom/apq8096-db820c-pins.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8096-db820c-pins.dtsi @@ -36,4 +36,30 @@ drive-strength = <2>; /* 2 MA */ }; }; + + blsp1_uart1_default: blsp1_uart1_default { + mux { + pins = "gpio41", "gpio42", "gpio43", "gpio44"; + function = "blsp_uart2"; + }; + + config { + pins = "gpio41", "gpio42", "gpio43", "gpio44"; + drive-strength = <16>; + bias-disable; + }; + }; + + blsp1_uart1_sleep: blsp1_uart1_sleep { + mux { + pins = "gpio41", "gpio42", "gpio43", 
"gpio44"; + function = "gpio"; + }; + + config { + pins = "gpio41", "gpio42", "gpio43", "gpio44"; + drive-strength = <2>; + bias-disable; + }; + }; }; diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi index 59b29ddfb6e9..6167af955659 100644 --- a/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi @@ -14,6 +14,28 @@ }; }; + bt_en_gpios: bt_en_gpios { + pinconf { + pins = "gpio19"; + function = PMIC_GPIO_FUNC_NORMAL; + output-low; + power-source = <PM8994_GPIO_S4>; // 1.8V + qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>; + bias-pull-down; + }; + }; + + wlan_en_gpios: wlan_en_gpios { + pinconf { + pins = "gpio8"; + function = PMIC_GPIO_FUNC_NORMAL; + output-low; + power-source = <PM8994_GPIO_S4>; // 1.8V + qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>; + bias-pull-down; + }; + }; + volume_up_gpio: pm8996_gpio2 { pinconf { pins = "gpio2"; @@ -26,6 +48,16 @@ }; }; + divclk4_pin_a: divclk4 { + pinconf { + pins = "gpio18"; + function = PMIC_GPIO_FUNC_FUNC2; + + bias-disable; + power-source = <PM8994_GPIO_S4>; + }; + }; + usb3_vbus_det_gpio: pm8996_gpio22 { pinconf { pins = "gpio22"; diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi index 1c8f1b86472d..4b8bb026346e 100644 --- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi @@ -23,6 +23,7 @@ aliases { serial0 = &blsp2_uart1; serial1 = &blsp2_uart2; + serial2 = &blsp1_uart1; i2c0 = &blsp1_i2c2; i2c1 = &blsp2_i2c1; i2c2 = &blsp2_i2c0; @@ -34,7 +35,36 @@ stdout-path = "serial0:115200n8"; }; + clocks { + divclk4: divclk4 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <32768>; + clock-output-names = "divclk4"; + + pinctrl-names = "default"; + pinctrl-0 = <&divclk4_pin_a>; + }; + }; + soc { + serial@7570000 { + label = "BT-UART"; + status = "okay"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&blsp1_uart1_default>; + pinctrl-1 = <&blsp1_uart1_sleep>; + + bluetooth { + compatible = "qcom,qca6174-bt"; + + /* bt_disable_n gpio */ + enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>; + + clocks = <&divclk4>; + }; + }; + serial@75b0000 { label = "LS-UART1"; status = "okay"; @@ -139,9 +169,40 @@ pinctrl-0 = <&usb2_vbus_det_gpio>; }; + bt_en: bt-en-1-8v { + pinctrl-names = "default"; + pinctrl-0 = <&bt_en_gpios>; + compatible = "regulator-fixed"; + regulator-name = "bt-en-regulator"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + + /* WLAN card specific delay */ + startup-delay-us = <70000>; + enable-active-high; + }; + + wlan_en: wlan-en-1-8v { + pinctrl-names = "default"; + pinctrl-0 = <&wlan_en_gpios>; + compatible = "regulator-fixed"; + regulator-name = "wlan-en-regulator"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + + gpio = <&pm8994_gpios 8 0>; + + /* WLAN card specific delay */ + startup-delay-us = <70000>; + enable-active-high; + }; + agnoc@0 { qcom,pcie@600000 { + status = "okay"; perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>; + vddpe-supply = <&wlan_en>; + vddpe1-supply = <&bt_en>; }; qcom,pcie@608000 { diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 410ae787ebb4..f8e49d0b4681 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -419,6 +419,16 @@ #clock-cells = <1>; }; + blsp1_uart1: serial@7570000 { + compatible = 
"qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; + reg = <0x07570000 0x1000>; + interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>, + <&gcc GCC_BLSP1_AHB_CLK>; + clock-names = "core", "iface"; + status = "disabled"; + }; + blsp1_spi0: spi@7575000 { compatible = "qcom,spi-qup-v2.2.1"; reg = <0x07575000 0x600>; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index ecf613761e78..17ea72b1b389 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -78,7 +78,8 @@ CONFIG_PCIE_ARMADA_8K=y CONFIG_PCI_AARDVARK=y CONFIG_PCI_TEGRA=y CONFIG_PCIE_RCAR=y -CONFIG_PCIE_ROCKCHIP=m +CONFIG_PCIE_ROCKCHIP=y +CONFIG_PCIE_ROCKCHIP_HOST=m CONFIG_PCI_HOST_GENERIC=y CONFIG_PCI_XGENE=y CONFIG_PCI_HOST_THUNDER_PEM=y diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index cb5a243110c4..e3fdb0fd6f70 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -47,6 +47,12 @@ config CRYPTO_SM3_ARM64_CE select CRYPTO_HASH select CRYPTO_SM3 +config CRYPTO_SM4_ARM64_CE + tristate "SM4 symmetric cipher (ARMv8.2 Crypto Extensions)" + depends on KERNEL_MODE_NEON + select CRYPTO_ALGAPI + select CRYPTO_SM4 + config CRYPTO_GHASH_ARM64_CE tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions" depends on KERNEL_MODE_NEON diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index f35ac684b1c0..bcafd016618e 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -23,6 +23,9 @@ sha3-ce-y := sha3-ce-glue.o sha3-ce-core.o obj-$(CONFIG_CRYPTO_SM3_ARM64_CE) += sm3-ce.o sm3-ce-y := sm3-ce-glue.o sm3-ce-core.o +obj-$(CONFIG_CRYPTO_SM4_ARM64_CE) += sm4-ce.o +sm4-ce-y := sm4-ce-glue.o sm4-ce-core.o + obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S index e3a375c4cb83..88f5aef7934c 100644 --- a/arch/arm64/crypto/aes-ce-ccm-core.S +++ b/arch/arm64/crypto/aes-ce-ccm-core.S @@ -19,24 +19,33 @@ * u32 *macp, u8 const rk[], u32 rounds); */ ENTRY(ce_aes_ccm_auth_data) - ldr w8, [x3] /* leftover from prev round? */ + frame_push 7 + + mov x19, x0 + mov x20, x1 + mov x21, x2 + mov x22, x3 + mov x23, x4 + mov x24, x5 + + ldr w25, [x22] /* leftover from prev round? */ ld1 {v0.16b}, [x0] /* load mac */ - cbz w8, 1f - sub w8, w8, #16 + cbz w25, 1f + sub w25, w25, #16 eor v1.16b, v1.16b, v1.16b -0: ldrb w7, [x1], #1 /* get 1 byte of input */ - subs w2, w2, #1 - add w8, w8, #1 +0: ldrb w7, [x20], #1 /* get 1 byte of input */ + subs w21, w21, #1 + add w25, w25, #1 ins v1.b[0], w7 ext v1.16b, v1.16b, v1.16b, #1 /* rotate in the input bytes */ beq 8f /* out of input? */ - cbnz w8, 0b + cbnz w25, 0b eor v0.16b, v0.16b, v1.16b -1: ld1 {v3.4s}, [x4] /* load first round key */ - prfm pldl1strm, [x1] - cmp w5, #12 /* which key size? */ - add x6, x4, #16 - sub w7, w5, #2 /* modified # of rounds */ +1: ld1 {v3.4s}, [x23] /* load first round key */ + prfm pldl1strm, [x20] + cmp w24, #12 /* which key size? */ + add x6, x23, #16 + sub w7, w24, #2 /* modified # of rounds */ bmi 2f bne 5f mov v5.16b, v3.16b @@ -55,33 +64,43 @@ ENTRY(ce_aes_ccm_auth_data) ld1 {v5.4s}, [x6], #16 /* load next round key */ bpl 3b aese v0.16b, v4.16b - subs w2, w2, #16 /* last data? */ + subs w21, w21, #16 /* last data? 
*/ eor v0.16b, v0.16b, v5.16b /* final round */ bmi 6f - ld1 {v1.16b}, [x1], #16 /* load next input block */ + ld1 {v1.16b}, [x20], #16 /* load next input block */ eor v0.16b, v0.16b, v1.16b /* xor with mac */ - bne 1b -6: st1 {v0.16b}, [x0] /* store mac */ + beq 6f + + if_will_cond_yield_neon + st1 {v0.16b}, [x19] /* store mac */ + do_cond_yield_neon + ld1 {v0.16b}, [x19] /* reload mac */ + endif_yield_neon + + b 1b +6: st1 {v0.16b}, [x19] /* store mac */ beq 10f - adds w2, w2, #16 + adds w21, w21, #16 beq 10f - mov w8, w2 -7: ldrb w7, [x1], #1 + mov w25, w21 +7: ldrb w7, [x20], #1 umov w6, v0.b[0] eor w6, w6, w7 - strb w6, [x0], #1 - subs w2, w2, #1 + strb w6, [x19], #1 + subs w21, w21, #1 beq 10f ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */ b 7b -8: mov w7, w8 - add w8, w8, #16 +8: mov w7, w25 + add w25, w25, #16 9: ext v1.16b, v1.16b, v1.16b, #1 adds w7, w7, #1 bne 9b eor v0.16b, v0.16b, v1.16b - st1 {v0.16b}, [x0] -10: str w8, [x3] + st1 {v0.16b}, [x19] +10: str w25, [x22] + + frame_pop ret ENDPROC(ce_aes_ccm_auth_data) @@ -126,19 +145,29 @@ ENTRY(ce_aes_ccm_final) ENDPROC(ce_aes_ccm_final) .macro aes_ccm_do_crypt,enc - ldr x8, [x6, #8] /* load lower ctr */ - ld1 {v0.16b}, [x5] /* load mac */ -CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */ + frame_push 8 + + mov x19, x0 + mov x20, x1 + mov x21, x2 + mov x22, x3 + mov x23, x4 + mov x24, x5 + mov x25, x6 + + ldr x26, [x25, #8] /* load lower ctr */ + ld1 {v0.16b}, [x24] /* load mac */ +CPU_LE( rev x26, x26 ) /* keep swabbed ctr in reg */ 0: /* outer loop */ - ld1 {v1.8b}, [x6] /* load upper ctr */ - prfm pldl1strm, [x1] - add x8, x8, #1 - rev x9, x8 - cmp w4, #12 /* which key size? */ - sub w7, w4, #2 /* get modified # of rounds */ + ld1 {v1.8b}, [x25] /* load upper ctr */ + prfm pldl1strm, [x20] + add x26, x26, #1 + rev x9, x26 + cmp w23, #12 /* which key size? */ + sub w7, w23, #2 /* get modified # of rounds */ ins v1.d[1], x9 /* no carry in lower ctr */ - ld1 {v3.4s}, [x3] /* load first round key */ - add x10, x3, #16 + ld1 {v3.4s}, [x22] /* load first round key */ + add x10, x22, #16 bmi 1f bne 4f mov v5.16b, v3.16b @@ -165,9 +194,9 @@ CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */ bpl 2b aese v0.16b, v4.16b aese v1.16b, v4.16b - subs w2, w2, #16 - bmi 6f /* partial block? */ - ld1 {v2.16b}, [x1], #16 /* load next input block */ + subs w21, w21, #16 + bmi 7f /* partial block? 
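(w21 has gone negative here, i.e. fewer than 16 input bytes remain)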
*/ + ld1 {v2.16b}, [x20], #16 /* load next input block */ .if \enc == 1 eor v2.16b, v2.16b, v5.16b /* final round enc+mac */ eor v1.16b, v1.16b, v2.16b /* xor with crypted ctr */ @@ -176,18 +205,29 @@ CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */ eor v1.16b, v2.16b, v5.16b /* final round enc */ .endif eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */ - st1 {v1.16b}, [x0], #16 /* write output block */ - bne 0b -CPU_LE( rev x8, x8 ) - st1 {v0.16b}, [x5] /* store mac */ - str x8, [x6, #8] /* store lsb end of ctr (BE) */ -5: ret - -6: eor v0.16b, v0.16b, v5.16b /* final round mac */ + st1 {v1.16b}, [x19], #16 /* write output block */ + beq 5f + + if_will_cond_yield_neon + st1 {v0.16b}, [x24] /* store mac */ + do_cond_yield_neon + ld1 {v0.16b}, [x24] /* reload mac */ + endif_yield_neon + + b 0b +5: +CPU_LE( rev x26, x26 ) + st1 {v0.16b}, [x24] /* store mac */ + str x26, [x25, #8] /* store lsb end of ctr (BE) */ + +6: frame_pop + ret + +7: eor v0.16b, v0.16b, v5.16b /* final round mac */ eor v1.16b, v1.16b, v5.16b /* final round enc */ - st1 {v0.16b}, [x5] /* store mac */ - add w2, w2, #16 /* process partial tail block */ -7: ldrb w9, [x1], #1 /* get 1 byte of input */ + st1 {v0.16b}, [x24] /* store mac */ + add w21, w21, #16 /* process partial tail block */ +8: ldrb w9, [x20], #1 /* get 1 byte of input */ umov w6, v1.b[0] /* get top crypted ctr byte */ umov w7, v0.b[0] /* get top mac byte */ .if \enc == 1 @@ -197,13 +237,13 @@ CPU_LE( rev x8, x8 ) eor w9, w9, w6 eor w7, w7, w9 .endif - strb w9, [x0], #1 /* store out byte */ - strb w7, [x5], #1 /* store mac byte */ - subs w2, w2, #1 - beq 5b + strb w9, [x19], #1 /* store out byte */ + strb w7, [x24], #1 /* store mac byte */ + subs w21, w21, #1 + beq 6b ext v0.16b, v0.16b, v0.16b, #1 /* shift out mac byte */ ext v1.16b, v1.16b, v1.16b, #1 /* shift out ctr byte */ - b 7b + b 8b .endm /* diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S index 50330f5c3adc..623e74ed1c67 100644 --- a/arch/arm64/crypto/aes-ce.S +++ b/arch/arm64/crypto/aes-ce.S @@ -30,18 +30,21 @@ .endm /* prepare for encryption with key in rk[] */ - .macro enc_prepare, rounds, rk, ignore - load_round_keys \rounds, \rk + .macro enc_prepare, rounds, rk, temp + mov \temp, \rk + load_round_keys \rounds, \temp .endm /* prepare for encryption (again) but with new key in rk[] */ - .macro enc_switch_key, rounds, rk, ignore - load_round_keys \rounds, \rk + .macro enc_switch_key, rounds, rk, temp + mov \temp, \rk + load_round_keys \rounds, \temp .endm /* prepare for decryption with key in rk[] */ - .macro dec_prepare, rounds, rk, ignore - load_round_keys \rounds, \rk + .macro dec_prepare, rounds, rk, temp + mov \temp, \rk + load_round_keys \rounds, \temp .endm .macro do_enc_Nx, de, mc, k, i0, i1, i2, i3 diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S index a68412e1e3a4..483a7130cf0e 100644 --- a/arch/arm64/crypto/aes-modes.S +++ b/arch/arm64/crypto/aes-modes.S @@ -14,12 +14,12 @@ .align 4 aes_encrypt_block4x: - encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7 + encrypt_block4x v0, v1, v2, v3, w22, x21, x8, w7 ret ENDPROC(aes_encrypt_block4x) aes_decrypt_block4x: - decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7 + decrypt_block4x v0, v1, v2, v3, w22, x21, x8, w7 ret ENDPROC(aes_decrypt_block4x) @@ -31,57 +31,71 @@ ENDPROC(aes_decrypt_block4x) */ AES_ENTRY(aes_ecb_encrypt) - stp x29, x30, [sp, #-16]! 
- mov x29, sp + frame_push 5 - enc_prepare w3, x2, x5 + mov x19, x0 + mov x20, x1 + mov x21, x2 + mov x22, x3 + mov x23, x4 + +.Lecbencrestart: + enc_prepare w22, x21, x5 .LecbencloopNx: - subs w4, w4, #4 + subs w23, w23, #4 bmi .Lecbenc1x - ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */ + ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 pt blocks */ bl aes_encrypt_block4x - st1 {v0.16b-v3.16b}, [x0], #64 + st1 {v0.16b-v3.16b}, [x19], #64 + cond_yield_neon .Lecbencrestart b .LecbencloopNx .Lecbenc1x: - adds w4, w4, #4 + adds w23, w23, #4 beq .Lecbencout .Lecbencloop: - ld1 {v0.16b}, [x1], #16 /* get next pt block */ - encrypt_block v0, w3, x2, x5, w6 - st1 {v0.16b}, [x0], #16 - subs w4, w4, #1 + ld1 {v0.16b}, [x20], #16 /* get next pt block */ + encrypt_block v0, w22, x21, x5, w6 + st1 {v0.16b}, [x19], #16 + subs w23, w23, #1 bne .Lecbencloop .Lecbencout: - ldp x29, x30, [sp], #16 + frame_pop ret AES_ENDPROC(aes_ecb_encrypt) AES_ENTRY(aes_ecb_decrypt) - stp x29, x30, [sp, #-16]! - mov x29, sp + frame_push 5 + + mov x19, x0 + mov x20, x1 + mov x21, x2 + mov x22, x3 + mov x23, x4 - dec_prepare w3, x2, x5 +.Lecbdecrestart: + dec_prepare w22, x21, x5 .LecbdecloopNx: - subs w4, w4, #4 + subs w23, w23, #4 bmi .Lecbdec1x - ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ + ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 ct blocks */ bl aes_decrypt_block4x - st1 {v0.16b-v3.16b}, [x0], #64 + st1 {v0.16b-v3.16b}, [x19], #64 + cond_yield_neon .Lecbdecrestart b .LecbdecloopNx .Lecbdec1x: - adds w4, w4, #4 + adds w23, w23, #4 beq .Lecbdecout .Lecbdecloop: - ld1 {v0.16b}, [x1], #16 /* get next ct block */ - decrypt_block v0, w3, x2, x5, w6 - st1 {v0.16b}, [x0], #16 - subs w4, w4, #1 + ld1 {v0.16b}, [x20], #16 /* get next ct block */ + decrypt_block v0, w22, x21, x5, w6 + st1 {v0.16b}, [x19], #16 + subs w23, w23, #1 bne .Lecbdecloop .Lecbdecout: - ldp x29, x30, [sp], #16 + frame_pop ret AES_ENDPROC(aes_ecb_decrypt) @@ -94,78 +108,100 @@ AES_ENDPROC(aes_ecb_decrypt) */ AES_ENTRY(aes_cbc_encrypt) - ld1 {v4.16b}, [x5] /* get iv */ - enc_prepare w3, x2, x6 + frame_push 6 + + mov x19, x0 + mov x20, x1 + mov x21, x2 + mov x22, x3 + mov x23, x4 + mov x24, x5 + +.Lcbcencrestart: + ld1 {v4.16b}, [x24] /* get iv */ + enc_prepare w22, x21, x6 .Lcbcencloop4x: - subs w4, w4, #4 + subs w23, w23, #4 bmi .Lcbcenc1x - ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */ + ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 pt blocks */ eor v0.16b, v0.16b, v4.16b /* ..and xor with iv */ - encrypt_block v0, w3, x2, x6, w7 + encrypt_block v0, w22, x21, x6, w7 eor v1.16b, v1.16b, v0.16b - encrypt_block v1, w3, x2, x6, w7 + encrypt_block v1, w22, x21, x6, w7 eor v2.16b, v2.16b, v1.16b - encrypt_block v2, w3, x2, x6, w7 + encrypt_block v2, w22, x21, x6, w7 eor v3.16b, v3.16b, v2.16b - encrypt_block v3, w3, x2, x6, w7 - st1 {v0.16b-v3.16b}, [x0], #64 + encrypt_block v3, w22, x21, x6, w7 + st1 {v0.16b-v3.16b}, [x19], #64 mov v4.16b, v3.16b + st1 {v4.16b}, [x24] /* return iv */ + cond_yield_neon .Lcbcencrestart b .Lcbcencloop4x .Lcbcenc1x: - adds w4, w4, #4 + adds w23, w23, #4 beq .Lcbcencout .Lcbcencloop: - ld1 {v0.16b}, [x1], #16 /* get next pt block */ + ld1 {v0.16b}, [x20], #16 /* get next pt block */ eor v4.16b, v4.16b, v0.16b /* ..and xor with iv */ - encrypt_block v4, w3, x2, x6, w7 - st1 {v4.16b}, [x0], #16 - subs w4, w4, #1 + encrypt_block v4, w22, x21, x6, w7 + st1 {v4.16b}, [x19], #16 + subs w23, w23, #1 bne .Lcbcencloop .Lcbcencout: - st1 {v4.16b}, [x5] /* return iv */ + st1 {v4.16b}, [x24] /* return iv */ + frame_pop ret 
AES_ENDPROC(aes_cbc_encrypt) AES_ENTRY(aes_cbc_decrypt) - stp x29, x30, [sp, #-16]! - mov x29, sp + frame_push 6 + + mov x19, x0 + mov x20, x1 + mov x21, x2 + mov x22, x3 + mov x23, x4 + mov x24, x5 - ld1 {v7.16b}, [x5] /* get iv */ - dec_prepare w3, x2, x6 +.Lcbcdecrestart: + ld1 {v7.16b}, [x24] /* get iv */ + dec_prepare w22, x21, x6 .LcbcdecloopNx: - subs w4, w4, #4 + subs w23, w23, #4 bmi .Lcbcdec1x - ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ + ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 ct blocks */ mov v4.16b, v0.16b mov v5.16b, v1.16b mov v6.16b, v2.16b bl aes_decrypt_block4x - sub x1, x1, #16 + sub x20, x20, #16 eor v0.16b, v0.16b, v7.16b eor v1.16b, v1.16b, v4.16b - ld1 {v7.16b}, [x1], #16 /* reload 1 ct block */ + ld1 {v7.16b}, [x20], #16 /* reload 1 ct block */ eor v2.16b, v2.16b, v5.16b eor v3.16b, v3.16b, v6.16b - st1 {v0.16b-v3.16b}, [x0], #64 + st1 {v0.16b-v3.16b}, [x19], #64 + st1 {v7.16b}, [x24] /* return iv */ + cond_yield_neon .Lcbcdecrestart b .LcbcdecloopNx .Lcbcdec1x: - adds w4, w4, #4 + adds w23, w23, #4 beq .Lcbcdecout .Lcbcdecloop: - ld1 {v1.16b}, [x1], #16 /* get next ct block */ + ld1 {v1.16b}, [x20], #16 /* get next ct block */ mov v0.16b, v1.16b /* ...and copy to v0 */ - decrypt_block v0, w3, x2, x6, w7 + decrypt_block v0, w22, x21, x6, w7 eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */ mov v7.16b, v1.16b /* ct is next iv */ - st1 {v0.16b}, [x0], #16 - subs w4, w4, #1 + st1 {v0.16b}, [x19], #16 + subs w23, w23, #1 bne .Lcbcdecloop .Lcbcdecout: - st1 {v7.16b}, [x5] /* return iv */ - ldp x29, x30, [sp], #16 + st1 {v7.16b}, [x24] /* return iv */ + frame_pop ret AES_ENDPROC(aes_cbc_decrypt) @@ -176,19 +212,26 @@ AES_ENDPROC(aes_cbc_decrypt) */ AES_ENTRY(aes_ctr_encrypt) - stp x29, x30, [sp, #-16]! - mov x29, sp + frame_push 6 - enc_prepare w3, x2, x6 - ld1 {v4.16b}, [x5] + mov x19, x0 + mov x20, x1 + mov x21, x2 + mov x22, x3 + mov x23, x4 + mov x24, x5 + +.Lctrrestart: + enc_prepare w22, x21, x6 + ld1 {v4.16b}, [x24] umov x6, v4.d[1] /* keep swabbed ctr in reg */ rev x6, x6 - cmn w6, w4 /* 32 bit overflow? */ - bcs .Lctrloop .LctrloopNx: - subs w4, w4, #4 + subs w23, w23, #4 bmi .Lctr1x + cmn w6, #4 /* 32 bit overflow? */ + bcs .Lctr1x ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */ dup v7.4s, w6 mov v0.16b, v4.16b @@ -200,25 +243,27 @@ AES_ENTRY(aes_ctr_encrypt) mov v1.s[3], v8.s[0] mov v2.s[3], v8.s[1] mov v3.s[3], v8.s[2] - ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */ + ld1 {v5.16b-v7.16b}, [x20], #48 /* get 3 input blocks */ bl aes_encrypt_block4x eor v0.16b, v5.16b, v0.16b - ld1 {v5.16b}, [x1], #16 /* get 1 input block */ + ld1 {v5.16b}, [x20], #16 /* get 1 input block */ eor v1.16b, v6.16b, v1.16b eor v2.16b, v7.16b, v2.16b eor v3.16b, v5.16b, v3.16b - st1 {v0.16b-v3.16b}, [x0], #64 + st1 {v0.16b-v3.16b}, [x19], #64 add x6, x6, #4 rev x7, x6 ins v4.d[1], x7 - cbz w4, .Lctrout + cbz w23, .Lctrout + st1 {v4.16b}, [x24] /* return next CTR value */ + cond_yield_neon .Lctrrestart b .LctrloopNx .Lctr1x: - adds w4, w4, #4 + adds w23, w23, #4 beq .Lctrout .Lctrloop: mov v0.16b, v4.16b - encrypt_block v0, w3, x2, x8, w7 + encrypt_block v0, w22, x21, x8, w7 adds x6, x6, #1 /* increment BE ctr */ rev x7, x6 @@ -226,22 +271,22 @@ AES_ENTRY(aes_ctr_encrypt) bcs .Lctrcarry /* overflow? 
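(carry out of the low 64 counter bits; .Lctrcarry propagates it into the upper word kept in v4.d[0])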
*/ .Lctrcarrydone: - subs w4, w4, #1 + subs w23, w23, #1 bmi .Lctrtailblock /* blocks <0 means tail block */ - ld1 {v3.16b}, [x1], #16 + ld1 {v3.16b}, [x20], #16 eor v3.16b, v0.16b, v3.16b - st1 {v3.16b}, [x0], #16 + st1 {v3.16b}, [x19], #16 bne .Lctrloop .Lctrout: - st1 {v4.16b}, [x5] /* return next CTR value */ - ldp x29, x30, [sp], #16 + st1 {v4.16b}, [x24] /* return next CTR value */ +.Lctrret: + frame_pop ret .Lctrtailblock: - st1 {v0.16b}, [x0] - ldp x29, x30, [sp], #16 - ret + st1 {v0.16b}, [x19] + b .Lctrret .Lctrcarry: umov x7, v4.d[0] /* load upper word of ctr */ @@ -274,10 +319,16 @@ CPU_LE( .quad 1, 0x87 ) CPU_BE( .quad 0x87, 1 ) AES_ENTRY(aes_xts_encrypt) - stp x29, x30, [sp, #-16]! - mov x29, sp + frame_push 6 + + mov x19, x0 + mov x20, x1 + mov x21, x2 + mov x22, x3 + mov x23, x4 + mov x24, x6 - ld1 {v4.16b}, [x6] + ld1 {v4.16b}, [x24] cbz w7, .Lxtsencnotfirst enc_prepare w3, x5, x8 @@ -286,15 +337,17 @@ AES_ENTRY(aes_xts_encrypt) ldr q7, .Lxts_mul_x b .LxtsencNx +.Lxtsencrestart: + ld1 {v4.16b}, [x24] .Lxtsencnotfirst: - enc_prepare w3, x2, x8 + enc_prepare w22, x21, x8 .LxtsencloopNx: ldr q7, .Lxts_mul_x next_tweak v4, v4, v7, v8 .LxtsencNx: - subs w4, w4, #4 + subs w23, w23, #4 bmi .Lxtsenc1x - ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */ + ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 pt blocks */ next_tweak v5, v4, v7, v8 eor v0.16b, v0.16b, v4.16b next_tweak v6, v5, v7, v8 @@ -307,35 +360,43 @@ AES_ENTRY(aes_xts_encrypt) eor v0.16b, v0.16b, v4.16b eor v1.16b, v1.16b, v5.16b eor v2.16b, v2.16b, v6.16b - st1 {v0.16b-v3.16b}, [x0], #64 + st1 {v0.16b-v3.16b}, [x19], #64 mov v4.16b, v7.16b - cbz w4, .Lxtsencout + cbz w23, .Lxtsencout + st1 {v4.16b}, [x24] + cond_yield_neon .Lxtsencrestart b .LxtsencloopNx .Lxtsenc1x: - adds w4, w4, #4 + adds w23, w23, #4 beq .Lxtsencout .Lxtsencloop: - ld1 {v1.16b}, [x1], #16 + ld1 {v1.16b}, [x20], #16 eor v0.16b, v1.16b, v4.16b - encrypt_block v0, w3, x2, x8, w7 + encrypt_block v0, w22, x21, x8, w7 eor v0.16b, v0.16b, v4.16b - st1 {v0.16b}, [x0], #16 - subs w4, w4, #1 + st1 {v0.16b}, [x19], #16 + subs w23, w23, #1 beq .Lxtsencout next_tweak v4, v4, v7, v8 b .Lxtsencloop .Lxtsencout: - st1 {v4.16b}, [x6] - ldp x29, x30, [sp], #16 + st1 {v4.16b}, [x24] + frame_pop ret AES_ENDPROC(aes_xts_encrypt) AES_ENTRY(aes_xts_decrypt) - stp x29, x30, [sp, #-16]! 
- mov x29, sp + frame_push 6 - ld1 {v4.16b}, [x6] + mov x19, x0 + mov x20, x1 + mov x21, x2 + mov x22, x3 + mov x23, x4 + mov x24, x6 + + ld1 {v4.16b}, [x24] cbz w7, .Lxtsdecnotfirst enc_prepare w3, x5, x8 @@ -344,15 +405,17 @@ AES_ENTRY(aes_xts_decrypt) ldr q7, .Lxts_mul_x b .LxtsdecNx +.Lxtsdecrestart: + ld1 {v4.16b}, [x24] .Lxtsdecnotfirst: - dec_prepare w3, x2, x8 + dec_prepare w22, x21, x8 .LxtsdecloopNx: ldr q7, .Lxts_mul_x next_tweak v4, v4, v7, v8 .LxtsdecNx: - subs w4, w4, #4 + subs w23, w23, #4 bmi .Lxtsdec1x - ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ + ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 ct blocks */ next_tweak v5, v4, v7, v8 eor v0.16b, v0.16b, v4.16b next_tweak v6, v5, v7, v8 @@ -365,26 +428,28 @@ AES_ENTRY(aes_xts_decrypt) eor v0.16b, v0.16b, v4.16b eor v1.16b, v1.16b, v5.16b eor v2.16b, v2.16b, v6.16b - st1 {v0.16b-v3.16b}, [x0], #64 + st1 {v0.16b-v3.16b}, [x19], #64 mov v4.16b, v7.16b - cbz w4, .Lxtsdecout + cbz w23, .Lxtsdecout + st1 {v4.16b}, [x24] + cond_yield_neon .Lxtsdecrestart b .LxtsdecloopNx .Lxtsdec1x: - adds w4, w4, #4 + adds w23, w23, #4 beq .Lxtsdecout .Lxtsdecloop: - ld1 {v1.16b}, [x1], #16 + ld1 {v1.16b}, [x20], #16 eor v0.16b, v1.16b, v4.16b - decrypt_block v0, w3, x2, x8, w7 + decrypt_block v0, w22, x21, x8, w7 eor v0.16b, v0.16b, v4.16b - st1 {v0.16b}, [x0], #16 - subs w4, w4, #1 + st1 {v0.16b}, [x19], #16 + subs w23, w23, #1 beq .Lxtsdecout next_tweak v4, v4, v7, v8 b .Lxtsdecloop .Lxtsdecout: - st1 {v4.16b}, [x6] - ldp x29, x30, [sp], #16 + st1 {v4.16b}, [x24] + frame_pop ret AES_ENDPROC(aes_xts_decrypt) @@ -393,43 +458,61 @@ AES_ENDPROC(aes_xts_decrypt) * int blocks, u8 dg[], int enc_before, int enc_after) */ AES_ENTRY(aes_mac_update) - ld1 {v0.16b}, [x4] /* get dg */ + frame_push 6 + + mov x19, x0 + mov x20, x1 + mov x21, x2 + mov x22, x3 + mov x23, x4 + mov x24, x6 + + ld1 {v0.16b}, [x23] /* get dg */ enc_prepare w2, x1, x7 cbz w5, .Lmacloop4x encrypt_block v0, w2, x1, x7, w8 .Lmacloop4x: - subs w3, w3, #4 + subs w22, w22, #4 bmi .Lmac1x - ld1 {v1.16b-v4.16b}, [x0], #64 /* get next pt block */ + ld1 {v1.16b-v4.16b}, [x19], #64 /* get next pt block */ eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */ - encrypt_block v0, w2, x1, x7, w8 + encrypt_block v0, w21, x20, x7, w8 eor v0.16b, v0.16b, v2.16b - encrypt_block v0, w2, x1, x7, w8 + encrypt_block v0, w21, x20, x7, w8 eor v0.16b, v0.16b, v3.16b - encrypt_block v0, w2, x1, x7, w8 + encrypt_block v0, w21, x20, x7, w8 eor v0.16b, v0.16b, v4.16b - cmp w3, wzr - csinv x5, x6, xzr, eq + cmp w22, wzr + csinv x5, x24, xzr, eq cbz w5, .Lmacout - encrypt_block v0, w2, x1, x7, w8 + encrypt_block v0, w21, x20, x7, w8 + st1 {v0.16b}, [x23] /* return dg */ + cond_yield_neon .Lmacrestart b .Lmacloop4x .Lmac1x: - add w3, w3, #4 + add w22, w22, #4 .Lmacloop: - cbz w3, .Lmacout - ld1 {v1.16b}, [x0], #16 /* get next pt block */ + cbz w22, .Lmacout + ld1 {v1.16b}, [x19], #16 /* get next pt block */ eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */ - subs w3, w3, #1 - csinv x5, x6, xzr, eq + subs w22, w22, #1 + csinv x5, x24, xzr, eq cbz w5, .Lmacout - encrypt_block v0, w2, x1, x7, w8 +.Lmacenc: + encrypt_block v0, w21, x20, x7, w8 b .Lmacloop .Lmacout: - st1 {v0.16b}, [x4] /* return dg */ + st1 {v0.16b}, [x23] /* return dg */ + frame_pop ret + +.Lmacrestart: + ld1 {v0.16b}, [x23] /* get dg */ + enc_prepare w21, x20, x0 + b .Lmacloop4x AES_ENDPROC(aes_mac_update) diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S index ca0472500433..e613a87f8b53 100644 --- 
a/arch/arm64/crypto/aes-neonbs-core.S +++ b/arch/arm64/crypto/aes-neonbs-core.S @@ -565,54 +565,61 @@ ENDPROC(aesbs_decrypt8) * int blocks) */ .macro __ecb_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7 - stp x29, x30, [sp, #-16]! - mov x29, sp + frame_push 5 + + mov x19, x0 + mov x20, x1 + mov x21, x2 + mov x22, x3 + mov x23, x4 99: mov x5, #1 - lsl x5, x5, x4 - subs w4, w4, #8 - csel x4, x4, xzr, pl + lsl x5, x5, x23 + subs w23, w23, #8 + csel x23, x23, xzr, pl csel x5, x5, xzr, mi - ld1 {v0.16b}, [x1], #16 + ld1 {v0.16b}, [x20], #16 tbnz x5, #1, 0f - ld1 {v1.16b}, [x1], #16 + ld1 {v1.16b}, [x20], #16 tbnz x5, #2, 0f - ld1 {v2.16b}, [x1], #16 + ld1 {v2.16b}, [x20], #16 tbnz x5, #3, 0f - ld1 {v3.16b}, [x1], #16 + ld1 {v3.16b}, [x20], #16 tbnz x5, #4, 0f - ld1 {v4.16b}, [x1], #16 + ld1 {v4.16b}, [x20], #16 tbnz x5, #5, 0f - ld1 {v5.16b}, [x1], #16 + ld1 {v5.16b}, [x20], #16 tbnz x5, #6, 0f - ld1 {v6.16b}, [x1], #16 + ld1 {v6.16b}, [x20], #16 tbnz x5, #7, 0f - ld1 {v7.16b}, [x1], #16 + ld1 {v7.16b}, [x20], #16 -0: mov bskey, x2 - mov rounds, x3 +0: mov bskey, x21 + mov rounds, x22 bl \do8 - st1 {\o0\().16b}, [x0], #16 + st1 {\o0\().16b}, [x19], #16 tbnz x5, #1, 1f - st1 {\o1\().16b}, [x0], #16 + st1 {\o1\().16b}, [x19], #16 tbnz x5, #2, 1f - st1 {\o2\().16b}, [x0], #16 + st1 {\o2\().16b}, [x19], #16 tbnz x5, #3, 1f - st1 {\o3\().16b}, [x0], #16 + st1 {\o3\().16b}, [x19], #16 tbnz x5, #4, 1f - st1 {\o4\().16b}, [x0], #16 + st1 {\o4\().16b}, [x19], #16 tbnz x5, #5, 1f - st1 {\o5\().16b}, [x0], #16 + st1 {\o5\().16b}, [x19], #16 tbnz x5, #6, 1f - st1 {\o6\().16b}, [x0], #16 + st1 {\o6\().16b}, [x19], #16 tbnz x5, #7, 1f - st1 {\o7\().16b}, [x0], #16 + st1 {\o7\().16b}, [x19], #16 - cbnz x4, 99b + cbz x23, 1f + cond_yield_neon + b 99b -1: ldp x29, x30, [sp], #16 +1: frame_pop ret .endm @@ -632,43 +639,49 @@ ENDPROC(aesbs_ecb_decrypt) */ .align 4 ENTRY(aesbs_cbc_decrypt) - stp x29, x30, [sp, #-16]! 
- mov x29, sp + frame_push 6 + + mov x19, x0 + mov x20, x1 + mov x21, x2 + mov x22, x3 + mov x23, x4 + mov x24, x5 99: mov x6, #1 - lsl x6, x6, x4 - subs w4, w4, #8 - csel x4, x4, xzr, pl + lsl x6, x6, x23 + subs w23, w23, #8 + csel x23, x23, xzr, pl csel x6, x6, xzr, mi - ld1 {v0.16b}, [x1], #16 + ld1 {v0.16b}, [x20], #16 mov v25.16b, v0.16b tbnz x6, #1, 0f - ld1 {v1.16b}, [x1], #16 + ld1 {v1.16b}, [x20], #16 mov v26.16b, v1.16b tbnz x6, #2, 0f - ld1 {v2.16b}, [x1], #16 + ld1 {v2.16b}, [x20], #16 mov v27.16b, v2.16b tbnz x6, #3, 0f - ld1 {v3.16b}, [x1], #16 + ld1 {v3.16b}, [x20], #16 mov v28.16b, v3.16b tbnz x6, #4, 0f - ld1 {v4.16b}, [x1], #16 + ld1 {v4.16b}, [x20], #16 mov v29.16b, v4.16b tbnz x6, #5, 0f - ld1 {v5.16b}, [x1], #16 + ld1 {v5.16b}, [x20], #16 mov v30.16b, v5.16b tbnz x6, #6, 0f - ld1 {v6.16b}, [x1], #16 + ld1 {v6.16b}, [x20], #16 mov v31.16b, v6.16b tbnz x6, #7, 0f - ld1 {v7.16b}, [x1] + ld1 {v7.16b}, [x20] -0: mov bskey, x2 - mov rounds, x3 +0: mov bskey, x21 + mov rounds, x22 bl aesbs_decrypt8 - ld1 {v24.16b}, [x5] // load IV + ld1 {v24.16b}, [x24] // load IV eor v1.16b, v1.16b, v25.16b eor v6.16b, v6.16b, v26.16b @@ -679,34 +692,36 @@ ENTRY(aesbs_cbc_decrypt) eor v3.16b, v3.16b, v30.16b eor v5.16b, v5.16b, v31.16b - st1 {v0.16b}, [x0], #16 + st1 {v0.16b}, [x19], #16 mov v24.16b, v25.16b tbnz x6, #1, 1f - st1 {v1.16b}, [x0], #16 + st1 {v1.16b}, [x19], #16 mov v24.16b, v26.16b tbnz x6, #2, 1f - st1 {v6.16b}, [x0], #16 + st1 {v6.16b}, [x19], #16 mov v24.16b, v27.16b tbnz x6, #3, 1f - st1 {v4.16b}, [x0], #16 + st1 {v4.16b}, [x19], #16 mov v24.16b, v28.16b tbnz x6, #4, 1f - st1 {v2.16b}, [x0], #16 + st1 {v2.16b}, [x19], #16 mov v24.16b, v29.16b tbnz x6, #5, 1f - st1 {v7.16b}, [x0], #16 + st1 {v7.16b}, [x19], #16 mov v24.16b, v30.16b tbnz x6, #6, 1f - st1 {v3.16b}, [x0], #16 + st1 {v3.16b}, [x19], #16 mov v24.16b, v31.16b tbnz x6, #7, 1f - ld1 {v24.16b}, [x1], #16 - st1 {v5.16b}, [x0], #16 -1: st1 {v24.16b}, [x5] // store IV + ld1 {v24.16b}, [x20], #16 + st1 {v5.16b}, [x19], #16 +1: st1 {v24.16b}, [x24] // store IV - cbnz x4, 99b + cbz x23, 2f + cond_yield_neon + b 99b - ldp x29, x30, [sp], #16 +2: frame_pop ret ENDPROC(aesbs_cbc_decrypt) @@ -731,87 +746,93 @@ CPU_BE( .quad 0x87, 1 ) */ __xts_crypt8: mov x6, #1 - lsl x6, x6, x4 - subs w4, w4, #8 - csel x4, x4, xzr, pl + lsl x6, x6, x23 + subs w23, w23, #8 + csel x23, x23, xzr, pl csel x6, x6, xzr, mi - ld1 {v0.16b}, [x1], #16 + ld1 {v0.16b}, [x20], #16 next_tweak v26, v25, v30, v31 eor v0.16b, v0.16b, v25.16b tbnz x6, #1, 0f - ld1 {v1.16b}, [x1], #16 + ld1 {v1.16b}, [x20], #16 next_tweak v27, v26, v30, v31 eor v1.16b, v1.16b, v26.16b tbnz x6, #2, 0f - ld1 {v2.16b}, [x1], #16 + ld1 {v2.16b}, [x20], #16 next_tweak v28, v27, v30, v31 eor v2.16b, v2.16b, v27.16b tbnz x6, #3, 0f - ld1 {v3.16b}, [x1], #16 + ld1 {v3.16b}, [x20], #16 next_tweak v29, v28, v30, v31 eor v3.16b, v3.16b, v28.16b tbnz x6, #4, 0f - ld1 {v4.16b}, [x1], #16 - str q29, [sp, #16] + ld1 {v4.16b}, [x20], #16 + str q29, [sp, #.Lframe_local_offset] eor v4.16b, v4.16b, v29.16b next_tweak v29, v29, v30, v31 tbnz x6, #5, 0f - ld1 {v5.16b}, [x1], #16 - str q29, [sp, #32] + ld1 {v5.16b}, [x20], #16 + str q29, [sp, #.Lframe_local_offset + 16] eor v5.16b, v5.16b, v29.16b next_tweak v29, v29, v30, v31 tbnz x6, #6, 0f - ld1 {v6.16b}, [x1], #16 - str q29, [sp, #48] + ld1 {v6.16b}, [x20], #16 + str q29, [sp, #.Lframe_local_offset + 32] eor v6.16b, v6.16b, v29.16b next_tweak v29, v29, v30, v31 tbnz x6, #7, 0f - ld1 {v7.16b}, [x1], #16 - str q29, [sp, #64] + ld1 {v7.16b}, 
[x20], #16 + str q29, [sp, #.Lframe_local_offset + 48] eor v7.16b, v7.16b, v29.16b next_tweak v29, v29, v30, v31 -0: mov bskey, x2 - mov rounds, x3 +0: mov bskey, x21 + mov rounds, x22 br x7 ENDPROC(__xts_crypt8) .macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7 - stp x29, x30, [sp, #-80]! - mov x29, sp + frame_push 6, 64 - ldr q30, .Lxts_mul_x - ld1 {v25.16b}, [x5] + mov x19, x0 + mov x20, x1 + mov x21, x2 + mov x22, x3 + mov x23, x4 + mov x24, x5 + +0: ldr q30, .Lxts_mul_x + ld1 {v25.16b}, [x24] 99: adr x7, \do8 bl __xts_crypt8 - ldp q16, q17, [sp, #16] - ldp q18, q19, [sp, #48] + ldp q16, q17, [sp, #.Lframe_local_offset] + ldp q18, q19, [sp, #.Lframe_local_offset + 32] eor \o0\().16b, \o0\().16b, v25.16b eor \o1\().16b, \o1\().16b, v26.16b eor \o2\().16b, \o2\().16b, v27.16b eor \o3\().16b, \o3\().16b, v28.16b - st1 {\o0\().16b}, [x0], #16 + st1 {\o0\().16b}, [x19], #16 mov v25.16b, v26.16b tbnz x6, #1, 1f - st1 {\o1\().16b}, [x0], #16 + st1 {\o1\().16b}, [x19], #16 mov v25.16b, v27.16b tbnz x6, #2, 1f - st1 {\o2\().16b}, [x0], #16 + st1 {\o2\().16b}, [x19], #16 mov v25.16b, v28.16b tbnz x6, #3, 1f - st1 {\o3\().16b}, [x0], #16 + st1 {\o3\().16b}, [x19], #16 mov v25.16b, v29.16b tbnz x6, #4, 1f @@ -820,18 +841,22 @@ ENDPROC(__xts_crypt8) eor \o6\().16b, \o6\().16b, v18.16b eor \o7\().16b, \o7\().16b, v19.16b - st1 {\o4\().16b}, [x0], #16 + st1 {\o4\().16b}, [x19], #16 tbnz x6, #5, 1f - st1 {\o5\().16b}, [x0], #16 + st1 {\o5\().16b}, [x19], #16 tbnz x6, #6, 1f - st1 {\o6\().16b}, [x0], #16 + st1 {\o6\().16b}, [x19], #16 tbnz x6, #7, 1f - st1 {\o7\().16b}, [x0], #16 + st1 {\o7\().16b}, [x19], #16 + + cbz x23, 1f + st1 {v25.16b}, [x24] - cbnz x4, 99b + cond_yield_neon 0b + b 99b -1: st1 {v25.16b}, [x5] - ldp x29, x30, [sp], #80 +1: st1 {v25.16b}, [x24] + frame_pop ret .endm @@ -856,24 +881,31 @@ ENDPROC(aesbs_xts_decrypt) * int rounds, int blocks, u8 iv[], u8 final[]) */ ENTRY(aesbs_ctr_encrypt) - stp x29, x30, [sp, #-16]! 
- mov x29, sp - - cmp x6, #0 - cset x10, ne - add x4, x4, x10 // do one extra block if final - - ldp x7, x8, [x5] - ld1 {v0.16b}, [x5] + frame_push 8 + + mov x19, x0 + mov x20, x1 + mov x21, x2 + mov x22, x3 + mov x23, x4 + mov x24, x5 + mov x25, x6 + + cmp x25, #0 + cset x26, ne + add x23, x23, x26 // do one extra block if final + +98: ldp x7, x8, [x24] + ld1 {v0.16b}, [x24] CPU_LE( rev x7, x7 ) CPU_LE( rev x8, x8 ) adds x8, x8, #1 adc x7, x7, xzr 99: mov x9, #1 - lsl x9, x9, x4 - subs w4, w4, #8 - csel x4, x4, xzr, pl + lsl x9, x9, x23 + subs w23, w23, #8 + csel x23, x23, xzr, pl csel x9, x9, xzr, le tbnz x9, #1, 0f @@ -891,82 +923,85 @@ CPU_LE( rev x8, x8 ) tbnz x9, #7, 0f next_ctr v7 -0: mov bskey, x2 - mov rounds, x3 +0: mov bskey, x21 + mov rounds, x22 bl aesbs_encrypt8 - lsr x9, x9, x10 // disregard the extra block + lsr x9, x9, x26 // disregard the extra block tbnz x9, #0, 0f - ld1 {v8.16b}, [x1], #16 + ld1 {v8.16b}, [x20], #16 eor v0.16b, v0.16b, v8.16b - st1 {v0.16b}, [x0], #16 + st1 {v0.16b}, [x19], #16 tbnz x9, #1, 1f - ld1 {v9.16b}, [x1], #16 + ld1 {v9.16b}, [x20], #16 eor v1.16b, v1.16b, v9.16b - st1 {v1.16b}, [x0], #16 + st1 {v1.16b}, [x19], #16 tbnz x9, #2, 2f - ld1 {v10.16b}, [x1], #16 + ld1 {v10.16b}, [x20], #16 eor v4.16b, v4.16b, v10.16b - st1 {v4.16b}, [x0], #16 + st1 {v4.16b}, [x19], #16 tbnz x9, #3, 3f - ld1 {v11.16b}, [x1], #16 + ld1 {v11.16b}, [x20], #16 eor v6.16b, v6.16b, v11.16b - st1 {v6.16b}, [x0], #16 + st1 {v6.16b}, [x19], #16 tbnz x9, #4, 4f - ld1 {v12.16b}, [x1], #16 + ld1 {v12.16b}, [x20], #16 eor v3.16b, v3.16b, v12.16b - st1 {v3.16b}, [x0], #16 + st1 {v3.16b}, [x19], #16 tbnz x9, #5, 5f - ld1 {v13.16b}, [x1], #16 + ld1 {v13.16b}, [x20], #16 eor v7.16b, v7.16b, v13.16b - st1 {v7.16b}, [x0], #16 + st1 {v7.16b}, [x19], #16 tbnz x9, #6, 6f - ld1 {v14.16b}, [x1], #16 + ld1 {v14.16b}, [x20], #16 eor v2.16b, v2.16b, v14.16b - st1 {v2.16b}, [x0], #16 + st1 {v2.16b}, [x19], #16 tbnz x9, #7, 7f - ld1 {v15.16b}, [x1], #16 + ld1 {v15.16b}, [x20], #16 eor v5.16b, v5.16b, v15.16b - st1 {v5.16b}, [x0], #16 + st1 {v5.16b}, [x19], #16 8: next_ctr v0 - cbnz x4, 99b + st1 {v0.16b}, [x24] + cbz x23, 0f + + cond_yield_neon 98b + b 99b -0: st1 {v0.16b}, [x5] - ldp x29, x30, [sp], #16 +0: frame_pop ret /* * If we are handling the tail of the input (x6 != NULL), return the * final keystream block back to the caller. 
*/ -1: cbz x6, 8b - st1 {v1.16b}, [x6] +1: cbz x25, 8b + st1 {v1.16b}, [x25] b 8b -2: cbz x6, 8b - st1 {v4.16b}, [x6] +2: cbz x25, 8b + st1 {v4.16b}, [x25] b 8b -3: cbz x6, 8b - st1 {v6.16b}, [x6] +3: cbz x25, 8b + st1 {v6.16b}, [x25] b 8b -4: cbz x6, 8b - st1 {v3.16b}, [x6] +4: cbz x25, 8b + st1 {v3.16b}, [x25] b 8b -5: cbz x6, 8b - st1 {v7.16b}, [x6] +5: cbz x25, 8b + st1 {v7.16b}, [x25] b 8b -6: cbz x6, 8b - st1 {v2.16b}, [x6] +6: cbz x25, 8b + st1 {v2.16b}, [x25] b 8b -7: cbz x6, 8b - st1 {v5.16b}, [x6] +7: cbz x25, 8b + st1 {v5.16b}, [x25] b 8b ENDPROC(aesbs_ctr_encrypt) diff --git a/arch/arm64/crypto/crc32-ce-core.S b/arch/arm64/crypto/crc32-ce-core.S index 16ed3c7ebd37..8061bf0f9c66 100644 --- a/arch/arm64/crypto/crc32-ce-core.S +++ b/arch/arm64/crypto/crc32-ce-core.S @@ -100,9 +100,10 @@ dCONSTANT .req d0 qCONSTANT .req q0 - BUF .req x0 - LEN .req x1 - CRC .req x2 + BUF .req x19 + LEN .req x20 + CRC .req x21 + CONST .req x22 vzr .req v9 @@ -123,7 +124,14 @@ ENTRY(crc32_pmull_le) ENTRY(crc32c_pmull_le) adr_l x3, .Lcrc32c_constants -0: bic LEN, LEN, #15 +0: frame_push 4, 64 + + mov BUF, x0 + mov LEN, x1 + mov CRC, x2 + mov CONST, x3 + + bic LEN, LEN, #15 ld1 {v1.16b-v4.16b}, [BUF], #0x40 movi vzr.16b, #0 fmov dCONSTANT, CRC @@ -132,7 +140,7 @@ ENTRY(crc32c_pmull_le) cmp LEN, #0x40 b.lt less_64 - ldr qCONSTANT, [x3] + ldr qCONSTANT, [CONST] loop_64: /* 64 bytes Full cache line folding */ sub LEN, LEN, #0x40 @@ -162,10 +170,21 @@ loop_64: /* 64 bytes Full cache line folding */ eor v4.16b, v4.16b, v8.16b cmp LEN, #0x40 - b.ge loop_64 + b.lt less_64 + + if_will_cond_yield_neon + stp q1, q2, [sp, #.Lframe_local_offset] + stp q3, q4, [sp, #.Lframe_local_offset + 32] + do_cond_yield_neon + ldp q1, q2, [sp, #.Lframe_local_offset] + ldp q3, q4, [sp, #.Lframe_local_offset + 32] + ldr qCONSTANT, [CONST] + movi vzr.16b, #0 + endif_yield_neon + b loop_64 less_64: /* Folding cache line into 128bit */ - ldr qCONSTANT, [x3, #16] + ldr qCONSTANT, [CONST, #16] pmull2 v5.1q, v1.2d, vCONSTANT.2d pmull v1.1q, v1.1d, vCONSTANT.1d @@ -204,8 +223,8 @@ fold_64: eor v1.16b, v1.16b, v2.16b /* final 32-bit fold */ - ldr dCONSTANT, [x3, #32] - ldr d3, [x3, #40] + ldr dCONSTANT, [CONST, #32] + ldr d3, [CONST, #40] ext v2.16b, v1.16b, vzr.16b, #4 and v1.16b, v1.16b, v3.16b @@ -213,7 +232,7 @@ fold_64: eor v1.16b, v1.16b, v2.16b /* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */ - ldr qCONSTANT, [x3, #48] + ldr qCONSTANT, [CONST, #48] and v2.16b, v1.16b, v3.16b ext v2.16b, vzr.16b, v2.16b, #8 @@ -223,6 +242,7 @@ fold_64: eor v1.16b, v1.16b, v2.16b mov w0, v1.s[1] + frame_pop ret ENDPROC(crc32_pmull_le) ENDPROC(crc32c_pmull_le) diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S index f179c01bd55c..663ea71cdb38 100644 --- a/arch/arm64/crypto/crct10dif-ce-core.S +++ b/arch/arm64/crypto/crct10dif-ce-core.S @@ -74,13 +74,19 @@ .text .cpu generic+crypto - arg1_low32 .req w0 - arg2 .req x1 - arg3 .req x2 + arg1_low32 .req w19 + arg2 .req x20 + arg3 .req x21 vzr .req v13 ENTRY(crc_t10dif_pmull) + frame_push 3, 128 + + mov arg1_low32, w0 + mov arg2, x1 + mov arg3, x2 + movi vzr.16b, #0 // init zero register // adjust the 16-bit initial_crc value, scale it to 32 bits @@ -175,8 +181,25 @@ CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 ) subs arg3, arg3, #128 // check if there is another 64B in the buffer to be able to fold - b.ge _fold_64_B_loop + b.lt _fold_64_B_end + + if_will_cond_yield_neon + stp q0, q1, [sp, #.Lframe_local_offset] + stp q2, q3, [sp, 
#.Lframe_local_offset + 32] + stp q4, q5, [sp, #.Lframe_local_offset + 64] + stp q6, q7, [sp, #.Lframe_local_offset + 96] + do_cond_yield_neon + ldp q0, q1, [sp, #.Lframe_local_offset] + ldp q2, q3, [sp, #.Lframe_local_offset + 32] + ldp q4, q5, [sp, #.Lframe_local_offset + 64] + ldp q6, q7, [sp, #.Lframe_local_offset + 96] + ldr_l q10, rk3, x8 + movi vzr.16b, #0 // init zero register + endif_yield_neon + + b _fold_64_B_loop +_fold_64_B_end: // at this point, the buffer pointer is pointing at the last y Bytes // of the buffer the 64B of folded data is in 4 of the vector // registers: v0, v1, v2, v3 @@ -304,6 +327,7 @@ _barrett: _cleanup: // scale the result back to 16 bits lsr x0, x0, #16 + frame_pop ret _less_than_128: diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S index 11ebf1ae248a..dcffb9e77589 100644 --- a/arch/arm64/crypto/ghash-ce-core.S +++ b/arch/arm64/crypto/ghash-ce-core.S @@ -213,22 +213,31 @@ .endm .macro __pmull_ghash, pn - ld1 {SHASH.2d}, [x3] - ld1 {XL.2d}, [x1] + frame_push 5 + + mov x19, x0 + mov x20, x1 + mov x21, x2 + mov x22, x3 + mov x23, x4 + +0: ld1 {SHASH.2d}, [x22] + ld1 {XL.2d}, [x20] ext SHASH2.16b, SHASH.16b, SHASH.16b, #8 eor SHASH2.16b, SHASH2.16b, SHASH.16b __pmull_pre_\pn /* do the head block first, if supplied */ - cbz x4, 0f - ld1 {T1.2d}, [x4] - b 1f + cbz x23, 1f + ld1 {T1.2d}, [x23] + mov x23, xzr + b 2f -0: ld1 {T1.2d}, [x2], #16 - sub w0, w0, #1 +1: ld1 {T1.2d}, [x21], #16 + sub w19, w19, #1 -1: /* multiply XL by SHASH in GF(2^128) */ +2: /* multiply XL by SHASH in GF(2^128) */ CPU_LE( rev64 T1.16b, T1.16b ) ext T2.16b, XL.16b, XL.16b, #8 @@ -250,9 +259,18 @@ CPU_LE( rev64 T1.16b, T1.16b ) eor T2.16b, T2.16b, XH.16b eor XL.16b, XL.16b, T2.16b - cbnz w0, 0b + cbz w19, 3f + + if_will_cond_yield_neon + st1 {XL.2d}, [x20] + do_cond_yield_neon + b 0b + endif_yield_neon + + b 1b - st1 {XL.2d}, [x1] +3: st1 {XL.2d}, [x20] + frame_pop ret .endm @@ -304,38 +322,55 @@ ENDPROC(pmull_ghash_update_p8) .endm .macro pmull_gcm_do_crypt, enc - ld1 {SHASH.2d}, [x4] - ld1 {XL.2d}, [x1] - ldr x8, [x5, #8] // load lower counter + frame_push 10 + + mov x19, x0 + mov x20, x1 + mov x21, x2 + mov x22, x3 + mov x23, x4 + mov x24, x5 + mov x25, x6 + mov x26, x7 + .if \enc == 1 + ldr x27, [sp, #96] // first stacked arg + .endif + + ldr x28, [x24, #8] // load lower counter +CPU_LE( rev x28, x28 ) + +0: mov x0, x25 + load_round_keys w26, x0 + ld1 {SHASH.2d}, [x23] + ld1 {XL.2d}, [x20] movi MASK.16b, #0xe1 ext SHASH2.16b, SHASH.16b, SHASH.16b, #8 -CPU_LE( rev x8, x8 ) shl MASK.2d, MASK.2d, #57 eor SHASH2.16b, SHASH2.16b, SHASH.16b .if \enc == 1 - ld1 {KS.16b}, [x7] + ld1 {KS.16b}, [x27] .endif -0: ld1 {CTR.8b}, [x5] // load upper counter - ld1 {INP.16b}, [x3], #16 - rev x9, x8 - add x8, x8, #1 - sub w0, w0, #1 +1: ld1 {CTR.8b}, [x24] // load upper counter + ld1 {INP.16b}, [x22], #16 + rev x9, x28 + add x28, x28, #1 + sub w19, w19, #1 ins CTR.d[1], x9 // set lower counter .if \enc == 1 eor INP.16b, INP.16b, KS.16b // encrypt input - st1 {INP.16b}, [x2], #16 + st1 {INP.16b}, [x21], #16 .endif rev64 T1.16b, INP.16b - cmp w6, #12 - b.ge 2f // AES-192/256? + cmp w26, #12 + b.ge 4f // AES-192/256? 
-1: enc_round CTR, v21 +2: enc_round CTR, v21 ext T2.16b, XL.16b, XL.16b, #8 ext IN1.16b, T1.16b, T1.16b, #8 @@ -390,27 +425,39 @@ CPU_LE( rev x8, x8 ) .if \enc == 0 eor INP.16b, INP.16b, KS.16b - st1 {INP.16b}, [x2], #16 + st1 {INP.16b}, [x21], #16 .endif - cbnz w0, 0b + cbz w19, 3f -CPU_LE( rev x8, x8 ) - st1 {XL.2d}, [x1] - str x8, [x5, #8] // store lower counter + if_will_cond_yield_neon + st1 {XL.2d}, [x20] + .if \enc == 1 + st1 {KS.16b}, [x27] + .endif + do_cond_yield_neon + b 0b + endif_yield_neon + b 1b + +3: st1 {XL.2d}, [x20] .if \enc == 1 - st1 {KS.16b}, [x7] + st1 {KS.16b}, [x27] .endif +CPU_LE( rev x28, x28 ) + str x28, [x24, #8] // store lower counter + + frame_pop ret -2: b.eq 3f // AES-192? +4: b.eq 5f // AES-192? enc_round CTR, v17 enc_round CTR, v18 -3: enc_round CTR, v19 +5: enc_round CTR, v19 enc_round CTR, v20 - b 1b + b 2b .endm /* diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index cfc9c92814fd..7cf0b1aa6ea8 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -63,11 +63,12 @@ static void (*pmull_ghash_update)(int blocks, u64 dg[], const char *src, asmlinkage void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[], const u8 src[], struct ghash_key const *k, - u8 ctr[], int rounds, u8 ks[]); + u8 ctr[], u32 const rk[], int rounds, + u8 ks[]); asmlinkage void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[], const u8 src[], struct ghash_key const *k, - u8 ctr[], int rounds); + u8 ctr[], u32 const rk[], int rounds); asmlinkage void pmull_gcm_encrypt_block(u8 dst[], u8 const src[], u32 const rk[], int rounds); @@ -368,26 +369,29 @@ static int gcm_encrypt(struct aead_request *req) pmull_gcm_encrypt_block(ks, iv, NULL, num_rounds(&ctx->aes_key)); put_unaligned_be32(3, iv + GCM_IV_SIZE); + kernel_neon_end(); - err = skcipher_walk_aead_encrypt(&walk, req, true); + err = skcipher_walk_aead_encrypt(&walk, req, false); while (walk.nbytes >= AES_BLOCK_SIZE) { int blocks = walk.nbytes / AES_BLOCK_SIZE; + kernel_neon_begin(); pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr, walk.src.virt.addr, &ctx->ghash_key, - iv, num_rounds(&ctx->aes_key), ks); + iv, ctx->aes_key.key_enc, + num_rounds(&ctx->aes_key), ks); + kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); } - kernel_neon_end(); } else { __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, num_rounds(&ctx->aes_key)); put_unaligned_be32(2, iv + GCM_IV_SIZE); - err = skcipher_walk_aead_encrypt(&walk, req, true); + err = skcipher_walk_aead_encrypt(&walk, req, false); while (walk.nbytes >= AES_BLOCK_SIZE) { int blocks = walk.nbytes / AES_BLOCK_SIZE; @@ -467,15 +471,19 @@ static int gcm_decrypt(struct aead_request *req) pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, num_rounds(&ctx->aes_key)); put_unaligned_be32(2, iv + GCM_IV_SIZE); + kernel_neon_end(); - err = skcipher_walk_aead_decrypt(&walk, req, true); + err = skcipher_walk_aead_decrypt(&walk, req, false); while (walk.nbytes >= AES_BLOCK_SIZE) { int blocks = walk.nbytes / AES_BLOCK_SIZE; + kernel_neon_begin(); pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr, walk.src.virt.addr, &ctx->ghash_key, - iv, num_rounds(&ctx->aes_key)); + iv, ctx->aes_key.key_enc, + num_rounds(&ctx->aes_key)); + kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); @@ -483,14 +491,12 @@ static int gcm_decrypt(struct aead_request *req) if (walk.nbytes) pmull_gcm_encrypt_block(iv, iv, NULL, num_rounds(&ctx->aes_key)); - - kernel_neon_end(); } else { 
__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, num_rounds(&ctx->aes_key)); put_unaligned_be32(2, iv + GCM_IV_SIZE); - err = skcipher_walk_aead_decrypt(&walk, req, true); + err = skcipher_walk_aead_decrypt(&walk, req, false); while (walk.nbytes >= AES_BLOCK_SIZE) { int blocks = walk.nbytes / AES_BLOCK_SIZE; diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S index 46049850727d..78eb35fb5056 100644 --- a/arch/arm64/crypto/sha1-ce-core.S +++ b/arch/arm64/crypto/sha1-ce-core.S @@ -69,30 +69,36 @@ * int blocks) */ ENTRY(sha1_ce_transform) + frame_push 3 + + mov x19, x0 + mov x20, x1 + mov x21, x2 + /* load round constants */ - loadrc k0.4s, 0x5a827999, w6 +0: loadrc k0.4s, 0x5a827999, w6 loadrc k1.4s, 0x6ed9eba1, w6 loadrc k2.4s, 0x8f1bbcdc, w6 loadrc k3.4s, 0xca62c1d6, w6 /* load state */ - ld1 {dgav.4s}, [x0] - ldr dgb, [x0, #16] + ld1 {dgav.4s}, [x19] + ldr dgb, [x19, #16] /* load sha1_ce_state::finalize */ ldr_l w4, sha1_ce_offsetof_finalize, x4 - ldr w4, [x0, x4] + ldr w4, [x19, x4] /* load input */ -0: ld1 {v8.4s-v11.4s}, [x1], #64 - sub w2, w2, #1 +1: ld1 {v8.4s-v11.4s}, [x20], #64 + sub w21, w21, #1 CPU_LE( rev32 v8.16b, v8.16b ) CPU_LE( rev32 v9.16b, v9.16b ) CPU_LE( rev32 v10.16b, v10.16b ) CPU_LE( rev32 v11.16b, v11.16b ) -1: add t0.4s, v8.4s, k0.4s +2: add t0.4s, v8.4s, k0.4s mov dg0v.16b, dgav.16b add_update c, ev, k0, 8, 9, 10, 11, dgb @@ -123,16 +129,25 @@ CPU_LE( rev32 v11.16b, v11.16b ) add dgbv.2s, dgbv.2s, dg1v.2s add dgav.4s, dgav.4s, dg0v.4s - cbnz w2, 0b + cbz w21, 3f + + if_will_cond_yield_neon + st1 {dgav.4s}, [x19] + str dgb, [x19, #16] + do_cond_yield_neon + b 0b + endif_yield_neon + + b 1b /* * Final block: add padding and total bit count. * Skip if the input size was not a round multiple of the block size, * the padding is handled by the C code in that case. */ - cbz x4, 3f +3: cbz x4, 4f ldr_l w4, sha1_ce_offsetof_count, x4 - ldr x4, [x0, x4] + ldr x4, [x19, x4] movi v9.2d, #0 mov x8, #0x80000000 movi v10.2d, #0 @@ -141,10 +156,11 @@ CPU_LE( rev32 v11.16b, v11.16b ) mov x4, #0 mov v11.d[0], xzr mov v11.d[1], x7 - b 1b + b 2b /* store new state */ -3: st1 {dgav.4s}, [x0] - str dgb, [x0, #16] +4: st1 {dgav.4s}, [x19] + str dgb, [x19, #16] + frame_pop ret ENDPROC(sha1_ce_transform) diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S index 4c3c89b812ce..cd8b36412469 100644 --- a/arch/arm64/crypto/sha2-ce-core.S +++ b/arch/arm64/crypto/sha2-ce-core.S @@ -79,30 +79,36 @@ */ .text ENTRY(sha2_ce_transform) + frame_push 3 + + mov x19, x0 + mov x20, x1 + mov x21, x2 + /* load round constants */ - adr_l x8, .Lsha2_rcon +0: adr_l x8, .Lsha2_rcon ld1 { v0.4s- v3.4s}, [x8], #64 ld1 { v4.4s- v7.4s}, [x8], #64 ld1 { v8.4s-v11.4s}, [x8], #64 ld1 {v12.4s-v15.4s}, [x8] /* load state */ - ld1 {dgav.4s, dgbv.4s}, [x0] + ld1 {dgav.4s, dgbv.4s}, [x19] /* load sha256_ce_state::finalize */ ldr_l w4, sha256_ce_offsetof_finalize, x4 - ldr w4, [x0, x4] + ldr w4, [x19, x4] /* load input */ -0: ld1 {v16.4s-v19.4s}, [x1], #64 - sub w2, w2, #1 +1: ld1 {v16.4s-v19.4s}, [x20], #64 + sub w21, w21, #1 CPU_LE( rev32 v16.16b, v16.16b ) CPU_LE( rev32 v17.16b, v17.16b ) CPU_LE( rev32 v18.16b, v18.16b ) CPU_LE( rev32 v19.16b, v19.16b ) -1: add t0.4s, v16.4s, v0.4s +2: add t0.4s, v16.4s, v0.4s mov dg0v.16b, dgav.16b mov dg1v.16b, dgbv.16b @@ -131,16 +137,24 @@ CPU_LE( rev32 v19.16b, v19.16b ) add dgbv.4s, dgbv.4s, dg1v.4s /* handled all input blocks? 
*/ - cbnz w2, 0b + cbz w21, 3f + + if_will_cond_yield_neon + st1 {dgav.4s, dgbv.4s}, [x19] + do_cond_yield_neon + b 0b + endif_yield_neon + + b 1b /* * Final block: add padding and total bit count. * Skip if the input size was not a round multiple of the block size, * the padding is handled by the C code in that case. */ - cbz x4, 3f +3: cbz x4, 4f ldr_l w4, sha256_ce_offsetof_count, x4 - ldr x4, [x0, x4] + ldr x4, [x19, x4] movi v17.2d, #0 mov x8, #0x80000000 movi v18.2d, #0 @@ -149,9 +163,10 @@ CPU_LE( rev32 v19.16b, v19.16b ) mov x4, #0 mov v19.d[0], xzr mov v19.d[1], x7 - b 1b + b 2b /* store new state */ -3: st1 {dgav.4s, dgbv.4s}, [x0] +4: st1 {dgav.4s, dgbv.4s}, [x19] + frame_pop ret ENDPROC(sha2_ce_transform) diff --git a/arch/arm64/crypto/sha256-core.S_shipped b/arch/arm64/crypto/sha256-core.S_shipped index 3ce82cc860bc..7c7ce2e3bad6 100644 --- a/arch/arm64/crypto/sha256-core.S_shipped +++ b/arch/arm64/crypto/sha256-core.S_shipped @@ -1,3 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 + +// This code is taken from the OpenSSL project but the author (Andy Polyakov) +// has relicensed it under the GPLv2. Therefore this program is free software; +// you can redistribute it and/or modify it under the terms of the GNU General +// Public License version 2 as published by the Free Software Foundation. +// +// The original headers, including the original license headers, are +// included below for completeness. + // Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use @@ -10,8 +20,6 @@ // project. The module is, however, dual licensed under OpenSSL and // CRYPTOGAMS licenses depending on where you obtain it. For further // details see http://www.openssl.org/~appro/cryptogams/. -// -// Permission to use under GPLv2 terms is granted. // ==================================================================== // // SHA256/512 for ARMv8. 
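All of the *-core.S hunks in this series (aes-neonbs, crc32-ce, crct10dif-ce, ghash-ce, sha1-ce and sha2-ce above, sha3-ce and sha512-ce below) apply the same transformation: function arguments are moved out of the caller-save registers x0-x7 into callee-saved registers (x19 and up) under a frame_push/frame_pop pair, so the loop state survives a call out of the function, and a yield check is inserted once per block-processing iteration. A rough C-level sketch of the control flow that the if_will_cond_yield_neon / do_cond_yield_neon / endif_yield_neon markers implement follows; should_yield(), save_state(), reload_state() and process_block() are placeholders for the algorithm-specific checks and st1/ld1 spill sequences, not real kernel symbols:

    /*
     * Sketch only: one iteration of the asm loop becomes process_block();
     * the reschedule itself happens when kernel_neon_end() re-enables
     * preemption with TIF_NEED_RESCHED pending.
     */
    static void crypt_blocks(struct state *st, const u8 *in, int blocks)
    {
            while (blocks--) {
                    process_block(st, in);          /* the 99:/1: loop body */
                    in += BLOCK_SIZE;
                    if (blocks && should_yield()) { /* if_will_cond_yield_neon */
                            save_state(st);         /* spill live NEON state */
                            kernel_neon_end();      /* do_cond_yield_neon */
                            kernel_neon_begin();    /* reclaim the NEON unit */
                            reload_state(st);       /* endif_yield_neon path */
                    }
            }
    }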
diff --git a/arch/arm64/crypto/sha3-ce-core.S b/arch/arm64/crypto/sha3-ce-core.S index 332ad7530690..a7d587fa54f6 100644 --- a/arch/arm64/crypto/sha3-ce-core.S +++ b/arch/arm64/crypto/sha3-ce-core.S @@ -41,9 +41,16 @@ */ .text ENTRY(sha3_ce_transform) - /* load state */ - add x8, x0, #32 - ld1 { v0.1d- v3.1d}, [x0] + frame_push 4 + + mov x19, x0 + mov x20, x1 + mov x21, x2 + mov x22, x3 + +0: /* load state */ + add x8, x19, #32 + ld1 { v0.1d- v3.1d}, [x19] ld1 { v4.1d- v7.1d}, [x8], #32 ld1 { v8.1d-v11.1d}, [x8], #32 ld1 {v12.1d-v15.1d}, [x8], #32 @@ -51,13 +58,13 @@ ENTRY(sha3_ce_transform) ld1 {v20.1d-v23.1d}, [x8], #32 ld1 {v24.1d}, [x8] -0: sub w2, w2, #1 +1: sub w21, w21, #1 mov w8, #24 adr_l x9, .Lsha3_rcon /* load input */ - ld1 {v25.8b-v28.8b}, [x1], #32 - ld1 {v29.8b-v31.8b}, [x1], #24 + ld1 {v25.8b-v28.8b}, [x20], #32 + ld1 {v29.8b-v31.8b}, [x20], #24 eor v0.8b, v0.8b, v25.8b eor v1.8b, v1.8b, v26.8b eor v2.8b, v2.8b, v27.8b @@ -66,10 +73,10 @@ ENTRY(sha3_ce_transform) eor v5.8b, v5.8b, v30.8b eor v6.8b, v6.8b, v31.8b - tbnz x3, #6, 2f // SHA3-512 + tbnz x22, #6, 3f // SHA3-512 - ld1 {v25.8b-v28.8b}, [x1], #32 - ld1 {v29.8b-v30.8b}, [x1], #16 + ld1 {v25.8b-v28.8b}, [x20], #32 + ld1 {v29.8b-v30.8b}, [x20], #16 eor v7.8b, v7.8b, v25.8b eor v8.8b, v8.8b, v26.8b eor v9.8b, v9.8b, v27.8b @@ -77,34 +84,34 @@ ENTRY(sha3_ce_transform) eor v11.8b, v11.8b, v29.8b eor v12.8b, v12.8b, v30.8b - tbnz x3, #4, 1f // SHA3-384 or SHA3-224 + tbnz x22, #4, 2f // SHA3-384 or SHA3-224 // SHA3-256 - ld1 {v25.8b-v28.8b}, [x1], #32 + ld1 {v25.8b-v28.8b}, [x20], #32 eor v13.8b, v13.8b, v25.8b eor v14.8b, v14.8b, v26.8b eor v15.8b, v15.8b, v27.8b eor v16.8b, v16.8b, v28.8b - b 3f + b 4f -1: tbz x3, #2, 3f // bit 2 cleared? SHA-384 +2: tbz x22, #2, 4f // bit 2 cleared? SHA-384 // SHA3-224 - ld1 {v25.8b-v28.8b}, [x1], #32 - ld1 {v29.8b}, [x1], #8 + ld1 {v25.8b-v28.8b}, [x20], #32 + ld1 {v29.8b}, [x20], #8 eor v13.8b, v13.8b, v25.8b eor v14.8b, v14.8b, v26.8b eor v15.8b, v15.8b, v27.8b eor v16.8b, v16.8b, v28.8b eor v17.8b, v17.8b, v29.8b - b 3f + b 4f // SHA3-512 -2: ld1 {v25.8b-v26.8b}, [x1], #16 +3: ld1 {v25.8b-v26.8b}, [x20], #16 eor v7.8b, v7.8b, v25.8b eor v8.8b, v8.8b, v26.8b -3: sub w8, w8, #1 +4: sub w8, w8, #1 eor3 v29.16b, v4.16b, v9.16b, v14.16b eor3 v26.16b, v1.16b, v6.16b, v11.16b @@ -183,17 +190,33 @@ ENTRY(sha3_ce_transform) eor v0.16b, v0.16b, v31.16b - cbnz w8, 3b - cbnz w2, 0b + cbnz w8, 4b + cbz w21, 5f + + if_will_cond_yield_neon + add x8, x19, #32 + st1 { v0.1d- v3.1d}, [x19] + st1 { v4.1d- v7.1d}, [x8], #32 + st1 { v8.1d-v11.1d}, [x8], #32 + st1 {v12.1d-v15.1d}, [x8], #32 + st1 {v16.1d-v19.1d}, [x8], #32 + st1 {v20.1d-v23.1d}, [x8], #32 + st1 {v24.1d}, [x8] + do_cond_yield_neon + b 0b + endif_yield_neon + + b 1b /* save state */ - st1 { v0.1d- v3.1d}, [x0], #32 - st1 { v4.1d- v7.1d}, [x0], #32 - st1 { v8.1d-v11.1d}, [x0], #32 - st1 {v12.1d-v15.1d}, [x0], #32 - st1 {v16.1d-v19.1d}, [x0], #32 - st1 {v20.1d-v23.1d}, [x0], #32 - st1 {v24.1d}, [x0] +5: st1 { v0.1d- v3.1d}, [x19], #32 + st1 { v4.1d- v7.1d}, [x19], #32 + st1 { v8.1d-v11.1d}, [x19], #32 + st1 {v12.1d-v15.1d}, [x19], #32 + st1 {v16.1d-v19.1d}, [x19], #32 + st1 {v20.1d-v23.1d}, [x19], #32 + st1 {v24.1d}, [x19] + frame_pop ret ENDPROC(sha3_ce_transform) diff --git a/arch/arm64/crypto/sha512-armv8.pl b/arch/arm64/crypto/sha512-armv8.pl index c55efb308544..2d8655d5b1af 100644 --- a/arch/arm64/crypto/sha512-armv8.pl +++ b/arch/arm64/crypto/sha512-armv8.pl @@ -1,4 +1,14 @@ #! 
/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 + +# This code is taken from the OpenSSL project but the author (Andy Polyakov) +# has relicensed it under the GPLv2. Therefore this program is free software; +# you can redistribute it and/or modify it under the terms of the GNU General +# Public License version 2 as published by the Free Software Foundation. +# +# The original headers, including the original license headers, are +# included below for completeness. + # Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. # # Licensed under the OpenSSL license (the "License"). You may not use @@ -11,8 +21,6 @@ # project. The module is, however, dual licensed under OpenSSL and # CRYPTOGAMS licenses depending on where you obtain it. For further # details see http://www.openssl.org/~appro/cryptogams/. -# -# Permission to use under GPLv2 terms is granted. # ==================================================================== # # SHA256/512 for ARMv8. diff --git a/arch/arm64/crypto/sha512-ce-core.S b/arch/arm64/crypto/sha512-ce-core.S index 7f3bca5c59a2..ce65e3abe4f2 100644 --- a/arch/arm64/crypto/sha512-ce-core.S +++ b/arch/arm64/crypto/sha512-ce-core.S @@ -107,17 +107,23 @@ */ .text ENTRY(sha512_ce_transform) + frame_push 3 + + mov x19, x0 + mov x20, x1 + mov x21, x2 + /* load state */ - ld1 {v8.2d-v11.2d}, [x0] +0: ld1 {v8.2d-v11.2d}, [x19] /* load first 4 round constants */ adr_l x3, .Lsha512_rcon ld1 {v20.2d-v23.2d}, [x3], #64 /* load input */ -0: ld1 {v12.2d-v15.2d}, [x1], #64 - ld1 {v16.2d-v19.2d}, [x1], #64 - sub w2, w2, #1 +1: ld1 {v12.2d-v15.2d}, [x20], #64 + ld1 {v16.2d-v19.2d}, [x20], #64 + sub w21, w21, #1 CPU_LE( rev64 v12.16b, v12.16b ) CPU_LE( rev64 v13.16b, v13.16b ) @@ -196,9 +202,18 @@ CPU_LE( rev64 v19.16b, v19.16b ) add v11.2d, v11.2d, v3.2d /* handled all input blocks? */ - cbnz w2, 0b + cbz w21, 3f + + if_will_cond_yield_neon + st1 {v8.2d-v11.2d}, [x19] + do_cond_yield_neon + b 0b + endif_yield_neon + + b 1b /* store new state */ -3: st1 {v8.2d-v11.2d}, [x0] +3: st1 {v8.2d-v11.2d}, [x19] + frame_pop ret ENDPROC(sha512_ce_transform) diff --git a/arch/arm64/crypto/sha512-core.S_shipped b/arch/arm64/crypto/sha512-core.S_shipped index bd0f59f06c9d..e063a6106720 100644 --- a/arch/arm64/crypto/sha512-core.S_shipped +++ b/arch/arm64/crypto/sha512-core.S_shipped @@ -1,3 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 + +// This code is taken from the OpenSSL project but the author (Andy Polyakov) +// has relicensed it under the GPLv2. Therefore this program is free software; +// you can redistribute it and/or modify it under the terms of the GNU General +// Public License version 2 as published by the Free Software Foundation. +// +// The original headers, including the original license headers, are +// included below for completeness. + // Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use @@ -10,8 +20,6 @@ // project. The module is, however, dual licensed under OpenSSL and // CRYPTOGAMS licenses depending on where you obtain it. For further // details see http://www.openssl.org/~appro/cryptogams/. -// -// Permission to use under GPLv2 terms is granted. // ==================================================================== // // SHA256/512 for ARMv8. 
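The new SM4 driver below emits the SM4E instruction by hand with .inst, since the assembler may not understand the SM4 extension: the .irp block binds the symbols .Lv0.4s through .Lv8.4s to the register numbers 0-8, and the sm4e macro ORs those numbers into a fixed opcode. The same computation in C, for illustration only (sm4e_encoding() is not a kernel function):

    /*
     * Encoding of "sm4e v<rd>.4s, v<rn>.4s": base opcode 0xcec08400 with
     * Rd in bits [4:0] and Rn in bits [9:5], exactly what the .inst
     * directive in sm4-ce-core.S computes.
     */
    static inline unsigned int sm4e_encoding(unsigned int rd, unsigned int rn)
    {
            return 0xcec08400U | rd | (rn << 5);
    }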
diff --git a/arch/arm64/crypto/sm4-ce-core.S b/arch/arm64/crypto/sm4-ce-core.S new file mode 100644 index 000000000000..af3bfbc3f4d4 --- /dev/null +++ b/arch/arm64/crypto/sm4-ce-core.S @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/linkage.h> +#include <asm/assembler.h> + + .irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8 + .set .Lv\b\().4s, \b + .endr + + .macro sm4e, rd, rn + .inst 0xcec08400 | .L\rd | (.L\rn << 5) + .endm + + /* + * void sm4_ce_do_crypt(const u32 *rk, u32 *out, const u32 *in); + */ + .text +ENTRY(sm4_ce_do_crypt) + ld1 {v8.4s}, [x2] + ld1 {v0.4s-v3.4s}, [x0], #64 +CPU_LE( rev32 v8.16b, v8.16b ) + ld1 {v4.4s-v7.4s}, [x0] + sm4e v8.4s, v0.4s + sm4e v8.4s, v1.4s + sm4e v8.4s, v2.4s + sm4e v8.4s, v3.4s + sm4e v8.4s, v4.4s + sm4e v8.4s, v5.4s + sm4e v8.4s, v6.4s + sm4e v8.4s, v7.4s + rev64 v8.4s, v8.4s + ext v8.16b, v8.16b, v8.16b, #8 +CPU_LE( rev32 v8.16b, v8.16b ) + st1 {v8.4s}, [x1] + ret +ENDPROC(sm4_ce_do_crypt) diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c new file mode 100644 index 000000000000..b7fb5274b250 --- /dev/null +++ b/arch/arm64/crypto/sm4-ce-glue.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <asm/neon.h> +#include <asm/simd.h> +#include <crypto/sm4.h> +#include <linux/module.h> +#include <linux/cpufeature.h> +#include <linux/crypto.h> +#include <linux/types.h> + +MODULE_ALIAS_CRYPTO("sm4"); +MODULE_ALIAS_CRYPTO("sm4-ce"); +MODULE_DESCRIPTION("SM4 symmetric cipher using ARMv8 Crypto Extensions"); +MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); +MODULE_LICENSE("GPL v2"); + +asmlinkage void sm4_ce_do_crypt(const u32 *rk, void *out, const void *in); + +static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!may_use_simd()) { + crypto_sm4_encrypt(tfm, out, in); + } else { + kernel_neon_begin(); + sm4_ce_do_crypt(ctx->rkey_enc, out, in); + kernel_neon_end(); + } +} + +static void sm4_ce_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!may_use_simd()) { + crypto_sm4_decrypt(tfm, out, in); + } else { + kernel_neon_begin(); + sm4_ce_do_crypt(ctx->rkey_dec, out, in); + kernel_neon_end(); + } +} + +static struct crypto_alg sm4_ce_alg = { + .cra_name = "sm4", + .cra_driver_name = "sm4-ce", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto_sm4_ctx), + .cra_module = THIS_MODULE, + .cra_u.cipher = { + .cia_min_keysize = SM4_KEY_SIZE, + .cia_max_keysize = SM4_KEY_SIZE, + .cia_setkey = crypto_sm4_set_key, + .cia_encrypt = sm4_ce_encrypt, + .cia_decrypt = sm4_ce_decrypt + } +}; + +static int __init sm4_ce_mod_init(void) +{ + return crypto_register_alg(&sm4_ce_alg); +} + +static void __exit sm4_ce_mod_fini(void) +{ + crypto_unregister_alg(&sm4_ce_alg); +} + +module_cpu_feature_match(SM4, sm4_ce_mod_init); +module_exit(sm4_ce_mod_fini); diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index 32f465a80e4e..0db62a4cbce2 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -86,6 +86,10 @@ static inline bool acpi_has_cpu_in_madt(void) } struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu); +static inline u32 get_acpi_id_for_cpu(unsigned int cpu) +{ + return acpi_cpu_get_madt_gicc(cpu)->uid; +} static inline void arch_fix_phys_package_id(int num, u32 slot) { } void __init 
acpi_init_cpus(void); diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 9bbffc7a301f..5df5cfe1c143 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -33,7 +33,7 @@ #define ICACHE_POLICY_VIPT 2 #define ICACHE_POLICY_PIPT 3 -#define L1_CACHE_SHIFT 7 +#define L1_CACHE_SHIFT (6) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) /* @@ -43,7 +43,7 @@ * cache before the transfer is done, causing old data to be seen by * the CPU. */ -#define ARCH_DMA_MINALIGN L1_CACHE_BYTES +#define ARCH_DMA_MINALIGN (128) #ifndef __ASSEMBLY__ @@ -77,7 +77,7 @@ static inline u32 cache_type_cwg(void) static inline int cache_line_size(void) { u32 cwg = cache_type_cwg(); - return cwg ? 4 << cwg : L1_CACHE_BYTES; + return cwg ? 4 << cwg : ARCH_DMA_MINALIGN; } #endif /* __ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index 4f5fd2a36e6e..3b0938281541 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -204,7 +204,9 @@ static inline void __cmpwait_case_##name(volatile void *ptr, \ unsigned long tmp; \ \ asm volatile( \ - " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \ + " sevl\n" \ + " wfe\n" \ + " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \ " eor %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \ " cbnz %" #w "[tmp], 1f\n" \ " wfe\n" \ diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index c00c62e1a4a3..1a037b94eba1 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -34,7 +34,6 @@ typedef u32 compat_size_t; typedef s32 compat_ssize_t; -typedef s32 compat_time_t; typedef s32 compat_clock_t; typedef s32 compat_pid_t; typedef u16 __compat_uid_t; @@ -66,16 +65,6 @@ typedef u32 compat_ulong_t; typedef u64 compat_u64; typedef u32 compat_uptr_t; -struct compat_timespec { - compat_time_t tv_sec; - s32 tv_nsec; -}; - -struct compat_timeval { - compat_time_t tv_sec; - s32 tv_usec; -}; - struct compat_stat { #ifdef __AARCH64EB__ short st_dev; @@ -192,10 +181,10 @@ struct compat_ipc64_perm { struct compat_semid64_ds { struct compat_ipc64_perm sem_perm; - compat_time_t sem_otime; - compat_ulong_t __unused1; - compat_time_t sem_ctime; - compat_ulong_t __unused2; + compat_ulong_t sem_otime; + compat_ulong_t sem_otime_high; + compat_ulong_t sem_ctime; + compat_ulong_t sem_ctime_high; compat_ulong_t sem_nsems; compat_ulong_t __unused3; compat_ulong_t __unused4; @@ -203,12 +192,12 @@ struct compat_semid64_ds { struct compat_msqid64_ds { struct compat_ipc64_perm msg_perm; - compat_time_t msg_stime; - compat_ulong_t __unused1; - compat_time_t msg_rtime; - compat_ulong_t __unused2; - compat_time_t msg_ctime; - compat_ulong_t __unused3; + compat_ulong_t msg_stime; + compat_ulong_t msg_stime_high; + compat_ulong_t msg_rtime; + compat_ulong_t msg_rtime_high; + compat_ulong_t msg_ctime; + compat_ulong_t msg_ctime_high; compat_ulong_t msg_cbytes; compat_ulong_t msg_qnum; compat_ulong_t msg_qbytes; @@ -221,12 +210,12 @@ struct compat_msqid64_ds { struct compat_shmid64_ds { struct compat_ipc64_perm shm_perm; compat_size_t shm_segsz; - compat_time_t shm_atime; - compat_ulong_t __unused1; - compat_time_t shm_dtime; - compat_ulong_t __unused2; - compat_time_t shm_ctime; - compat_ulong_t __unused3; + compat_ulong_t shm_atime; + compat_ulong_t shm_atime_high; + compat_ulong_t shm_dtime; + compat_ulong_t shm_dtime_high; + compat_ulong_t shm_ctime; + compat_ulong_t shm_ctime_high; compat_pid_t shm_cpid; compat_pid_t shm_lpid; compat_ulong_t shm_nattch; diff --git 
a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index bc51b72fafd4..8a699c708fc9 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -48,7 +48,8 @@ #define ARM64_HAS_CACHE_IDC 27 #define ARM64_HAS_CACHE_DIC 28 #define ARM64_HW_DBM 29 +#define ARM64_SSBD 30 -#define ARM64_NCAPS 30 +#define ARM64_NCAPS 31 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 09b0f2a80c8f..55bc1f073bfb 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -537,6 +537,28 @@ static inline u64 read_zcr_features(void) return zcr; } +#define ARM64_SSBD_UNKNOWN -1 +#define ARM64_SSBD_FORCE_DISABLE 0 +#define ARM64_SSBD_KERNEL 1 +#define ARM64_SSBD_FORCE_ENABLE 2 +#define ARM64_SSBD_MITIGATED 3 + +static inline int arm64_get_ssbd_state(void) +{ +#ifdef CONFIG_ARM64_SSBD + extern int ssbd_state; + return ssbd_state; +#else + return ARM64_SSBD_UNKNOWN; +#endif +} + +#ifdef CONFIG_ARM64_SSBD +void arm64_set_ssbd_mitigation(bool state); +#else +static inline void arm64_set_ssbd_mitigation(bool state) {} +#endif + #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index fac1c4de7898..433b9554c6a1 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -121,6 +121,9 @@ #ifndef __ASSEMBLY__ +#include <linux/bug.h> +#include <asm/processor.h> /* for signal_minsigstksz, used by ARCH_DLINFO */ + typedef unsigned long elf_greg_t; #define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t)) @@ -148,6 +151,16 @@ typedef struct user_fpsimd_state elf_fpregset_t; do { \ NEW_AUX_ENT(AT_SYSINFO_EHDR, \ (elf_addr_t)current->mm->context.vdso); \ + \ + /* \ + * Should always be nonzero unless there's a kernel bug. 
\ + * If we haven't determined a sensible value to give to \ + * userspace, omit the entry: \ + */ \ + if (likely(signal_minsigstksz)) \ + NEW_AUX_ENT(AT_MINSIGSTKSZ, signal_minsigstksz); \ + else \ + NEW_AUX_ENT(AT_IGNORE, 0); \ } while (0) #define ARCH_HAS_SETUP_ADDITIONAL_PAGES diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h index e050d765ca9e..46843515d77b 100644 --- a/arch/arm64/include/asm/fpsimdmacros.h +++ b/arch/arm64/include/asm/fpsimdmacros.h @@ -207,12 +207,14 @@ str w\nxtmp, [\xpfpsr, #4] .endm -.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp +.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2 mrs_s x\nxtmp, SYS_ZCR_EL1 - bic x\nxtmp, x\nxtmp, ZCR_ELx_LEN_MASK - orr x\nxtmp, x\nxtmp, \xvqminus1 - msr_s SYS_ZCR_EL1, x\nxtmp // self-synchronising - + bic \xtmp2, x\nxtmp, ZCR_ELx_LEN_MASK + orr \xtmp2, \xtmp2, \xvqminus1 + cmp \xtmp2, x\nxtmp + b.eq 921f + msr_s SYS_ZCR_EL1, \xtmp2 // self-synchronising +921: _for n, 0, 31, _sve_ldr_v \n, \nxbase, \n - 34 _sve_ldr_p 0, \nxbase _sve_wrffr 0 diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index f6648a3e4152..951b2076a5e2 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -20,6 +20,9 @@ #include <asm/virt.h> +#define VCPU_WORKAROUND_2_FLAG_SHIFT 0 +#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT) + #define ARM_EXIT_WITH_SERROR_BIT 31 #define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT)) #define ARM_SERROR_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT)) @@ -71,14 +74,37 @@ extern u32 __kvm_get_mdcr_el2(void); extern u32 __init_stage2_translation(void); +/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */ +#define __hyp_this_cpu_ptr(sym) \ + ({ \ + void *__ptr = hyp_symbol_addr(sym); \ + __ptr += read_sysreg(tpidr_el2); \ + (typeof(&sym))__ptr; \ + }) + +#define __hyp_this_cpu_read(sym) \ + ({ \ + *__hyp_this_cpu_ptr(sym); \ + }) + #else /* __ASSEMBLY__ */ -.macro get_host_ctxt reg, tmp - adr_l \reg, kvm_host_cpu_state +.macro hyp_adr_this_cpu reg, sym, tmp + adr_l \reg, \sym mrs \tmp, tpidr_el2 add \reg, \reg, \tmp .endm +.macro hyp_ldr_this_cpu reg, sym, tmp + adr_l \reg, \sym + mrs \tmp, tpidr_el2 + ldr \reg, [\reg, \tmp] +.endm + +.macro get_host_ctxt reg, tmp + hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp +.endm + .macro get_vcpu_ptr vcpu, ctxt get_host_ctxt \ctxt, \vcpu ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 469de8acd06f..95d8a0e15b5f 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -216,6 +216,9 @@ struct kvm_vcpu_arch { /* Exception Information */ struct kvm_vcpu_fault_info fault; + /* State of various workarounds, see kvm_asm.h for bit assignment */ + u64 workaround_flags; + /* Guest debug state */ u64 debug_flags; @@ -452,6 +455,29 @@ static inline bool kvm_arm_harden_branch_predictor(void) return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR); } +#define KVM_SSBD_UNKNOWN -1 +#define KVM_SSBD_FORCE_DISABLE 0 +#define KVM_SSBD_KERNEL 1 +#define KVM_SSBD_FORCE_ENABLE 2 +#define KVM_SSBD_MITIGATED 3 + +static inline int kvm_arm_have_ssbd(void) +{ + switch (arm64_get_ssbd_state()) { + case ARM64_SSBD_FORCE_DISABLE: + return KVM_SSBD_FORCE_DISABLE; + case ARM64_SSBD_KERNEL: + return KVM_SSBD_KERNEL; + case ARM64_SSBD_FORCE_ENABLE: + return KVM_SSBD_FORCE_ENABLE; + case ARM64_SSBD_MITIGATED: + return 
KVM_SSBD_MITIGATED; + case ARM64_SSBD_UNKNOWN: + default: + return KVM_SSBD_UNKNOWN; + } +} + void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu); void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 6128992c2ded..fb9a7127bb75 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -72,7 +72,6 @@ #ifdef __ASSEMBLY__ #include <asm/alternative.h> -#include <asm/cpufeature.h> /* * Convert a kernel VA into a HYP VA. @@ -473,6 +472,30 @@ static inline int kvm_map_vectors(void) } #endif +#ifdef CONFIG_ARM64_SSBD +DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); + +static inline int hyp_map_aux_data(void) +{ + int cpu, err; + + for_each_possible_cpu(cpu) { + u64 *ptr; + + ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu); + err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP); + if (err) + return err; + } + return 0; +} +#else +static inline int hyp_map_aux_data(void) +{ + return 0; +} +#endif + #define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr) #endif /* __ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h index 8747f7c5e0e7..9e690686e8aa 100644 --- a/arch/arm64/include/asm/pci.h +++ b/arch/arm64/include/asm/pci.h @@ -18,11 +18,6 @@ #define pcibios_assign_all_busses() \ (pci_has_flag(PCI_REASSIGN_ALL_BUS)) -/* - * PCI address space differs from physical memory address space - */ -#define PCI_DMA_BUS_IS_PHYS (0) - #define ARCH_GENERIC_PCI_MMAP_RESOURCE 1 extern int isa_dma_bridge_buggy; diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 7c4c8f318ba9..9f82d6b53851 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -306,8 +306,6 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b) #define HPAGE_MASK (~(HPAGE_SIZE - 1)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) -#define __HAVE_ARCH_PTE_SPECIAL - static inline pte_t pgd_pte(pgd_t pgd) { return __pte(pgd_val(pgd)); diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 767598932549..65ab83e8926e 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -35,6 +35,8 @@ #ifdef __KERNEL__ #include <linux/build_bug.h> +#include <linux/cache.h> +#include <linux/init.h> #include <linux/stddef.h> #include <linux/string.h> @@ -244,6 +246,9 @@ void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused); void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused); void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused); +extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */ +extern void __init minsigstksz_setup(void); + /* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */ #define SVE_SET_VL(arg) sve_set_current_vl(arg) #define SVE_GET_VL() sve_get_current_vl() diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index ebdae15d665d..26c5bd7d88d8 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -122,11 +122,6 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock) static inline int arch_spin_is_locked(arch_spinlock_t *lock) { - /* - * Ensure prior spin_lock operations to other locks have completed - * on this CPU before we test whether "lock" is locked. 
- */ - smp_mb(); /* ^^^ */ return !arch_spin_value_unlocked(READ_ONCE(*lock)); } diff --git a/arch/arm64/include/asm/stat.h b/arch/arm64/include/asm/stat.h index 15e35598ac40..eab738019707 100644 --- a/arch/arm64/include/asm/stat.h +++ b/arch/arm64/include/asm/stat.h @@ -20,6 +20,7 @@ #ifdef CONFIG_COMPAT +#include <linux/compat_time.h> #include <asm/compat.h> /* diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 740aa03c5f0d..cbcf11b5e637 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -94,6 +94,7 @@ void arch_release_task_struct(struct task_struct *tsk); #define TIF_32BIT 22 /* 32bit process */ #define TIF_SVE 23 /* Scalable Vector Extension in use */ #define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */ +#define TIF_SSBD 25 /* Wants SSB mitigation */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index c4f2d50491eb..df48212f767b 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -7,14 +7,16 @@ struct cpu_topology { int thread_id; int core_id; - int cluster_id; + int package_id; + int llc_id; cpumask_t thread_sibling; cpumask_t core_sibling; + cpumask_t llc_siblings; }; extern struct cpu_topology cpu_topology[NR_CPUS]; -#define topology_physical_package_id(cpu) (cpu_topology[cpu].cluster_id) +#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id) #define topology_core_id(cpu) (cpu_topology[cpu].core_id) #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) #define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) diff --git a/arch/arm64/include/uapi/asm/auxvec.h b/arch/arm64/include/uapi/asm/auxvec.h index ec0a86d484e1..743c0b84fd30 100644 --- a/arch/arm64/include/uapi/asm/auxvec.h +++ b/arch/arm64/include/uapi/asm/auxvec.h @@ -19,7 +19,8 @@ /* vDSO location */ #define AT_SYSINFO_EHDR 33 +#define AT_MINSIGSTKSZ 51 /* stack needed for signal delivery */ -#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */ +#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */ #endif diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index bf825f38d206..0025f8691046 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -54,6 +54,7 @@ arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o +arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o obj-y += $(arm64-obj-y) vdso/ probes/ obj-m += $(arm64-obj-m) diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index 6e47fc3ab549..97d45d5151d4 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -13,6 +13,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/sysctl.h> +#include <linux/uaccess.h> #include <asm/cpufeature.h> #include <asm/insn.h> @@ -20,8 +21,6 @@ #include <asm/system_misc.h> #include <asm/traps.h> #include <asm/kprobes.h> -#include <linux/uaccess.h> -#include <asm/cpufeature.h> #define CREATE_TRACE_POINTS #include "trace-events-emulation.h" diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 5bdda651bd05..323aeb5f2fe6 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ 
-136,6 +136,7 @@ int main(void) #ifdef CONFIG_KVM_ARM_HOST DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1)); + DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags)); DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs)); DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs)); diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c index 380f2e2fbed5..0bf0a835122f 100644 --- a/arch/arm64/kernel/cacheinfo.c +++ b/arch/arm64/kernel/cacheinfo.c @@ -17,6 +17,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/acpi.h> #include <linux/cacheinfo.h> #include <linux/of.h> @@ -46,7 +47,7 @@ static void ci_leaf_init(struct cacheinfo *this_leaf, static int __init_cache_level(unsigned int cpu) { - unsigned int ctype, level, leaves, of_level; + unsigned int ctype, level, leaves, fw_level; struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) { @@ -59,15 +60,19 @@ static int __init_cache_level(unsigned int cpu) leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1; } - of_level = of_find_last_cache_level(cpu); - if (level < of_level) { + if (acpi_disabled) + fw_level = of_find_last_cache_level(cpu); + else + fw_level = acpi_find_last_cache_level(cpu); + + if (level < fw_level) { /* * some external caches not specified in CLIDR_EL1 * the information may be available in the device tree * only unified external caches are considered here */ - leaves += (of_level - level); - level = of_level; + leaves += (fw_level - level); + level = fw_level; } this_cpu_ci->num_levels = level; diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index e4a1182deff7..1d2b6d768efe 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -16,6 +16,8 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ +#include <linux/arm-smccc.h> +#include <linux/psci.h> #include <linux/types.h> #include <asm/cpu.h> #include <asm/cputype.h> @@ -232,6 +234,178 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry) } #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ +#ifdef CONFIG_ARM64_SSBD +DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); + +int ssbd_state __read_mostly = ARM64_SSBD_KERNEL; + +static const struct ssbd_options { + const char *str; + int state; +} ssbd_options[] = { + { "force-on", ARM64_SSBD_FORCE_ENABLE, }, + { "force-off", ARM64_SSBD_FORCE_DISABLE, }, + { "kernel", ARM64_SSBD_KERNEL, }, +}; + +static int __init ssbd_cfg(char *buf) +{ + int i; + + if (!buf || !buf[0]) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) { + int len = strlen(ssbd_options[i].str); + + if (strncmp(buf, ssbd_options[i].str, len)) + continue; + + ssbd_state = ssbd_options[i].state; + return 0; + } + + return -EINVAL; +} +early_param("ssbd", ssbd_cfg); + +void __init arm64_update_smccc_conduit(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, + int nr_inst) +{ + u32 insn; + + BUG_ON(nr_inst != 1); + + switch (psci_ops.conduit) { + case PSCI_CONDUIT_HVC: + insn = aarch64_insn_get_hvc_value(); + break; + case PSCI_CONDUIT_SMC: + insn = aarch64_insn_get_smc_value(); + break; + default: + return; + } + + *updptr = cpu_to_le32(insn); +} + +void __init arm64_enable_wa2_handling(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, + int nr_inst) +{ + BUG_ON(nr_inst != 1); + /* + * Only allow mitigation on EL1 entry/exit and guest + * ARCH_WORKAROUND_2 handling if the SSBD state allows it to + * be flipped. + */ + if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL) + *updptr = cpu_to_le32(aarch64_insn_gen_nop()); +} + +void arm64_set_ssbd_mitigation(bool state) +{ + switch (psci_ops.conduit) { + case PSCI_CONDUIT_HVC: + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL); + break; + + case PSCI_CONDUIT_SMC: + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL); + break; + + default: + WARN_ON_ONCE(1); + break; + } +} + +static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, + int scope) +{ + struct arm_smccc_res res; + bool required = true; + s32 val; + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) { + ssbd_state = ARM64_SSBD_UNKNOWN; + return false; + } + + switch (psci_ops.conduit) { + case PSCI_CONDUIT_HVC: + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_2, &res); + break; + + case PSCI_CONDUIT_SMC: + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_2, &res); + break; + + default: + ssbd_state = ARM64_SSBD_UNKNOWN; + return false; + } + + val = (s32)res.a0; + + switch (val) { + case SMCCC_RET_NOT_SUPPORTED: + ssbd_state = ARM64_SSBD_UNKNOWN; + return false; + + case SMCCC_RET_NOT_REQUIRED: + pr_info_once("%s mitigation not required\n", entry->desc); + ssbd_state = ARM64_SSBD_MITIGATED; + return false; + + case SMCCC_RET_SUCCESS: + required = true; + break; + + case 1: /* Mitigation not required on this CPU */ + required = false; + break; + + default: + WARN_ON(1); + return false; + } + + switch (ssbd_state) { + case ARM64_SSBD_FORCE_DISABLE: + pr_info_once("%s disabled from command-line\n", entry->desc); + arm64_set_ssbd_mitigation(false); + required = false; + break; + + case ARM64_SSBD_KERNEL: + if (required) { + __this_cpu_write(arm64_ssbd_callback_required, 1); + 
arm64_set_ssbd_mitigation(true); + } + break; + + case ARM64_SSBD_FORCE_ENABLE: + pr_info_once("%s forced from command-line\n", entry->desc); + arm64_set_ssbd_mitigation(true); + required = true; + break; + + default: + WARN_ON(1); + break; + } + + return required; +} +#endif /* CONFIG_ARM64_SSBD */ + #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \ .matches = is_affected_midr_range, \ .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max) @@ -488,6 +662,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors), }, #endif +#ifdef CONFIG_ARM64_SSBD + { + .desc = "Speculative Store Bypass Disable", + .capability = ARM64_SSBD, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = has_ssbd_mitigation, + }, +#endif { } }; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 9d1b06d67c53..d2856b129097 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1606,7 +1606,6 @@ static void __init setup_system_capabilities(void) void __init setup_cpu_features(void) { u32 cwg; - int cls; setup_system_capabilities(); mark_const_caps_ready(); @@ -1619,6 +1618,7 @@ void __init setup_cpu_features(void) pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n"); sve_setup(); + minsigstksz_setup(); /* Advertise that we have computed the system capabilities */ set_sys_caps_initialised(); @@ -1627,13 +1627,9 @@ void __init setup_cpu_features(void) * Check for sane CTR_EL0.CWG value. */ cwg = cache_type_cwg(); - cls = cache_line_size(); if (!cwg) - pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n", - cls); - if (L1_CACHE_BYTES < cls) - pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n", - L1_CACHE_BYTES, cls); + pr_warn("No Cache Writeback Granule information, assuming %d\n", + ARCH_DMA_MINALIGN); } static bool __maybe_unused diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S index 73f17bffcd23..12d4958e6429 100644 --- a/arch/arm64/kernel/entry-fpsimd.S +++ b/arch/arm64/kernel/entry-fpsimd.S @@ -49,7 +49,7 @@ ENTRY(sve_save_state) ENDPROC(sve_save_state) ENTRY(sve_load_state) - sve_load 0, x1, x2, 3 + sve_load 0, x1, x2, 3, x4 ret ENDPROC(sve_load_state) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index ec2ee720e33e..28ad8799406f 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -18,6 +18,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/arm-smccc.h> #include <linux/init.h> #include <linux/linkage.h> @@ -137,6 +138,25 @@ alternative_else_nop_endif add \dst, \dst, #(\sym - .entry.tramp.text) .endm + // This macro corrupts x0-x3. It is the caller's duty + // to save/restore them if required. 
+ .macro apply_ssbd, state, targ, tmp1, tmp2 +#ifdef CONFIG_ARM64_SSBD +alternative_cb arm64_enable_wa2_handling + b \targ +alternative_cb_end + ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1 + cbz \tmp2, \targ + ldr \tmp2, [tsk, #TSK_TI_FLAGS] + tbnz \tmp2, #TIF_SSBD, \targ + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2 + mov w1, #\state +alternative_cb arm64_update_smccc_conduit + nop // Patched to SMC/HVC #0 +alternative_cb_end +#endif + .endm + .macro kernel_entry, el, regsize = 64 .if \regsize == 32 mov w0, w0 // zero upper 32 bits of x0 @@ -163,6 +183,14 @@ alternative_else_nop_endif ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug disable_step_tsk x19, x20 // exceptions when scheduling. + apply_ssbd 1, 1f, x22, x23 + +#ifdef CONFIG_ARM64_SSBD + ldp x0, x1, [sp, #16 * 0] + ldp x2, x3, [sp, #16 * 1] +#endif +1: + mov x29, xzr // fp pointed to user-space .else add x21, sp, #S_FRAME_SIZE @@ -303,6 +331,8 @@ alternative_if ARM64_WORKAROUND_845719 alternative_else_nop_endif #endif 3: + apply_ssbd 0, 5f, x0, x1 +5: .endif msr elr_el1, x21 // set up the return data diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 87a35364e750..3b527ae46e49 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -31,7 +31,6 @@ #include <linux/percpu.h> #include <linux/prctl.h> #include <linux/preempt.h> -#include <linux/prctl.h> #include <linux/ptrace.h> #include <linux/sched/signal.h> #include <linux/sched/task_stack.h> @@ -129,7 +128,7 @@ static int sve_default_vl = -1; #ifdef CONFIG_ARM64_SVE /* Maximum supported vector length across all CPUs (initially poisoned) */ -int __ro_after_init sve_max_vl = -1; +int __ro_after_init sve_max_vl = SVE_VL_MIN; /* Set of available vector lengths, as vq_to_bit(vq): */ static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); static void __percpu *efi_sve_state; @@ -360,22 +359,13 @@ static int sve_proc_do_default_vl(struct ctl_table *table, int write, return ret; /* Writing -1 has the special meaning "set to max": */ - if (vl == -1) { - /* Fail safe if sve_max_vl wasn't initialised */ - if (WARN_ON(!sve_vl_valid(sve_max_vl))) - vl = SVE_VL_MIN; - else - vl = sve_max_vl; - - goto chosen; - } + if (vl == -1) + vl = sve_max_vl; if (!sve_vl_valid(vl)) return -EINVAL; - vl = find_supported_vector_length(vl); -chosen: - sve_default_vl = vl; + sve_default_vl = find_supported_vector_length(vl); return 0; } @@ -882,7 +872,7 @@ asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs) si_code = FPE_FLTRES; } - memset(&info, 0, sizeof(info)); + clear_siginfo(&info); info.si_signo = SIGFPE; info.si_code = si_code; info.si_addr = (void __user *)instruction_pointer(regs); diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 1ec5f28c39fc..6b2686d54411 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -313,6 +313,17 @@ int swsusp_arch_suspend(void) sleep_cpu = -EINVAL; __cpu_suspend_exit(); + + /* + * Just in case the boot kernel did turn the SSBD + * mitigation off behind our back, let's set the state + * to what we expect it to be. 
+ */ + switch (arm64_get_ssbd_state()) { + case ARM64_SSBD_FORCE_ENABLE: + case ARM64_SSBD_KERNEL: + arm64_set_ssbd_mitigation(true); + } } local_daif_restore(flags); diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 74bb56f656ef..413dbe530da8 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -30,7 +30,6 @@ #include <linux/smp.h> #include <linux/uaccess.h> -#include <asm/compat.h> #include <asm/current.h> #include <asm/debug-monitors.h> #include <asm/hw_breakpoint.h> diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 85a251b6dfa8..33147aacdafd 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -670,11 +670,10 @@ static void armv8pmu_disable_event(struct perf_event *event) raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } -static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) +static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu) { u32 pmovsr; struct perf_sample_data data; - struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); struct pt_regs *regs; int idx; diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c index 1d091d048d04..0bbac612146e 100644 --- a/arch/arm64/kernel/perf_regs.c +++ b/arch/arm64/kernel/perf_regs.c @@ -1,11 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/compat.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/perf_event.h> #include <linux/bug.h> #include <linux/sched/task_stack.h> -#include <asm/compat.h> #include <asm/perf_regs.h> #include <asm/ptrace.h> diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 7ff81fed46e1..bd732644c2f6 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -766,9 +766,6 @@ static void sve_init_header_from_task(struct user_sve_header *header, vq = sve_vq_from_vl(header->vl); header->max_vl = sve_max_vl; - if (WARN_ON(!sve_vl_valid(sve_max_vl))) - header->max_vl = header->vl; - header->size = SVE_PT_SIZE(vq, header->flags); header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl), SVE_PT_REGS_SVE); @@ -1046,8 +1043,6 @@ static const struct user_regset_view user_aarch64_view = { }; #ifdef CONFIG_COMPAT -#include <linux/compat.h> - enum compat_regset { REGSET_COMPAT_GPR, REGSET_COMPAT_VFP, diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 154b7d30145d..511af13e8d8f 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -17,6 +17,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/cache.h> #include <linux/compat.h> #include <linux/errno.h> #include <linux/kernel.h> @@ -570,8 +571,15 @@ badframe: return 0; } -/* Determine the layout of optional records in the signal frame */ -static int setup_sigframe_layout(struct rt_sigframe_user_layout *user) +/* + * Determine the layout of optional records in the signal frame + * + * add_all: if true, lays out the biggest possible signal frame for + * this task; otherwise, generates a layout for the current state + * of the task. 
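+ *
+ * add_all is passed as true only by minsigstksz_setup(), which uses
+ * the resulting worst-case layout to populate AT_MINSIGSTKSZ; actual
+ * signal delivery reaches here with add_all == false via get_sigframe().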
+ */ +static int setup_sigframe_layout(struct rt_sigframe_user_layout *user, + bool add_all) { int err; @@ -581,7 +589,7 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user) return err; /* fault information, if valid */ - if (current->thread.fault_code) { + if (add_all || current->thread.fault_code) { err = sigframe_alloc(user, &user->esr_offset, sizeof(struct esr_context)); if (err) @@ -591,8 +599,14 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user) if (system_supports_sve()) { unsigned int vq = 0; - if (test_thread_flag(TIF_SVE)) - vq = sve_vq_from_vl(current->thread.sve_vl); + if (add_all || test_thread_flag(TIF_SVE)) { + int vl = sve_max_vl; + + if (!add_all) + vl = current->thread.sve_vl; + + vq = sve_vq_from_vl(vl); + } err = sigframe_alloc(user, &user->sve_offset, SVE_SIG_CONTEXT_SIZE(vq)); @@ -603,7 +617,6 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user) return sigframe_alloc_end(user); } - static int setup_sigframe(struct rt_sigframe_user_layout *user, struct pt_regs *regs, sigset_t *set) { @@ -701,7 +714,7 @@ static int get_sigframe(struct rt_sigframe_user_layout *user, int err; init_user_layout(user); - err = setup_sigframe_layout(user); + err = setup_sigframe_layout(user, false); if (err) return err; @@ -830,11 +843,12 @@ static void do_signal(struct pt_regs *regs) unsigned long continue_addr = 0, restart_addr = 0; int retval = 0; struct ksignal ksig; + bool syscall = in_syscall(regs); /* * If we were from a system call, check for system call restarting... */ - if (in_syscall(regs)) { + if (syscall) { continue_addr = regs->pc; restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4); retval = regs->regs[0]; @@ -886,7 +900,7 @@ static void do_signal(struct pt_regs *regs) * Handle restarting a different system call. As above, if a debugger * has chosen to restart at a different PC, ignore the restart. */ - if (in_syscall(regs) && regs->pc == restart_addr) { + if (syscall && regs->pc == restart_addr) { if (retval == -ERESTART_RESTARTBLOCK) setup_restart_syscall(regs); user_rewind_single_step(current); @@ -936,3 +950,28 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, thread_flags = READ_ONCE(current_thread_info()->flags); } while (thread_flags & _TIF_WORK_MASK); } + +unsigned long __ro_after_init signal_minsigstksz; + +/* + * Determine the stack space required for guaranteed signal delivery. + * This function is used to populate AT_MINSIGSTKSZ at process startup. + * cpufeatures setup is assumed to be complete. + */ +void __init minsigstksz_setup(void) +{ + struct rt_sigframe_user_layout user; + + init_user_layout(&user); + + /* + * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't + * be big enough, but it's our best guess: + */ + if (WARN_ON(setup_sigframe_layout(&user, true))) + return; + + signal_minsigstksz = sigframe_size(&user) + + round_up(sizeof(struct frame_record), 16) + + 16; /* max alignment padding */ +} diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c new file mode 100644 index 000000000000..3432e5ef9f41 --- /dev/null +++ b/arch/arm64/kernel/ssbd.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 ARM Ltd, All Rights Reserved. + */ + +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/thread_info.h> + +#include <asm/cpufeature.h> + +/* + * prctl interface for SSBD + * FIXME: Drop the below ifdefery once merged in 4.18. 
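+ *
+ * As an illustration, a task would opt out of speculative store
+ * bypass with:
+ *
+ *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
+ *	      PR_SPEC_DISABLE, 0, 0);
+ *
+ * and read the current policy back with:
+ *
+ *	prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);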
+ */ +#ifdef PR_SPEC_STORE_BYPASS +static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl) +{ + int state = arm64_get_ssbd_state(); + + /* Unsupported */ + if (state == ARM64_SSBD_UNKNOWN) + return -EINVAL; + + /* Treat the unaffected/mitigated state separately */ + if (state == ARM64_SSBD_MITIGATED) { + switch (ctrl) { + case PR_SPEC_ENABLE: + return -EPERM; + case PR_SPEC_DISABLE: + case PR_SPEC_FORCE_DISABLE: + return 0; + } + } + + /* + * Things are a bit backward here: the arm64 internal API + * *enables the mitigation* when the userspace API *disables + * speculation*. So much fun. + */ + switch (ctrl) { + case PR_SPEC_ENABLE: + /* If speculation is force disabled, enable is not allowed */ + if (state == ARM64_SSBD_FORCE_ENABLE || + task_spec_ssb_force_disable(task)) + return -EPERM; + task_clear_spec_ssb_disable(task); + clear_tsk_thread_flag(task, TIF_SSBD); + break; + case PR_SPEC_DISABLE: + if (state == ARM64_SSBD_FORCE_DISABLE) + return -EPERM; + task_set_spec_ssb_disable(task); + set_tsk_thread_flag(task, TIF_SSBD); + break; + case PR_SPEC_FORCE_DISABLE: + if (state == ARM64_SSBD_FORCE_DISABLE) + return -EPERM; + task_set_spec_ssb_disable(task); + task_set_spec_ssb_force_disable(task); + set_tsk_thread_flag(task, TIF_SSBD); + break; + default: + return -ERANGE; + } + + return 0; +} + +int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, + unsigned long ctrl) +{ + switch (which) { + case PR_SPEC_STORE_BYPASS: + return ssbd_prctl_set(task, ctrl); + default: + return -ENODEV; + } +} + +static int ssbd_prctl_get(struct task_struct *task) +{ + switch (arm64_get_ssbd_state()) { + case ARM64_SSBD_UNKNOWN: + return -EINVAL; + case ARM64_SSBD_FORCE_ENABLE: + return PR_SPEC_DISABLE; + case ARM64_SSBD_KERNEL: + if (task_spec_ssb_force_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; + if (task_spec_ssb_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_DISABLE; + return PR_SPEC_PRCTL | PR_SPEC_ENABLE; + case ARM64_SSBD_FORCE_DISABLE: + return PR_SPEC_ENABLE; + default: + return PR_SPEC_NOT_AFFECTED; + } +} + +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) +{ + switch (which) { + case PR_SPEC_STORE_BYPASS: + return ssbd_prctl_get(task); + default: + return -ENODEV; + } +} +#endif /* PR_SPEC_STORE_BYPASS */ diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index a307b9e13392..70c283368b64 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -62,6 +62,14 @@ void notrace __cpu_suspend_exit(void) */ if (hw_breakpoint_restore) hw_breakpoint_restore(cpu); + + /* + * On resume, firmware implementing dynamic mitigation will + * have turned the mitigation on. If the user has forcefully + * disabled it, make sure their wishes are obeyed. + */ + if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) + arm64_set_ssbd_mitigation(false); } /* diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index 93ab57dcfc14..a6109825eeb9 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -112,6 +112,7 @@ long compat_arm_syscall(struct pt_regs *regs) break; } + clear_siginfo(&info); info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_ILLTRP; diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 21868530018e..f845a8617812 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -11,7 +11,9 @@ * for more details. 
*/ +#include <linux/acpi.h> #include <linux/arch_topology.h> +#include <linux/cacheinfo.h> #include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/init.h> @@ -22,6 +24,7 @@ #include <linux/sched.h> #include <linux/sched/topology.h> #include <linux/slab.h> +#include <linux/smp.h> #include <linux/string.h> #include <asm/cpu.h> @@ -47,7 +50,7 @@ static int __init get_cpu_for_node(struct device_node *node) return cpu; } -static int __init parse_core(struct device_node *core, int cluster_id, +static int __init parse_core(struct device_node *core, int package_id, int core_id) { char name[10]; @@ -63,7 +66,7 @@ static int __init parse_core(struct device_node *core, int cluster_id, leaf = false; cpu = get_cpu_for_node(t); if (cpu >= 0) { - cpu_topology[cpu].cluster_id = cluster_id; + cpu_topology[cpu].package_id = package_id; cpu_topology[cpu].core_id = core_id; cpu_topology[cpu].thread_id = i; } else { @@ -85,7 +88,7 @@ static int __init parse_core(struct device_node *core, int cluster_id, return -EINVAL; } - cpu_topology[cpu].cluster_id = cluster_id; + cpu_topology[cpu].package_id = package_id; cpu_topology[cpu].core_id = core_id; } else if (leaf) { pr_err("%pOF: Can't get CPU for leaf core\n", core); @@ -101,7 +104,7 @@ static int __init parse_cluster(struct device_node *cluster, int depth) bool leaf = true; bool has_cores = false; struct device_node *c; - static int cluster_id __initdata; + static int package_id __initdata; int core_id = 0; int i, ret; @@ -140,7 +143,7 @@ static int __init parse_cluster(struct device_node *cluster, int depth) } if (leaf) { - ret = parse_core(c, cluster_id, core_id++); + ret = parse_core(c, package_id, core_id++); } else { pr_err("%pOF: Non-leaf cluster with core %s\n", cluster, name); @@ -158,7 +161,7 @@ static int __init parse_cluster(struct device_node *cluster, int depth) pr_warn("%pOF: empty cluster\n", cluster); if (leaf) - cluster_id++; + package_id++; return 0; } @@ -194,7 +197,7 @@ static int __init parse_dt_topology(void) * only mark cores described in the DT as possible. 
*/ for_each_possible_cpu(cpu) - if (cpu_topology[cpu].cluster_id == -1) + if (cpu_topology[cpu].package_id == -1) ret = -EINVAL; out_map: @@ -212,7 +215,14 @@ EXPORT_SYMBOL_GPL(cpu_topology); const struct cpumask *cpu_coregroup_mask(int cpu) { - return &cpu_topology[cpu].core_sibling; + const cpumask_t *core_mask = &cpu_topology[cpu].core_sibling; + + if (cpu_topology[cpu].llc_id != -1) { + if (cpumask_subset(&cpu_topology[cpu].llc_siblings, core_mask)) + core_mask = &cpu_topology[cpu].llc_siblings; + } + + return core_mask; } static void update_siblings_masks(unsigned int cpuid) @@ -224,7 +234,12 @@ static void update_siblings_masks(unsigned int cpuid) for_each_possible_cpu(cpu) { cpu_topo = &cpu_topology[cpu]; - if (cpuid_topo->cluster_id != cpu_topo->cluster_id) + if (cpuid_topo->llc_id == cpu_topo->llc_id) { + cpumask_set_cpu(cpu, &cpuid_topo->llc_siblings); + cpumask_set_cpu(cpuid, &cpu_topo->llc_siblings); + } + + if (cpuid_topo->package_id != cpu_topo->package_id) continue; cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); @@ -245,7 +260,7 @@ void store_cpu_topology(unsigned int cpuid) struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; u64 mpidr; - if (cpuid_topo->cluster_id != -1) + if (cpuid_topo->package_id != -1) goto topology_populated; mpidr = read_cpuid_mpidr(); @@ -259,19 +274,19 @@ void store_cpu_topology(unsigned int cpuid) /* Multiprocessor system : Multi-threads per core */ cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); - cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) | + cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) | MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8; } else { /* Multiprocessor system : Single-thread per core */ cpuid_topo->thread_id = -1; cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); - cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) | + cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) | MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 | MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16; } pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n", - cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id, + cpuid, cpuid_topo->package_id, cpuid_topo->core_id, cpuid_topo->thread_id, mpidr); topology_populated: @@ -287,7 +302,11 @@ static void __init reset_cpu_topology(void) cpu_topo->thread_id = -1; cpu_topo->core_id = 0; - cpu_topo->cluster_id = -1; + cpu_topo->package_id = -1; + + cpu_topo->llc_id = -1; + cpumask_clear(&cpu_topo->llc_siblings); + cpumask_set_cpu(cpu, &cpu_topo->llc_siblings); cpumask_clear(&cpu_topo->core_sibling); cpumask_set_cpu(cpu, &cpu_topo->core_sibling); @@ -296,6 +315,59 @@ static void __init reset_cpu_topology(void) } } +#ifdef CONFIG_ACPI +/* + * Propagate the topology information of the processor_topology_node tree to the + * cpu_topology array. 
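+ * The last-level cache ID is filled in from the PPTT cache topology
+ * as well, via acpi_find_last_cache_level() and
+ * find_acpi_cpu_cache_topology().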
+ */ +static int __init parse_acpi_topology(void) +{ + bool is_threaded; + int cpu, topology_id; + + is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK; + + for_each_possible_cpu(cpu) { + int i, cache_id; + + topology_id = find_acpi_cpu_topology(cpu, 0); + if (topology_id < 0) + return topology_id; + + if (is_threaded) { + cpu_topology[cpu].thread_id = topology_id; + topology_id = find_acpi_cpu_topology(cpu, 1); + cpu_topology[cpu].core_id = topology_id; + } else { + cpu_topology[cpu].thread_id = -1; + cpu_topology[cpu].core_id = topology_id; + } + topology_id = find_acpi_cpu_topology_package(cpu); + cpu_topology[cpu].package_id = topology_id; + + i = acpi_find_last_cache_level(cpu); + + if (i > 0) { + /* + * this is the only part of cpu_topology that has + * a direct relationship with the cache topology + */ + cache_id = find_acpi_cpu_cache_topology(cpu, i); + if (cache_id > 0) + cpu_topology[cpu].llc_id = cache_id; + } + } + + return 0; +} + +#else +static inline int __init parse_acpi_topology(void) +{ + return -EINVAL; +} +#endif + void __init init_cpu_topology(void) { reset_cpu_topology(); @@ -304,6 +376,8 @@ void __init init_cpu_topology(void) * Discard anything that was parsed if we hit an error so we * don't use partial information. */ - if (of_have_populated_dt() && parse_dt_topology()) + if (!acpi_disabled && parse_acpi_topology()) + reset_cpu_topology(); + else if (of_have_populated_dt() && parse_dt_topology()) reset_cpu_topology(); } diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 8bbdc17e49df..d399d459397b 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -635,6 +635,7 @@ asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) siginfo_t info; void __user *pc = (void __user *)instruction_pointer(regs); + clear_siginfo(&info); info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_ILLOPC; diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 0221aca6493d..605d1b60469c 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -34,25 +34,25 @@ jiffies = jiffies_64; * 4 KB (see related ASSERT() below) \ */ \ . = ALIGN(SZ_4K); \ - VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ + __hyp_idmap_text_start = .; \ *(.hyp.idmap.text) \ - VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \ - VMLINUX_SYMBOL(__hyp_text_start) = .; \ + __hyp_idmap_text_end = .; \ + __hyp_text_start = .; \ *(.hyp.text) \ - VMLINUX_SYMBOL(__hyp_text_end) = .; + __hyp_text_end = .; #define IDMAP_TEXT \ . = ALIGN(SZ_4K); \ - VMLINUX_SYMBOL(__idmap_text_start) = .; \ + __idmap_text_start = .; \ *(.idmap.text) \ - VMLINUX_SYMBOL(__idmap_text_end) = .; + __idmap_text_end = .; #ifdef CONFIG_HIBERNATION #define HIBERNATE_TEXT \ . = ALIGN(SZ_4K); \ - VMLINUX_SYMBOL(__hibernate_exit_text_start) = .;\ + __hibernate_exit_text_start = .; \ *(.hibernate_exit.text) \ - VMLINUX_SYMBOL(__hibernate_exit_text_end) = .; + __hibernate_exit_text_end = .; #else #define HIBERNATE_TEXT #endif @@ -60,10 +60,10 @@ jiffies = jiffies_64; #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 #define TRAMP_TEXT \ . = ALIGN(PAGE_SIZE); \ - VMLINUX_SYMBOL(__entry_tramp_text_start) = .; \ + __entry_tramp_text_start = .; \ *(.entry.tramp.text) \ . 
= ALIGN(PAGE_SIZE); \ - VMLINUX_SYMBOL(__entry_tramp_text_end) = .; + __entry_tramp_text_end = .; #else #define TRAMP_TEXT #endif diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index bffece27b5c1..05d836979032 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -106,8 +106,44 @@ el1_hvc_guest: */ ldr x1, [sp] // Guest's x0 eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1 + cbz w1, wa_epilogue + + /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */ + eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \ + ARM_SMCCC_ARCH_WORKAROUND_2) cbnz w1, el1_trap - mov x0, x1 + +#ifdef CONFIG_ARM64_SSBD +alternative_cb arm64_enable_wa2_handling + b wa2_end +alternative_cb_end + get_vcpu_ptr x2, x0 + ldr x0, [x2, #VCPU_WORKAROUND_FLAGS] + + // Sanitize the argument and update the guest flags + ldr x1, [sp, #8] // Guest's x1 + clz w1, w1 // Murphy's device: + lsr w1, w1, #5 // w1 = !!w1 without using + eor w1, w1, #1 // the flags... + bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1 + str x0, [x2, #VCPU_WORKAROUND_FLAGS] + + /* Check that we actually need to perform the call */ + hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2 + cbz x0, wa2_end + + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2 + smc #0 + + /* Don't leak data from the SMC call */ + mov x3, xzr +wa2_end: + mov x2, xzr + mov x1, xzr +#endif + +wa_epilogue: + mov x0, xzr add sp, sp, #16 eret diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index d9645236e474..c50cedc447f1 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -15,6 +15,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/arm-smccc.h> #include <linux/types.h> #include <linux/jump_label.h> #include <uapi/linux/psci.h> @@ -389,6 +390,39 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) return false; } +static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu) +{ + if (!cpus_have_const_cap(ARM64_SSBD)) + return false; + + return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG); +} + +static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu) +{ +#ifdef CONFIG_ARM64_SSBD + /* + * The host runs with the workaround always present. If the + * guest wants it disabled, so be it... + */ + if (__needs_ssbd_off(vcpu) && + __hyp_this_cpu_read(arm64_ssbd_callback_required)) + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL); +#endif +} + +static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu) +{ +#ifdef CONFIG_ARM64_SSBD + /* + * If the guest has disabled the workaround, bring it back on. + */ + if (__needs_ssbd_off(vcpu) && + __hyp_this_cpu_read(arm64_ssbd_callback_required)) + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL); +#endif +} + /* Switch to the guest for VHE systems running in EL2 */ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { @@ -409,6 +443,8 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) sysreg_restore_guest_state_vhe(guest_ctxt); __debug_switch_to_guest(vcpu); + __set_guest_arch_workaround_state(vcpu); + do { /* Jump in the fire! */ exit_code = __guest_enter(vcpu, host_ctxt); @@ -416,6 +452,8 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) /* And we're baaack! 
*/ } while (fixup_guest_exit(vcpu, &exit_code)); + __set_host_arch_workaround_state(vcpu); + fp_enabled = fpsimd_enabled_vhe(); sysreg_save_guest_state_vhe(guest_ctxt); @@ -465,6 +503,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) __sysreg_restore_state_nvhe(guest_ctxt); __debug_switch_to_guest(vcpu); + __set_guest_arch_workaround_state(vcpu); + do { /* Jump in the fire! */ exit_code = __guest_enter(vcpu, host_ctxt); @@ -472,6 +512,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) /* And we're baaack! */ } while (fixup_guest_exit(vcpu, &exit_code)); + __set_host_arch_workaround_state(vcpu); + fp_enabled = __fpsimd_enabled_nvhe(); __sysreg_save_state_nvhe(guest_ctxt); diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 3256b9228e75..a74311beda35 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -122,6 +122,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset PMU */ kvm_pmu_vcpu_reset(vcpu); + /* Default workaround setup is enabled (if supported) */ + if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL) + vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; + /* Reset timer */ return kvm_timer_vcpu_reset(vcpu); } diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index a96ec0181818..49e217ac7e1e 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -504,20 +504,15 @@ static int __init arm64_dma_init(void) max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT)) swiotlb = 1; + WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(), + TAINT_CPU_OUT_OF_SPEC, + "ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)", + ARCH_DMA_MINALIGN, cache_line_size()); + return atomic_pool_init(); } arch_initcall(arm64_dma_init); -#define PREALLOC_DMA_DEBUG_ENTRIES 4096 - -static int __init dma_debug_do_init(void) -{ - dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); - return 0; -} -fs_initcall(dma_debug_do_init); - - #ifdef CONFIG_IOMMU_DMA #include <linux/dma-iommu.h> #include <linux/platform_device.h> diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 2af3dd89bcdb..b8eecc7b9531 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -235,8 +235,9 @@ static bool is_el1_instruction_abort(unsigned int esr) return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR; } -static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs, - unsigned long addr) +static inline bool is_el1_permission_fault(unsigned int esr, + struct pt_regs *regs, + unsigned long addr) { unsigned int ec = ESR_ELx_EC(esr); unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE; @@ -254,6 +255,22 @@ static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs, return false; } +static void die_kernel_fault(const char *msg, unsigned long addr, + unsigned int esr, struct pt_regs *regs) +{ + bust_spinlocks(1); + + pr_alert("Unable to handle kernel %s at virtual address %016lx\n", msg, + addr); + + mem_abort_decode(esr); + + show_pte(addr); + die("Oops", regs, esr); + bust_spinlocks(0); + do_exit(SIGKILL); +} + static void __do_kernel_fault(unsigned long addr, unsigned int esr, struct pt_regs *regs) { @@ -266,9 +283,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, if (!is_el1_instruction_abort(esr) && fixup_exception(regs)) return; - bust_spinlocks(1); - - if (is_permission_fault(esr, regs, addr)) { + if (is_el1_permission_fault(esr, regs, addr)) { if (esr & ESR_ELx_WNR) msg = "write to read-only memory"; else @@ -279,15 +294,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, msg = 
"paging request"; } - pr_alert("Unable to handle kernel %s at virtual address %08lx\n", msg, - addr); - - mem_abort_decode(esr); - - show_pte(addr); - die("Oops", regs, esr); - bust_spinlocks(0); - do_exit(SIGKILL); + die_kernel_fault(msg, addr, esr, regs); } static void __do_user_fault(struct siginfo *info, unsigned int esr) @@ -356,11 +363,12 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re */ if (user_mode(regs)) { const struct fault_info *inf = esr_to_fault_info(esr); - struct siginfo si = { - .si_signo = inf->sig, - .si_code = inf->code, - .si_addr = (void __user *)addr, - }; + struct siginfo si; + + clear_siginfo(&si); + si.si_signo = inf->sig; + si.si_code = inf->code; + si.si_addr = (void __user *)addr; __do_user_fault(&si, esr); } else { @@ -446,16 +454,19 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, mm_flags |= FAULT_FLAG_WRITE; } - if (addr < TASK_SIZE && is_permission_fault(esr, regs, addr)) { + if (addr < TASK_SIZE && is_el1_permission_fault(esr, regs, addr)) { /* regs->orig_addr_limit may be 0 if we entered from EL0 */ if (regs->orig_addr_limit == KERNEL_DS) - die("Accessing user space memory with fs=KERNEL_DS", regs, esr); + die_kernel_fault("access to user memory with fs=KERNEL_DS", + addr, esr, regs); if (is_el1_instruction_abort(esr)) - die("Attempting to execute userspace memory", regs, esr); + die_kernel_fault("execution of user memory", + addr, esr, regs); if (!search_exception_tables(regs->pc)) - die("Accessing user space memory outside uaccess.h routines", regs, esr); + die_kernel_fault("access to user memory outside uaccess routines", + addr, esr, regs); } perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); @@ -634,6 +645,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) nmi_exit(); } + clear_siginfo(&info); info.si_signo = inf->sig; info.si_errno = 0; info.si_code = inf->code; @@ -738,6 +750,7 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, show_pte(addr); } + clear_siginfo(&info); info.si_signo = inf->sig; info.si_errno = 0; info.si_code = inf->code; @@ -780,6 +793,7 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr, local_irq_enable(); } + clear_siginfo(&info); info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; @@ -823,7 +837,6 @@ asmlinkage int __exception do_debug_exception(unsigned long addr, struct pt_regs *regs) { const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr); - struct siginfo info; int rv; /* @@ -839,6 +852,9 @@ asmlinkage int __exception do_debug_exception(unsigned long addr, if (!inf->fn(addr, esr, regs)) { rv = 1; } else { + struct siginfo info; + + clear_siginfo(&info); info.si_signo = inf->sig; info.si_errno = 0; info.si_code = inf->code; diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index a93350451e8e..a6fdaea07c63 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -21,7 +21,6 @@ #include <linux/bpf.h> #include <linux/filter.h> #include <linux/printk.h> -#include <linux/skbuff.h> #include <linux/slab.h> #include <asm/byteorder.h> @@ -80,23 +79,66 @@ static inline void emit(const u32 insn, struct jit_ctx *ctx) ctx->idx++; } +static inline void emit_a64_mov_i(const int is64, const int reg, + const s32 val, struct jit_ctx *ctx) +{ + u16 hi = val >> 16; + u16 lo = val & 0xffff; + + if (hi & 0x8000) { + if (hi == 0xffff) { + emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx); + } else { + emit(A64_MOVN(is64, reg, 
(u16)~hi, 16), ctx); + if (lo != 0xffff) + emit(A64_MOVK(is64, reg, lo, 0), ctx); + } + } else { + emit(A64_MOVZ(is64, reg, lo, 0), ctx); + if (hi) + emit(A64_MOVK(is64, reg, hi, 16), ctx); + } +} + +static int i64_i16_blocks(const u64 val, bool inverse) +{ + return (((val >> 0) & 0xffff) != (inverse ? 0xffff : 0x0000)) + + (((val >> 16) & 0xffff) != (inverse ? 0xffff : 0x0000)) + + (((val >> 32) & 0xffff) != (inverse ? 0xffff : 0x0000)) + + (((val >> 48) & 0xffff) != (inverse ? 0xffff : 0x0000)); +} + static inline void emit_a64_mov_i64(const int reg, const u64 val, struct jit_ctx *ctx) { - u64 tmp = val; - int shift = 0; - - emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx); - tmp >>= 16; - shift += 16; - while (tmp) { - if (tmp & 0xffff) - emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx); - tmp >>= 16; - shift += 16; + u64 nrm_tmp = val, rev_tmp = ~val; + bool inverse; + int shift; + + if (!(nrm_tmp >> 32)) + return emit_a64_mov_i(0, reg, (u32)val, ctx); + + inverse = i64_i16_blocks(nrm_tmp, true) < i64_i16_blocks(nrm_tmp, false); + shift = max(round_down((inverse ? (fls64(rev_tmp) - 1) : + (fls64(nrm_tmp) - 1)), 16), 0); + if (inverse) + emit(A64_MOVN(1, reg, (rev_tmp >> shift) & 0xffff, shift), ctx); + else + emit(A64_MOVZ(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx); + shift -= 16; + while (shift >= 0) { + if (((nrm_tmp >> shift) & 0xffff) != (inverse ? 0xffff : 0x0000)) + emit(A64_MOVK(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx); + shift -= 16; } } +/* + * This is an unoptimized 64 immediate emission used for BPF to BPF call + * addresses. It will always do a full 64 bit decomposition as otherwise + * more complexity in the last extra pass is required since we previously + * reserved 4 instructions for the address. + */ static inline void emit_addr_mov_i64(const int reg, const u64 val, struct jit_ctx *ctx) { @@ -111,26 +153,6 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val, } } -static inline void emit_a64_mov_i(const int is64, const int reg, - const s32 val, struct jit_ctx *ctx) -{ - u16 hi = val >> 16; - u16 lo = val & 0xffff; - - if (hi & 0x8000) { - if (hi == 0xffff) { - emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx); - } else { - emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx); - emit(A64_MOVK(is64, reg, lo, 0), ctx); - } - } else { - emit(A64_MOVZ(is64, reg, lo, 0), ctx); - if (hi) - emit(A64_MOVK(is64, reg, hi, 16), ctx); - } -} - static inline int bpf2a64_offset(int bpf_to, int bpf_from, const struct jit_ctx *ctx) { @@ -163,7 +185,7 @@ static inline int epilogue_offset(const struct jit_ctx *ctx) /* Tail call offset to jump into */ #define PROLOGUE_OFFSET 7 -static int build_prologue(struct jit_ctx *ctx) +static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf) { const struct bpf_prog *prog = ctx->prog; const u8 r6 = bpf2a64[BPF_REG_6]; @@ -188,7 +210,7 @@ static int build_prologue(struct jit_ctx *ctx) * | ... | BPF prog stack * | | * +-----+ <= (BPF_FP - prog->aux->stack_depth) - * |RSVD | JIT scratchpad + * |RSVD | padding * current A64_SP => +-----+ <= (BPF_FP - ctx->stack_size) * | | * | ... 
| Function call stack @@ -210,19 +232,19 @@ static int build_prologue(struct jit_ctx *ctx) /* Set up BPF prog stack base register */ emit(A64_MOV(1, fp, A64_SP), ctx); - /* Initialize tail_call_cnt */ - emit(A64_MOVZ(1, tcc, 0, 0), ctx); + if (!ebpf_from_cbpf) { + /* Initialize tail_call_cnt */ + emit(A64_MOVZ(1, tcc, 0, 0), ctx); - cur_offset = ctx->idx - idx0; - if (cur_offset != PROLOGUE_OFFSET) { - pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n", - cur_offset, PROLOGUE_OFFSET); - return -1; + cur_offset = ctx->idx - idx0; + if (cur_offset != PROLOGUE_OFFSET) { + pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n", + cur_offset, PROLOGUE_OFFSET); + return -1; + } } - /* 4 byte extra for skb_copy_bits buffer */ - ctx->stack_size = prog->aux->stack_depth + 4; - ctx->stack_size = STACK_ALIGN(ctx->stack_size); + ctx->stack_size = STACK_ALIGN(prog->aux->stack_depth); /* Set up function call stack */ emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); @@ -723,71 +745,6 @@ emit_cond_jmp: emit(A64_CBNZ(0, tmp3, jmp_offset), ctx); break; - /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ - case BPF_LD | BPF_ABS | BPF_W: - case BPF_LD | BPF_ABS | BPF_H: - case BPF_LD | BPF_ABS | BPF_B: - /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */ - case BPF_LD | BPF_IND | BPF_W: - case BPF_LD | BPF_IND | BPF_H: - case BPF_LD | BPF_IND | BPF_B: - { - const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */ - const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */ - const u8 fp = bpf2a64[BPF_REG_FP]; - const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */ - const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */ - const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */ - const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */ - const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) 
*/ - int size; - - emit(A64_MOV(1, r1, r6), ctx); - emit_a64_mov_i(0, r2, imm, ctx); - if (BPF_MODE(code) == BPF_IND) - emit(A64_ADD(0, r2, r2, src), ctx); - switch (BPF_SIZE(code)) { - case BPF_W: - size = 4; - break; - case BPF_H: - size = 2; - break; - case BPF_B: - size = 1; - break; - default: - return -EINVAL; - } - emit_a64_mov_i64(r3, size, ctx); - emit(A64_SUB_I(1, r4, fp, ctx->stack_size), ctx); - emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx); - emit(A64_BLR(r5), ctx); - emit(A64_MOV(1, r0, A64_R(0)), ctx); - - jmp_offset = epilogue_offset(ctx); - check_imm19(jmp_offset); - emit(A64_CBZ(1, r0, jmp_offset), ctx); - emit(A64_MOV(1, r5, r0), ctx); - switch (BPF_SIZE(code)) { - case BPF_W: - emit(A64_LDR32(r0, r5, A64_ZR), ctx); -#ifndef CONFIG_CPU_BIG_ENDIAN - emit(A64_REV32(0, r0, r0), ctx); -#endif - break; - case BPF_H: - emit(A64_LDRH(r0, r5, A64_ZR), ctx); -#ifndef CONFIG_CPU_BIG_ENDIAN - emit(A64_REV16(0, r0, r0), ctx); -#endif - break; - case BPF_B: - emit(A64_LDRB(r0, r5, A64_ZR), ctx); - break; - } - break; - } default: pr_err_once("unknown opcode %02x\n", code); return -EINVAL; @@ -851,6 +808,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) struct bpf_prog *tmp, *orig_prog = prog; struct bpf_binary_header *header; struct arm64_jit_data *jit_data; + bool was_classic = bpf_prog_was_classic(prog); bool tmp_blinded = false; bool extra_pass = false; struct jit_ctx ctx; @@ -905,7 +863,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) goto out_off; } - if (build_prologue(&ctx)) { + if (build_prologue(&ctx, was_classic)) { prog = orig_prog; goto out_off; } @@ -928,7 +886,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) skip_init_ctx: ctx.idx = 0; - build_prologue(&ctx); + build_prologue(&ctx, was_classic); if (build_body(&ctx)) { bpf_jit_binary_free(header); diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig index c6b4dd1418b4..bf59855628ac 100644 --- a/arch/c6x/Kconfig +++ b/arch/c6x/Kconfig @@ -6,11 +6,13 @@ config C6X def_bool y + select ARCH_HAS_SYNC_DMA_FOR_CPU + select ARCH_HAS_SYNC_DMA_FOR_DEVICE select CLKDEV_LOOKUP + select DMA_NONCOHERENT_OPS select GENERIC_ATOMIC64 select GENERIC_IRQ_SHOW select HAVE_ARCH_TRACEHOOK - select HAVE_DMA_API_DEBUG select HAVE_MEMBLOCK select SPARSE_IRQ select IRQ_DOMAIN diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index fd4c840de837..33a2c94fed0d 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild @@ -1,10 +1,12 @@ generic-y += atomic.h generic-y += barrier.h generic-y += bugs.h +generic-y += compat.h generic-y += current.h generic-y += device.h generic-y += div64.h generic-y += dma.h +generic-y += dma-mapping.h generic-y += emergency-restart.h generic-y += exec.h generic-y += extable.h diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h deleted file mode 100644 index 05daf1038111..000000000000 --- a/arch/c6x/include/asm/dma-mapping.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Port on Texas Instruments TMS320C6x architecture - * - * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated - * Author: Aurelien Jacquiot <aurelien.jacquiot@ti.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - */ -#ifndef _ASM_C6X_DMA_MAPPING_H -#define _ASM_C6X_DMA_MAPPING_H - -extern const struct dma_map_ops c6x_dma_ops; - -static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) -{ - return &c6x_dma_ops; -} - -extern void coherent_mem_init(u32 start, u32 size); -void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, - gfp_t gfp, unsigned long attrs); -void c6x_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, unsigned long attrs); - -#endif /* _ASM_C6X_DMA_MAPPING_H */ diff --git a/arch/c6x/include/asm/setup.h b/arch/c6x/include/asm/setup.h index 852afb209afb..350f34debb19 100644 --- a/arch/c6x/include/asm/setup.h +++ b/arch/c6x/include/asm/setup.h @@ -28,5 +28,7 @@ extern unsigned char c6x_fuse_mac[6]; extern void machine_init(unsigned long dt_ptr); extern void time_init(void); +extern void coherent_mem_init(u32 start, u32 size); + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_C6X_SETUP_H */ diff --git a/arch/c6x/kernel/Makefile b/arch/c6x/kernel/Makefile index 02f340d7b8fe..fbe74174de87 100644 --- a/arch/c6x/kernel/Makefile +++ b/arch/c6x/kernel/Makefile @@ -8,6 +8,6 @@ extra-y := head.o vmlinux.lds obj-y := process.o traps.o irq.o signal.o ptrace.o obj-y += setup.o sys_c6x.o time.o devicetree.o obj-y += switch_to.o entry.o vectors.o c6x_ksyms.o -obj-y += soc.o dma.o +obj-y += soc.o obj-$(CONFIG_MODULES) += module.o diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c deleted file mode 100644 index 9fff8be75f58..000000000000 --- a/arch/c6x/kernel/dma.c +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright (C) 2011 Texas Instruments Incorporated - * Author: Mark Salter <msalter@redhat.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#include <linux/module.h> -#include <linux/dma-mapping.h> -#include <linux/mm.h> -#include <linux/mm_types.h> -#include <linux/scatterlist.h> - -#include <asm/cacheflush.h> - -static void c6x_dma_sync(dma_addr_t handle, size_t size, - enum dma_data_direction dir) -{ - unsigned long paddr = handle; - - BUG_ON(!valid_dma_direction(dir)); - - switch (dir) { - case DMA_FROM_DEVICE: - L2_cache_block_invalidate(paddr, paddr + size); - break; - case DMA_TO_DEVICE: - L2_cache_block_writeback(paddr, paddr + size); - break; - case DMA_BIDIRECTIONAL: - L2_cache_block_writeback_invalidate(paddr, paddr + size); - break; - default: - break; - } -} - -static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - dma_addr_t handle = virt_to_phys(page_address(page) + offset); - - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - c6x_dma_sync(handle, size, dir); - - return handle; -} - -static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - c6x_dma_sync(handle, size, dir); -} - -static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist, - int nents, enum dma_data_direction dir, unsigned long attrs) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sglist, sg, nents, i) { - sg->dma_address = sg_phys(sg); - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - c6x_dma_sync(sg->dma_address, sg->length, dir); - } - - return nents; -} - -static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, - int nents, enum dma_data_direction dir, unsigned long attrs) -{ - struct scatterlist *sg; - int i; - - if (attrs & DMA_ATTR_SKIP_CPU_SYNC) - return; - - for_each_sg(sglist, sg, nents, i) - c6x_dma_sync(sg_dma_address(sg), sg->length, dir); -} - -static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir) -{ - c6x_dma_sync(handle, size, dir); - -} - -static void c6x_dma_sync_single_for_device(struct device *dev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ - c6x_dma_sync(handle, size, dir); - -} - -static void c6x_dma_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sglist, int nents, - enum dma_data_direction dir) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sglist, sg, nents, i) - c6x_dma_sync_single_for_cpu(dev, sg_dma_address(sg), - sg->length, dir); - -} - -static void c6x_dma_sync_sg_for_device(struct device *dev, - struct scatterlist *sglist, int nents, - enum dma_data_direction dir) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sglist, sg, nents, i) - c6x_dma_sync_single_for_device(dev, sg_dma_address(sg), - sg->length, dir); - -} - -const struct dma_map_ops c6x_dma_ops = { - .alloc = c6x_dma_alloc, - .free = c6x_dma_free, - .map_page = c6x_dma_map_page, - .unmap_page = c6x_dma_unmap_page, - .map_sg = c6x_dma_map_sg, - .unmap_sg = c6x_dma_unmap_sg, - .sync_single_for_device = c6x_dma_sync_single_for_device, - .sync_single_for_cpu = c6x_dma_sync_single_for_cpu, - .sync_sg_for_device = c6x_dma_sync_sg_for_device, - .sync_sg_for_cpu = c6x_dma_sync_sg_for_cpu, -}; -EXPORT_SYMBOL(c6x_dma_ops); - -/* Number of entries preallocated for DMA-API debugging */ -#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) - -static int __init dma_init(void) -{ - dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); - - return 0; -} -fs_initcall(dma_init); diff --git a/arch/c6x/kernel/traps.c 
b/arch/c6x/kernel/traps.c index 4c1d4b84dd2b..5c60aea3b75a 100644 --- a/arch/c6x/kernel/traps.c +++ b/arch/c6x/kernel/traps.c @@ -244,7 +244,6 @@ static struct exception_info eexcept_table[128] = { static void do_trap(struct exception_info *except_info, struct pt_regs *regs) { unsigned long addr = instruction_pointer(regs); - siginfo_t info; if (except_info->code != TRAP_BRKPT) pr_err("TRAP: %s PC[0x%lx] signo[%d] code[%d]\n", @@ -253,12 +252,8 @@ static void do_trap(struct exception_info *except_info, struct pt_regs *regs) die_if_kernel(except_info->kernel_str, regs, addr); - info.si_signo = except_info->signo; - info.si_errno = 0; - info.si_code = except_info->code; - info.si_addr = (void __user *)addr; - - force_sig_info(except_info->signo, &info, current); + force_sig_fault(except_info->signo, except_info->code, + (void __user *)addr, current); } /* diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c index 95e38ad27c69..d0a8e0c4b27e 100644 --- a/arch/c6x/mm/dma-coherent.c +++ b/arch/c6x/mm/dma-coherent.c @@ -19,10 +19,12 @@ #include <linux/bitops.h> #include <linux/module.h> #include <linux/interrupt.h> -#include <linux/dma-mapping.h> +#include <linux/dma-noncoherent.h> #include <linux/memblock.h> +#include <asm/cacheflush.h> #include <asm/page.h> +#include <asm/setup.h> /* * DMA coherent memory management, can be redefined using the memdma= @@ -73,7 +75,7 @@ static void __free_dma_pages(u32 addr, int order) * Allocate DMA coherent memory space and return both the kernel * virtual and DMA address for that space. */ -void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, +void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, unsigned long attrs) { u32 paddr; @@ -98,7 +100,7 @@ void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, /* * Free DMA coherent memory as defined by the above mapping. 
*/ -void c6x_dma_free(struct device *dev, size_t size, void *vaddr, +void arch_dma_free(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { int order; @@ -139,3 +141,35 @@ void __init coherent_mem_init(phys_addr_t start, u32 size) dma_bitmap = phys_to_virt(bitmap_phys); memset(dma_bitmap, 0, dma_pages * PAGE_SIZE); } + +static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size, + enum dma_data_direction dir) +{ + BUG_ON(!valid_dma_direction(dir)); + + switch (dir) { + case DMA_FROM_DEVICE: + L2_cache_block_invalidate(paddr, paddr + size); + break; + case DMA_TO_DEVICE: + L2_cache_block_writeback(paddr, paddr + size); + break; + case DMA_BIDIRECTIONAL: + L2_cache_block_writeback_invalidate(paddr, paddr + size); + break; + default: + break; + } +} + +void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) +{ + return c6x_dma_sync(dev, paddr, size, dir); +} + +void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) +{ + return c6x_dma_sync(dev, paddr, size, dir); +} diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild index 14bac06b7116..a5d0b2991f47 100644 --- a/arch/h8300/include/asm/Kbuild +++ b/arch/h8300/include/asm/Kbuild @@ -3,6 +3,7 @@ generic-y += barrier.h generic-y += bugs.h generic-y += cacheflush.h generic-y += checksum.h +generic-y += compat.h generic-y += current.h generic-y += delay.h generic-y += device.h diff --git a/arch/h8300/include/asm/pci.h b/arch/h8300/include/asm/pci.h index 7c9e55d62215..d4d345a52092 100644 --- a/arch/h8300/include/asm/pci.h +++ b/arch/h8300/include/asm/pci.h @@ -15,6 +15,4 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) /* We don't do dynamic PCI IRQ allocation */ } -#define PCI_DMA_BUS_IS_PHYS (1) - #endif /* _ASM_H8300_PCI_H */ diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 76d2f20d525e..37adb2003033 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -19,6 +19,7 @@ config HEXAGON select GENERIC_IRQ_SHOW select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK + select NEED_SG_DMA_LENGTH select NO_IOPORT_MAP select GENERIC_IOMAP select GENERIC_SMP_IDLE_THREAD @@ -63,9 +64,6 @@ config GENERIC_CSUM config GENERIC_IRQ_PROBE def_bool y -config NEED_SG_DMA_LENGTH - def_bool y - config RWSEM_GENERIC_SPINLOCK def_bool n diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index e9743f689fb8..dd2fd9c0d292 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -2,6 +2,7 @@ generic-y += barrier.h generic-y += bug.h generic-y += bugs.h +generic-y += compat.h generic-y += current.h generic-y += device.h generic-y += div64.h diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c index ad8347c29dcf..77459df34e2e 100644 --- a/arch/hexagon/kernel/dma.c +++ b/arch/hexagon/kernel/dma.c @@ -208,7 +208,6 @@ const struct dma_map_ops hexagon_dma_ops = { .sync_single_for_cpu = hexagon_sync_single_for_cpu, .sync_single_for_device = hexagon_sync_single_for_device, .mapping_error = hexagon_mapping_error, - .is_phys = 1, }; void __init hexagon_dma_init(void) diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c index 2942a9204a9a..91ee04842c22 100644 --- a/arch/hexagon/kernel/traps.c +++ b/arch/hexagon/kernel/traps.c @@ -412,10 +412,6 @@ void do_trap0(struct pt_regs *regs) case TRAP_DEBUG: /* Trap0 0xdb is debug breakpoint */ if (user_mode(regs)) { - struct 
siginfo info; - - info.si_signo = SIGTRAP; - info.si_errno = 0; /* * Some architecures add some per-thread state * to distinguish between breakpoint traps and @@ -423,9 +419,8 @@ void do_trap0(struct pt_regs *regs) * set the si_code value appropriately, or we * may want to use a different trap0 flavor. */ - info.si_code = TRAP_BRKPT; - info.si_addr = (void __user *) pt_elr(regs); - force_sig_info(SIGTRAP, &info, current); + force_sig_fault(SIGTRAP, TRAP_BRKPT, + (void __user *) pt_elr(regs), current); } else { #ifdef CONFIG_KGDB kgdb_handle_exception(pt_cause(regs), SIGTRAP, diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c index 3eec33c5cfd7..933bbcef5363 100644 --- a/arch/hexagon/mm/vm_fault.c +++ b/arch/hexagon/mm/vm_fault.c @@ -50,7 +50,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs) { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; - siginfo_t info; + int si_signo; int si_code = SEGV_MAPERR; int fault; const struct exception_table_entry *fixup; @@ -140,28 +140,22 @@ good_area: * unable to fix up the page fault. */ if (fault & VM_FAULT_SIGBUS) { - info.si_signo = SIGBUS; - info.si_code = BUS_ADRERR; + si_signo = SIGBUS; + si_code = BUS_ADRERR; } /* Address is not in the memory map */ else { - info.si_signo = SIGSEGV; - info.si_code = SEGV_ACCERR; + si_signo = SIGSEGV; + si_code = SEGV_ACCERR; } - info.si_errno = 0; - info.si_addr = (void __user *)address; - force_sig_info(info.si_signo, &info, current); + force_sig_fault(si_signo, si_code, (void __user *)address, current); return; bad_area: up_read(&mm->mmap_sem); if (user_mode(regs)) { - info.si_signo = SIGSEGV; - info.si_errno = 0; - info.si_code = si_code; - info.si_addr = (void *)address; - force_sig_info(info.si_signo, &info, current); + force_sig_fault(SIGSEGV, si_code, (void __user *)address, current); return; } /* Kernel-mode fault falls through */ diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index bbe12a038d21..792437d526c6 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -29,7 +29,6 @@ config IA64 select HAVE_FUNCTION_TRACER select TTY select HAVE_ARCH_TRACEHOOK - select HAVE_DMA_API_DEBUG select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP select HAVE_VIRT_CPU_ACCOUNTING @@ -54,6 +53,8 @@ config IA64 select MODULES_USE_ELF_RELA select ARCH_USE_CMPXCHG_LOCKREF select HAVE_ARCH_AUDITSYSCALL + select NEED_DMA_MAP_STATE + select NEED_SG_DMA_LENGTH default y help The Itanium Processor Family is Intel's 64-bit successor to @@ -78,18 +79,6 @@ config MMU bool default y -config ARCH_DMA_ADDR_T_64BIT - def_bool y - -config NEED_DMA_MAP_STATE - def_bool y - -config NEED_SG_DMA_LENGTH - def_bool y - -config SWIOTLB - bool - config STACKTRACE_SUPPORT def_bool y @@ -146,7 +135,6 @@ config IA64_GENERIC bool "generic" select NUMA select ACPI_NUMA - select DMA_DIRECT_OPS select SWIOTLB select PCI_MSI help @@ -167,7 +155,6 @@ config IA64_GENERIC config IA64_DIG bool "DIG-compliant" - select DMA_DIRECT_OPS select SWIOTLB config IA64_DIG_VTD @@ -183,7 +170,6 @@ config IA64_HP_ZX1 config IA64_HP_ZX1_SWIOTLB bool "HP-zx1/sx1000 with software I/O TLB" - select DMA_DIRECT_OPS select SWIOTLB help Build a kernel that runs on HP zx1 and sx1000 systems even when they @@ -207,7 +193,6 @@ config IA64_SGI_UV bool "SGI-UV" select NUMA select ACPI_NUMA - select DMA_DIRECT_OPS select SWIOTLB help Selecting this option will optimize the kernel for use on UV based @@ -218,7 +203,6 @@ config IA64_SGI_UV config IA64_HP_SIM bool "Ski-simulator" - select DMA_DIRECT_OPS select SWIOTLB 
depends on !PM @@ -397,7 +381,7 @@ config ARCH_DISCONTIGMEM_ENABLE Say Y to support efficient handling of discontiguous physical memory, for architectures which are either NUMA (Non-Uniform Memory Access) or have huge holes in the physical address space for other reasons. - See <file:Documentation/vm/numa> for more. + See <file:Documentation/vm/numa.rst> for more. config ARCH_FLATMEM_ENABLE def_bool y @@ -613,6 +597,3 @@ source "security/Kconfig" source "crypto/Kconfig" source "lib/Kconfig" - -config IOMMU_HELPER - def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB) diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index 2dd7f519ad0b..45f59808b842 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -18,7 +18,7 @@ READELF := $(CROSS_COMPILE)readelf export AWK -CHECKFLAGS += -m64 -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__ +CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__ OBJCOPYFLAGS := --strip-all LDFLAGS_vmlinux := -static diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index aec4a3354abe..ee5b652d320a 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -1845,9 +1845,6 @@ static void ioc_init(unsigned long hpa, struct ioc *ioc) ioc_resource_init(ioc); ioc_sac_init(ioc); - if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask) - ia64_max_iommu_merge_mask = ~iovp_mask; - printk(KERN_INFO PFX "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n", ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF, @@ -1942,19 +1939,6 @@ static const struct seq_operations ioc_seq_ops = { .show = ioc_show }; -static int -ioc_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &ioc_seq_ops); -} - -static const struct file_operations ioc_fops = { - .open = ioc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release -}; - static void __init ioc_proc_init(void) { @@ -1964,7 +1948,7 @@ ioc_proc_init(void) if (!dir) return; - proc_create(ioc_list->name, 0, dir, &ioc_fops); + proc_create_seq(ioc_list->name, 0, dir, &ioc_seq_ops); } #endif diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c index a419ccf33cde..663388a73d4e 100644 --- a/arch/ia64/hp/sim/simserial.c +++ b/arch/ia64/hp/sim/simserial.c @@ -435,19 +435,6 @@ static int rs_proc_show(struct seq_file *m, void *v) return 0; } -static int rs_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, rs_proc_show, NULL); -} - -static const struct file_operations rs_proc_fops = { - .owner = THIS_MODULE, - .open = rs_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static const struct tty_operations hp_ops = { .open = rs_open, .close = rs_close, @@ -462,7 +449,7 @@ static const struct tty_operations hp_ops = { .unthrottle = rs_unthrottle, .send_xchar = rs_send_xchar, .hangup = rs_hangup, - .proc_fops = &rs_proc_fops, + .proc_show = rs_proc_show, }; static const struct tty_port_operations hp_port_ops = { diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild index 6dd867873364..557bbc8ba9f5 100644 --- a/arch/ia64/include/asm/Kbuild +++ b/arch/ia64/include/asm/Kbuild @@ -1,3 +1,4 @@ +generic-y += compat.h generic-y += exec.h generic-y += irq_work.h generic-y += mcs_spinlock.h diff --git a/arch/ia64/include/asm/hardirq.h b/arch/ia64/include/asm/hardirq.h index bdc4669c71c3..ccde7c2ba00f 100644 --- a/arch/ia64/include/asm/hardirq.h +++ b/arch/ia64/include/asm/hardirq.h @@ -13,7 +13,7 @@ #define __ARCH_IRQ_STAT 1 -#define 
local_softirq_pending() (local_cpu_data->softirq_pending) +#define local_softirq_pending_ref ia64_cpu_info.softirq_pending #include <linux/threads.h> #include <linux/irq.h> diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h index b1d04e8bafc8..780e8744ba85 100644 --- a/arch/ia64/include/asm/pci.h +++ b/arch/ia64/include/asm/pci.h @@ -30,23 +30,6 @@ struct pci_vector_struct { #define PCIBIOS_MIN_IO 0x1000 #define PCIBIOS_MIN_MEM 0x10000000 -/* - * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct - * correspondence between device bus addresses and CPU physical addresses. - * Platforms with a hardware I/O MMU _must_ turn this off to suppress the - * bounce buffer handling code in the block and network device layers. - * Platforms with separate bus address spaces _must_ turn this off and provide - * a device DMA mapping implementation that takes care of the necessary - * address translation. - * - * For now, the ia64 platforms which may have separate/multiple bus address - * spaces all have I/O MMUs which support the merging of physically - * discontiguous buffers, so we can use that as the sole factor to determine - * the setting of PCI_DMA_BUS_IS_PHYS. - */ -extern unsigned long ia64_max_iommu_merge_mask; -#define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL) - #define HAVE_PCI_MMAP #define ARCH_GENERIC_PCI_MMAP_RESOURCE #define arch_can_pci_mmap_wc() 1 diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild index c0527cfc48f0..3982e673e967 100644 --- a/arch/ia64/include/uapi/asm/Kbuild +++ b/arch/ia64/include/uapi/asm/Kbuild @@ -2,5 +2,9 @@ include include/uapi/asm-generic/Kbuild.asm generic-y += bpf_perf_event.h +generic-y += ipcbuf.h generic-y += kvm_para.h +generic-y += msgbuf.h generic-y += poll.h +generic-y += sembuf.h +generic-y += shmbuf.h diff --git a/arch/ia64/include/uapi/asm/ipcbuf.h b/arch/ia64/include/uapi/asm/ipcbuf.h deleted file mode 100644 index 90d6445a14df..000000000000 --- a/arch/ia64/include/uapi/asm/ipcbuf.h +++ /dev/null @@ -1,2 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#include <asm-generic/ipcbuf.h> diff --git a/arch/ia64/include/uapi/asm/msgbuf.h b/arch/ia64/include/uapi/asm/msgbuf.h deleted file mode 100644 index aa25df92d9dc..000000000000 --- a/arch/ia64/include/uapi/asm/msgbuf.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _ASM_IA64_MSGBUF_H -#define _ASM_IA64_MSGBUF_H - -/* - * The msqid64_ds structure for IA-64 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. 
- * - * Pad space is left for: - * - 2 miscellaneous 64-bit values - */ - -struct msqid64_ds { - struct ipc64_perm msg_perm; - __kernel_time_t msg_stime; /* last msgsnd time */ - __kernel_time_t msg_rtime; /* last msgrcv time */ - __kernel_time_t msg_ctime; /* last change time */ - unsigned long msg_cbytes; /* current number of bytes on queue */ - unsigned long msg_qnum; /* number of messages in queue */ - unsigned long msg_qbytes; /* max number of bytes on queue */ - __kernel_pid_t msg_lspid; /* pid of last msgsnd */ - __kernel_pid_t msg_lrpid; /* last receive pid */ - unsigned long __unused1; - unsigned long __unused2; -}; - -#endif /* _ASM_IA64_MSGBUF_H */ diff --git a/arch/ia64/include/uapi/asm/sembuf.h b/arch/ia64/include/uapi/asm/sembuf.h deleted file mode 100644 index 6ed058760afc..000000000000 --- a/arch/ia64/include/uapi/asm/sembuf.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _ASM_IA64_SEMBUF_H -#define _ASM_IA64_SEMBUF_H - -/* - * The semid64_ds structure for IA-64 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 2 miscellaneous 64-bit values - */ - -struct semid64_ds { - struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ - __kernel_time_t sem_otime; /* last semop time */ - __kernel_time_t sem_ctime; /* last change time */ - unsigned long sem_nsems; /* no. of semaphores in array */ - unsigned long __unused1; - unsigned long __unused2; -}; - -#endif /* _ASM_IA64_SEMBUF_H */ diff --git a/arch/ia64/include/uapi/asm/shmbuf.h b/arch/ia64/include/uapi/asm/shmbuf.h deleted file mode 100644 index 6ef57cb70dee..000000000000 --- a/arch/ia64/include/uapi/asm/shmbuf.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _ASM_IA64_SHMBUF_H -#define _ASM_IA64_SHMBUF_H - -/* - * The shmid64_ds structure for IA-64 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 2 miscellaneous 64-bit values - */ - -struct shmid64_ds { - struct ipc64_perm shm_perm; /* operation perms */ - size_t shm_segsz; /* size of segment (bytes) */ - __kernel_time_t shm_atime; /* last attach time */ - __kernel_time_t shm_dtime; /* last detach time */ - __kernel_time_t shm_ctime; /* last change time */ - __kernel_pid_t shm_cpid; /* pid of creator */ - __kernel_pid_t shm_lpid; /* pid of last operator */ - unsigned long shm_nattch; /* no. 
of current attaches */ - unsigned long __unused1; - unsigned long __unused2; -}; - -struct shminfo64 { - unsigned long shmmax; - unsigned long shmmin; - unsigned long shmmni; - unsigned long shmseg; - unsigned long shmall; - unsigned long __unused1; - unsigned long __unused2; - unsigned long __unused3; - unsigned long __unused4; -}; - -#endif /* _ASM_IA64_SHMBUF_H */ diff --git a/arch/ia64/include/uapi/asm/siginfo.h b/arch/ia64/include/uapi/asm/siginfo.h index 5aa454ed89db..52b5af424511 100644 --- a/arch/ia64/include/uapi/asm/siginfo.h +++ b/arch/ia64/include/uapi/asm/siginfo.h @@ -27,11 +27,4 @@ #define __ISR_VALID_BIT 0 #define __ISR_VALID (1 << __ISR_VALID_BIT) -/* - * SIGFPE si_codes - */ -#ifdef __KERNEL__ -#define FPE_FIXME 0 /* Broken dup of SI_USER */ -#endif /* __KERNEL__ */ - #endif /* _UAPI_ASM_IA64_SIGINFO_H */ diff --git a/arch/ia64/kernel/brl_emu.c b/arch/ia64/kernel/brl_emu.c index 9bcc908bc85e..a61f6c6a36f8 100644 --- a/arch/ia64/kernel/brl_emu.c +++ b/arch/ia64/kernel/brl_emu.c @@ -62,6 +62,7 @@ ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec) struct illegal_op_return rv; long tmp_taken, unimplemented_address; + clear_siginfo(&siginfo); rv.fkt = (unsigned long) -1; /* diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c index f2d57e66fd86..7a471d8d67d4 100644 --- a/arch/ia64/kernel/dma-mapping.c +++ b/arch/ia64/kernel/dma-mapping.c @@ -9,16 +9,6 @@ int iommu_detected __read_mostly; const struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); -#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) - -static int __init dma_init(void) -{ - dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); - - return 0; -} -fs_initcall(dma_init); - const struct dma_map_ops *dma_get_ops(struct device *dev) { return dma_ops; diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index b6e597860888..f4a94241265c 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c @@ -920,18 +920,6 @@ static int proc_palinfo_show(struct seq_file *m, void *v) return 0; } -static int proc_palinfo_open(struct inode *inode, struct file *file) -{ - return single_open(file, proc_palinfo_show, PDE_DATA(inode)); -} - -static const struct file_operations proc_palinfo_fops = { - .open = proc_palinfo_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static int palinfo_add_proc(unsigned int cpu) { pal_func_cpu_u_t f; @@ -948,8 +936,8 @@ static int palinfo_add_proc(unsigned int cpu) for (j=0; j < NR_PALINFO_ENTRIES; j++) { f.func_id = j; - proc_create_data(palinfo_entries[j].name, 0, cpu_dir, - &proc_palinfo_fops, (void *)f.value); + proc_create_single_data(palinfo_entries[j].name, 0, cpu_dir, + proc_palinfo_show, (void *)f.value); } return 0; } diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 8fb280e33114..3b38c717008a 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -5708,13 +5708,6 @@ const struct seq_operations pfm_seq_ops = { .show = pfm_proc_show }; -static int -pfm_proc_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &pfm_seq_ops); -} - - /* * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens * during pfm_enable() hence before pfm_start(). 
We cannot assume monitoring @@ -6537,13 +6530,6 @@ found: return 0; } -static const struct file_operations pfm_proc_fops = { - .open = pfm_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - int __init pfm_init(void) { @@ -6615,7 +6601,7 @@ pfm_init(void) /* * create /proc/perfmon (mostly for debugging purposes) */ - perfmon_dir = proc_create("perfmon", S_IRUGO, NULL, &pfm_proc_fops); + perfmon_dir = proc_create_seq("perfmon", S_IRUGO, NULL, &pfm_seq_ops); if (perfmon_dir == NULL) { printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n"); pmu_conf = NULL; diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index 52c404b08904..aba1f463a8dd 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c @@ -54,8 +54,6 @@ MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>"); MODULE_DESCRIPTION("/proc interface to IA-64 SAL features"); MODULE_LICENSE("GPL"); -static const struct file_operations proc_salinfo_fops; - typedef struct { const char *name; /* name of the proc entry */ unsigned long feature; /* feature bit */ @@ -578,6 +576,17 @@ static int salinfo_cpu_pre_down(unsigned int cpu) return 0; } +/* + * 'data' contains an integer that corresponds to the feature we're + * testing + */ +static int proc_salinfo_show(struct seq_file *m, void *v) +{ + unsigned long data = (unsigned long)v; + seq_puts(m, (sal_platform_features & data) ? "1\n" : "0\n"); + return 0; +} + static int __init salinfo_init(void) { @@ -593,9 +602,9 @@ salinfo_init(void) for (i=0; i < NR_SALINFO_ENTRIES; i++) { /* pass the feature bit in question as misc data */ - *sdir++ = proc_create_data(salinfo_entries[i].name, 0, salinfo_dir, - &proc_salinfo_fops, - (void *)salinfo_entries[i].feature); + *sdir++ = proc_create_single_data(salinfo_entries[i].name, 0, + salinfo_dir, proc_salinfo_show, + (void *)salinfo_entries[i].feature); } for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) { @@ -633,27 +642,4 @@ salinfo_init(void) return 0; } -/* - * 'data' contains an integer that corresponds to the feature we're - * testing - */ -static int proc_salinfo_show(struct seq_file *m, void *v) -{ - unsigned long data = (unsigned long)v; - seq_puts(m, (sal_platform_features & data) ? "1\n" : "0\n"); - return 0; -} - -static int proc_salinfo_open(struct inode *inode, struct file *file) -{ - return single_open(file, proc_salinfo_show, PDE_DATA(inode)); -} - -static const struct file_operations proc_salinfo_fops = { - .open = proc_salinfo_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - module_init(salinfo_init); diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index dee56bcb993d..ad43cbf70628 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -124,18 +124,6 @@ unsigned long ia64_i_cache_stride_shift = ~0; unsigned long ia64_cache_stride_shift = ~0; /* - * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1). This - * mask specifies a mask of address bits that must be 0 in order for two buffers to be - * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start - * address of the second buffer must be aligned to (merge_mask+1) in order to be - * mergeable). By default, we assume there is no I/O MMU which can merge physically - * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to a iommu - * page-size of 2^64. 
- */ -unsigned long ia64_max_iommu_merge_mask = ~0UL; -EXPORT_SYMBOL(ia64_max_iommu_merge_mask); - -/* * We use a special marker for the end of memory and it uses the extra (+1) slot */ struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata; diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 54547c7cf8a2..d1234a5ba4c5 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -153,6 +153,7 @@ ia64_rt_sigreturn (struct sigscratch *scr) return retval; give_sigsegv: + clear_siginfo(&si); si.si_signo = SIGSEGV; si.si_errno = 0; si.si_code = SI_KERNEL; @@ -236,6 +237,7 @@ force_sigsegv_info (int sig, void __user *addr) unsigned long flags; struct siginfo si; + clear_siginfo(&si); if (sig == SIGSEGV) { /* * Acquiring siglock around the sa_handler-update is almost diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index 6d4e76a4267f..c6f4932073a1 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -104,6 +104,7 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs) int sig, code; /* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these field initialized: */ + clear_siginfo(&siginfo); siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri); siginfo.si_imm = break_num; siginfo.si_flags = 0; /* clear __ISR_VALID */ @@ -293,7 +294,6 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr) { long exception, bundle[2]; unsigned long fault_ip; - struct siginfo siginfo; fault_ip = regs->cr_iip; if (!fp_fault && (ia64_psr(regs)->ri == 0)) @@ -344,13 +344,16 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr) printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n"); return -1; } else { + struct siginfo siginfo; + /* is next instruction a trap? 
*/ if (exception & 2) { ia64_increment_ip(regs); } + clear_siginfo(&siginfo); siginfo.si_signo = SIGFPE; siginfo.si_errno = 0; - siginfo.si_code = FPE_FIXME; /* default code */ + siginfo.si_code = FPE_FLTUNK; /* default code */ siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri); if (isr & 0x11) { siginfo.si_code = FPE_FLTINV; @@ -372,9 +375,12 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr) return -1; } else if (exception != 0) { /* raise exception */ + struct siginfo siginfo; + + clear_siginfo(&siginfo); siginfo.si_signo = SIGFPE; siginfo.si_errno = 0; - siginfo.si_code = FPE_FIXME; /* default code */ + siginfo.si_code = FPE_FLTUNK; /* default code */ siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri); if (isr & 0x880) { siginfo.si_code = FPE_FLTOVF; @@ -420,7 +426,7 @@ ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3, if (die_if_kernel(buf, ®s, 0)) return rv; - memset(&si, 0, sizeof(si)); + clear_siginfo(&si); si.si_signo = SIGILL; si.si_code = ILL_ILLOPC; si.si_addr = (void __user *) (regs.cr_iip + ia64_psr(®s)->ri); @@ -434,7 +440,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, long arg7, struct pt_regs regs) { unsigned long code, error = isr, iip; - struct siginfo siginfo; char buf[128]; int result, sig; static const char *reason[] = { @@ -485,6 +490,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, case 26: /* NaT Consumption */ if (user_mode(®s)) { + struct siginfo siginfo; void __user *addr; if (((isr >> 4) & 0xf) == 2) { @@ -499,6 +505,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, addr = (void __user *) (regs.cr_iip + ia64_psr(®s)->ri); } + clear_siginfo(&siginfo); siginfo.si_signo = sig; siginfo.si_code = code; siginfo.si_errno = 0; @@ -515,6 +522,9 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, case 31: /* Unsupported Data Reference */ if (user_mode(®s)) { + struct siginfo siginfo; + + clear_siginfo(&siginfo); siginfo.si_signo = SIGILL; siginfo.si_code = ILL_ILLOPN; siginfo.si_errno = 0; @@ -531,6 +541,10 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, case 29: /* Debug */ case 35: /* Taken Branch Trap */ case 36: /* Single Step Trap */ + { + struct siginfo siginfo; + + clear_siginfo(&siginfo); if (fsys_mode(current, ®s)) { extern char __kernel_syscall_via_break[]; /* @@ -578,11 +592,15 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, siginfo.si_isr = isr; force_sig_info(SIGTRAP, &siginfo, current); return; + } case 32: /* fp fault */ case 33: /* fp trap */ result = handle_fpu_swa((vector == 32) ? 1 : 0, ®s, isr); if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) { + struct siginfo siginfo; + + clear_siginfo(&siginfo); siginfo.si_signo = SIGFPE; siginfo.si_errno = 0; siginfo.si_code = FPE_FLTINV; @@ -616,6 +634,9 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, } else { /* Unimplemented Instr. 
Address Trap */ if (user_mode(®s)) { + struct siginfo siginfo; + + clear_siginfo(&siginfo); siginfo.si_signo = SIGILL; siginfo.si_code = ILL_BADIADDR; siginfo.si_errno = 0; diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c index 72e9b4242564..e309f9859acc 100644 --- a/arch/ia64/kernel/unaligned.c +++ b/arch/ia64/kernel/unaligned.c @@ -1537,6 +1537,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) /* NOT_REACHED */ } force_sigbus: + clear_siginfo(&si); si.si_signo = SIGBUS; si.si_errno = 0; si.si_code = BUS_ADRALN; diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index dfdc152d6737..817fa120645f 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -85,7 +85,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re int signal = SIGSEGV, code = SEGV_MAPERR; struct vm_area_struct *vma, *prev_vma; struct mm_struct *mm = current->mm; - struct siginfo si; unsigned long mask; int fault; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; @@ -249,6 +248,9 @@ retry: return; } if (user_mode(regs)) { + struct siginfo si; + + clear_siginfo(&si); si.si_signo = signal; si.si_errno = 0; si.si_code = code; diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c index 11f2275570fb..8479e9a7ce16 100644 --- a/arch/ia64/sn/kernel/io_common.c +++ b/arch/ia64/sn/kernel/io_common.c @@ -480,11 +480,6 @@ sn_io_early_init(void) tioca_init_provider(); tioce_init_provider(); - /* - * This is needed to avoid bounce limit checks in the blk layer - */ - ia64_max_iommu_merge_mask = ~PAGE_MASK; - sn_irq_lh_init(); INIT_LIST_HEAD(&sn_sysdata_list); sn_init_cpei_timer(); diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c index ec4de2b09653..e15457bf21ac 100644 --- a/arch/ia64/sn/kernel/sn2/prominfo_proc.c +++ b/arch/ia64/sn/kernel/sn2/prominfo_proc.c @@ -140,18 +140,6 @@ static int proc_fit_show(struct seq_file *m, void *v) return 0; } -static int proc_fit_open(struct inode *inode, struct file *file) -{ - return single_open(file, proc_fit_show, PDE_DATA(inode)); -} - -static const struct file_operations proc_fit_fops = { - .open = proc_fit_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static int proc_version_show(struct seq_file *m, void *v) { unsigned long nasid = (unsigned long)m->private; @@ -174,18 +162,6 @@ static int proc_version_show(struct seq_file *m, void *v) return 0; } -static int proc_version_open(struct inode *inode, struct file *file) -{ - return single_open(file, proc_version_show, PDE_DATA(inode)); -} - -static const struct file_operations proc_version_fops = { - .open = proc_version_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - /* module entry points */ int __init prominfo_init(void); void __exit prominfo_exit(void); @@ -217,10 +193,10 @@ int __init prominfo_init(void) if (!dir) continue; nasid = cnodeid_to_nasid(cnodeid); - proc_create_data("fit", 0, dir, - &proc_fit_fops, (void *)nasid); - proc_create_data("version", 0, dir, - &proc_version_fops, (void *)nasid); + proc_create_single_data("fit", 0, dir, proc_fit_show, + (void *)nasid); + proc_create_single_data("version", 0, dir, proc_version_show, + (void *)nasid); } return 0; } diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c index 29cf8f8c08e9..c2a4d84297b0 100644 --- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c +++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c @@ -18,33 +18,18 @@ 
static int partition_id_show(struct seq_file *s, void *p) return 0; } -static int partition_id_open(struct inode *inode, struct file *file) -{ - return single_open(file, partition_id_show, NULL); -} - static int system_serial_number_show(struct seq_file *s, void *p) { seq_printf(s, "%s\n", sn_system_serial_number()); return 0; } -static int system_serial_number_open(struct inode *inode, struct file *file) -{ - return single_open(file, system_serial_number_show, NULL); -} - static int licenseID_show(struct seq_file *s, void *p) { seq_printf(s, "0x%llx\n", sn_partition_serial_number_val()); return 0; } -static int licenseID_open(struct inode *inode, struct file *file) -{ - return single_open(file, licenseID_show, NULL); -} - static int coherence_id_show(struct seq_file *s, void *p) { seq_printf(s, "%d\n", partition_coherence_id()); @@ -52,43 +37,10 @@ static int coherence_id_show(struct seq_file *s, void *p) return 0; } -static int coherence_id_open(struct inode *inode, struct file *file) -{ - return single_open(file, coherence_id_show, NULL); -} - /* /proc/sgi_sn/sn_topology uses seq_file, see sn_hwperf.c */ extern int sn_topology_open(struct inode *, struct file *); extern int sn_topology_release(struct inode *, struct file *); -static const struct file_operations proc_partition_id_fops = { - .open = partition_id_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static const struct file_operations proc_system_sn_fops = { - .open = system_serial_number_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static const struct file_operations proc_license_id_fops = { - .open = licenseID_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static const struct file_operations proc_coherence_id_fops = { - .open = coherence_id_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static const struct file_operations proc_sn_topo_fops = { .open = sn_topology_open, .read = seq_read, @@ -104,13 +56,13 @@ void register_sn_procfs(void) if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL))) return; - proc_create("partition_id", 0444, sgi_proc_dir, - &proc_partition_id_fops); - proc_create("system_serial_number", 0444, sgi_proc_dir, - &proc_system_sn_fops); - proc_create("licenseID", 0444, sgi_proc_dir, &proc_license_id_fops); - proc_create("coherence_id", 0444, sgi_proc_dir, - &proc_coherence_id_fops); + proc_create_single("partition_id", 0444, sgi_proc_dir, + partition_id_show); + proc_create_single("system_serial_number", 0444, sgi_proc_dir, + system_serial_number_show); + proc_create_single("licenseID", 0444, sgi_proc_dir, licenseID_show); + proc_create_single("coherence_id", 0444, sgi_proc_dir, + coherence_id_show); proc_create("sn_topology", 0444, sgi_proc_dir, &proc_sn_topo_fops); } diff --git a/arch/m68k/68000/timers.c b/arch/m68k/68000/timers.c index 252455bce144..71ddb4c98726 100644 --- a/arch/m68k/68000/timers.c +++ b/arch/m68k/68000/timers.c @@ -125,7 +125,9 @@ int m68328_hwclk(int set, struct rtc_time *t) { if (!set) { long now = RTCTIME; - t->tm_year = t->tm_mon = t->tm_mday = 1; + t->tm_year = 1; + t->tm_mon = 0; + t->tm_mday = 1; t->tm_hour = (now >> 24) % 24; t->tm_min = (now >> 16) % 60; t->tm_sec = now % 60; diff --git a/arch/m68k/Kconfig.bus b/arch/m68k/Kconfig.bus index d5e66ec136db..aef698fa50e5 100644 --- a/arch/m68k/Kconfig.bus +++ b/arch/m68k/Kconfig.bus @@ -59,6 +59,10 @@ config ATARI_ROM_ISA config GENERIC_ISA_DMA def_bool ISA +source 
"drivers/zorro/Kconfig" + +endif + config PCI bool "PCI support" depends on M54xx @@ -66,10 +70,8 @@ config PCI Enable the PCI bus. Support for the PCI bus hardware built into the ColdFire 547x and 548x processors. +if PCI source "drivers/pci/Kconfig" - -source "drivers/zorro/Kconfig" - endif if !MMU diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c index 0d27706f14d4..b2a6bc63f8cd 100644 --- a/arch/m68k/apollo/config.c +++ b/arch/m68k/apollo/config.c @@ -221,8 +221,10 @@ int dn_dummy_hwclk(int op, struct rtc_time *t) { t->tm_hour=rtc->hours; t->tm_mday=rtc->day_of_month; t->tm_wday=rtc->day_of_week; - t->tm_mon=rtc->month; + t->tm_mon = rtc->month - 1; t->tm_year=rtc->year; + if (t->tm_year < 70) + t->tm_year += 100; } else { rtc->second=t->tm_sec; rtc->minute=t->tm_min; @@ -230,8 +232,8 @@ int dn_dummy_hwclk(int op, struct rtc_time *t) { rtc->day_of_month=t->tm_mday; if(t->tm_wday!=-1) rtc->day_of_week=t->tm_wday; - rtc->month=t->tm_mon; - rtc->year=t->tm_year; + rtc->month = t->tm_mon + 1; + rtc->year = t->tm_year % 100; } return 0; diff --git a/arch/m68k/coldfire/pci.c b/arch/m68k/coldfire/pci.c index 3097fa2ca746..62b0eb6cf69a 100644 --- a/arch/m68k/coldfire/pci.c +++ b/arch/m68k/coldfire/pci.c @@ -23,20 +23,10 @@ /* * Memory and IO mappings. We use a 1:1 mapping for local host memory to - * PCI bus memory (no reason not to really). IO space doesn't matter, we - * always use access functions for that. The device configuration space is - * mapped over the IO map space when we enable it in the PCICAR register. + * PCI bus memory (no reason not to really). IO space is mapped in its own + * separate address region. The device configuration space is mapped over + * the IO map space when we enable it in the PCICAR register. */ -#define PCI_MEM_PA 0xf0000000 /* Host physical address */ -#define PCI_MEM_BA 0xf0000000 /* Bus physical address */ -#define PCI_MEM_SIZE 0x08000000 /* 128 MB */ -#define PCI_MEM_MASK (PCI_MEM_SIZE - 1) - -#define PCI_IO_PA 0xf8000000 /* Host physical address */ -#define PCI_IO_BA 0x00000000 /* Bus physical address */ -#define PCI_IO_SIZE 0x00010000 /* 64k */ -#define PCI_IO_MASK (PCI_IO_SIZE - 1) - static struct pci_bus *rootbus; static unsigned long iospace; @@ -56,13 +46,6 @@ static unsigned char mcf_host_irq[] = { 0, 69, 69, 71, 71, }; - -static inline void syncio(void) -{ - /* The ColdFire "nop" instruction waits for all bus IO to complete */ - __asm__ __volatile__ ("nop"); -} - /* * Configuration space access functions. Configuration space access is * through the IO mapping window, enabling it via the PCICAR register. 
@@ -84,9 +67,9 @@ static int mcf_pci_readconfig(struct pci_bus *bus, unsigned int devfn, return PCIBIOS_SUCCESSFUL; } - syncio(); addr = mcf_mk_pcicar(bus->number, devfn, where); __raw_writel(PCICAR_E | addr, PCICAR); + __raw_readl(PCICAR); addr = iospace + (where & 0x3); switch (size) { @@ -101,8 +84,8 @@ static int mcf_pci_readconfig(struct pci_bus *bus, unsigned int devfn, break; } - syncio(); __raw_writel(0, PCICAR); + __raw_readl(PCICAR); return PCIBIOS_SUCCESSFUL; } @@ -116,9 +99,9 @@ static int mcf_pci_writeconfig(struct pci_bus *bus, unsigned int devfn, return PCIBIOS_SUCCESSFUL; } - syncio(); addr = mcf_mk_pcicar(bus->number, devfn, where); __raw_writel(PCICAR_E | addr, PCICAR); + __raw_readl(PCICAR); addr = iospace + (where & 0x3); switch (size) { @@ -133,8 +116,8 @@ static int mcf_pci_writeconfig(struct pci_bus *bus, unsigned int devfn, break; } - syncio(); __raw_writel(0, PCICAR); + __raw_readl(PCICAR); return PCIBIOS_SUCCESSFUL; } @@ -144,89 +127,6 @@ static struct pci_ops mcf_pci_ops = { }; /* - * IO address space access functions. Pretty strait forward, these are - * directly mapped in to the IO mapping window. And that is mapped into - * virtual address space. - */ -u8 mcf_pci_inb(u32 addr) -{ - return __raw_readb(iospace + (addr & PCI_IO_MASK)); -} -EXPORT_SYMBOL(mcf_pci_inb); - -u16 mcf_pci_inw(u32 addr) -{ - return le16_to_cpu(__raw_readw(iospace + (addr & PCI_IO_MASK))); -} -EXPORT_SYMBOL(mcf_pci_inw); - -u32 mcf_pci_inl(u32 addr) -{ - return le32_to_cpu(__raw_readl(iospace + (addr & PCI_IO_MASK))); -} -EXPORT_SYMBOL(mcf_pci_inl); - -void mcf_pci_insb(u32 addr, u8 *buf, u32 len) -{ - for (; len; len--) - *buf++ = mcf_pci_inb(addr); -} -EXPORT_SYMBOL(mcf_pci_insb); - -void mcf_pci_insw(u32 addr, u16 *buf, u32 len) -{ - for (; len; len--) - *buf++ = mcf_pci_inw(addr); -} -EXPORT_SYMBOL(mcf_pci_insw); - -void mcf_pci_insl(u32 addr, u32 *buf, u32 len) -{ - for (; len; len--) - *buf++ = mcf_pci_inl(addr); -} -EXPORT_SYMBOL(mcf_pci_insl); - -void mcf_pci_outb(u8 v, u32 addr) -{ - __raw_writeb(v, iospace + (addr & PCI_IO_MASK)); -} -EXPORT_SYMBOL(mcf_pci_outb); - -void mcf_pci_outw(u16 v, u32 addr) -{ - __raw_writew(cpu_to_le16(v), iospace + (addr & PCI_IO_MASK)); -} -EXPORT_SYMBOL(mcf_pci_outw); - -void mcf_pci_outl(u32 v, u32 addr) -{ - __raw_writel(cpu_to_le32(v), iospace + (addr & PCI_IO_MASK)); -} -EXPORT_SYMBOL(mcf_pci_outl); - -void mcf_pci_outsb(u32 addr, const u8 *buf, u32 len) -{ - for (; len; len--) - mcf_pci_outb(*buf++, addr); -} -EXPORT_SYMBOL(mcf_pci_outsb); - -void mcf_pci_outsw(u32 addr, const u16 *buf, u32 len) -{ - for (; len; len--) - mcf_pci_outw(*buf++, addr); -} -EXPORT_SYMBOL(mcf_pci_outsw); - -void mcf_pci_outsl(u32 addr, const u32 *buf, u32 len) -{ - for (; len; len--) - mcf_pci_outl(*buf++, addr); -} -EXPORT_SYMBOL(mcf_pci_outsl); - -/* * Initialize the PCI bus registers, and scan the bus. 
*/ static struct resource mcf_pci_mem = { diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 37a8e5ab8728..a874e54404d1 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -98,8 +98,8 @@ CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_INET=m -CONFIG_NF_TABLES_NETDEV=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_RT=m @@ -204,7 +204,7 @@ CONFIG_NF_SOCKET_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m -CONFIG_NF_TABLES_ARP=m +CONFIG_NF_TABLES_ARP=y CONFIG_NF_FLOW_TABLE_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -233,12 +233,12 @@ CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_SOCKET_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -259,7 +259,7 @@ CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m -CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_TABLES_BRIDGE=y CONFIG_NFT_BRIDGE_META=m CONFIG_NFT_BRIDGE_REJECT=m CONFIG_NF_LOG_BRIDGE=m @@ -310,7 +310,6 @@ CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_NSH=m -CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set CONFIG_PSAMPLE=m @@ -414,6 +413,7 @@ CONFIG_ARIADNE=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set CONFIG_HYDRA=y CONFIG_APNE=y CONFIG_ZORRO8390=y @@ -485,6 +485,7 @@ CONFIG_RTC_DRV_MSM6242=m CONFIG_RTC_DRV_RP5C01=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set +CONFIG_DAX=m CONFIG_HEARTBEAT=y CONFIG_PROC_HARDWARE=y CONFIG_AMIGA_BUILTIN_SERIAL=y @@ -621,6 +622,7 @@ CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m @@ -647,6 +649,8 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 6a466266b852..8ce39e23aa42 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -96,8 +96,8 @@ CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_INET=m -CONFIG_NF_TABLES_NETDEV=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_RT=m @@ -202,7 +202,7 @@ CONFIG_NF_SOCKET_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m -CONFIG_NF_TABLES_ARP=m +CONFIG_NF_TABLES_ARP=y CONFIG_NF_FLOW_TABLE_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -231,12 +231,12 @@ CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_SOCKET_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_DUP_IPV6=m 
+CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -257,7 +257,7 @@ CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m -CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_TABLES_BRIDGE=y CONFIG_NFT_BRIDGE_META=m CONFIG_NFT_BRIDGE_REJECT=m CONFIG_NF_LOG_BRIDGE=m @@ -308,7 +308,6 @@ CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_NSH=m -CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set CONFIG_PSAMPLE=m @@ -392,6 +391,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set @@ -446,6 +446,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set +CONFIG_DAX=m CONFIG_HEARTBEAT=y CONFIG_PROC_HARDWARE=y CONFIG_EXT4_FS=y @@ -580,6 +581,7 @@ CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m @@ -606,6 +608,8 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index b0691a7a3345..346c4e75edf8 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -96,8 +96,8 @@ CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_INET=m -CONFIG_NF_TABLES_NETDEV=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_RT=m @@ -202,7 +202,7 @@ CONFIG_NF_SOCKET_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m -CONFIG_NF_TABLES_ARP=m +CONFIG_NF_TABLES_ARP=y CONFIG_NF_FLOW_TABLE_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -231,12 +231,12 @@ CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_SOCKET_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -257,7 +257,7 @@ CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m -CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_TABLES_BRIDGE=y CONFIG_NFT_BRIDGE_META=m CONFIG_NFT_BRIDGE_REJECT=m CONFIG_NF_LOG_BRIDGE=m @@ -308,7 +308,6 @@ CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_NSH=m -CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set CONFIG_PSAMPLE=m @@ -401,6 +400,7 @@ CONFIG_ATARILANCE=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set CONFIG_NE2000=y # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set @@ -461,6 +461,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set +CONFIG_DAX=m CONFIG_HEARTBEAT=y 
CONFIG_PROC_HARDWARE=y CONFIG_NATFEAT=y @@ -602,6 +603,7 @@ CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m @@ -628,6 +630,8 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 6f6470fa9a50..fca9c7aa71a3 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -94,8 +94,8 @@ CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_INET=m -CONFIG_NF_TABLES_NETDEV=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_RT=m @@ -200,7 +200,7 @@ CONFIG_NF_SOCKET_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m -CONFIG_NF_TABLES_ARP=m +CONFIG_NF_TABLES_ARP=y CONFIG_NF_FLOW_TABLE_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -229,12 +229,12 @@ CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_SOCKET_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -255,7 +255,7 @@ CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m -CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_TABLES_BRIDGE=y CONFIG_NFT_BRIDGE_META=m CONFIG_NFT_BRIDGE_REJECT=m CONFIG_NF_LOG_BRIDGE=m @@ -306,7 +306,6 @@ CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_NSH=m -CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set CONFIG_PSAMPLE=m @@ -391,6 +390,7 @@ CONFIG_BVME6000_NET=y # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set @@ -439,6 +439,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set +CONFIG_DAX=m CONFIG_PROC_HARDWARE=y CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m @@ -572,6 +573,7 @@ CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m @@ -598,6 +600,8 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index 31a1a2b5e860..f9eab174915c 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -96,8 +96,8 @@ CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_INET=m -CONFIG_NF_TABLES_NETDEV=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_RT=m @@ -202,7 +202,7 @@ CONFIG_NF_SOCKET_IPV4=m 
CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m -CONFIG_NF_TABLES_ARP=m +CONFIG_NF_TABLES_ARP=y CONFIG_NF_FLOW_TABLE_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -231,12 +231,12 @@ CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_SOCKET_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -257,7 +257,7 @@ CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m -CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_TABLES_BRIDGE=y CONFIG_NFT_BRIDGE_META=m CONFIG_NFT_BRIDGE_REJECT=m CONFIG_NF_LOG_BRIDGE=m @@ -308,7 +308,6 @@ CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_NSH=m -CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set CONFIG_PSAMPLE=m @@ -393,6 +392,7 @@ CONFIG_HPLANCE=y # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set @@ -449,6 +449,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set +CONFIG_DAX=m CONFIG_PROC_HARDWARE=y CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m @@ -582,6 +583,7 @@ CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m @@ -608,6 +610,8 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 390d4a87441c..b52e597899eb 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -95,8 +95,8 @@ CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_INET=m -CONFIG_NF_TABLES_NETDEV=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_RT=m @@ -201,7 +201,7 @@ CONFIG_NF_SOCKET_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m -CONFIG_NF_TABLES_ARP=m +CONFIG_NF_TABLES_ARP=y CONFIG_NF_FLOW_TABLE_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -230,12 +230,12 @@ CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_SOCKET_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -256,7 +256,7 @@ CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m -CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_TABLES_BRIDGE=y CONFIG_NFT_BRIDGE_META=m CONFIG_NFT_BRIDGE_REJECT=m CONFIG_NF_LOG_BRIDGE=m @@ -310,7 +310,6 @@ CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_NSH=m -CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m 
# CONFIG_WIRELESS is not set CONFIG_PSAMPLE=m @@ -410,6 +409,7 @@ CONFIG_MAC89x0=y # CONFIG_NET_VENDOR_MICREL is not set CONFIG_MACSONIC=y # CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set CONFIG_MAC8390=y # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set @@ -471,6 +471,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set +CONFIG_DAX=m CONFIG_PROC_HARDWARE=y CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m @@ -604,6 +605,7 @@ CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m @@ -630,6 +632,8 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 77be97d82dc3..2a84eeec5b02 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -105,8 +105,8 @@ CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_INET=m -CONFIG_NF_TABLES_NETDEV=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_RT=m @@ -211,7 +211,7 @@ CONFIG_NF_SOCKET_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m -CONFIG_NF_TABLES_ARP=m +CONFIG_NF_TABLES_ARP=y CONFIG_NF_FLOW_TABLE_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -240,12 +240,12 @@ CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_SOCKET_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -266,7 +266,7 @@ CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m -CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_TABLES_BRIDGE=y CONFIG_NFT_BRIDGE_META=m CONFIG_NFT_BRIDGE_REJECT=m CONFIG_NF_LOG_BRIDGE=m @@ -320,7 +320,6 @@ CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_NSH=m -CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set CONFIG_PSAMPLE=m @@ -452,6 +451,7 @@ CONFIG_MVME16x_NET=y # CONFIG_NET_VENDOR_MICREL is not set CONFIG_MACSONIC=y # CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set CONFIG_HYDRA=y CONFIG_MAC8390=y CONFIG_NE2000=y @@ -541,6 +541,7 @@ CONFIG_RTC_DRV_RP5C01=m CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set +CONFIG_DAX=m CONFIG_HEARTBEAT=y CONFIG_PROC_HARDWARE=y CONFIG_NATFEAT=y @@ -684,6 +685,7 @@ CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m @@ -710,6 +712,8 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index 2ca140757b0f..476e69994340 100644 --- 
a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -93,8 +93,8 @@ CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_INET=m -CONFIG_NF_TABLES_NETDEV=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_RT=m @@ -199,7 +199,7 @@ CONFIG_NF_SOCKET_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m -CONFIG_NF_TABLES_ARP=m +CONFIG_NF_TABLES_ARP=y CONFIG_NF_FLOW_TABLE_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -228,12 +228,12 @@ CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_SOCKET_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -254,7 +254,7 @@ CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m -CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_TABLES_BRIDGE=y CONFIG_NFT_BRIDGE_META=m CONFIG_NFT_BRIDGE_REJECT=m CONFIG_NF_LOG_BRIDGE=m @@ -305,7 +305,6 @@ CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_NSH=m -CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set CONFIG_PSAMPLE=m @@ -391,6 +390,7 @@ CONFIG_MVME147_NET=y # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set @@ -439,6 +439,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set +CONFIG_DAX=m CONFIG_PROC_HARDWARE=y CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m @@ -572,6 +573,7 @@ CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m @@ -598,6 +600,8 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 6a3b4dcc5aab..1477cda9146e 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -94,8 +94,8 @@ CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_INET=m -CONFIG_NF_TABLES_NETDEV=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_RT=m @@ -200,7 +200,7 @@ CONFIG_NF_SOCKET_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m -CONFIG_NF_TABLES_ARP=m +CONFIG_NF_TABLES_ARP=y CONFIG_NF_FLOW_TABLE_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -229,12 +229,12 @@ CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_SOCKET_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m 
CONFIG_IP6_NF_MATCH_EUI64=m @@ -255,7 +255,7 @@ CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m -CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_TABLES_BRIDGE=y CONFIG_NFT_BRIDGE_META=m CONFIG_NFT_BRIDGE_REJECT=m CONFIG_NF_LOG_BRIDGE=m @@ -306,7 +306,6 @@ CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_NSH=m -CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set CONFIG_PSAMPLE=m @@ -391,6 +390,7 @@ CONFIG_MVME16x_NET=y # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set @@ -439,6 +439,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set +CONFIG_DAX=m CONFIG_PROC_HARDWARE=y CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m @@ -572,6 +573,7 @@ CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m @@ -598,6 +600,8 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index 2a3e29c97652..b3a543dc48a0 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -94,8 +94,8 @@ CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_INET=m -CONFIG_NF_TABLES_NETDEV=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_RT=m @@ -200,7 +200,7 @@ CONFIG_NF_SOCKET_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m -CONFIG_NF_TABLES_ARP=m +CONFIG_NF_TABLES_ARP=y CONFIG_NF_FLOW_TABLE_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -229,12 +229,12 @@ CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_SOCKET_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -255,7 +255,7 @@ CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m -CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_TABLES_BRIDGE=y CONFIG_NFT_BRIDGE_META=m CONFIG_NFT_BRIDGE_REJECT=m CONFIG_NF_LOG_BRIDGE=m @@ -306,7 +306,6 @@ CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_NSH=m -CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set CONFIG_PSAMPLE=m @@ -400,6 +399,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set CONFIG_NE2000=y # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set @@ -461,6 +461,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set +CONFIG_DAX=m CONFIG_HEARTBEAT=y CONFIG_PROC_HARDWARE=y CONFIG_EXT4_FS=y @@ -595,6 +596,7 @@ CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_MCRYPTD=m 
CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m @@ -621,6 +623,8 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index cba2494c99b2..d543ed5dfa96 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -91,8 +91,8 @@ CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_INET=m -CONFIG_NF_TABLES_NETDEV=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_RT=m @@ -197,7 +197,7 @@ CONFIG_NF_SOCKET_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m -CONFIG_NF_TABLES_ARP=m +CONFIG_NF_TABLES_ARP=y CONFIG_NF_FLOW_TABLE_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -226,12 +226,12 @@ CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_SOCKET_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -252,7 +252,7 @@ CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m -CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_TABLES_BRIDGE=y CONFIG_NFT_BRIDGE_META=m CONFIG_NFT_BRIDGE_REJECT=m CONFIG_NF_LOG_BRIDGE=m @@ -303,7 +303,6 @@ CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_NSH=m -CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set CONFIG_PSAMPLE=m @@ -388,6 +387,7 @@ CONFIG_SUN3_82586=y # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set @@ -441,6 +441,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set +CONFIG_DAX=m CONFIG_PROC_HARDWARE=y CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m @@ -573,6 +574,7 @@ CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m @@ -599,6 +601,8 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index d911561137fd..a67e54246023 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -91,8 +91,8 @@ CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_INET=m -CONFIG_NF_TABLES_NETDEV=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_RT=m @@ -197,7 +197,7 @@ CONFIG_NF_SOCKET_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m -CONFIG_NF_TABLES_ARP=m +CONFIG_NF_TABLES_ARP=y 
CONFIG_NF_FLOW_TABLE_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -226,12 +226,12 @@ CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_SOCKET_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -252,7 +252,7 @@ CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m -CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_TABLES_BRIDGE=y CONFIG_NFT_BRIDGE_META=m CONFIG_NFT_BRIDGE_REJECT=m CONFIG_NF_LOG_BRIDGE=m @@ -303,7 +303,6 @@ CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_NSH=m -CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set CONFIG_PSAMPLE=m @@ -389,6 +388,7 @@ CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set @@ -441,6 +441,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=m # CONFIG_VIRTIO_MENU is not set # CONFIG_IOMMU_SUPPORT is not set +CONFIG_DAX=m CONFIG_PROC_HARDWARE=y CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m @@ -574,6 +575,7 @@ CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m @@ -600,6 +602,8 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index 88a9d27df1ac..4d8d68c4e3dd 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild @@ -1,4 +1,5 @@ generic-y += barrier.h +generic-y += compat.h generic-y += device.h generic-y += emergency-restart.h generic-y += exec.h diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h index 972c8f33f055..9000b249d225 100644 --- a/arch/m68k/include/asm/atarihw.h +++ b/arch/m68k/include/asm/atarihw.h @@ -23,6 +23,7 @@ #include <linux/types.h> #include <asm/bootinfo-atari.h> #include <asm/raw_io.h> +#include <asm/kmap.h> extern u_long atari_mch_cookie; extern u_long atari_mch_type; diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h index 7f474121e4ca..751712f8beea 100644 --- a/arch/m68k/include/asm/delay.h +++ b/arch/m68k/include/asm/delay.h @@ -49,8 +49,6 @@ extern void __bad_udelay(void); * The simpler m68k and ColdFire processors do not have a 32*32->64 * multiply instruction. So we need to handle them a little differently. * We use a bit of shifting and a single 32*32->32 multiply to get close. - * This is a macro so that the const version can factor out the first - * multiply and shift. 
*/ #define HZSCALE (268435456 / (1000000 / HZ)) @@ -115,6 +113,13 @@ static inline void __udelay(unsigned long usecs) */ #define HZSCALE (268435456 / (1000000 / HZ)) -#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000)) +static inline void ndelay(unsigned long nsec) +{ + __delay(DIV_ROUND_UP(nsec * + ((((HZSCALE) >> 11) * + (loops_per_jiffy >> 11)) >> 6), + 1000)); +} +#define ndelay(n) ndelay(n) #endif /* defined(_M68K_DELAY_H) */ diff --git a/arch/m68k/include/asm/io.h b/arch/m68k/include/asm/io.h index 756089cc019c..ca2849afb087 100644 --- a/arch/m68k/include/asm/io.h +++ b/arch/m68k/include/asm/io.h @@ -1,14 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifdef __uClinux__ +#if defined(__uClinux__) || defined(CONFIG_COLDFIRE) #include <asm/io_no.h> #else #include <asm/io_mm.h> #endif - -#define readb_relaxed(addr) readb(addr) -#define readw_relaxed(addr) readw(addr) -#define readl_relaxed(addr) readl(addr) - -#define writeb_relaxed(b, addr) writeb(b, addr) -#define writew_relaxed(b, addr) writew(b, addr) -#define writel_relaxed(b, addr) writel(b, addr) diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h index ed5333e87879..fe485f4f5fac 100644 --- a/arch/m68k/include/asm/io_mm.h +++ b/arch/m68k/include/asm/io_mm.h @@ -26,6 +26,7 @@ #include <linux/compiler.h> #include <asm/raw_io.h> #include <asm/virtconvert.h> +#include <asm/kmap.h> #include <asm-generic/iomap.h> @@ -85,53 +86,7 @@ #endif /* ATARI_ROM_ISA */ -#if defined(CONFIG_PCI) && defined(CONFIG_COLDFIRE) - -#define HAVE_ARCH_PIO_SIZE -#define PIO_OFFSET 0 -#define PIO_MASK 0xffff -#define PIO_RESERVED 0x10000 - -u8 mcf_pci_inb(u32 addr); -u16 mcf_pci_inw(u32 addr); -u32 mcf_pci_inl(u32 addr); -void mcf_pci_insb(u32 addr, u8 *buf, u32 len); -void mcf_pci_insw(u32 addr, u16 *buf, u32 len); -void mcf_pci_insl(u32 addr, u32 *buf, u32 len); - -void mcf_pci_outb(u8 v, u32 addr); -void mcf_pci_outw(u16 v, u32 addr); -void mcf_pci_outl(u32 v, u32 addr); -void mcf_pci_outsb(u32 addr, const u8 *buf, u32 len); -void mcf_pci_outsw(u32 addr, const u16 *buf, u32 len); -void mcf_pci_outsl(u32 addr, const u32 *buf, u32 len); - -#define inb mcf_pci_inb -#define inb_p mcf_pci_inb -#define inw mcf_pci_inw -#define inw_p mcf_pci_inw -#define inl mcf_pci_inl -#define inl_p mcf_pci_inl -#define insb mcf_pci_insb -#define insw mcf_pci_insw -#define insl mcf_pci_insl - -#define outb mcf_pci_outb -#define outb_p mcf_pci_outb -#define outw mcf_pci_outw -#define outw_p mcf_pci_outw -#define outl mcf_pci_outl -#define outl_p mcf_pci_outl -#define outsb mcf_pci_outsb -#define outsw mcf_pci_outsw -#define outsl mcf_pci_outsl - -#define readb(addr) in_8(addr) -#define writeb(v, addr) out_8((addr), (v)) -#define readw(addr) in_le16(addr) -#define writew(v, addr) out_le16((addr), (v)) - -#elif defined(CONFIG_ISA) || defined(CONFIG_ATARI_ROM_ISA) +#if defined(CONFIG_ISA) || defined(CONFIG_ATARI_ROM_ISA) #if MULTI_ISA == 0 #undef MULTI_ISA @@ -414,8 +369,7 @@ static inline void isa_delay(void) #define writew(val, addr) out_le16((addr), (val)) #endif /* CONFIG_ATARI_ROM_ISA */ -#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA) && \ - !(defined(CONFIG_PCI) && defined(CONFIG_COLDFIRE)) +#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA) /* * We need to define dummy functions for GENERIC_IOMAP support. 
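The delay.h hunk above is a small but load-bearing idiom: ndelay() stops being a function-like macro, becomes a static inline (so its argument is evaluated once and type-checked), and is then re-defined to its own name. That self-referential define is what generic headers test with #ifndef to decide whether the architecture already provides an override, and the expansion is not recursive because a function-like macro never re-expands inside its own body. A minimal stand-alone illustration of the pattern (plain C; generic_ndelay is a made-up stand-in for the fallback path):

    #include <stdio.h>

    /* arch side: a real inline, then claim the name for #ifndef tests */
    static inline void ndelay(unsigned long nsec)
    {
            printf("arch ndelay(%lu ns)\n", nsec);  /* stands in for __delay(...) */
    }
    #define ndelay(n) ndelay(n)

    /* generic side: only define the name if no arch override exists */
    #ifndef ndelay
    #define ndelay(n) generic_ndelay(n)
    #endif

    int main(void)
    {
            ndelay(100);    /* expands once, then resolves to the inline */
            return 0;
    }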
*/ @@ -461,39 +415,6 @@ static inline void isa_delay(void) #define mmiowb() -static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size) -{ - return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); -} -static inline void __iomem *ioremap_nocache(unsigned long physaddr, unsigned long size) -{ - return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); -} -#define ioremap_uc ioremap_nocache -static inline void __iomem *ioremap_wt(unsigned long physaddr, - unsigned long size) -{ - return __ioremap(physaddr, size, IOMAP_WRITETHROUGH); -} -static inline void __iomem *ioremap_fullcache(unsigned long physaddr, - unsigned long size) -{ - return __ioremap(physaddr, size, IOMAP_FULL_CACHING); -} - -static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count) -{ - __builtin_memset((void __force *) addr, val, count); -} -static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count) -{ - __builtin_memcpy(dst, (void __force *) src, count); -} -static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count) -{ - __builtin_memcpy((void __force *) dst, src, count); -} - #ifndef CONFIG_SUN3 #define IO_SPACE_LIMIT 0xffff #else @@ -515,13 +436,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int */ #define xlate_dev_kmem_ptr(p) p -static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) -{ - return (void __iomem *) port; -} +#define readb_relaxed(addr) readb(addr) +#define readw_relaxed(addr) readw(addr) +#define readl_relaxed(addr) readl(addr) -static inline void ioport_unmap(void __iomem *p) -{ -} +#define writeb_relaxed(b, addr) writeb(b, addr) +#define writew_relaxed(b, addr) writew(b, addr) +#define writel_relaxed(b, addr) writel(b, addr) #endif /* _IO_H */ diff --git a/arch/m68k/include/asm/io_no.h b/arch/m68k/include/asm/io_no.h index 86f45b403bcc..83a0a6d449f4 100644 --- a/arch/m68k/include/asm/io_no.h +++ b/arch/m68k/include/asm/io_no.h @@ -2,191 +2,148 @@ #ifndef _M68KNOMMU_IO_H #define _M68KNOMMU_IO_H -#ifdef __KERNEL__ - -#define ARCH_HAS_IOREMAP_WT - -#include <asm/virtconvert.h> -#include <asm-generic/iomap.h> - /* - * These are for ISA/PCI shared memory _only_ and should never be used - * on any other type of memory, including Zorro memory. They are meant to - * access the bus in the bus byte order which is little-endian!. - * - * readX/writeX() are used to access memory mapped devices. On some - * architectures the memory mapped IO stuff needs to be accessed - * differently. On the m68k architecture, we just read/write the - * memory location directly. - */ -/* ++roman: The assignments to temp. vars avoid that gcc sometimes generates - * two accesses to memory, which may be undesirable for some devices. + * Convert a physical memory address into an IO memory address. + * For us this is trivially a type cast. */ +#define iomem(a) ((void __iomem *) (a)) /* - * swap functions are sometimes needed to interface little-endian hardware + * The non-MMU m68k and ColdFire IO and memory mapped hardware access + * functions have always worked in CPU native endian. We need to define + * that behavior here first before we include asm-generic/io.h.
*/ -static inline unsigned short _swapw(volatile unsigned short v) -{ - return ((v << 8) | (v >> 8)); -} - -static inline unsigned int _swapl(volatile unsigned long v) -{ - return ((v << 24) | ((v & 0xff00) << 8) | ((v & 0xff0000) >> 8) | (v >> 24)); -} - -#define readb(addr) \ +#define __raw_readb(addr) \ ({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; }) -#define readw(addr) \ +#define __raw_readw(addr) \ ({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; }) -#define readl(addr) \ +#define __raw_readl(addr) \ ({ unsigned int __v = (*(volatile unsigned int *) (addr)); __v; }) -#define writeb(b,addr) (void)((*(volatile unsigned char *) (addr)) = (b)) -#define writew(b,addr) (void)((*(volatile unsigned short *) (addr)) = (b)) -#define writel(b,addr) (void)((*(volatile unsigned int *) (addr)) = (b)) +#define __raw_writeb(b, addr) (void)((*(volatile unsigned char *) (addr)) = (b)) +#define __raw_writew(b, addr) (void)((*(volatile unsigned short *) (addr)) = (b)) +#define __raw_writel(b, addr) (void)((*(volatile unsigned int *) (addr)) = (b)) -#define __raw_readb readb -#define __raw_readw readw -#define __raw_readl readl -#define __raw_writeb writeb -#define __raw_writew writew -#define __raw_writel writel +#if defined(CONFIG_COLDFIRE) +/* + * For ColdFire platforms we may need to do some extra checks for what + * type of address range we are accessing. Include the ColdFire platform + * definitions so we can figure out if we need to do something special. + */ +#include <asm/byteorder.h> +#include <asm/coldfire.h> +#include <asm/mcfsim.h> +#endif /* CONFIG_COLDFIRE */ -static inline void io_outsb(unsigned int addr, const void *buf, int len) +#if defined(IOMEMBASE) +/* + * The ColdFire SoC internal peripherals are mapped into virtual address + * space using the ACR registers of the cache control unit. This means we + * are using a 1:1 physical:virtual mapping for them. We can quickly + * determine if we are accessing an internal peripheral device given the + * physical or virtual address using the same range check. This check logic + * applies just the same if there is no MMU but something like a PCI bus + * is present. + */ +static int __cf_internalio(unsigned long addr) { - volatile unsigned char *ap = (volatile unsigned char *) addr; - unsigned char *bp = (unsigned char *) buf; - while (len--) - *ap = *bp++; + return (addr >= IOMEMBASE) && (addr <= IOMEMBASE + IOMEMSIZE - 1); } -static inline void io_outsw(unsigned int addr, const void *buf, int len) +static int cf_internalio(const volatile void __iomem *addr) { - volatile unsigned short *ap = (volatile unsigned short *) addr; - unsigned short *bp = (unsigned short *) buf; - while (len--) - *ap = _swapw(*bp++); + return __cf_internalio((unsigned long) addr); } -static inline void io_outsl(unsigned int addr, const void *buf, int len) +/* + * We need to treat built-in peripherals and bus based address ranges + * differently. Local built-in peripherals (and the ColdFire SoC parts + * have quite a lot of them) are always native endian - which is big + * endian on m68k/ColdFire. Bus based address ranges, like the PCI bus, + * are accessed little endian - so we need to byte swap those.
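The pair of range-check helpers above is the whole dispatch mechanism: one readw()/readl() entry point (defined just below) serves both the native-endian on-chip peripherals and the little-endian bus window, and cf_internalio() decides which behavior applies. The observable effect is easy to reproduce in plain C; the window bounds and addresses here are invented for illustration, not the real ColdFire memory map:

    #include <stdint.h>
    #include <stdio.h>

    #define IOMEMBASE 0xf0000000u           /* hypothetical on-chip window */
    #define IOMEMSIZE 0x10000000u

    static int cf_internalio(uintptr_t addr)
    {
            return addr >= IOMEMBASE && addr <= IOMEMBASE + IOMEMSIZE - 1;
    }

    static uint16_t le16_to_cpu(uint16_t v)
    {
            return (uint16_t)((v << 8) | (v >> 8)); /* big-endian CPU assumed */
    }

    /* same shape as the kernel readw(): swap only for bus addresses */
    static uint16_t demo_readw(uintptr_t addr, uint16_t raw)
    {
            if (cf_internalio(addr))
                    return raw;             /* on-chip: native endian */
            return le16_to_cpu(raw);        /* bus (e.g. PCI): little endian */
    }

    int main(void)
    {
            printf("on-chip: %#x\n", demo_readw(0xf0000100u, 0x1234));  /* 0x1234 */
            printf("bus:     %#x\n", demo_readw(0x40000100u, 0x1234));  /* 0x3412 */
            return 0;
    }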
+ */ +#define readw readw +static inline u16 readw(const volatile void __iomem *addr) { - volatile unsigned int *ap = (volatile unsigned int *) addr; - unsigned int *bp = (unsigned int *) buf; - while (len--) - *ap = _swapl(*bp++); + if (cf_internalio(addr)) + return __raw_readw(addr); + return __le16_to_cpu(__raw_readw(addr)); } -static inline void io_insb(unsigned int addr, void *buf, int len) +#define readl readl +static inline u32 readl(const volatile void __iomem *addr) { - volatile unsigned char *ap = (volatile unsigned char *) addr; - unsigned char *bp = (unsigned char *) buf; - while (len--) - *bp++ = *ap; + if (cf_internalio(addr)) + return __raw_readl(addr); + return __le32_to_cpu(__raw_readl(addr)); } -static inline void io_insw(unsigned int addr, void *buf, int len) +#define writew writew +static inline void writew(u16 value, volatile void __iomem *addr) { - volatile unsigned short *ap = (volatile unsigned short *) addr; - unsigned short *bp = (unsigned short *) buf; - while (len--) - *bp++ = _swapw(*ap); + if (cf_internalio(addr)) + __raw_writew(value, addr); + else + __raw_writew(__cpu_to_le16(value), addr); } -static inline void io_insl(unsigned int addr, void *buf, int len) +#define writel writel +static inline void writel(u32 value, volatile void __iomem *addr) { - volatile unsigned int *ap = (volatile unsigned int *) addr; - unsigned int *bp = (unsigned int *) buf; - while (len--) - *bp++ = _swapl(*ap); + if (cf_internalio(addr)) + __raw_writel(value, addr); + else + __raw_writel(__cpu_to_le32(value), addr); } -#define mmiowb() - -/* - * make the short names macros so specific devices - * can override them as required - */ - -#define memset_io(a,b,c) memset((void *)(a),(b),(c)) -#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c)) -#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c)) - -#define inb(addr) readb(addr) -#define inw(addr) readw(addr) -#define inl(addr) readl(addr) -#define outb(x,addr) ((void) writeb(x,addr)) -#define outw(x,addr) ((void) writew(x,addr)) -#define outl(x,addr) ((void) writel(x,addr)) - -#define inb_p(addr) inb(addr) -#define inw_p(addr) inw(addr) -#define inl_p(addr) inl(addr) -#define outb_p(x,addr) outb(x,addr) -#define outw_p(x,addr) outw(x,addr) -#define outl_p(x,addr) outl(x,addr) +#else -#define outsb(a,b,l) io_outsb(a,b,l) -#define outsw(a,b,l) io_outsw(a,b,l) -#define outsl(a,b,l) io_outsl(a,b,l) +#define readb __raw_readb +#define readw __raw_readw +#define readl __raw_readl +#define writeb __raw_writeb +#define writew __raw_writew +#define writel __raw_writel -#define insb(a,b,l) io_insb(a,b,l) -#define insw(a,b,l) io_insw(a,b,l) -#define insl(a,b,l) io_insl(a,b,l) - -#define IO_SPACE_LIMIT 0xffffffff - - -/* Values for nocacheflag and cmode */ -#define IOMAP_FULL_CACHING 0 -#define IOMAP_NOCACHE_SER 1 -#define IOMAP_NOCACHE_NONSER 2 -#define IOMAP_WRITETHROUGH 3 - -static inline void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag) -{ - return (void *) physaddr; -} -static inline void *ioremap(unsigned long physaddr, unsigned long size) -{ - return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); -} -static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size) -{ - return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); -} -static inline void *ioremap_wt(unsigned long physaddr, unsigned long size) -{ - return __ioremap(physaddr, size, IOMAP_WRITETHROUGH); -} -static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size) -{ - return __ioremap(physaddr, size, 
IOMAP_FULL_CACHING); -} - -#define iounmap(addr) do { } while(0) +#endif /* IOMEMBASE */ +#if defined(CONFIG_PCI) /* - * Convert a physical pointer to a virtual kernel pointer for /dev/mem - * access + * Support for PCI bus access uses the asm-generic access functions. + * We need to supply the base address and masks for the normal memory + * and IO address space mappings. */ -#define xlate_dev_mem_ptr(p) __va(p) +#define PCI_MEM_PA 0xf0000000 /* Host physical address */ +#define PCI_MEM_BA 0xf0000000 /* Bus physical address */ +#define PCI_MEM_SIZE 0x08000000 /* 128 MB */ +#define PCI_MEM_MASK (PCI_MEM_SIZE - 1) + +#define PCI_IO_PA 0xf8000000 /* Host physical address */ +#define PCI_IO_BA 0x00000000 /* Bus physical address */ +#define PCI_IO_SIZE 0x00010000 /* 64k */ +#define PCI_IO_MASK (PCI_IO_SIZE - 1) + +#define HAVE_ARCH_PIO_SIZE +#define PIO_OFFSET 0 +#define PIO_MASK 0xffff +#define PIO_RESERVED 0x10000 +#define PCI_IOBASE ((void __iomem *) PCI_IO_PA) +#define PCI_SPACE_LIMIT PCI_IO_MASK +#endif /* CONFIG_PCI */ /* - * Convert a virtual cached pointer to an uncached pointer + * These are defined in kmap.h as static inline functions. To maintain + * previous behavior we put these define guards here so io_mm.h doesn't + * see them. */ -#define xlate_dev_kmem_ptr(p) p +#ifdef CONFIG_MMU +#define memset_io memset_io +#define memcpy_fromio memcpy_fromio +#define memcpy_toio memcpy_toio +#endif -static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) -{ - return (void __iomem *) port; -} - -static inline void ioport_unmap(void __iomem *p) -{ -} - -#endif /* __KERNEL__ */ +#include <asm/kmap.h> +#include <asm/virtconvert.h> +#include <asm-generic/io.h> #endif /* _M68KNOMMU_IO_H */ diff --git a/arch/m68k/include/asm/kmap.h b/arch/m68k/include/asm/kmap.h new file mode 100644 index 000000000000..84b8333db8ad --- /dev/null +++ b/arch/m68k/include/asm/kmap.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _KMAP_H +#define _KMAP_H + +#ifdef CONFIG_MMU + +/* Values for nocacheflag and cmode */ +#define IOMAP_FULL_CACHING 0 +#define IOMAP_NOCACHE_SER 1 +#define IOMAP_NOCACHE_NONSER 2 +#define IOMAP_WRITETHROUGH 3 + +/* + * These functions are exported by arch/m68k/mm/kmap.c. + * Only needed on MMU enabled systems.
+ */ +extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size, + int cacheflag); +extern void iounmap(void __iomem *addr); +extern void __iounmap(void *addr, unsigned long size); + +#define ioremap ioremap +static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size) +{ + return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); +} + +#define ioremap_nocache ioremap_nocache +static inline void __iomem *ioremap_nocache(unsigned long physaddr, + unsigned long size) +{ + return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); +} + +#define ioremap_uc ioremap_nocache +static inline void __iomem *ioremap_wt(unsigned long physaddr, + unsigned long size) +{ + return __ioremap(physaddr, size, IOMAP_WRITETHROUGH); +} + +#define ioremap_fullcache ioremap_fullcache +static inline void __iomem *ioremap_fullcache(unsigned long physaddr, + unsigned long size) +{ + return __ioremap(physaddr, size, IOMAP_FULL_CACHING); +} + +static inline void memset_io(volatile void __iomem *addr, unsigned char val, + int count) +{ + __builtin_memset((void __force *) addr, val, count); +} + +static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, + int count) +{ + __builtin_memcpy(dst, (void __force *) src, count); +} + +static inline void memcpy_toio(volatile void __iomem *dst, const void *src, + int count) +{ + __builtin_memcpy((void __force *) dst, src, count); +} + +#endif /* CONFIG_MMU */ + +#define ioport_map ioport_map +static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) +{ + return (void __iomem *) port; +} + +#define ioport_unmap ioport_unmap +static inline void ioport_unmap(void __iomem *p) +{ +} + +#endif /* _KMAP_H */ diff --git a/arch/m68k/include/asm/nubus.h b/arch/m68k/include/asm/nubus.h index d0d2039e434e..c2281da6c51a 100644 --- a/arch/m68k/include/asm/nubus.h +++ b/arch/m68k/include/asm/nubus.h @@ -3,6 +3,7 @@ #define _ASM_M68K_NUBUS_H #include <asm/raw_io.h> +#include <asm/kmap.h> #define nubus_readb raw_inb #define nubus_readw raw_inw diff --git a/arch/m68k/include/asm/pci.h b/arch/m68k/include/asm/pci.h index ef26fae8cf0b..5a4bc223743b 100644 --- a/arch/m68k/include/asm/pci.h +++ b/arch/m68k/include/asm/pci.h @@ -4,12 +4,6 @@ #include <asm-generic/pci.h> -/* The PCI address space does equal the physical memory - * address space. The networking and block device layers use - * this boolean for bounce buffer decisions. - */ -#define PCI_DMA_BUS_IS_PHYS (1) - #define pcibios_assign_all_busses() 1 #define PCIBIOS_MIN_IO 0x00000100 diff --git a/arch/m68k/include/asm/q40_master.h b/arch/m68k/include/asm/q40_master.h index 3a89c088800c..9b00fb8079e6 100644 --- a/arch/m68k/include/asm/q40_master.h +++ b/arch/m68k/include/asm/q40_master.h @@ -8,7 +8,7 @@ #define _Q40_MASTER_H #include <asm/raw_io.h> - +#include <asm/kmap.h> #define q40_master_addr 0xff000000 diff --git a/arch/m68k/include/asm/raw_io.h b/arch/m68k/include/asm/raw_io.h index 05e940c29b54..85761255dde5 100644 --- a/arch/m68k/include/asm/raw_io.h +++ b/arch/m68k/include/asm/raw_io.h @@ -13,20 +13,6 @@ #include <asm/byteorder.h> - -/* Values for nocacheflag and cmode */ -#define IOMAP_FULL_CACHING 0 -#define IOMAP_NOCACHE_SER 1 -#define IOMAP_NOCACHE_NONSER 2 -#define IOMAP_WRITETHROUGH 3 - -extern void iounmap(void __iomem *addr); - -extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size, - int cacheflag); -extern void __iounmap(void *addr, unsigned long size); - - /* ++roman: The assignments to temp.
vars avoid that gcc sometimes generates * two accesses to memory, which may be undesirable for some devices. */ diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index 75c172e909ac..c4cb889660aa 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h @@ -141,10 +141,12 @@ asm volatile ("\n" \ case 4: \ __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \ break; \ -/* case 8: disabled because gcc-4.1 has a broken typeof \ - { \ - const void *__gu_ptr = (ptr); \ - u64 __gu_val; \ + case 8: { \ + const void *__gu_ptr = (ptr); \ + union { \ + u64 l; \ + __typeof__(*(ptr)) t; \ + } __gu_val; \ asm volatile ("\n" \ "1: "MOVES".l (%2)+,%1\n" \ "2: "MOVES".l (%2),%R1\n" \ @@ -162,13 +164,13 @@ asm volatile ("\n" \ " .long 1b,10b\n" \ " .long 2b,10b\n" \ " .previous" \ - : "+d" (__gu_err), "=&r" (__gu_val), \ + : "+d" (__gu_err), "=&r" (__gu_val.l), \ "+a" (__gu_ptr) \ : "i" (-EFAULT) \ : "memory"); \ - (x) = (__force typeof(*(ptr)))__gu_val; \ + (x) = __gu_val.t; \ break; \ - } */ \ + } \ default: \ __gu_err = __get_user_bad(); \ break; \ diff --git a/arch/m68k/include/asm/vga.h b/arch/m68k/include/asm/vga.h index 010a624d1b39..4742e6bc3ab8 100644 --- a/arch/m68k/include/asm/vga.h +++ b/arch/m68k/include/asm/vga.h @@ -2,7 +2,15 @@ #ifndef _ASM_M68K_VGA_H #define _ASM_M68K_VGA_H +/* + * Some ColdFire platforms do in fact have a PCI bus. So for those we want + * to use the real IO access functions, don't fake them out or redirect them + * for that case. + */ +#ifndef CONFIG_PCI + #include <asm/raw_io.h> +#include <asm/kmap.h> /* * FIXME @@ -25,4 +33,5 @@ #define writeb raw_outb #define writew raw_outw +#endif /* CONFIG_PCI */ #endif /* _ASM_M68K_VGA_H */ diff --git a/arch/m68k/include/asm/virtconvert.h b/arch/m68k/include/asm/virtconvert.h index 4aea6be7b220..dfe43083b579 100644 --- a/arch/m68k/include/asm/virtconvert.h +++ b/arch/m68k/include/asm/virtconvert.h @@ -16,11 +16,13 @@ /* * Change virtual addresses to physical addresses and vv. 
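The uaccess_mm.h change above revives the 8-byte __get_user() case that had been commented out to dodge a broken typeof in gcc-4.1. The trick that makes it work is the union temporary: the two MOVES instructions fill the u64 member, and the result is handed back through a member of the pointer's own type, so no cast from u64 to typeof(*(ptr)) is ever written. The shape of it, reduced to portable C with the inline asm replaced by an ordinary copy:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define DEMO_GET_USER(x, ptr) do {                                     \
            union {                                                        \
                    uint64_t l;              /* filled 32 bits at a time */ \
                    __typeof__(*(ptr)) t;    /* read back as the real type */ \
            } __gu_val;                                                    \
            memcpy(&__gu_val.l, (ptr), 8);   /* stands in for the MOVES pair */ \
            (x) = __gu_val.t;                                              \
    } while (0)

    int main(void)
    {
            long long src = 0x1122334455667788ll, dst;

            DEMO_GET_USER(dst, &src);
            printf("%llx\n", dst);
            return 0;
    }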
*/ +#define virt_to_phys virt_to_phys static inline unsigned long virt_to_phys(void *address) { return __pa(address); } +#define phys_to_virt phys_to_virt static inline void *phys_to_virt(unsigned long address) { return __va(address); diff --git a/arch/m68k/include/asm/zorro.h b/arch/m68k/include/asm/zorro.h index 96f64bf7bcaa..60fc4b6f294d 100644 --- a/arch/m68k/include/asm/zorro.h +++ b/arch/m68k/include/asm/zorro.h @@ -3,6 +3,7 @@ #define _ASM_M68K_ZORRO_H #include <asm/raw_io.h> +#include <asm/kmap.h> #define z_readb raw_inb #define z_readw raw_inw diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c index c01b9b8f97bf..463572c4943f 100644 --- a/arch/m68k/kernel/dma.c +++ b/arch/m68k/kernel/dma.c @@ -9,6 +9,7 @@ #include <linux/dma-mapping.h> #include <linux/device.h> #include <linux/kernel.h> +#include <linux/platform_device.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -165,3 +166,12 @@ const struct dma_map_ops m68k_dma_ops = { .sync_sg_for_device = m68k_dma_sync_sg_for_device, }; EXPORT_SYMBOL(m68k_dma_ops); + +void arch_setup_pdev_archdata(struct platform_device *pdev) +{ + if (pdev->dev.coherent_dma_mask == DMA_MASK_NONE && + pdev->dev.dma_mask == NULL) { + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + } +} diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c index dd25bfc22fb4..f35e3ebd6331 100644 --- a/arch/m68k/kernel/setup_mm.c +++ b/arch/m68k/kernel/setup_mm.c @@ -527,21 +527,9 @@ static int hardware_proc_show(struct seq_file *m, void *v) return 0; } -static int hardware_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, hardware_proc_show, NULL); -} - -static const struct file_operations hardware_proc_fops = { - .open = hardware_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static int __init proc_hardware_init(void) { - proc_create("hardware", 0, NULL, &hardware_proc_fops); + proc_create_single("hardware", 0, NULL, hardware_proc_show); return 0; } module_init(proc_hardware_init); diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index f7cd5ecfacd3..72850b85ecf8 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c @@ -576,41 +576,42 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs * static inline void siginfo_build_tests(void) { - /* This needs to be tested on m68k as it has a lesser - * alignment requirment than x86 and that can cause surprises. + /* + * This needs to be tested on m68k as it has a lesser + * alignment requirement than x86 and that can cause surprises. 
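The new arch_setup_pdev_archdata() in m68k's dma.c, shown above, backfills DMA masks for the many m68k platform devices that are registered without any: if neither mask was set by the registering code, the device gets a 32-bit coherent mask and dma_mask is pointed at it. That lets the ordinary driver-side sequence work unmodified; a sketch of what a probe routine can now rely on (the function name is illustrative, not a driver in this series):

    #include <linux/dma-mapping.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            dma_addr_t dma;
            void *buf;

            /* pdev->dev.dma_mask is non-NULL thanks to the arch hook */
            if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
                    return -ENODEV;

            buf = dma_alloc_coherent(&pdev->dev, 4096, &dma, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            /* ... use buf/dma ... */
            dma_free_coherent(&pdev->dev, 4096, buf, dma);
            return 0;
    }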
*/ /* This is part of the ABI and can never change in size: */ BUILD_BUG_ON(sizeof(siginfo_t) != 128); - /* Ensure the know fields never change in location */ + /* Ensure the known fields never change in location */ BUILD_BUG_ON(offsetof(siginfo_t, si_signo) != 0); BUILD_BUG_ON(offsetof(siginfo_t, si_errno) != 4); BUILD_BUG_ON(offsetof(siginfo_t, si_code) != 8); /* _kill */ - BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x0C); + BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x0c); BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x10); /* _timer */ - BUILD_BUG_ON(offsetof(siginfo_t, si_tid) != 0x0C); + BUILD_BUG_ON(offsetof(siginfo_t, si_tid) != 0x0c); BUILD_BUG_ON(offsetof(siginfo_t, si_overrun) != 0x10); BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x14); /* _rt */ - BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x0C); + BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x0c); BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x10); BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x14); /* _sigchld */ - BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x0C); + BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x0c); BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x10); BUILD_BUG_ON(offsetof(siginfo_t, si_status) != 0x14); BUILD_BUG_ON(offsetof(siginfo_t, si_utime) != 0x18); - BUILD_BUG_ON(offsetof(siginfo_t, si_stime) != 0x1C); + BUILD_BUG_ON(offsetof(siginfo_t, si_stime) != 0x1c); /* _sigfault */ - BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x0C); + BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x0c); /* _sigfault._mcerr */ BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x10); @@ -623,11 +624,11 @@ static inline void siginfo_build_tests(void) BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x12); /* _sigpoll */ - BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x0C); + BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x0c); BUILD_BUG_ON(offsetof(siginfo_t, si_fd) != 0x10); /* _sigsys */ - BUILD_BUG_ON(offsetof(siginfo_t, si_call_addr) != 0x0C); + BUILD_BUG_ON(offsetof(siginfo_t, si_call_addr) != 0x0c); BUILD_BUG_ON(offsetof(siginfo_t, si_syscall) != 0x10); BUILD_BUG_ON(offsetof(siginfo_t, si_arch) != 0x14); diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c index 97dd4e26f234..3a8b47f8f97b 100644 --- a/arch/m68k/kernel/time.c +++ b/arch/m68k/kernel/time.c @@ -71,23 +71,26 @@ static irqreturn_t timer_interrupt(int irq, void *dummy) return IRQ_HANDLED; } -void read_persistent_clock(struct timespec *ts) +#ifdef CONFIG_M68KCLASSIC +#if !IS_BUILTIN(CONFIG_RTC_DRV_GENERIC) +void read_persistent_clock64(struct timespec64 *ts) { struct rtc_time time; + ts->tv_sec = 0; ts->tv_nsec = 0; - if (mach_hwclk) { - mach_hwclk(0, &time); + if (!mach_hwclk) + return; - if ((time.tm_year += 1900) < 1970) - time.tm_year += 100; - ts->tv_sec = mktime(time.tm_year, time.tm_mon, time.tm_mday, - time.tm_hour, time.tm_min, time.tm_sec); - } + mach_hwclk(0, &time); + + ts->tv_sec = mktime64(time.tm_year + 1900, time.tm_mon + 1, time.tm_mday, + time.tm_hour, time.tm_min, time.tm_sec); } +#endif -#if defined(CONFIG_ARCH_USES_GETTIMEOFFSET) && IS_ENABLED(CONFIG_RTC_DRV_GENERIC) +#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC) static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm) { mach_hwclk(0, tm); @@ -145,8 +148,8 @@ static int __init rtc_init(void) } module_init(rtc_init); - -#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */ +#endif /* CONFIG_RTC_DRV_GENERIC */ +#endif /* CONFIG M68KCLASSIC */ void __init time_init(void) { diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index c1cc4e99aa94..b2fd000b9285 100644 --- 
a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -1007,9 +1007,9 @@ void bad_super_trap (struct frame *fp) asmlinkage void trap_c(struct frame *fp) { - int sig; + int sig, si_code; + void __user *addr; int vector = (fp->ptregs.vector >> 2) & 0xff; - siginfo_t info; if (fp->ptregs.sr & PS_S) { if (vector == VEC_TRACE) { @@ -1029,21 +1029,21 @@ asmlinkage void trap_c(struct frame *fp) /* send the appropriate signal to the user program */ switch (vector) { case VEC_ADDRERR: - info.si_code = BUS_ADRALN; + si_code = BUS_ADRALN; sig = SIGBUS; break; case VEC_ILLEGAL: case VEC_LINE10: case VEC_LINE11: - info.si_code = ILL_ILLOPC; + si_code = ILL_ILLOPC; sig = SIGILL; break; case VEC_PRIV: - info.si_code = ILL_PRVOPC; + si_code = ILL_PRVOPC; sig = SIGILL; break; case VEC_COPROC: - info.si_code = ILL_COPROC; + si_code = ILL_COPROC; sig = SIGILL; break; case VEC_TRAP1: @@ -1060,76 +1060,74 @@ asmlinkage void trap_c(struct frame *fp) case VEC_TRAP12: case VEC_TRAP13: case VEC_TRAP14: - info.si_code = ILL_ILLTRP; + si_code = ILL_ILLTRP; sig = SIGILL; break; case VEC_FPBRUC: case VEC_FPOE: case VEC_FPNAN: - info.si_code = FPE_FLTINV; + si_code = FPE_FLTINV; sig = SIGFPE; break; case VEC_FPIR: - info.si_code = FPE_FLTRES; + si_code = FPE_FLTRES; sig = SIGFPE; break; case VEC_FPDIVZ: - info.si_code = FPE_FLTDIV; + si_code = FPE_FLTDIV; sig = SIGFPE; break; case VEC_FPUNDER: - info.si_code = FPE_FLTUND; + si_code = FPE_FLTUND; sig = SIGFPE; break; case VEC_FPOVER: - info.si_code = FPE_FLTOVF; + si_code = FPE_FLTOVF; sig = SIGFPE; break; case VEC_ZERODIV: - info.si_code = FPE_INTDIV; + si_code = FPE_INTDIV; sig = SIGFPE; break; case VEC_CHK: case VEC_TRAP: - info.si_code = FPE_INTOVF; + si_code = FPE_INTOVF; sig = SIGFPE; break; case VEC_TRACE: /* ptrace single step */ - info.si_code = TRAP_TRACE; + si_code = TRAP_TRACE; sig = SIGTRAP; break; case VEC_TRAP15: /* breakpoint */ - info.si_code = TRAP_BRKPT; + si_code = TRAP_BRKPT; sig = SIGTRAP; break; default: - info.si_code = ILL_ILLOPC; + si_code = ILL_ILLOPC; sig = SIGILL; break; } - info.si_signo = sig; - info.si_errno = 0; switch (fp->ptregs.format) { default: - info.si_addr = (void *) fp->ptregs.pc; + addr = (void __user *) fp->ptregs.pc; break; case 2: - info.si_addr = (void *) fp->un.fmt2.iaddr; + addr = (void __user *) fp->un.fmt2.iaddr; break; case 7: - info.si_addr = (void *) fp->un.fmt7.effaddr; + addr = (void __user *) fp->un.fmt7.effaddr; break; case 9: - info.si_addr = (void *) fp->un.fmt9.iaddr; + addr = (void __user *) fp->un.fmt9.iaddr; break; case 10: - info.si_addr = (void *) fp->un.fmta.daddr; + addr = (void __user *) fp->un.fmta.daddr; break; case 11: - info.si_addr = (void *) fp->un.fmtb.daddr; + addr = (void __user*) fp->un.fmtb.daddr; break; } - force_sig_info (sig, &info, current); + force_sig_fault(sig, si_code, addr, current); } void die_if_kernel (char *str, struct pt_regs *fp, int nr) @@ -1161,12 +1159,6 @@ asmlinkage void fpsp040_die(void) #ifdef CONFIG_M68KFPU_EMU asmlinkage void fpemu_signal(int signal, int code, void *addr) { - siginfo_t info; - - info.si_signo = signal; - info.si_errno = 0; - info.si_code = code; - info.si_addr = addr; - force_sig_info(signal, &info, current); + force_sig_fault(signal, code, addr, current); } #endif diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index 0c3275aa0197..e522307db47c 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -1005,7 +1005,7 @@ int __init mac_platform_init(void) struct resource swim_rsrc = { .flags = IORESOURCE_MEM, .start = 
(resource_size_t)swim_base, - .end = (resource_size_t)swim_base + 0x2000, + .end = (resource_size_t)swim_base + 0x1FFF, }; platform_device_register_simple("swim", -1, &swim_rsrc, 1); diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index 03253c4f8e6a..f2ff3779875a 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -21,35 +21,32 @@ extern void die_if_kernel(char *, struct pt_regs *, long); int send_fault_sig(struct pt_regs *regs) { - siginfo_t siginfo; + int signo, si_code; + void __user *addr; - clear_siginfo(&siginfo); - siginfo.si_signo = current->thread.signo; - siginfo.si_code = current->thread.code; - siginfo.si_addr = (void *)current->thread.faddr; - pr_debug("send_fault_sig: %p,%d,%d\n", siginfo.si_addr, - siginfo.si_signo, siginfo.si_code); + signo = current->thread.signo; + si_code = current->thread.code; + addr = (void __user *)current->thread.faddr; + pr_debug("send_fault_sig: %p,%d,%d\n", addr, signo, si_code); if (user_mode(regs)) { - force_sig_info(siginfo.si_signo, - &siginfo, current); + force_sig_fault(signo, si_code, addr, current); } else { if (fixup_exception(regs)) return -1; - //if (siginfo.si_signo == SIGBUS) - // force_sig_info(siginfo.si_signo, - // &siginfo, current); + //if (signo == SIGBUS) + // force_sig_fault(si_signo, si_code, addr, current); /* * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. */ - if ((unsigned long)siginfo.si_addr < PAGE_SIZE) + if ((unsigned long)addr < PAGE_SIZE) pr_alert("Unable to handle kernel NULL pointer dereference"); else pr_alert("Unable to handle kernel access"); - pr_cont(" at virtual address %p\n", siginfo.si_addr); + pr_cont(" at virtual address %p\n", addr); die_if_kernel("Oops", regs, 0 /*error_code*/); do_exit(SIGKILL); } diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c index c2a38321c96d..40a3b327da07 100644 --- a/arch/m68k/mm/kmap.c +++ b/arch/m68k/mm/kmap.c @@ -89,7 +89,8 @@ static inline void free_io_area(void *addr) for (p = &iolist ; (tmp = *p) ; p = &tmp->next) { if (tmp->addr == addr) { *p = tmp->next; - __iounmap(tmp->addr, tmp->size); + /* remove gap added in get_io_area() */ + __iounmap(tmp->addr, tmp->size - IO_SIZE); kfree(tmp); return; } @@ -125,6 +126,10 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla return (void __iomem *)physaddr; } #endif +#ifdef CONFIG_COLDFIRE + if (__cf_internalio(physaddr)) + return (void __iomem *) physaddr; +#endif #ifdef DEBUG printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag); @@ -235,6 +240,10 @@ void iounmap(void __iomem *addr) ((unsigned long)addr > 0x60000000))) free_io_area((__force void *)addr); #else +#ifdef CONFIG_COLDFIRE + if (cf_internalio(addr)) + return; +#endif free_io_area((__force void *)addr); #endif } diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c index 8778612d1f31..f8a710fd84cd 100644 --- a/arch/m68k/mvme147/config.c +++ b/arch/m68k/mvme147/config.c @@ -153,12 +153,14 @@ int mvme147_hwclk(int op, struct rtc_time *t) if (!op) { m147_rtc->ctrl = RTC_READ; t->tm_year = bcd2int (m147_rtc->bcd_year); - t->tm_mon = bcd2int (m147_rtc->bcd_mth); + t->tm_mon = bcd2int(m147_rtc->bcd_mth) - 1; t->tm_mday = bcd2int (m147_rtc->bcd_dom); t->tm_hour = bcd2int (m147_rtc->bcd_hr); t->tm_min = bcd2int (m147_rtc->bcd_min); t->tm_sec = bcd2int (m147_rtc->bcd_sec); m147_rtc->ctrl = 0; + if (t->tm_year < 70) + t->tm_year += 100; } return 0; } diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c index 
6fa06d4d16bf..4ffd9ef98de4 100644 --- a/arch/m68k/mvme16x/config.c +++ b/arch/m68k/mvme16x/config.c @@ -400,12 +400,14 @@ int mvme16x_hwclk(int op, struct rtc_time *t) if (!op) { rtc->ctrl = RTC_READ; t->tm_year = bcd2int (rtc->bcd_year); - t->tm_mon = bcd2int (rtc->bcd_mth); + t->tm_mon = bcd2int(rtc->bcd_mth) - 1; t->tm_mday = bcd2int (rtc->bcd_dom); t->tm_hour = bcd2int (rtc->bcd_hr); t->tm_min = bcd2int (rtc->bcd_min); t->tm_sec = bcd2int (rtc->bcd_sec); rtc->ctrl = 0; + if (t->tm_year < 70) + t->tm_year += 100; } return 0; } diff --git a/arch/m68k/sun3/intersil.c b/arch/m68k/sun3/intersil.c index 2cd0bcbe6f30..d911070af02a 100644 --- a/arch/m68k/sun3/intersil.c +++ b/arch/m68k/sun3/intersil.c @@ -48,9 +48,9 @@ int sun3_hwclk(int set, struct rtc_time *t) todintersil->hour = t->tm_hour; todintersil->minute = t->tm_min; todintersil->second = t->tm_sec; - todintersil->month = t->tm_mon; + todintersil->month = t->tm_mon + 1; todintersil->day = t->tm_mday; - todintersil->year = t->tm_year - 68; + todintersil->year = (t->tm_year - 68) % 100; todintersil->weekday = t->tm_wday; } else { /* read clock */ @@ -58,10 +58,12 @@ int sun3_hwclk(int set, struct rtc_time *t) t->tm_hour = todintersil->hour; t->tm_min = todintersil->minute; t->tm_sec = todintersil->second; - t->tm_mon = todintersil->month; + t->tm_mon = todintersil->month - 1; t->tm_mday = todintersil->day; t->tm_year = todintersil->year + 68; t->tm_wday = todintersil->weekday; + if (t->tm_year < 70) + t->tm_year += 100; } intersil_clock->cmd_reg = START_VAL; diff --git a/arch/m68k/sun3x/time.c b/arch/m68k/sun3x/time.c index 7a2c53d9f779..047e2bcee3d7 100644 --- a/arch/m68k/sun3x/time.c +++ b/arch/m68k/sun3x/time.c @@ -52,8 +52,8 @@ int sun3x_hwclk(int set, struct rtc_time *t) h->hour = bin2bcd(t->tm_hour); h->wday = bin2bcd(t->tm_wday); h->mday = bin2bcd(t->tm_mday); - h->month = bin2bcd(t->tm_mon); - h->year = bin2bcd(t->tm_year); + h->month = bin2bcd(t->tm_mon + 1); + h->year = bin2bcd(t->tm_year % 100); h->csr &= ~C_WRITE; } else { h->csr |= C_READ; @@ -62,9 +62,11 @@ int sun3x_hwclk(int set, struct rtc_time *t) t->tm_hour = bcd2bin(h->hour); t->tm_wday = bcd2bin(h->wday); t->tm_mday = bcd2bin(h->mday); - t->tm_mon = bcd2bin(h->month); + t->tm_mon = bcd2bin(h->month) - 1; t->tm_year = bcd2bin(h->year); h->csr &= ~C_READ; + if (t->tm_year < 70) + t->tm_year += 100; } local_irq_restore(flags); diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 3817a3e2146c..d14782100088 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -19,7 +19,6 @@ config MICROBLAZE select HAVE_ARCH_HASH select HAVE_ARCH_KGDB select HAVE_DEBUG_KMEMLEAK - select HAVE_DMA_API_DEBUG select HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_GRAPH_TRACER diff --git a/arch/microblaze/Kconfig.debug b/arch/microblaze/Kconfig.debug index 012e377330cd..331a3bb66297 100644 --- a/arch/microblaze/Kconfig.debug +++ b/arch/microblaze/Kconfig.debug @@ -8,14 +8,6 @@ config TRACE_IRQFLAGS_SUPPORT source "lib/Kconfig.debug" -config EARLY_PRINTK - bool "Early printk function for kernel" - depends on SERIAL_UARTLITE_CONSOLE || SERIAL_8250_CONSOLE - default n - help - This option turns on/off early printk messages to console. - First Uartlite node is taken. 
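The four hwclk fixes above (mvme147, mvme16x, sun3 intersil, sun3x) all correct the same two conventions: struct rtc_time counts months 0-11 while the chips store 1-12, and tm_year is years since 1900 while the chips hold only two BCD digits, so values below 70 must be windowed into 2000-2069. The conversion in isolation, runnable as plain C (bcd2int mirrors the kernel helper):

    #include <stdio.h>

    static int bcd2int(unsigned char bcd)
    {
            return (bcd >> 4) * 10 + (bcd & 0x0f);
    }

    int main(void)
    {
            int tm_year = bcd2int(0x23);        /* chip register says "23" */
            int tm_mon  = bcd2int(0x05) - 1;    /* chip month 5 -> tm_mon 4 (May) */

            if (tm_year < 70)                   /* two-digit window: 00-69 => 2000-2069 */
                    tm_year += 100;

            printf("tm_year=%d (%d), tm_mon=%d\n", tm_year, 1900 + tm_year, tm_mon);
            return 0;
    }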
- config HEART_BEAT bool "Heart beat function for kernel" default n diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile index fd46385a4c97..600e5a198bd2 100644 --- a/arch/microblaze/boot/Makefile +++ b/arch/microblaze/boot/Makefile @@ -22,17 +22,19 @@ $(obj)/linux.bin.gz: $(obj)/linux.bin FORCE quiet_cmd_cp = CP $< $@$2 cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false) -quiet_cmd_strip = STRIP $@ +quiet_cmd_strip = STRIP $< $@$2 cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \ - -K _fdt_start vmlinux -o $@ + -K _fdt_start $< -o $@$2 UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR) +UIMAGE_IN = $@ +UIMAGE_OUT = $@.ub $(obj)/simpleImage.%: vmlinux FORCE $(call if_changed,cp,.unstrip) $(call if_changed,objcopy) $(call if_changed,uimage) - $(call if_changed,strip) - @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' + $(call if_changed,strip,.strip) + @echo 'Kernel: $(UIMAGE_OUT) is ready' ' (#'`cat .version`')' clean-files += simpleImage.*.unstrip linux.bin.ub diff --git a/arch/microblaze/boot/dts/system.dts b/arch/microblaze/boot/dts/system.dts index b620da23febb..8a420c6702eb 100644 --- a/arch/microblaze/boot/dts/system.dts +++ b/arch/microblaze/boot/dts/system.dts @@ -44,7 +44,7 @@ } ; chosen { bootargs = "console=ttyUL0,115200 highres=on"; - linux,stdout-path = "/plb@0/serial@84000000"; + stdout-path = "/plb@0/serial@84000000"; } ; cpus { #address-cells = <1>; diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 3c80a5a308ed..fe6a6c6e5003 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -2,6 +2,7 @@ generic-y += barrier.h generic-y += bitops.h generic-y += bug.h generic-y += bugs.h +generic-y += compat.h generic-y += device.h generic-y += div64.h generic-y += emergency-restart.h diff --git a/arch/microblaze/include/asm/cpuinfo.h b/arch/microblaze/include/asm/cpuinfo.h index 3337417fcdca..8f4996730552 100644 --- a/arch/microblaze/include/asm/cpuinfo.h +++ b/arch/microblaze/include/asm/cpuinfo.h @@ -13,7 +13,7 @@ #ifndef _ASM_MICROBLAZE_CPUINFO_H #define _ASM_MICROBLAZE_CPUINFO_H -#include <asm/prom.h> +#include <linux/of.h> /* CPU Version and FPGA Family code conversion table type */ struct cpu_ver_key { diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h index 5de871eb4a59..859c19828dd4 100644 --- a/arch/microblaze/include/asm/pci.h +++ b/arch/microblaze/include/asm/pci.h @@ -19,7 +19,6 @@ #include <linux/scatterlist.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/pci-bridge.h> #define PCIBIOS_MIN_IO 0x1000 @@ -62,16 +61,6 @@ extern int pci_mmap_legacy_page_range(struct pci_bus *bus, #define HAVE_PCI_LEGACY 1 -/* The PCI address space does equal the physical memory - * address space (no IOMMU). The IDE and SCSI device layers use - * this boolean for bounce buffer decisions. - */ -#define PCI_DMA_BUS_IS_PHYS (1) - -extern void pcibios_claim_one_bus(struct pci_bus *b); - -extern void pcibios_finish_adding_to_bus(struct pci_bus *bus); - extern void pcibios_resource_survey(void); struct file; diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h deleted file mode 100644 index 2f03ac815851..000000000000 --- a/arch/microblaze/include/asm/prom.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Definitions for talking to the Open Firmware PROM on - * Power Macintosh computers. - * - * Copyright (C) 1996-2005 Paul Mackerras. - * - * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ -#ifndef _ASM_MICROBLAZE_PROM_H -#define _ASM_MICROBLAZE_PROM_H - -#include <linux/of.h> - -/* Other Prototypes */ -enum early_consoles { - UARTLITE = 1, - UART16550 = 2, -}; - -extern int of_early_console(void *version); - -#endif /* _ASM_MICROBLAZE_PROM_H */ diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h index 7c968c1d1729..d5384f6f36f7 100644 --- a/arch/microblaze/include/asm/setup.h +++ b/arch/microblaze/include/asm/setup.h @@ -19,16 +19,11 @@ extern char cmd_line[COMMAND_LINE_SIZE]; extern char *klimit; -int setup_early_printk(char *opt); -void remap_early_printk(void); -void disable_early_printk(void); - void microblaze_heartbeat(void); void microblaze_setup_heartbeat(void); # ifdef CONFIG_MMU extern void mmu_reset(void); -extern void early_console_reg_tlb_alloc(unsigned int addr); # endif /* CONFIG_MMU */ extern void of_platform_reset_gpio_probe(void); diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile index 0da76fa1ab17..7e99cf6984a1 100644 --- a/arch/microblaze/kernel/Makefile +++ b/arch/microblaze/kernel/Makefile @@ -22,7 +22,6 @@ obj-y += dma.o exceptions.o \ obj-y += cpu/ -obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_HEART_BEAT) += heartbeat.o obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o obj-$(CONFIG_MMU) += misc.o diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index c91e8cef98dd..3145e7dc8ab1 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c @@ -184,14 +184,3 @@ const struct dma_map_ops dma_nommu_ops = { .sync_sg_for_device = dma_nommu_sync_sg_for_device, }; EXPORT_SYMBOL(dma_nommu_ops); - -/* Number of entries preallocated for DMA-API debugging */ -#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) - -static int __init dma_init(void) -{ - dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); - - return 0; -} -fs_initcall(dma_init); diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c deleted file mode 100644 index 365f2d53f1b2..000000000000 --- a/arch/microblaze/kernel/early_printk.c +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Early printk support for Microblaze. - * - * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> - * Copyright (C) 2007-2009 PetaLogix - * Copyright (C) 2003-2006 Yasushi SHOJI <yashi@atmark-techno.com> - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#include <linux/console.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/string.h> -#include <linux/tty.h> -#include <linux/io.h> -#include <asm/processor.h> -#include <linux/fcntl.h> -#include <asm/setup.h> -#include <asm/prom.h> - -static u32 base_addr; - -#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE -static void early_printk_uartlite_putc(char c) -{ - /* - * Limit how many times we'll spin waiting for TX FIFO status. - * This will prevent lockups if the base address is incorrectly - * set, or any other issue on the UARTLITE. - * This limit is pretty arbitrary, unless we are at about 10 baud - * we'll never timeout on a working UART. 
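Everything early_printk.c did is handled today by the generic earlycon machinery, keyed off the stdout-path property that the system.dts hunk above switches to (or an earlycon= argument on the command line), which is why the file can simply be deleted. A console driver opts in with OF_EARLYCON_DECLARE(); the sketch below shows the shape of such a hook, with an invented compatible string and register layout rather than the exact uartlite ones:

    #include <linux/console.h>
    #include <linux/init.h>
    #include <linux/io.h>
    #include <linux/serial_core.h>

    static void demo_putc(struct uart_port *port, int c)
    {
            /* hypothetical layout: status at +0x8, TX FIFO at +0x4 */
            while (readl(port->membase + 0x8) & (1 << 3))
                    ;       /* wait while TX FIFO is full */
            writel(c, port->membase + 0x4);
    }

    static void demo_early_write(struct console *con, const char *s,
                                 unsigned int n)
    {
            struct earlycon_device *dev = con->data;

            uart_console_write(&dev->port, s, n, demo_putc);
    }

    static int __init demo_early_setup(struct earlycon_device *dev,
                                       const char *opt)
    {
            if (!dev->port.membase)
                    return -ENODEV;
            dev->con->write = demo_early_write;
            return 0;
    }
    OF_EARLYCON_DECLARE(demo, "vendor,demo-uart", demo_early_setup);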
- */ - - unsigned retries = 1000000; - /* read status bit - 0x8 offset */ - while (--retries && (in_be32(base_addr + 8) & (1 << 3))) - ; - - /* Only attempt the iowrite if we didn't timeout */ - /* write to TX_FIFO - 0x4 offset */ - if (retries) - out_be32(base_addr + 4, c & 0xff); -} - -static void early_printk_uartlite_write(struct console *unused, - const char *s, unsigned n) -{ - while (*s && n-- > 0) { - if (*s == '\n') - early_printk_uartlite_putc('\r'); - early_printk_uartlite_putc(*s); - s++; - } -} - -static struct console early_serial_uartlite_console = { - .name = "earlyser", - .write = early_printk_uartlite_write, - .flags = CON_PRINTBUFFER | CON_BOOT, - .index = -1, -}; -#endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */ - -#ifdef CONFIG_SERIAL_8250_CONSOLE -static void early_printk_uart16550_putc(char c) -{ - /* - * Limit how many times we'll spin waiting for TX FIFO status. - * This will prevent lockups if the base address is incorrectly - * set, or any other issue on the UARTLITE. - * This limit is pretty arbitrary, unless we are at about 10 baud - * we'll never timeout on a working UART. - */ - - #define UART_LSR_TEMT 0x40 /* Transmitter empty */ - #define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */ - #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) - - unsigned retries = 10000; - - while (--retries && - !((in_be32(base_addr + 0x14) & BOTH_EMPTY) == BOTH_EMPTY)) - ; - - if (retries) - out_be32(base_addr, c & 0xff); -} - -static void early_printk_uart16550_write(struct console *unused, - const char *s, unsigned n) -{ - while (*s && n-- > 0) { - if (*s == '\n') - early_printk_uart16550_putc('\r'); - early_printk_uart16550_putc(*s); - s++; - } -} - -static struct console early_serial_uart16550_console = { - .name = "earlyser", - .write = early_printk_uart16550_write, - .flags = CON_PRINTBUFFER | CON_BOOT, - .index = -1, -}; -#endif /* CONFIG_SERIAL_8250_CONSOLE */ - -int __init setup_early_printk(char *opt) -{ - int version = 0; - - if (early_console) - return 1; - - base_addr = of_early_console(&version); - if (base_addr) { -#ifdef CONFIG_MMU - early_console_reg_tlb_alloc(base_addr); -#endif - switch (version) { -#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE - case UARTLITE: - pr_info("Early console on uartlite at 0x%08x\n", - base_addr); - early_console = &early_serial_uartlite_console; - break; -#endif -#ifdef CONFIG_SERIAL_8250_CONSOLE - case UART16550: - pr_info("Early console on uart16650 at 0x%08x\n", - base_addr); - early_console = &early_serial_uart16550_console; - break; -#endif - default: - pr_info("Unsupported early console %d\n", - version); - return 1; - } - - register_console(early_console); - return 0; - } - return 1; -} - -/* Remap early console to virtual address and do not allocate one TLB - * only for early console because of performance degression */ -void __init remap_early_printk(void) -{ - if (!early_console) - return; - pr_info("early_printk_console remapping from 0x%x to ", base_addr); - base_addr = (u32) ioremap(base_addr, PAGE_SIZE); - pr_cont("0x%x\n", base_addr); - -#ifdef CONFIG_MMU - /* - * Early console is on the top of skipped TLB entries - * decrease tlb_skip size ensure that hardcoded TLB entry will be - * used by generic algorithm - * FIXME check if early console mapping is on the top by rereading - * TLB entry and compare baseaddr - * mts rtlbx, (tlb_skip - 1) - * nop - * mfs rX, rtlblo - * nop - * cmp rX, orig_base_addr - */ - tlb_skip -= 1; -#endif -} - -void __init disable_early_printk(void) -{ - if (!early_console) - return; - 
pr_warn("disabling early console\n"); - unregister_console(early_console); - early_console = NULL; -} diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c index e6f338d0496b..eafff21fcb0e 100644 --- a/arch/microblaze/kernel/exceptions.c +++ b/arch/microblaze/kernel/exceptions.c @@ -60,16 +60,10 @@ asmlinkage void sw_exception(struct pt_regs *regs) void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) { - siginfo_t info; - if (kernel_mode(regs)) die("Exception in kernel mode", regs, signr); - info.si_signo = signr; - info.si_errno = 0; - info.si_code = code; - info.si_addr = (void __user *) addr; - force_sig_info(signr, &info, current); + force_sig_fault(signr, code, (void __user *)addr, current); } asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S index 1dafddeb8a0b..6759af688451 100644 --- a/arch/microblaze/kernel/misc.S +++ b/arch/microblaze/kernel/misc.S @@ -63,38 +63,3 @@ _tlbie_1: nop .size _tlbie, . - _tlbie - -/* - * Allocate TLB entry for early console - */ -.globl early_console_reg_tlb_alloc; -.type early_console_reg_tlb_alloc, @function -.align 4; -early_console_reg_tlb_alloc: - /* - * Load a TLB entry for the UART, so that microblaze_progress() can use - * the UARTs nice and early. We use a 4k real==virtual mapping. - */ - lwi r4, r0, tlb_skip - mts rtlbx, r4 /* TLB slot 63 */ - - or r4,r5,r0 - andi r4,r4,0xfffff000 - ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G) - - andi r5,r5,0xfffff000 - ori r5,r5,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K)) - - mts rtlblo,r4 /* Load the data portion of the entry */ - nop - mts rtlbhi,r5 /* Load the tag portion of the entry */ - nop - - lwi r5, r0, tlb_skip - addik r5, r5, 1 - swi r5, r0, tlb_skip - - rtsd r15, 8 - nop - - .size early_console_reg_tlb_alloc, . - early_console_reg_tlb_alloc diff --git a/arch/microblaze/kernel/platform.c b/arch/microblaze/kernel/platform.c index b9529caa507a..2540d60610d9 100644 --- a/arch/microblaze/kernel/platform.c +++ b/arch/microblaze/kernel/platform.c @@ -12,7 +12,6 @@ #include <linux/init.h> #include <linux/of_platform.h> -#include <asm/prom.h> #include <asm/setup.h> static struct of_device_id xilinx_of_bus_ids[] __initdata = { diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c index 68f099960ebc..c76c93b90b79 100644 --- a/arch/microblaze/kernel/prom.c +++ b/arch/microblaze/kernel/prom.c @@ -13,91 +13,11 @@ * 2 of the License, or (at your option) any later version. 
*/ -#include <stdarg.h> -#include <linux/export.h> #include <linux/kernel.h> #include <linux/string.h> -#include <linux/init.h> -#include <linux/threads.h> -#include <linux/spinlock.h> -#include <linux/types.h> -#include <linux/pci.h> -#include <linux/stringify.h> -#include <linux/delay.h> -#include <linux/initrd.h> -#include <linux/bitops.h> -#include <linux/kexec.h> -#include <linux/debugfs.h> -#include <linux/irq.h> #include <linux/memblock.h> #include <linux/of_fdt.h> -#include <asm/prom.h> -#include <asm/page.h> -#include <asm/processor.h> -#include <asm/irq.h> -#include <linux/io.h> -#include <asm/mmu.h> -#include <asm/pgtable.h> -#include <asm/sections.h> -#include <asm/pci-bridge.h> - -#ifdef CONFIG_EARLY_PRINTK -static const char *stdout; - -static int __init early_init_dt_scan_chosen_serial(unsigned long node, - const char *uname, int depth, void *data) -{ - int l; - const char *p; - - pr_debug("%s: depth: %d, uname: %s\n", __func__, depth, uname); - - if (depth == 1 && (strcmp(uname, "chosen") == 0 || - strcmp(uname, "chosen@0") == 0)) { - p = of_get_flat_dt_prop(node, "linux,stdout-path", &l); - if (p != NULL && l > 0) - stdout = p; /* store pointer to stdout-path */ - } - - if (stdout && strstr(stdout, uname)) { - p = of_get_flat_dt_prop(node, "compatible", &l); - pr_debug("Compatible string: %s\n", p); - - if ((strncmp(p, "xlnx,xps-uart16550", 18) == 0) || - (strncmp(p, "xlnx,axi-uart16550", 18) == 0)) { - unsigned int addr; - - *(u32 *)data = UART16550; - - addr = *(u32 *)of_get_flat_dt_prop(node, "reg", &l); - addr += *(u32 *)of_get_flat_dt_prop(node, - "reg-offset", &l); - /* clear register offset */ - return be32_to_cpu(addr) & ~3; - } - if ((strncmp(p, "xlnx,xps-uartlite", 17) == 0) || - (strncmp(p, "xlnx,opb-uartlite", 17) == 0) || - (strncmp(p, "xlnx,axi-uartlite", 17) == 0) || - (strncmp(p, "xlnx,mdm", 8) == 0)) { - const unsigned int *addrp; - - *(u32 *)data = UARTLITE; - - addrp = of_get_flat_dt_prop(node, "reg", &l); - return be32_to_cpup(addrp); /* return address */ - } - } - return 0; -} - -/* this function is looking for early console - Microblaze specific */ -int __init of_early_console(void *version) -{ - return of_scan_flat_dt(early_init_dt_scan_chosen_serial, version); -} -#endif - void __init early_init_devtree(void *params) { pr_debug(" -> early_init_devtree(%p)\n", params); @@ -106,8 +26,6 @@ void __init early_init_devtree(void *params) if (!strlen(boot_command_line)) strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); - parse_early_param(); - memblock_allow_resize(); pr_debug("Phys. 
mem: %lx\n", (unsigned long) memblock_phys_mem_size()); diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index be98ffe28ca8..bbd6968ce55b 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c @@ -27,13 +27,12 @@ #include <linux/param.h> #include <linux/pci.h> #include <linux/cache.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <linux/dma-mapping.h> #include <asm/cacheflush.h> #include <asm/entry.h> #include <asm/cpuinfo.h> -#include <asm/prom.h> #include <asm/pgtable.h> DEFINE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */ @@ -54,6 +53,9 @@ void __init setup_arch(char **cmdline_p) { *cmdline_p = boot_command_line; + setup_memory(); + parse_early_param(); + console_verbose(); unflatten_device_tree(); @@ -62,13 +64,6 @@ void __init setup_arch(char **cmdline_p) microblaze_cache_init(); - setup_memory(); - -#ifdef CONFIG_EARLY_PRINTK - /* remap early console to virtual address */ - remap_early_printk(); -#endif - xilinx_pci_init(); #if defined(CONFIG_DUMMY_CONSOLE) @@ -133,10 +128,6 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, /* initialize device tree for usage in early_printk */ early_init_devtree(_fdt_start); -#ifdef CONFIG_EARLY_PRINTK - setup_early_printk(NULL); -#endif - /* setup kernel_tlb after BSS cleaning * Maybe worth to move to asm code */ kernel_tlb = tlb0 + tlb1; diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index f91b30f8aaa8..af607447c683 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -88,7 +88,6 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; - siginfo_t info; int code = SEGV_MAPERR; int is_write = error_code & ESR_S; int fault; @@ -269,11 +268,6 @@ bad_area_nosemaphore: /* User mode accesses cause a SIGSEGV */ if (user_mode(regs)) { _exception(SIGSEGV, regs, code, address); -/* info.si_signo = SIGSEGV; - info.si_errno = 0; - info.si_code = code; - info.si_addr = (void *) address; - force_sig_info(SIGSEGV, &info, current);*/ return; } @@ -295,11 +289,7 @@ out_of_memory: do_sigbus: up_read(&mm->mmap_sem); if (user_mode(regs)) { - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRERR; - info.si_addr = (void __user *)address; - force_sig_info(SIGBUS, &info, current); + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, current); return; } bad_page_fault(regs, address, SIGBUS); diff --git a/arch/microblaze/pci/indirect_pci.c b/arch/microblaze/pci/indirect_pci.c index ae4fca46c9f6..24030837a425 100644 --- a/arch/microblaze/pci/indirect_pci.c +++ b/arch/microblaze/pci/indirect_pci.c @@ -16,7 +16,6 @@ #include <linux/init.h> #include <linux/io.h> -#include <asm/prom.h> #include <asm/pci-bridge.h> static int diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index 161f9758c631..f34346d56095 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c @@ -915,67 +915,6 @@ void __init pcibios_resource_survey(void) pci_assign_unassigned_resources(); } -/* This is used by the PCI hotplug driver to allocate resource - * of newly plugged busses. We can try to consolidate with the - * rest of the code later, for now, keep it as-is as our main - * resource allocation function doesn't deal with sub-trees yet. 
- */ -void pcibios_claim_one_bus(struct pci_bus *bus) -{ - struct pci_dev *dev; - struct pci_bus *child_bus; - - list_for_each_entry(dev, &bus->devices, bus_list) { - int i; - - for (i = 0; i < PCI_NUM_RESOURCES; i++) { - struct resource *r = &dev->resource[i]; - - if (r->parent || !r->start || !r->flags) - continue; - - pr_debug("PCI: Claiming %s: ", pci_name(dev)); - pr_debug("Resource %d: %016llx..%016llx [%x]\n", - i, (unsigned long long)r->start, - (unsigned long long)r->end, - (unsigned int)r->flags); - - if (pci_claim_resource(dev, i) == 0) - continue; - - pci_claim_bridge_resource(dev, i); - } - } - - list_for_each_entry(child_bus, &bus->children, node) - pcibios_claim_one_bus(child_bus); -} -EXPORT_SYMBOL_GPL(pcibios_claim_one_bus); - - -/* pcibios_finish_adding_to_bus - * - * This is to be called by the hotplug code after devices have been - * added to a bus, this include calling it for a PHB that is just - * being added - */ -void pcibios_finish_adding_to_bus(struct pci_bus *bus) -{ - pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n", - pci_domain_nr(bus), bus->number); - - /* Allocate bus and devices resources */ - pcibios_allocate_bus_resources(bus); - pcibios_claim_one_bus(bus); - - /* Add new devices to global lists. Register in proc, sysfs. */ - pci_bus_add_devices(bus); - - /* Fixup EEH */ - /* eeh_add_device_tree_late(bus); */ -} -EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus); - static void pcibios_setup_phb_resources(struct pci_controller *hose, struct list_head *resources) { diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 225c95da23ce..7074b2215f36 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -42,7 +42,6 @@ config MIPS select HAVE_C_RECORDMCOUNT select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_STACKOVERFLOW - select HAVE_DMA_API_DEBUG select HAVE_DMA_CONTIGUOUS select HAVE_DYNAMIC_FTRACE select HAVE_EXIT_THREAD @@ -132,7 +131,7 @@ config MIPS_GENERIC config MIPS_ALCHEMY bool "Alchemy processor based machines" - select ARCH_PHYS_ADDR_T_64BIT + select PHYS_ADDR_T_64BIT select CEVT_R4K select CSRC_R4K select IRQ_MIPS_CPU @@ -890,7 +889,7 @@ config CAVIUM_OCTEON_SOC bool "Cavium Networks Octeon SoC based boards" select CEVT_R4K select ARCH_HAS_PHYS_TO_DMA - select ARCH_PHYS_ADDR_T_64BIT + select PHYS_ADDR_T_64BIT select DMA_COHERENT select SYS_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN @@ -912,6 +911,7 @@ config CAVIUM_OCTEON_SOC select MIPS_NR_CPU_NR_MAP_1024 select BUILTIN_DTB select MTD_COMPLEX_MAPPINGS + select SWIOTLB select SYS_SUPPORTS_RELOCATABLE help This option supports all of the Octeon reference boards from Cavium @@ -936,7 +936,7 @@ config NLM_XLR_BOARD select SWAP_IO_SPACE select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_64BIT_KERNEL - select ARCH_PHYS_ADDR_T_64BIT + select PHYS_ADDR_T_64BIT select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_HIGHMEM select DMA_COHERENT @@ -962,7 +962,7 @@ config NLM_XLP_BOARD select HW_HAS_PCI select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_64BIT_KERNEL - select ARCH_PHYS_ADDR_T_64BIT + select PHYS_ADDR_T_64BIT select GPIOLIB select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_LITTLE_ENDIAN @@ -1101,9 +1101,6 @@ config GPIO_TXX9 config FW_CFE bool -config ARCH_DMA_ADDR_T_64BIT - def_bool (HIGHMEM && ARCH_PHYS_ADDR_T_64BIT) || 64BIT - config ARCH_SUPPORTS_UPROBES bool @@ -1122,9 +1119,6 @@ config DMA_NONCOHERENT bool select NEED_DMA_MAP_STATE -config NEED_DMA_MAP_STATE - bool - config SYS_HAS_EARLY_PRINTK bool @@ -1373,6 +1367,7 @@ config CPU_LOONGSON3 select MIPS_PGD_C0_CONTEXT 
select MIPS_L1_CACHE_SHIFT_6 select GPIOLIB + select SWIOTLB help The Loongson 3 processor implements the MIPS64R2 instruction set with many extensions. @@ -1770,7 +1765,7 @@ config CPU_MIPS32_R5_XPA depends on SYS_SUPPORTS_HIGHMEM select XPA select HIGHMEM - select ARCH_PHYS_ADDR_T_64BIT + select PHYS_ADDR_T_64BIT default n help Choose this option if you want to enable the Extended Physical @@ -2402,9 +2397,6 @@ config SB1_PASS_2_1_WORKAROUNDS default y -config ARCH_PHYS_ADDR_T_64BIT - bool - choice prompt "SmartMIPS or microMIPS ASE support" @@ -2556,7 +2548,7 @@ config ARCH_DISCONTIGMEM_ENABLE Say Y to support efficient handling of discontiguous physical memory, for architectures which are either NUMA (Non-Uniform Memory Access) or have huge holes in the physical address space for other reasons. - See <file:Documentation/vm/numa> for more. + See <file:Documentation/vm/numa.rst> for more. config ARCH_SPARSEMEM_ENABLE bool diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 5e9fce076ab6..e2122cca4ae2 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -309,9 +309,6 @@ ifdef CONFIG_MIPS CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \ egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \ sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g') -ifdef CONFIG_64BIT -CHECKFLAGS += -m64 -endif endif OBJCOPYFLAGS += --remove-section=.reginfo diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts index 38078594cf97..50cff3cbcc6d 100644 --- a/arch/mips/boot/dts/ingenic/ci20.dts +++ b/arch/mips/boot/dts/ingenic/ci20.dts @@ -36,6 +36,28 @@ clock-frequency = <48000000>; }; +&mmc0 { + status = "okay"; + + bus-width = <4>; + max-frequency = <50000000>; + + pinctrl-names = "default"; + pinctrl-0 = <&pins_mmc0>; + + cd-gpios = <&gpf 20 GPIO_ACTIVE_LOW>; +}; + +&mmc1 { + status = "okay"; + + bus-width = <4>; + max-frequency = <50000000>; + + pinctrl-names = "default"; + pinctrl-0 = <&pins_mmc1>; +}; + &uart0 { status = "okay"; @@ -203,4 +225,16 @@ groups = "nemc-cs6"; bias-disable; }; + + pins_mmc0: mmc0 { + function = "mmc0"; + groups = "mmc0-1bit-e", "mmc0-4bit-e"; + bias-disable; + }; + + pins_mmc1: mmc1 { + function = "mmc1"; + groups = "mmc1-1bit-d", "mmc1-4bit-d"; + bias-disable; + }; }; diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi index 9b5794667aee..b72e53bb7292 100644 --- a/arch/mips/boot/dts/ingenic/jz4780.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <dt-bindings/clock/jz4780-cgu.h> +#include <dt-bindings/dma/jz4780-dma.h> / { #address-cells = <1>; @@ -241,6 +242,57 @@ status = "disabled"; }; + dma: dma@13420000 { + compatible = "ingenic,jz4780-dma"; + reg = <0x13420000 0x10000>; + #dma-cells = <2>; + + interrupt-parent = <&intc>; + interrupts = <10>; + + clocks = <&cgu JZ4780_CLK_PDMA>; + }; + + mmc0: mmc@13450000 { + compatible = "ingenic,jz4780-mmc"; + reg = <0x13450000 0x1000>; + + interrupt-parent = <&intc>; + interrupts = <37>; + + clocks = <&cgu JZ4780_CLK_MSC0>; + clock-names = "mmc"; + + cap-sd-highspeed; + cap-mmc-highspeed; + cap-sdio-irq; + dmas = <&dma JZ4780_DMA_MSC0_RX 0xffffffff>, + <&dma JZ4780_DMA_MSC0_TX 0xffffffff>; + dma-names = "rx", "tx"; + + status = "disabled"; + }; + + mmc1: mmc@13460000 { + compatible = "ingenic,jz4780-mmc"; + reg = <0x13460000 0x1000>; + + interrupt-parent = <&intc>; + interrupts = <36>; + + clocks = <&cgu JZ4780_CLK_MSC1>; + clock-names = "mmc"; + + 
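The ci20 mmc nodes above reach the new jz4780 dma controller through the dmas/dma-names properties added in the dtsi hunks that continue below. On the driver side those names resolve through the controller's #dma-cells = <2> specifier; a hypothetical consumer-side sketch (assumed names, not the actual jz4740-mmc probe code):

    #include <linux/dmaengine.h>
    #include <linux/err.h>

    static int demo_acquire_mmc_dma(struct device *dev,
                                    struct dma_chan **rx, struct dma_chan **tx)
    {
            /* "rx"/"tx" match the dma-names strings in the device tree. */
            *rx = dma_request_chan(dev, "rx");
            if (IS_ERR(*rx))
                    return PTR_ERR(*rx);

            *tx = dma_request_chan(dev, "tx");
            if (IS_ERR(*tx)) {
                    dma_release_channel(*rx);
                    return PTR_ERR(*tx);
            }
            return 0;
    }
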
cap-sd-highspeed; + cap-mmc-highspeed; + cap-sdio-irq; + dmas = <&dma JZ4780_DMA_MSC1_RX 0xffffffff>, + <&dma JZ4780_DMA_MSC1_TX 0xffffffff>; + dma-names = "rx", "tx"; + + status = "disabled"; + }; + bch: bch@134d0000 { compatible = "ingenic,jz4780-bch"; reg = <0x134d0000 0x10000>; diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig index b5eee1a57d6c..4984e462be30 100644 --- a/arch/mips/cavium-octeon/Kconfig +++ b/arch/mips/cavium-octeon/Kconfig @@ -67,18 +67,6 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY help Lock the kernel's implementation of memcpy() into L2. -config IOMMU_HELPER - bool - -config NEED_SG_DMA_LENGTH - bool - -config SWIOTLB - def_bool y - select DMA_DIRECT_OPS - select IOMMU_HELPER - select NEED_SG_DMA_LENGTH - config OCTEON_ILM tristate "Module to measure interrupt latency using Octeon CIU Timer" help diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig index b5f4ad8f2c45..be23fd25eeaa 100644 --- a/arch/mips/configs/ci20_defconfig +++ b/arch/mips/configs/ci20_defconfig @@ -104,10 +104,14 @@ CONFIG_REGULATOR_FIXED_VOLTAGE=y # CONFIG_HID is not set # CONFIG_USB_SUPPORT is not set CONFIG_MMC=y +CONFIG_MMC_JZ4740=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_JZ4740=y +CONFIG_DMADEVICES=y +CONFIG_DMA_JZ4780=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_MEMORY=y +CONFIG_EXT4_FS=y # CONFIG_DNOTIFY is not set CONFIG_PROC_KCORE=y # CONFIG_PROC_PAGE_MONITOR is not set diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h index 9a0fa66b81ac..78675f19440f 100644 --- a/arch/mips/include/asm/compat.h +++ b/arch/mips/include/asm/compat.h @@ -14,7 +14,6 @@ typedef u32 compat_size_t; typedef s32 compat_ssize_t; -typedef s32 compat_time_t; typedef s32 compat_clock_t; typedef s32 compat_suseconds_t; @@ -38,24 +37,16 @@ typedef struct { typedef s32 compat_timer_t; typedef s32 compat_key_t; +typedef s16 compat_short_t; typedef s32 compat_int_t; typedef s32 compat_long_t; typedef s64 compat_s64; +typedef u16 compat_ushort_t; typedef u32 compat_uint_t; typedef u32 compat_ulong_t; typedef u64 compat_u64; typedef u32 compat_uptr_t; -struct compat_timespec { - compat_time_t tv_sec; - s32 tv_nsec; -}; - -struct compat_timeval { - compat_time_t tv_sec; - s32 tv_usec; -}; - struct compat_stat { compat_dev_t st_dev; s32 st_pad1[3]; @@ -168,35 +159,35 @@ struct compat_ipc64_perm { struct compat_semid64_ds { struct compat_ipc64_perm sem_perm; - compat_time_t sem_otime; - compat_time_t sem_ctime; + compat_ulong_t sem_otime; + compat_ulong_t sem_ctime; compat_ulong_t sem_nsems; - compat_ulong_t __unused1; - compat_ulong_t __unused2; + compat_ulong_t sem_otime_high; + compat_ulong_t sem_ctime_high; }; struct compat_msqid64_ds { struct compat_ipc64_perm msg_perm; #ifndef CONFIG_CPU_LITTLE_ENDIAN - compat_ulong_t __unused1; + compat_ulong_t msg_stime_high; #endif - compat_time_t msg_stime; + compat_ulong_t msg_stime; #ifdef CONFIG_CPU_LITTLE_ENDIAN - compat_ulong_t __unused1; + compat_ulong_t msg_stime_high; #endif #ifndef CONFIG_CPU_LITTLE_ENDIAN - compat_ulong_t __unused2; + compat_ulong_t msg_rtime_high; #endif - compat_time_t msg_rtime; + compat_ulong_t msg_rtime; #ifdef CONFIG_CPU_LITTLE_ENDIAN - compat_ulong_t __unused2; + compat_ulong_t msg_rtime_high; #endif #ifndef CONFIG_CPU_LITTLE_ENDIAN - compat_ulong_t __unused3; + compat_ulong_t msg_ctime_high; #endif - compat_time_t msg_ctime; + compat_ulong_t msg_ctime; #ifdef CONFIG_CPU_LITTLE_ENDIAN - compat_ulong_t __unused3; + compat_ulong_t msg_ctime_high; #endif compat_ulong_t msg_cbytes; 
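The *_high members appearing in the compat IPC structures here carry the upper halves of 64-bit timestamps, so 32-bit and compat tasks get y2038-safe values without changing the structure layout. Reassembly is a shift-and-or; a sketch of what the generic ipc compat conversion does with the split words:

    #include <linux/types.h>

    /* Rebuild one 64-bit timestamp from its split low/high halves. */
    static inline u64 ipc_split_time(u32 low, u32 high)
    {
            return ((u64)high << 32) | low;
    }

    /* e.g. otime = ipc_split_time(sem.sem_otime, sem.sem_otime_high); */
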
compat_ulong_t msg_qnum; @@ -210,14 +201,16 @@ struct compat_msqid64_ds { struct compat_shmid64_ds { struct compat_ipc64_perm shm_perm; compat_size_t shm_segsz; - compat_time_t shm_atime; - compat_time_t shm_dtime; - compat_time_t shm_ctime; + compat_ulong_t shm_atime; + compat_ulong_t shm_dtime; + compat_ulong_t shm_ctime; compat_pid_t shm_cpid; compat_pid_t shm_lpid; compat_ulong_t shm_nattch; - compat_ulong_t __unused1; - compat_ulong_t __unused2; + compat_ushort_t shm_atime_high; + compat_ushort_t shm_dtime_high; + compat_ushort_t shm_ctime_high; + compat_ushort_t __unused2; }; /* MIPS has unusual order of fields in stack_t */ diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index 2339f42f047a..436099883022 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h @@ -121,13 +121,6 @@ extern unsigned long PCIBIOS_MIN_MEM; #include <linux/string.h> #include <asm/io.h> -/* - * The PCI address space does equal the physical memory address space. - * The networking and block device layers use this boolean for bounce - * buffer decisions. - */ -#define PCI_DMA_BUS_IS_PHYS (1) - #ifdef CONFIG_PCI_DOMAINS_GENERIC static inline int pci_proc_domain(struct pci_bus *bus) { diff --git a/arch/mips/include/uapi/asm/msgbuf.h b/arch/mips/include/uapi/asm/msgbuf.h index eb4d0f9d7364..46aa15b13e4e 100644 --- a/arch/mips/include/uapi/asm/msgbuf.h +++ b/arch/mips/include/uapi/asm/msgbuf.h @@ -9,33 +9,15 @@ * between kernel and user space. * * Pad space is left for: - * - extension of time_t to 64-bit on 32-bitsystem to solve the y2038 problem * - 2 miscellaneous unsigned long values */ +#if defined(__mips64) struct msqid64_ds { struct ipc64_perm msg_perm; -#if !defined(__mips64) && defined(__MIPSEB__) - unsigned long __unused1; -#endif __kernel_time_t msg_stime; /* last msgsnd time */ -#if !defined(__mips64) && defined(__MIPSEL__) - unsigned long __unused1; -#endif -#if !defined(__mips64) && defined(__MIPSEB__) - unsigned long __unused2; -#endif __kernel_time_t msg_rtime; /* last msgrcv time */ -#if !defined(__mips64) && defined(__MIPSEL__) - unsigned long __unused2; -#endif -#if !defined(__mips64) && defined(__MIPSEB__) - unsigned long __unused3; -#endif __kernel_time_t msg_ctime; /* last change time */ -#if !defined(__mips64) && defined(__MIPSEL__) - unsigned long __unused3; -#endif unsigned long msg_cbytes; /* current number of bytes on queue */ unsigned long msg_qnum; /* number of messages in queue */ unsigned long msg_qbytes; /* max number of bytes on queue */ @@ -44,5 +26,42 @@ struct msqid64_ds { unsigned long __unused4; unsigned long __unused5; }; +#elif defined (__MIPSEB__) +struct msqid64_ds { + struct ipc64_perm msg_perm; + unsigned long msg_stime_high; + unsigned long msg_stime; /* last msgsnd time */ + unsigned long msg_rtime_high; + unsigned long msg_rtime; /* last msgrcv time */ + unsigned long msg_ctime_high; + unsigned long msg_ctime; /* last change time */ + unsigned long msg_cbytes; /* current number of bytes on queue */ + unsigned long msg_qnum; /* number of messages in queue */ + unsigned long msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + unsigned long __unused4; + unsigned long __unused5; +}; +#elif defined (__MIPSEL__) +struct msqid64_ds { + struct ipc64_perm msg_perm; + unsigned long msg_stime; /* last msgsnd time */ + unsigned long msg_stime_high; + unsigned long msg_rtime; /* last msgrcv time */ + unsigned long msg_rtime_high; + unsigned 
long msg_ctime; /* last change time */ + unsigned long msg_ctime_high; + unsigned long msg_cbytes; /* current number of bytes on queue */ + unsigned long msg_qnum; /* number of messages in queue */ + unsigned long msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + unsigned long __unused4; + unsigned long __unused5; +}; +#else +#warning no endianness set +#endif #endif /* _ASM_MSGBUF_H */ diff --git a/arch/mips/include/uapi/asm/sembuf.h b/arch/mips/include/uapi/asm/sembuf.h index 2c0f507ab7d1..60c89e6cb25b 100644 --- a/arch/mips/include/uapi/asm/sembuf.h +++ b/arch/mips/include/uapi/asm/sembuf.h @@ -7,10 +7,11 @@ * Note extra padding because this structure is passed back and forth * between kernel and user space. * - * Pad space is left for: - * - 2 miscellaneous 64-bit values + * Pad space is left for 2 miscellaneous 64-bit values on mips64, + * but used for the upper 32 bits of the time values on mips32. */ +#ifdef __mips64 struct semid64_ds { struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ __kernel_time_t sem_otime; /* last semop time */ @@ -19,5 +20,15 @@ struct semid64_ds { unsigned long __unused1; unsigned long __unused2; }; +#else +struct semid64_ds { + struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ + unsigned long sem_otime; /* last semop time */ + unsigned long sem_ctime; /* last change time */ + unsigned long sem_nsems; /* no. of semaphores in array */ + unsigned long sem_otime_high; + unsigned long sem_ctime_high; +}; +#endif #endif /* _ASM_SEMBUF_H */ diff --git a/arch/mips/include/uapi/asm/shmbuf.h b/arch/mips/include/uapi/asm/shmbuf.h index 379e6bca518b..9b9bba3401f2 100644 --- a/arch/mips/include/uapi/asm/shmbuf.h +++ b/arch/mips/include/uapi/asm/shmbuf.h @@ -7,10 +7,13 @@ * Note extra padding because this structure is passed back and forth * between kernel and user space. * - * Pad space is left for: - * - 2 miscellaneous 32-bit rsp. 64-bit values + * As MIPS was lacking proper padding after shm_?time, we use 48 bits + * of the padding at the end to store a few additional bits of the time. + * libc implementations need to take care to convert this into a proper + * data structure when moving to 64-bit time_t. */ +#ifdef __mips64 struct shmid64_ds { struct ipc64_perm shm_perm; /* operation perms */ size_t shm_segsz; /* size of segment (bytes) */ @@ -23,6 +26,22 @@ struct shmid64_ds { unsigned long __unused1; unsigned long __unused2; }; +#else +struct shmid64_ds { + struct ipc64_perm shm_perm; /* operation perms */ + size_t shm_segsz; /* size of segment (bytes) */ + unsigned long shm_atime; /* last attach time */ + unsigned long shm_dtime; /* last detach time */ + unsigned long shm_ctime; /* last change time */ + __kernel_pid_t shm_cpid; /* pid of creator */ + __kernel_pid_t shm_lpid; /* pid of last operator */ + unsigned long shm_nattch; /* no. of current attaches */ + unsigned short shm_atime_high; + unsigned short shm_dtime_high; + unsigned short shm_ctime_high; + unsigned short __unused1; +}; +#endif struct shminfo64 { unsigned long shmmax; diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index b9e9bf628849..3775a8d694fb 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -721,6 +721,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) if (value & ~known_bits) return -EOPNOTSUPP; + /* Setting FRE without FR is not supported. 
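PR_FP_MODE_FRE layers a 32-bit FP register view on top of FR=1 hardware, so a request for FRE with FR cleared can never be satisfied; the guard that resumes just below rejects it up front. A standalone sketch of the bit test:

    #include <linux/prctl.h>
    #include <linux/errno.h>

    static int demo_check_fp_mode(unsigned int value)
    {
            /* Mask both bits; if only FRE remains set, the combination
             * (FRE without FR) is invalid. */
            if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
                    return -EOPNOTSUPP;
            return 0;
    }
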
*/ + if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE) + return -EOPNOTSUPP; + /* Avoid inadvertently triggering emulation */ if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu && !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64)) diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 8d098b9f395c..0c0c23c9c9f5 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -818,7 +818,7 @@ long arch_ptrace(struct task_struct *child, long request, break; } #endif - tmp = get_fpr32(&fregs[addr - FPR_BASE], 0); + tmp = get_fpr64(&fregs[addr - FPR_BASE], 0); break; case PC: tmp = regs->cp0_epc; diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c index 656a137c1fe2..f30c381d3e1c 100644 --- a/arch/mips/kernel/ptrace32.c +++ b/arch/mips/kernel/ptrace32.c @@ -109,7 +109,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, addr & 1); break; } - tmp = get_fpr32(&fregs[addr - FPR_BASE], 0); + tmp = get_fpr64(&fregs[addr - FPR_BASE], 0); break; case PC: tmp = regs->cp0_epc; diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index c4db910a8794..b5d9e1784aff 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -8,13 +8,13 @@ * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2016, Imagination Technologies Ltd. */ +#include <linux/compat.h> #include <linux/compiler.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/syscalls.h> -#include <asm/compat.h> #include <asm/compat-signal.h> #include <linux/uaccess.h> #include <asm/unistd.h> diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 967e9e4e795e..d67fa74622ee 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -699,17 +699,11 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode) asmlinkage void do_ov(struct pt_regs *regs) { enum ctx_state prev_state; - siginfo_t info; - - clear_siginfo(&info); - info.si_signo = SIGFPE; - info.si_code = FPE_INTOVF; - info.si_addr = (void __user *)regs->cp0_epc; prev_state = exception_enter(); die_if_kernel("Integer overflow", regs); - force_sig_info(SIGFPE, &info, current); + force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc, current); exception_exit(prev_state); } @@ -722,32 +716,27 @@ asmlinkage void do_ov(struct pt_regs *regs) void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr, struct task_struct *tsk) { - struct siginfo si; - - clear_siginfo(&si); - si.si_addr = fault_addr; - si.si_signo = SIGFPE; + int si_code = FPE_FLTUNK; if (fcr31 & FPU_CSR_INV_X) - si.si_code = FPE_FLTINV; + si_code = FPE_FLTINV; else if (fcr31 & FPU_CSR_DIV_X) - si.si_code = FPE_FLTDIV; + si_code = FPE_FLTDIV; else if (fcr31 & FPU_CSR_OVF_X) - si.si_code = FPE_FLTOVF; + si_code = FPE_FLTOVF; else if (fcr31 & FPU_CSR_UDF_X) - si.si_code = FPE_FLTUND; + si_code = FPE_FLTUND; else if (fcr31 & FPU_CSR_INE_X) - si.si_code = FPE_FLTRES; + si_code = FPE_FLTRES; - force_sig_info(SIGFPE, &si, tsk); + force_sig_fault(SIGFPE, si_code, fault_addr, tsk); } int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) { - struct siginfo si; + int si_code; struct vm_area_struct *vma; - clear_siginfo(&si); switch (sig) { case 0: return 0; @@ -757,23 +746,18 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) return 1; case SIGBUS: - si.si_addr = fault_addr; - si.si_signo = sig; - si.si_code = BUS_ADRERR; - force_sig_info(sig, &si, current); + 
force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr, current); return 1; case SIGSEGV: - si.si_addr = fault_addr; - si.si_signo = sig; down_read(¤t->mm->mmap_sem); vma = find_vma(current->mm, (unsigned long)fault_addr); if (vma && (vma->vm_start <= (unsigned long)fault_addr)) - si.si_code = SEGV_ACCERR; + si_code = SEGV_ACCERR; else - si.si_code = SEGV_MAPERR; + si_code = SEGV_MAPERR; up_read(¤t->mm->mmap_sem); - force_sig_info(sig, &si, current); + force_sig_fault(SIGSEGV, si_code, fault_addr, current); return 1; default: @@ -896,10 +880,8 @@ out: void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code, const char *str) { - siginfo_t info; char b[40]; - clear_siginfo(&info); #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) @@ -921,13 +903,9 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code, case BRK_DIVZERO: scnprintf(b, sizeof(b), "%s instruction in kernel code", str); die_if_kernel(b, regs); - if (code == BRK_DIVZERO) - info.si_code = FPE_INTDIV; - else - info.si_code = FPE_INTOVF; - info.si_signo = SIGFPE; - info.si_addr = (void __user *) regs->cp0_epc; - force_sig_info(SIGFPE, &info, current); + force_sig_fault(SIGFPE, + code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF, + (void __user *) regs->cp0_epc, current); break; case BRK_BUG: die_if_kernel("Kernel bug detected", regs); @@ -952,9 +930,7 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code, scnprintf(b, sizeof(b), "%s instruction in kernel code", str); die_if_kernel(b, regs); if (si_code) { - info.si_signo = SIGTRAP; - info.si_code = si_code; - force_sig_info(SIGTRAP, &info, current); + force_sig_fault(SIGTRAP, si_code, NULL, current); } else { force_sig(SIGTRAP, current); } @@ -1506,13 +1482,8 @@ asmlinkage void do_mdmx(struct pt_regs *regs) */ asmlinkage void do_watch(struct pt_regs *regs) { - siginfo_t info; enum ctx_state prev_state; - clear_siginfo(&info); - info.si_signo = SIGTRAP; - info.si_code = TRAP_HWBKPT; - prev_state = exception_enter(); /* * Clear WP (bit 22) bit of cause register so we don't loop @@ -1528,7 +1499,7 @@ asmlinkage void do_watch(struct pt_regs *regs) if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) { mips_read_watch_registers(); local_irq_enable(); - force_sig_info(SIGTRAP, &info, current); + force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL, current); } else { mips_clear_watch_registers(); local_irq_enable(); diff --git a/arch/mips/loongson64/Kconfig b/arch/mips/loongson64/Kconfig index 72af0c183969..c79e6a565572 100644 --- a/arch/mips/loongson64/Kconfig +++ b/arch/mips/loongson64/Kconfig @@ -130,21 +130,6 @@ config LOONGSON_UART_BASE default y depends on EARLY_PRINTK || SERIAL_8250 -config IOMMU_HELPER - bool - -config NEED_SG_DMA_LENGTH - bool - -config SWIOTLB - bool "Soft IOMMU Support for All-Memory DMA" - default y - depends on CPU_LOONGSON3 - select DMA_DIRECT_OPS - select IOMMU_HELPER - select NEED_SG_DMA_LENGTH - select NEED_DMA_MAP_STATE - config PHYS48_TO_HT40 bool default y if CPU_LOONGSON3 diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index dcafa43613b6..f9fef0028ca2 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -402,13 +402,3 @@ static const struct dma_map_ops mips_default_dma_map_ops = { const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops; EXPORT_SYMBOL(mips_dma_map_ops); - -#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) - -static int __init mips_dma_init(void) -{ - 
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); - - return 0; -} -fs_initcall(mips_dma_init); diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 4f8f5bf46977..5f71f2b903b7 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -42,7 +42,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write, struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; const int field = sizeof(unsigned long) * 2; - siginfo_t info; + int si_code; int fault; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; @@ -63,7 +63,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write, return; #endif - info.si_code = SEGV_MAPERR; + si_code = SEGV_MAPERR; /* * We fault-in kernel-space virtual memory on-demand. The @@ -112,7 +112,7 @@ retry: * we can handle it.. */ good_area: - info.si_code = SEGV_ACCERR; + si_code = SEGV_ACCERR; if (write) { if (!(vma->vm_flags & VM_WRITE)) @@ -223,11 +223,7 @@ bad_area_nosemaphore: pr_cont("\n"); } current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; - info.si_signo = SIGSEGV; - info.si_errno = 0; - /* info.si_code has been set above */ - info.si_addr = (void __user *) address; - force_sig_info(SIGSEGV, &info, tsk); + force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk); return; } @@ -283,11 +279,7 @@ do_sigbus: #endif current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; tsk->thread.cp0_badvaddr = address; - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRERR; - info.si_addr = (void __user *) address; - force_sig_info(SIGBUS, &info, tsk); + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk); return; #ifndef CONFIG_64BIT diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index 3e2798bfea4f..aeb7b1b0f202 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -95,7 +95,6 @@ enum reg_val_type { * struct jit_ctx - JIT context * @skf: The sk_filter * @stack_size: eBPF stack size - * @tmp_offset: eBPF $sp offset to 8-byte temporary memory * @idx: Instruction index * @flags: JIT flags * @offsets: Instruction offsets @@ -105,7 +104,6 @@ enum reg_val_type { struct jit_ctx { const struct bpf_prog *skf; int stack_size; - int tmp_offset; u32 idx; u32 flags; u32 *offsets; @@ -293,7 +291,6 @@ static int gen_int_prologue(struct jit_ctx *ctx) locals_size = (ctx->flags & EBPF_SEEN_FP) ? 
MAX_BPF_STACK : 0; stack_adjust += locals_size; - ctx->tmp_offset = locals_size; ctx->stack_size = stack_adjust; @@ -399,7 +396,6 @@ static void gen_imm_to_reg(const struct bpf_insn *insn, int reg, emit_instr(ctx, lui, reg, upper >> 16); emit_instr(ctx, addiu, reg, reg, lower); } - } static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, @@ -547,28 +543,6 @@ static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, return 0; } -static void * __must_check -ool_skb_header_pointer(const struct sk_buff *skb, int offset, - int len, void *buffer) -{ - return skb_header_pointer(skb, offset, len, buffer); -} - -static int size_to_len(const struct bpf_insn *insn) -{ - switch (BPF_SIZE(insn->code)) { - case BPF_B: - return 1; - case BPF_H: - return 2; - case BPF_W: - return 4; - case BPF_DW: - return 8; - } - return 0; -} - static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value) { if (value >= 0xffffffffffff8000ull || value < 0x8000ull) { @@ -1267,110 +1241,6 @@ jeq_common: return -EINVAL; break; - case BPF_LD | BPF_B | BPF_ABS: - case BPF_LD | BPF_H | BPF_ABS: - case BPF_LD | BPF_W | BPF_ABS: - case BPF_LD | BPF_DW | BPF_ABS: - ctx->flags |= EBPF_SAVE_RA; - - gen_imm_to_reg(insn, MIPS_R_A1, ctx); - emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn)); - - if (insn->imm < 0) { - emit_const_to_reg(ctx, MIPS_R_T9, (u64)bpf_internal_load_pointer_neg_helper); - } else { - emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer); - emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset); - } - goto ld_skb_common; - - case BPF_LD | BPF_B | BPF_IND: - case BPF_LD | BPF_H | BPF_IND: - case BPF_LD | BPF_W | BPF_IND: - case BPF_LD | BPF_DW | BPF_IND: - ctx->flags |= EBPF_SAVE_RA; - src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); - if (src < 0) - return src; - ts = get_reg_val_type(ctx, this_idx, insn->src_reg); - if (ts == REG_32BIT_ZERO_EX) { - /* sign extend */ - emit_instr(ctx, sll, MIPS_R_A1, src, 0); - src = MIPS_R_A1; - } - if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) { - emit_instr(ctx, daddiu, MIPS_R_A1, src, insn->imm); - } else { - gen_imm_to_reg(insn, MIPS_R_AT, ctx); - emit_instr(ctx, daddu, MIPS_R_A1, MIPS_R_AT, src); - } - /* truncate to 32-bit int */ - emit_instr(ctx, sll, MIPS_R_A1, MIPS_R_A1, 0); - emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset); - emit_instr(ctx, slt, MIPS_R_AT, MIPS_R_A1, MIPS_R_ZERO); - - emit_const_to_reg(ctx, MIPS_R_T8, (u64)bpf_internal_load_pointer_neg_helper); - emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer); - emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn)); - emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_T8, MIPS_R_AT); - -ld_skb_common: - emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9); - /* delay slot move */ - emit_instr(ctx, daddu, MIPS_R_A0, MIPS_R_S0, MIPS_R_ZERO); - - /* Check the error value */ - b_off = b_imm(exit_idx, ctx); - if (is_bad_offset(b_off)) { - target = j_target(ctx, exit_idx); - if (target == (unsigned int)-1) - return -E2BIG; - - if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) { - ctx->offsets[this_idx] |= OFFSETS_B_CONV; - ctx->long_b_conversion = 1; - } - emit_instr(ctx, bne, MIPS_R_V0, MIPS_R_ZERO, 4 * 3); - emit_instr(ctx, nop); - emit_instr(ctx, j, target); - emit_instr(ctx, nop); - } else { - emit_instr(ctx, beq, MIPS_R_V0, MIPS_R_ZERO, b_off); - emit_instr(ctx, nop); - } - -#ifdef __BIG_ENDIAN - need_swap = false; -#else - need_swap = true; -#endif - dst = MIPS_R_V0; - switch (BPF_SIZE(insn->code)) { - case BPF_B: - 
emit_instr(ctx, lbu, dst, 0, MIPS_R_V0); - break; - case BPF_H: - emit_instr(ctx, lhu, dst, 0, MIPS_R_V0); - if (need_swap) - emit_instr(ctx, wsbh, dst, dst); - break; - case BPF_W: - emit_instr(ctx, lw, dst, 0, MIPS_R_V0); - if (need_swap) { - emit_instr(ctx, wsbh, dst, dst); - emit_instr(ctx, rotr, dst, dst, 16); - } - break; - case BPF_DW: - emit_instr(ctx, ld, dst, 0, MIPS_R_V0); - if (need_swap) { - emit_instr(ctx, dsbh, dst, dst); - emit_instr(ctx, dshd, dst, dst); - } - break; - } - - break; case BPF_ALU | BPF_END | BPF_FROM_BE: case BPF_ALU | BPF_END | BPF_FROM_LE: dst = ebpf_to_mips_reg(ctx, insn, dst_reg); diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig index 7fcfc7fe9f14..412351c5acc6 100644 --- a/arch/mips/netlogic/Kconfig +++ b/arch/mips/netlogic/Kconfig @@ -83,10 +83,4 @@ endif config NLM_COMMON bool -config IOMMU_HELPER - bool - -config NEED_SG_DMA_LENGTH - bool - endif diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c index dd2d9f7e9412..7649372103af 100644 --- a/arch/mips/pci/ops-pmcmsp.c +++ b/arch/mips/pci/ops-pmcmsp.c @@ -83,18 +83,6 @@ static int show_msp_pci_counts(struct seq_file *m, void *v) return 0; } -static int msp_pci_rd_cnt_open(struct inode *inode, struct file *file) -{ - return single_open(file, show_msp_pci_counts, NULL); -} - -static const struct file_operations msp_pci_rd_cnt_fops = { - .open = msp_pci_rd_cnt_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - /***************************************************************************** * * FUNCTION: gen_pci_cfg_wr_show @@ -160,18 +148,6 @@ static int gen_pci_cfg_wr_show(struct seq_file *m, void *v) return 0; } -static int gen_pci_cfg_wr_open(struct inode *inode, struct file *file) -{ - return single_open(file, gen_pci_cfg_wr_show, NULL); -} - -static const struct file_operations gen_pci_cfg_wr_fops = { - .open = gen_pci_cfg_wr_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - /***************************************************************************** * * FUNCTION: pci_proc_init @@ -188,8 +164,8 @@ static const struct file_operations gen_pci_cfg_wr_fops = { ****************************************************************************/ static void pci_proc_init(void) { - proc_create("pmc_msp_pci_rd_cnt", 0, NULL, &msp_pci_rd_cnt_fops); - proc_create("pmc_msp_pci_cfg_wr", 0, NULL, &gen_pci_cfg_wr_fops); + proc_create_single("pmc_msp_pci_rd_cnt", 0, NULL, show_msp_pci_counts); + proc_create_single("pmc_msp_pci_cfg_wr", 0, NULL, gen_pci_cfg_wr_show); } #endif /* CONFIG_PROC_FS && PCI_COUNTERS */ diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c index 0c65c38e05d6..f1e92bf743c2 100644 --- a/arch/mips/pci/pci-legacy.c +++ b/arch/mips/pci/pci-legacy.c @@ -263,9 +263,8 @@ static int pcibios_enable_resources(struct pci_dev *dev, int mask) (!(r->flags & IORESOURCE_ROM_ENABLE))) continue; if (!r->start && r->end) { - printk(KERN_ERR "PCI: Device %s not available " - "because of resource collisions\n", - pci_name(dev)); + pci_err(dev, + "can't enable device: resource collisions\n"); return -EINVAL; } if (r->flags & IORESOURCE_IO) @@ -274,8 +273,7 @@ static int pcibios_enable_resources(struct pci_dev *dev, int mask) cmd |= PCI_COMMAND_MEMORY; } if (cmd != old_cmd) { - printk("PCI: Enabling device %s (%04x -> %04x)\n", - pci_name(dev), old_cmd, cmd); + pci_info(dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd); } return 0; diff --git 
a/arch/mips/sibyte/common/bus_watcher.c b/arch/mips/sibyte/common/bus_watcher.c index a4e55999ecb4..4bb85de9229b 100644 --- a/arch/mips/sibyte/common/bus_watcher.c +++ b/arch/mips/sibyte/common/bus_watcher.c @@ -142,24 +142,12 @@ static int bw_proc_show(struct seq_file *m, void *v) return 0; } -static int bw_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, bw_proc_show, PDE_DATA(inode)); -} - -static const struct file_operations bw_proc_fops = { - .open = bw_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static void create_proc_decoder(struct bw_stats_struct *stats) { struct proc_dir_entry *ent; - ent = proc_create_data("bus_watcher", S_IWUSR | S_IRUGO, NULL, - &bw_proc_fops, stats); + ent = proc_create_single_data("bus_watcher", S_IWUSR | S_IRUGO, NULL, + bw_proc_show, stats); if (!ent) { printk(KERN_INFO "Unable to initialize bus_watcher /proc entry\n"); return; diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index 249f38d3388f..6aed974276d8 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig @@ -5,10 +5,19 @@ config NDS32 def_bool y + select ARCH_HAS_SYNC_DMA_FOR_CPU + select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_WANT_FRAME_POINTERS if FTRACE select CLKSRC_MMIO select CLONE_BACKWARDS select COMMON_CLK + select DMA_NONCOHERENT_OPS + select GENERIC_ASHLDI3 + select GENERIC_ASHRDI3 + select GENERIC_LSHRDI3 + select GENERIC_CMPDI2 + select GENERIC_MULDI3 + select GENERIC_UCMPDI2 select GENERIC_ATOMIC64 select GENERIC_CPU_DEVICES select GENERIC_CLOCKEVENTS @@ -82,6 +91,7 @@ endmenu menu "Kernel Features" source "kernel/Kconfig.preempt" +source "kernel/Kconfig.freezer" source "mm/Kconfig" source "kernel/Kconfig.hz" endmenu diff --git a/arch/nds32/Kconfig.cpu b/arch/nds32/Kconfig.cpu index ba44cc539da9..b8c8984d1456 100644 --- a/arch/nds32/Kconfig.cpu +++ b/arch/nds32/Kconfig.cpu @@ -1,10 +1,11 @@ comment "Processor Features" config CPU_BIG_ENDIAN - bool "Big endian" + def_bool !CPU_LITTLE_ENDIAN config CPU_LITTLE_ENDIAN - def_bool !CPU_BIG_ENDIAN + bool "Little endian" + default y config HWZOL bool "hardware zero overhead loop support" diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile index 91f933d5a962..513bb2e9baf9 100644 --- a/arch/nds32/Makefile +++ b/arch/nds32/Makefile @@ -23,9 +23,6 @@ export TEXTADDR # If we have a machine-specific directory, then include it in the build. 
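The two procfs conversions above (ops-pmcmsp.c and bus_watcher.c) use the proc_create_single()/proc_create_single_data() helpers, which fold the single_open() open/read/llseek/release boilerplate into procfs itself. A minimal sketch of the new pattern, with hypothetical names:

    #include <linux/init.h>
    #include <linux/errno.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int demo_show(struct seq_file *m, void *v)
    {
            seq_puts(m, "demo\n");
            return 0;
    }

    static int __init demo_proc_init(void)
    {
            /* One call replaces the old file_operations plus open wrapper. */
            if (!proc_create_single("demo_stats", 0444, NULL, demo_show))
                    return -ENOMEM;
            return 0;
    }
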
core-y += arch/nds32/kernel/ arch/nds32/mm/ libs-y += arch/nds32/lib/ -LIBGCC_PATH := \ - $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name) -libs-y += $(LIBGCC_PATH) ifneq '$(CONFIG_NDS32_BUILTIN_DTB)' '""' BUILTIN_DTB := y @@ -35,8 +32,12 @@ endif ifdef CONFIG_CPU_LITTLE_ENDIAN KBUILD_CFLAGS += $(call cc-option, -EL) +KBUILD_AFLAGS += $(call cc-option, -EL) +LDFLAGS += $(call cc-option, -EL) else KBUILD_CFLAGS += $(call cc-option, -EB) +KBUILD_AFLAGS += $(call cc-option, -EB) +LDFLAGS += $(call cc-option, -EB) endif boot := arch/nds32/boot diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild index 06bdf8167f5a..dbc4e5422550 100644 --- a/arch/nds32/include/asm/Kbuild +++ b/arch/nds32/include/asm/Kbuild @@ -9,13 +9,16 @@ generic-y += checksum.h generic-y += clkdev.h generic-y += cmpxchg.h generic-y += cmpxchg-local.h +generic-y += compat.h generic-y += cputime.h generic-y += device.h generic-y += div64.h generic-y += dma.h +generic-y += dma-mapping.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h +generic-y += export.h generic-y += fb.h generic-y += fcntl.h generic-y += ftrace.h @@ -49,6 +52,7 @@ generic-y += switch_to.h generic-y += timex.h generic-y += topology.h generic-y += trace_clock.h +generic-y += xor.h generic-y += unaligned.h generic-y += user.h generic-y += vga.h diff --git a/arch/nds32/include/asm/bitfield.h b/arch/nds32/include/asm/bitfield.h index c73f71d67744..8e84fc385b94 100644 --- a/arch/nds32/include/asm/bitfield.h +++ b/arch/nds32/include/asm/bitfield.h @@ -336,7 +336,7 @@ #define INT_MASK_mskIDIVZE ( 0x1 << INT_MASK_offIDIVZE ) #define INT_MASK_mskDSSIM ( 0x1 << INT_MASK_offDSSIM ) -#define INT_MASK_INITAIAL_VAL 0x10003 +#define INT_MASK_INITAIAL_VAL (INT_MASK_mskDSSIM|INT_MASK_mskIDIVZE) /****************************************************************************** * ir15: INT_PEND (Interrupt Pending Register) @@ -396,6 +396,7 @@ #define MMU_CTL_D8KB 1 #define MMU_CTL_UNA ( 0x1 << MMU_CTL_offUNA ) +#define MMU_CTL_CACHEABLE_NON 0 #define MMU_CTL_CACHEABLE_WB 2 #define MMU_CTL_CACHEABLE_WT 3 diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h index 1240f148ec0f..10b48f0d8e85 100644 --- a/arch/nds32/include/asm/cacheflush.h +++ b/arch/nds32/include/asm/cacheflush.h @@ -32,6 +32,8 @@ void flush_anon_page(struct vm_area_struct *vma, #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE void flush_kernel_dcache_page(struct page *page); +void flush_kernel_vmap_range(void *addr, int size); +void invalidate_kernel_vmap_range(void *addr, int size); void flush_icache_range(unsigned long start, unsigned long end); void flush_icache_page(struct vm_area_struct *vma, struct page *page); #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages) diff --git a/arch/nds32/include/asm/dma-mapping.h b/arch/nds32/include/asm/dma-mapping.h deleted file mode 100644 index 2dd47d245c25..000000000000 --- a/arch/nds32/include/asm/dma-mapping.h +++ /dev/null @@ -1,14 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2005-2017 Andes Technology Corporation - -#ifndef ASMNDS32_DMA_MAPPING_H -#define ASMNDS32_DMA_MAPPING_H - -extern struct dma_map_ops nds32_dma_ops; - -static inline struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) -{ - return &nds32_dma_ops; -} - -#endif diff --git a/arch/nds32/include/asm/io.h b/arch/nds32/include/asm/io.h index 966e71b3c960..71cd226d6863 100644 --- a/arch/nds32/include/asm/io.h +++ b/arch/nds32/include/asm/io.h @@ -4,6 +4,8 @@ #ifndef 
__ASM_NDS32_IO_H #define __ASM_NDS32_IO_H +#include <linux/types.h> + extern void iounmap(volatile void __iomem *addr); #define __raw_writeb __raw_writeb static inline void __raw_writeb(u8 val, volatile void __iomem *addr) diff --git a/arch/nds32/include/asm/page.h b/arch/nds32/include/asm/page.h index e27365c097b6..947f0491c9a7 100644 --- a/arch/nds32/include/asm/page.h +++ b/arch/nds32/include/asm/page.h @@ -27,6 +27,9 @@ extern void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma); extern void clear_user_highpage(struct page *page, unsigned long vaddr); +void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, + struct page *to); +void clear_user_page(void *addr, unsigned long vaddr, struct page *page); #define __HAVE_ARCH_COPY_USER_HIGHPAGE #define clear_user_highpage clear_user_highpage #else diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h index 6783937edbeb..d3e19a55cf53 100644 --- a/arch/nds32/include/asm/pgtable.h +++ b/arch/nds32/include/asm/pgtable.h @@ -152,6 +152,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #define PAGE_CACHE_L1 __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE) #define PAGE_MEMORY __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) #define PAGE_KERNEL __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) +#define PAGE_SHARED __pgprot(_PAGE_V | _PAGE_M_URW_KRW | _PAGE_D | _PAGE_CACHE_SHRD) #define PAGE_DEVICE __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV) #endif /* __ASSEMBLY__ */ diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c index d291800fc621..d0dbd4fe9645 100644 --- a/arch/nds32/kernel/dma.c +++ b/arch/nds32/kernel/dma.c @@ -3,17 +3,14 @@ #include <linux/types.h> #include <linux/mm.h> -#include <linux/export.h> #include <linux/string.h> -#include <linux/scatterlist.h> -#include <linux/dma-mapping.h> +#include <linux/dma-noncoherent.h> #include <linux/io.h> #include <linux/cache.h> #include <linux/highmem.h> #include <linux/slab.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> -#include <asm/dma-mapping.h> #include <asm/proc-fns.h> /* @@ -22,11 +19,6 @@ static pte_t *consistent_pte; static DEFINE_RAW_SPINLOCK(consistent_lock); -enum master_type { - FOR_CPU = 0, - FOR_DEVICE = 1, -}; - /* * VM region handling support. * @@ -124,10 +116,8 @@ out: return c; } -/* FIXME: attrs is not used. 
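The nds32/kernel/dma.c rewrite that follows drops the per-arch dma_map_ops in favour of the generic noncoherent hooks, so the arch only has to supply direction-aware cache maintenance. A lowmem-only sketch of the rule those hooks encode (the real code below routes through cache_op() so highmem pages are kmapped piecewise):

    #include <linux/io.h>
    #include <linux/dma-direction.h>
    #include <asm/proc-fns.h>       /* cpu_dma_wb_range(), cpu_dma_inval_range() */

    static void demo_sync(phys_addr_t paddr, size_t size,
                          enum dma_data_direction dir, bool for_device)
    {
            unsigned long start = (unsigned long)phys_to_virt(paddr);
            unsigned long end = start + size;

            if (for_device && dir != DMA_FROM_DEVICE)
                    cpu_dma_wb_range(start, end);    /* push CPU writes to RAM */
            else if (!for_device && dir != DMA_TO_DEVICE)
                    cpu_dma_inval_range(start, end); /* drop stale CPU lines */
    }
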
*/ -static void *nds32_dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t * handle, gfp_t gfp, - unsigned long attrs) +void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, + gfp_t gfp, unsigned long attrs) { struct page *page; struct arch_vm_region *c; @@ -232,8 +222,8 @@ no_page: return NULL; } -static void nds32_dma_free(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t handle, unsigned long attrs) +void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t handle, unsigned long attrs) { struct arch_vm_region *c; unsigned long flags, addr; @@ -333,145 +323,69 @@ static int __init consistent_init(void) } core_initcall(consistent_init); -static void consistent_sync(void *vaddr, size_t size, int direction, int master_type); -static dma_addr_t nds32_dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - consistent_sync((void *)(page_address(page) + offset), size, dir, FOR_DEVICE); - return page_to_phys(page) + offset; -} - -static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - consistent_sync(phys_to_virt(handle), size, dir, FOR_CPU); -} - -/* - * Make an area consistent for devices. - */ -static void consistent_sync(void *vaddr, size_t size, int direction, int master_type) -{ - unsigned long start = (unsigned long)vaddr; - unsigned long end = start + size; - - if (master_type == FOR_CPU) { - switch (direction) { - case DMA_TO_DEVICE: - break; - case DMA_FROM_DEVICE: - case DMA_BIDIRECTIONAL: - cpu_dma_inval_range(start, end); - break; - default: - BUG(); - } - } else { - /* FOR_DEVICE */ - switch (direction) { - case DMA_FROM_DEVICE: - break; - case DMA_TO_DEVICE: - case DMA_BIDIRECTIONAL: - cpu_dma_wb_range(start, end); - break; - default: - BUG(); - } - } -} -static int nds32_dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, - unsigned long attrs) +static inline void cache_op(phys_addr_t paddr, size_t size, + void (*fn)(unsigned long start, unsigned long end)) { - int i; + struct page *page = pfn_to_page(paddr >> PAGE_SHIFT); + unsigned offset = paddr & ~PAGE_MASK; + size_t left = size; + unsigned long start; - for (i = 0; i < nents; i++, sg++) { - void *virt; - unsigned long pfn; - struct page *page = sg_page(sg); + do { + size_t len = left; - sg->dma_address = sg_phys(sg); - pfn = page_to_pfn(page) + sg->offset / PAGE_SIZE; - page = pfn_to_page(pfn); if (PageHighMem(page)) { - virt = kmap_atomic(page); - consistent_sync(virt, sg->length, dir, FOR_CPU); - kunmap_atomic(virt); + void *addr; + + if (offset + len > PAGE_SIZE) { + if (offset >= PAGE_SIZE) { + page += offset >> PAGE_SHIFT; + offset &= ~PAGE_MASK; + } + len = PAGE_SIZE - offset; + } + + addr = kmap_atomic(page); + start = (unsigned long)(addr + offset); + fn(start, start + len); + kunmap_atomic(addr); } else { - if (sg->offset > PAGE_SIZE) - panic("sg->offset:%08x > PAGE_SIZE\n", - sg->offset); - virt = page_address(page) + sg->offset; - consistent_sync(virt, sg->length, dir, FOR_CPU); + start = (unsigned long)phys_to_virt(paddr); + fn(start, start + size); } - } - return nents; + offset = 0; + page++; + left -= len; + } while (left); } -static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg, - int nhwentries, enum dma_data_direction 
dir, - unsigned long attrs) +void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) { -} - -static void -nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir) -{ - consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_CPU); -} - -static void -nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir) -{ - consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_DEVICE); -} - -static void -nds32_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir) -{ - int i; - - for (i = 0; i < nents; i++, sg++) { - char *virt = - page_address((struct page *)sg->page_link) + sg->offset; - consistent_sync(virt, sg->length, dir, FOR_CPU); + switch (dir) { + case DMA_FROM_DEVICE: + break; + case DMA_TO_DEVICE: + case DMA_BIDIRECTIONAL: + cache_op(paddr, size, cpu_dma_wb_range); + break; + default: + BUG(); } } -static void -nds32_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir) +void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) { - int i; - - for (i = 0; i < nents; i++, sg++) { - char *virt = - page_address((struct page *)sg->page_link) + sg->offset; - consistent_sync(virt, sg->length, dir, FOR_DEVICE); + switch (dir) { + case DMA_TO_DEVICE: + break; + case DMA_FROM_DEVICE: + case DMA_BIDIRECTIONAL: + cache_op(paddr, size, cpu_dma_inval_range); + break; + default: + BUG(); } } - -struct dma_map_ops nds32_dma_ops = { - .alloc = nds32_dma_alloc_coherent, - .free = nds32_dma_free, - .map_page = nds32_dma_map_page, - .unmap_page = nds32_dma_unmap_page, - .map_sg = nds32_dma_map_sg, - .unmap_sg = nds32_dma_unmap_sg, - .sync_single_for_device = nds32_dma_sync_single_for_device, - .sync_single_for_cpu = nds32_dma_sync_single_for_cpu, - .sync_sg_for_cpu = nds32_dma_sync_sg_for_cpu, - .sync_sg_for_device = nds32_dma_sync_sg_for_device, -}; - -EXPORT_SYMBOL(nds32_dma_ops); diff --git a/arch/nds32/kernel/ex-entry.S b/arch/nds32/kernel/ex-entry.S index a72e83d804f5..b8ae4e9a6b93 100644 --- a/arch/nds32/kernel/ex-entry.S +++ b/arch/nds32/kernel/ex-entry.S @@ -118,7 +118,7 @@ common_exception_handler: /* interrupt */ 2: #ifdef CONFIG_TRACE_IRQFLAGS - jal arch_trace_hardirqs_off + jal trace_hardirqs_off #endif move $r0, $sp sethi $lp, hi20(ret_from_intr) diff --git a/arch/nds32/kernel/head.S b/arch/nds32/kernel/head.S index 71f57bd70f3b..c5fdae174ced 100644 --- a/arch/nds32/kernel/head.S +++ b/arch/nds32/kernel/head.S @@ -57,14 +57,32 @@ _nodtb: isb mtsr $r4, $L1_PPTB ! 
load page table pointer\n" -/* set NTC0 cacheable/writeback, mutliple page size in use */ +#ifdef CONFIG_CPU_DCACHE_DISABLE + #define MMU_CTL_NTCC MMU_CTL_CACHEABLE_NON +#else + #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH + #define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WT + #else + #define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WB + #endif +#endif + +/* set NTC cacheability, multiple page sizes in use */ mfsr $r3, $MMU_CTL +#if CONFIG_MEMORY_START >= 0xc0000000 + ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC3) +#elif CONFIG_MEMORY_START >= 0x80000000 + ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC2) +#elif CONFIG_MEMORY_START >= 0x40000000 + ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC1) +#else + ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC0) +#endif + #ifdef CONFIG_ANDES_PAGE_SIZE_4KB - ori $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0)) + ori $r3, $r3, #(MMU_CTL_mskMPZIU) #else - ori $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0)|MMU_CTL_D8KB) + ori $r3, $r3, #(MMU_CTL_mskMPZIU|MMU_CTL_D8KB) #endif #ifdef CONFIG_HW_SUPPORT_UNALIGNMENT_ACCESS li $r0, #MMU_CTL_UNA diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c index ba910e9e4ecb..2f5b2ccebe47 100644 --- a/arch/nds32/kernel/setup.c +++ b/arch/nds32/kernel/setup.c @@ -293,6 +293,9 @@ void __init setup_arch(char **cmdline_p) /* paging_init() sets up the MMU and marks all pages as reserved */ paging_init(); + /* invalidate all TLB entries because the new mapping is created */ + __nds32__tlbop_flua(); + /* use generic way to parse */ parse_early_param(); diff --git a/arch/nds32/kernel/stacktrace.c b/arch/nds32/kernel/stacktrace.c index bc70113c0e84..8b231e910ea6 100644 --- a/arch/nds32/kernel/stacktrace.c +++ b/arch/nds32/kernel/stacktrace.c @@ -9,6 +9,7 @@ void save_stack_trace(struct stack_trace *trace) { save_stack_trace_tsk(current, trace); } +EXPORT_SYMBOL_GPL(save_stack_trace); void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { @@ -45,3 +46,4 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) fpn = (unsigned long *)fpp; } } +EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c index 6e34eb9824a4..a6205fd4db52 100644 --- a/arch/nds32/kernel/traps.c +++ b/arch/nds32/kernel/traps.c @@ -222,19 +222,13 @@ void die_if_kernel(const char *str, struct pt_regs *regs, int err) int bad_syscall(int n, struct pt_regs *regs) { - siginfo_t info; - if (current->personality != PER_LINUX) { send_sig(SIGSEGV, current, 1); return regs->uregs[0]; } - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_ILLTRP; - info.si_addr = (void __user *)instruction_pointer(regs) - 4; - - force_sig_info(SIGILL, &info, current); + force_sig_fault(SIGILL, ILL_ILLTRP, + (void __user *)instruction_pointer(regs) - 4, current); die_if_kernel("Oops - bad syscall", regs, n); return regs->uregs[0]; } @@ -287,16 +281,11 @@ void __init early_trap_init(void) void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code, int si_code) { - struct siginfo info; - tsk->thread.trap_no = ENTRY_DEBUG_RELATED; tsk->thread.error_code = error_code; - memset(&info, 0, sizeof(info)); - info.si_signo = SIGTRAP; - info.si_code = si_code; - info.si_addr = (void __user *)instruction_pointer(regs); - force_sig_info(SIGTRAP, &info, tsk); + force_sig_fault(SIGTRAP, si_code, + (void __user *)instruction_pointer(regs), tsk); } void do_debug_trap(unsigned long entry, 
unsigned long addr, @@ -318,29 +307,22 @@ void do_debug_trap(unsigned long entry, unsigned long addr, void unhandled_interruption(struct pt_regs *regs) { - siginfo_t si; pr_emerg("unhandled_interruption\n"); show_regs(regs); if (!user_mode(regs)) do_exit(SIGKILL); - si.si_signo = SIGKILL; - si.si_errno = 0; - force_sig_info(SIGKILL, &si, current); + force_sig(SIGKILL, current); } void unhandled_exceptions(unsigned long entry, unsigned long addr, unsigned long type, struct pt_regs *regs) { - siginfo_t si; pr_emerg("Unhandled Exception: entry: %lx addr:%lx itype:%lx\n", entry, addr, type); show_regs(regs); if (!user_mode(regs)) do_exit(SIGKILL); - si.si_signo = SIGKILL; - si.si_errno = 0; - si.si_addr = (void *)addr; - force_sig_info(SIGKILL, &si, current); + force_sig(SIGKILL, current); } extern int do_page_fault(unsigned long entry, unsigned long addr, @@ -363,14 +345,11 @@ void do_dispatch_tlb_misc(unsigned long entry, unsigned long addr, void do_revinsn(struct pt_regs *regs) { - siginfo_t si; pr_emerg("Reserved Instruction\n"); show_regs(regs); if (!user_mode(regs)) do_exit(SIGILL); - si.si_signo = SIGILL; - si.si_errno = 0; - force_sig_info(SIGILL, &si, current); + force_sig(SIGILL, current); } #ifdef CONFIG_ALIGNMENT_TRAP diff --git a/arch/nds32/kernel/vdso.c b/arch/nds32/kernel/vdso.c index f1198d7a5654..016f15891f6d 100644 --- a/arch/nds32/kernel/vdso.c +++ b/arch/nds32/kernel/vdso.c @@ -23,7 +23,7 @@ #include <asm/vdso_timer_info.h> #include <asm/cache_info.h> extern struct cache_info L1_cache_info[2]; -extern char vdso_start, vdso_end; +extern char vdso_start[], vdso_end[]; static unsigned long vdso_pages __ro_after_init; static unsigned long timer_mapping_base; @@ -66,16 +66,16 @@ static int __init vdso_init(void) int i; struct page **vdso_pagelist; - if (memcmp(&vdso_start, "\177ELF", 4)) { + if (memcmp(vdso_start, "\177ELF", 4)) { pr_err("vDSO is not a valid ELF object!\n"); return -EINVAL; } /* Creat a timer io mapping to get clock cycles counter */ get_timer_node_info(); - vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT; + vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n", - vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data); + vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data); /* Allocate the vDSO pagelist */ vdso_pagelist = kcalloc(vdso_pages, sizeof(struct page *), GFP_KERNEL); @@ -83,7 +83,7 @@ static int __init vdso_init(void) return -ENOMEM; for (i = 0; i < vdso_pages; i++) - vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE); + vdso_pagelist[i] = virt_to_page(vdso_start + i * PAGE_SIZE); vdso_spec[1].pages = &vdso_pagelist[0]; return 0; diff --git a/arch/nds32/lib/copy_page.S b/arch/nds32/lib/copy_page.S index 4a2ff85f17ee..f8701ed161a8 100644 --- a/arch/nds32/lib/copy_page.S +++ b/arch/nds32/lib/copy_page.S @@ -2,6 +2,7 @@ // Copyright (C) 2005-2017 Andes Technology Corporation #include <linux/linkage.h> +#include <asm/export.h> #include <asm/page.h> .text @@ -16,6 +17,7 @@ ENTRY(copy_page) popm $r2, $r10 ret ENDPROC(copy_page) +EXPORT_SYMBOL(copy_page) ENTRY(clear_page) pushm $r1, $r9 @@ -35,3 +37,4 @@ ENTRY(clear_page) popm $r1, $r9 ret ENDPROC(clear_page) +EXPORT_SYMBOL(clear_page) diff --git a/arch/nds32/mm/alignment.c b/arch/nds32/mm/alignment.c index b96a01b10ca7..e1aed9dc692d 100644 --- a/arch/nds32/mm/alignment.c +++ b/arch/nds32/mm/alignment.c @@ -19,7 +19,7 @@ #define RA(inst) (((inst) >> 15) & 0x1FUL) #define RB(inst) (((inst) >> 10) & 0x1FUL) #define SV(inst) (((inst) >> 
8) & 0x3UL) -#define IMM(inst) (((inst) >> 0) & 0x3FFFUL) +#define IMM(inst) (((inst) >> 0) & 0x7FFFUL) #define RA3(inst) (((inst) >> 3) & 0x7UL) #define RT3(inst) (((inst) >> 6) & 0x7UL) @@ -28,6 +28,9 @@ #define RA5(inst) (((inst) >> 0) & 0x1FUL) #define RT4(inst) (((inst) >> 5) & 0xFUL) +#define GET_IMMSVAL(imm_value) \ + (((imm_value >> 14) & 0x1) ? (imm_value - 0x8000) : imm_value) + #define __get8_data(val,addr,err) \ __asm__( \ "1: lbi.bi %1, [%2], #1\n" \ @@ -467,7 +470,7 @@ static inline int do_32(unsigned long inst, struct pt_regs *regs) } if (imm) - shift = IMM(inst) * len; + shift = GET_IMMSVAL(IMM(inst)) * len; else shift = *idx_to_addr(regs, RB(inst)) << SV(inst); @@ -552,7 +555,7 @@ static struct ctl_table alignment_tbl[3] = { static struct ctl_table nds32_sysctl_table[2] = { { - .procname = "unaligned_acess", + .procname = "unaligned_access", .mode = 0555, .child = alignment_tbl}, {} diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c index 6eb786a399a2..ce8fd34497bf 100644 --- a/arch/nds32/mm/cacheflush.c +++ b/arch/nds32/mm/cacheflush.c @@ -147,6 +147,25 @@ void flush_cache_vunmap(unsigned long start, unsigned long end) cpu_icache_inval_all(); } +void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, + struct page *to) +{ + cpu_dcache_wbinval_page((unsigned long)vaddr); + cpu_icache_inval_page((unsigned long)vaddr); + copy_page(vto, vfrom); + cpu_dcache_wbinval_page((unsigned long)vto); + cpu_icache_inval_page((unsigned long)vto); +} + +void clear_user_page(void *addr, unsigned long vaddr, struct page *page) +{ + cpu_dcache_wbinval_page((unsigned long)vaddr); + cpu_icache_inval_page((unsigned long)vaddr); + clear_page(addr); + cpu_dcache_wbinval_page((unsigned long)addr); + cpu_icache_inval_page((unsigned long)addr); +} + void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { @@ -156,11 +175,9 @@ void copy_user_highpage(struct page *to, struct page *from, pto = page_to_phys(to); pfrom = page_to_phys(from); + local_irq_save(flags); if (aliasing(vaddr, (unsigned long)kfrom)) cpu_dcache_wb_page((unsigned long)kfrom); - if (aliasing(vaddr, (unsigned long)kto)) - cpu_dcache_inval_page((unsigned long)kto); - local_irq_save(flags); vto = kremap0(vaddr, pto); vfrom = kremap1(vaddr, pfrom); copy_page((void *)vto, (void *)vfrom); @@ -198,21 +215,25 @@ void flush_dcache_page(struct page *page) if (mapping && !mapping_mapped(mapping)) set_bit(PG_dcache_dirty, &page->flags); else { - int i, pc; - unsigned long vto, kaddr, flags; + unsigned long kaddr, flags; + kaddr = (unsigned long)page_address(page); - cpu_dcache_wbinval_page(kaddr); - pc = CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE) / PAGE_SIZE; local_irq_save(flags); - for (i = 0; i < pc; i++) { - vto = - kremap0(kaddr + i * PAGE_SIZE, page_to_phys(page)); - cpu_dcache_wbinval_page(vto); - kunmap01(vto); + cpu_dcache_wbinval_page(kaddr); + if (mapping) { + unsigned long vaddr, kto; + + vaddr = page->index << PAGE_SHIFT; + if (aliasing(vaddr, kaddr)) { + kto = kremap0(vaddr, page_to_phys(page)); + cpu_dcache_wbinval_page(kto); + kunmap01(kto); + } } local_irq_restore(flags); } } +EXPORT_SYMBOL(flush_dcache_page); void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, void *src, int len) @@ -251,7 +272,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page, void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr) { - unsigned long flags; + unsigned long 
kaddr, flags, ktmp; if (!PageAnon(page)) return; @@ -261,7 +282,12 @@ void flush_anon_page(struct vm_area_struct *vma, local_irq_save(flags); if (vma->vm_flags & VM_EXEC) cpu_icache_inval_page(vaddr & PAGE_MASK); - cpu_dcache_wbinval_page((unsigned long)page_address(page)); + kaddr = (unsigned long)page_address(page); + if (aliasing(vaddr, kaddr)) { + ktmp = kremap0(vaddr, page_to_phys(page)); + cpu_dcache_wbinval_page(ktmp); + kunmap01(ktmp); + } local_irq_restore(flags); } @@ -272,6 +298,25 @@ void flush_kernel_dcache_page(struct page *page) cpu_dcache_wbinval_page((unsigned long)page_address(page)); local_irq_restore(flags); } +EXPORT_SYMBOL(flush_kernel_dcache_page); + +void flush_kernel_vmap_range(void *addr, int size) +{ + unsigned long flags; + local_irq_save(flags); + cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size); + local_irq_restore(flags); +} +EXPORT_SYMBOL(flush_kernel_vmap_range); + +void invalidate_kernel_vmap_range(void *addr, int size) +{ + unsigned long flags; + local_irq_save(flags); + cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size); + local_irq_restore(flags); +} +EXPORT_SYMBOL(invalidate_kernel_vmap_range); void flush_icache_range(unsigned long start, unsigned long end) { @@ -283,6 +328,7 @@ void flush_icache_range(unsigned long start, unsigned long end) cpu_cache_wbinval_range(start, end, 1); local_irq_restore(flags); } +EXPORT_SYMBOL(flush_icache_range); void flush_icache_page(struct vm_area_struct *vma, struct page *page) { diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c index 3a246fb8098c..9bdb7c3ecbb6 100644 --- a/arch/nds32/mm/fault.c +++ b/arch/nds32/mm/fault.c @@ -72,7 +72,7 @@ void do_page_fault(unsigned long entry, unsigned long addr, struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct *vma; - siginfo_t info; + int si_code; int fault; unsigned int mask = VM_READ | VM_WRITE | VM_EXEC; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; @@ -80,7 +80,7 @@ void do_page_fault(unsigned long entry, unsigned long addr, error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE); tsk = current; mm = tsk->mm; - info.si_code = SEGV_MAPERR; + si_code = SEGV_MAPERR; /* * We fault-in kernel-space virtual memory on-demand. The * 'reference' page table is init_mm.pgd. @@ -161,7 +161,7 @@ retry: */ good_area: - info.si_code = SEGV_ACCERR; + si_code = SEGV_ACCERR; /* first do some preliminary protection checks */ if (entry == ENTRY_PTE_NOT_PRESENT) { @@ -266,11 +266,7 @@ bad_area_nosemaphore: tsk->thread.address = addr; tsk->thread.error_code = error_code; tsk->thread.trap_no = entry; - info.si_signo = SIGSEGV; - info.si_errno = 0; - /* info.si_code has been set above */ - info.si_addr = (void *)addr; - force_sig_info(SIGSEGV, &info, tsk); + force_sig_fault(SIGSEGV, si_code, (void __user *)addr, tsk); return; } @@ -339,11 +335,7 @@ do_sigbus: tsk->thread.address = addr; tsk->thread.error_code = error_code; tsk->thread.trap_no = entry; - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRERR; - info.si_addr = (void *)addr; - force_sig_info(SIGBUS, &info, tsk); + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr, tsk); return; diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c index 93ee0160720b..c713d2ad55dc 100644 --- a/arch/nds32/mm/init.c +++ b/arch/nds32/mm/init.c @@ -30,6 +30,7 @@ extern unsigned long phys_initrd_size; * zero-initialized data and COW. 
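The cacheflush.c changes above keep gating the expensive writeback/invalidate work on aliasing(vaddr, kaddr): on a VIPT D-cache whose way size exceeds PAGE_SIZE, two virtual mappings of the same page can land in different cache sets, and only the index bits above the page offset decide that. A generic sketch of such a predicate, assuming a 16 KiB way size rather than quoting the nds32 macro:

    #include <linux/sizes.h>
    #include <linux/types.h>
    #include <asm/page.h>

    #define DEMO_WAY_SIZE     SZ_16K            /* assumed, cache specific */
    #define DEMO_COLOUR_MASK  (DEMO_WAY_SIZE - 1)

    static inline bool demo_vipt_aliases(unsigned long va1, unsigned long va2)
    {
            /* Alias iff the set-index bits above the page offset differ. */
            return ((va1 ^ va2) & DEMO_COLOUR_MASK & PAGE_MASK) != 0;
    }
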
*/ struct page *empty_zero_page; +EXPORT_SYMBOL(empty_zero_page); static void __init zone_sizes_init(void) { diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild index d232da2cbb38..64ed3d656956 100644 --- a/arch/nios2/include/asm/Kbuild +++ b/arch/nios2/include/asm/Kbuild @@ -4,6 +4,7 @@ generic-y += bitops.h generic-y += bug.h generic-y += bugs.h generic-y += cmpxchg.h +generic-y += compat.h generic-y += current.h generic-y += device.h generic-y += div64.h diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c index 8184e7d6b385..3bc3cd22b750 100644 --- a/arch/nios2/kernel/traps.c +++ b/arch/nios2/kernel/traps.c @@ -26,13 +26,7 @@ static DEFINE_SPINLOCK(die_lock); static void _send_sig(int signo, int code, unsigned long addr) { - siginfo_t info; - - info.si_signo = signo; - info.si_errno = 0; - info.si_code = code; - info.si_addr = (void __user *) addr; - force_sig_info(signo, &info, current); + force_sig_fault(signo, code, (void __user *) addr, current); } void die(const char *str, struct pt_regs *regs, long err) diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile index cf8802962864..89076a66eee2 100644 --- a/arch/openrisc/Makefile +++ b/arch/openrisc/Makefile @@ -25,7 +25,6 @@ LDFLAGS_vmlinux := LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__ -CHECKFLAGS += -mbig-endian ifeq ($(CONFIG_OPENRISC_HAVE_INST_MUL),y) KBUILD_CFLAGS += $(call cc-option,-mhard-mul) diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index f05c722a21f8..65964d390b10 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild @@ -2,6 +2,7 @@ generic-y += barrier.h generic-y += bug.h generic-y += bugs.h generic-y += checksum.h +generic-y += compat.h generic-y += current.h generic-y += device.h generic-y += div64.h diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c index a945f00011b4..ec7fd45704d2 100644 --- a/arch/openrisc/kernel/dma.c +++ b/arch/openrisc/kernel/dma.c @@ -247,14 +247,3 @@ const struct dma_map_ops or1k_dma_map_ops = { .sync_single_for_device = or1k_sync_single_for_device, }; EXPORT_SYMBOL(or1k_dma_map_ops); - -/* Number of entries preallocated for DMA-API debugging */ -#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) - -static int __init dma_init(void) -{ - dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); - - return 0; -} -fs_initcall(dma_init); diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index 113c175fe469..fac246e6f37a 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -250,27 +250,16 @@ void __init trap_init(void) asmlinkage void do_trap(struct pt_regs *regs, unsigned long address) { - siginfo_t info; - memset(&info, 0, sizeof(info)); - info.si_signo = SIGTRAP; - info.si_code = TRAP_TRACE; - info.si_addr = (void *)address; - force_sig_info(SIGTRAP, &info, current); + force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)address, current); regs->pc += 4; } asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address) { - siginfo_t info; - if (user_mode(regs)) { /* Send a SIGBUS */ - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRALN; - info.si_addr = (void __user *)address; - force_sig_info(SIGBUS, &info, current); + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)address, current); } else { printk("KERNEL: Unaligned Access 0x%.8lx\n", address); show_registers(regs); @@ -281,15 +270,9 @@ asmlinkage void do_unaligned_access(struct 
pt_regs *regs, unsigned long address) asmlinkage void do_bus_fault(struct pt_regs *regs, unsigned long address) { - siginfo_t info; - if (user_mode(regs)) { /* Send a SIGBUS */ - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRERR; - info.si_addr = (void *)address; - force_sig_info(SIGBUS, &info, current); + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, current); } else { /* Kernel mode */ printk("KERNEL: Bus error (SIGBUS) 0x%.8lx\n", address); show_registers(regs); @@ -464,7 +447,6 @@ static inline void simulate_swa(struct pt_regs *regs, unsigned long address, asmlinkage void do_illegal_instruction(struct pt_regs *regs, unsigned long address) { - siginfo_t info; unsigned int op; unsigned int insn = *((unsigned int *)address); @@ -485,11 +467,7 @@ asmlinkage void do_illegal_instruction(struct pt_regs *regs, if (user_mode(regs)) { /* Send a SIGILL */ - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_ILLOPC; - info.si_addr = (void *)address; - force_sig_info(SIGBUS, &info, current); + force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)address, current); } else { /* Kernel mode */ printk("KERNEL: Illegal instruction (SIGILL) 0x%.8lx\n", address); diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index d0021dfae20a..9f011d16cc46 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c @@ -52,7 +52,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address, struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct *vma; - siginfo_t info; + int si_code; int fault; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; @@ -97,7 +97,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address, } mm = tsk->mm; - info.si_code = SEGV_MAPERR; + si_code = SEGV_MAPERR; /* * If we're in an interrupt or have no user @@ -139,7 +139,7 @@ retry: */ good_area: - info.si_code = SEGV_ACCERR; + si_code = SEGV_ACCERR; /* first do some preliminary protection checks */ @@ -213,11 +213,7 @@ bad_area_nosemaphore: /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) { - info.si_signo = SIGSEGV; - info.si_errno = 0; - /* info.si_code has been set above */ - info.si_addr = (void *)address; - force_sig_info(SIGSEGV, &info, tsk); + force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk); return; } @@ -282,11 +278,7 @@ do_sigbus: * Send a sigbus, regardless of whether we were in kernel * or user mode. */ - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRERR; - info.si_addr = (void *)address; - force_sig_info(SIGBUS, &info, tsk); + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk); /* Kernel mode? 
Handle exceptions or die */ if (!user_mode(regs)) diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index fc5a574c3482..4d8f64d48597 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -51,6 +51,8 @@ config PARISC select GENERIC_CLOCKEVENTS select ARCH_NO_COHERENT_DMA_MMAP select CPU_NO_EFFICIENT_FFS + select NEED_DMA_MAP_STATE + select NEED_SG_DMA_LENGTH help The PA-RISC microprocessor is designed by Hewlett-Packard and used @@ -111,12 +113,6 @@ config PM config STACKTRACE_SUPPORT def_bool y -config NEED_DMA_MAP_STATE - def_bool y - -config NEED_SG_DMA_LENGTH - def_bool y - config ISA_DMA_API bool diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 34ac503e28ad..714284ea6cc2 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -22,13 +22,13 @@ KBUILD_IMAGE := vmlinuz KBUILD_DEFCONFIG := default_defconfig NM = sh $(srctree)/arch/parisc/nm -CHECKFLAGS += -D__hppa__=1 -mbig-endian +CHECKFLAGS += -D__hppa__=1 LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) export LIBGCC ifdef CONFIG_64BIT UTS_MACHINE := parisc64 -CHECKFLAGS += -D__LP64__=1 -m64 +CHECKFLAGS += -D__LP64__=1 CC_ARCHES = hppa64 LD_BFD := elf64-hppa-linux else # 32-bit diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h index 57b8b2a2fd4e..ab8a54771507 100644 --- a/arch/parisc/include/asm/compat.h +++ b/arch/parisc/include/asm/compat.h @@ -13,7 +13,6 @@ typedef u32 compat_size_t; typedef s32 compat_ssize_t; -typedef s32 compat_time_t; typedef s32 compat_clock_t; typedef s32 compat_pid_t; typedef u32 __compat_uid_t; @@ -40,16 +39,6 @@ typedef u32 compat_ulong_t; typedef u64 compat_u64; typedef u32 compat_uptr_t; -struct compat_timespec { - compat_time_t tv_sec; - s32 tv_nsec; -}; - -struct compat_timeval { - compat_time_t tv_sec; - s32 tv_usec; -}; - struct compat_stat { compat_dev_t st_dev; /* dev_t is 32 bits on parisc */ compat_ino_t st_ino; /* 32 bits */ @@ -149,10 +138,10 @@ struct compat_ipc64_perm { struct compat_semid64_ds { struct compat_ipc64_perm sem_perm; - unsigned int __unused1; - compat_time_t sem_otime; - unsigned int __unused2; - compat_time_t sem_ctime; + unsigned int sem_otime_high; + unsigned int sem_otime; + unsigned int sem_ctime_high; + unsigned int sem_ctime; compat_ulong_t sem_nsems; compat_ulong_t __unused3; compat_ulong_t __unused4; @@ -160,12 +149,12 @@ struct compat_semid64_ds { struct compat_msqid64_ds { struct compat_ipc64_perm msg_perm; - unsigned int __unused1; - compat_time_t msg_stime; - unsigned int __unused2; - compat_time_t msg_rtime; - unsigned int __unused3; - compat_time_t msg_ctime; + unsigned int msg_stime_high; + unsigned int msg_stime; + unsigned int msg_rtime_high; + unsigned int msg_rtime; + unsigned int msg_ctime_high; + unsigned int msg_ctime; compat_ulong_t msg_cbytes; compat_ulong_t msg_qnum; compat_ulong_t msg_qbytes; @@ -177,12 +166,12 @@ struct compat_msqid64_ds { struct compat_shmid64_ds { struct compat_ipc64_perm shm_perm; - unsigned int __unused1; - compat_time_t shm_atime; - unsigned int __unused2; - compat_time_t shm_dtime; - unsigned int __unused3; - compat_time_t shm_ctime; + unsigned int shm_atime_high; + unsigned int shm_atime; + unsigned int shm_dtime_high; + unsigned int shm_dtime; + unsigned int shm_ctime_high; + unsigned int shm_ctime; unsigned int __unused4; compat_size_t shm_segsz; compat_pid_t shm_cpid; diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h index 077815169258..1a1235a9d533 100644 --- a/arch/parisc/include/asm/hardirq.h +++ 
b/arch/parisc/include/asm/hardirq.h @@ -34,14 +34,6 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member) #define inc_irq_stat(member) this_cpu_inc(irq_stat.member) #define __inc_irq_stat(member) __this_cpu_inc(irq_stat.member) -#define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending) - -#define __ARCH_SET_SOFTIRQ_PENDING - -#define set_softirq_pending(x) \ - this_cpu_write(irq_stat.__softirq_pending, (x)) -#define or_softirq_pending(x) this_cpu_or(irq_stat.__softirq_pending, (x)) - #define ack_bad_irq(irq) WARN(1, "unexpected IRQ trap at vector %02x\n", irq) #endif /* _PARISC_HARDIRQ_H */ diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h index 96b7deec512d..3328fd17c19d 100644 --- a/arch/parisc/include/asm/pci.h +++ b/arch/parisc/include/asm/pci.h @@ -88,29 +88,6 @@ struct pci_hba_data { #endif /* !CONFIG_64BIT */ /* - * If the PCI device's view of memory is the same as the CPU's view of memory, - * PCI_DMA_BUS_IS_PHYS is true. The networking and block device layers use - * this boolean for bounce buffer decisions. - */ -#ifdef CONFIG_PA20 -/* All PA-2.0 machines have an IOMMU. */ -#define PCI_DMA_BUS_IS_PHYS 0 -#define parisc_has_iommu() do { } while (0) -#else - -#if defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA) -extern int parisc_bus_is_phys; /* in arch/parisc/kernel/setup.c */ -#define PCI_DMA_BUS_IS_PHYS parisc_bus_is_phys -#define parisc_has_iommu() do { parisc_bus_is_phys = 0; } while (0) -#else -#define PCI_DMA_BUS_IS_PHYS 1 -#define parisc_has_iommu() do { } while (0) -#endif - -#endif /* !CONFIG_PA20 */ - - -/* ** Most PCI devices (eg Tulip, NCR720) also export the same registers ** to both MMIO and I/O port space. Due to poor performance of I/O Port ** access under HP PCI bus adapters, strongly recommend the use of MMIO diff --git a/arch/parisc/include/uapi/asm/msgbuf.h b/arch/parisc/include/uapi/asm/msgbuf.h index b48b810e626b..6a2e9ab2ef8d 100644 --- a/arch/parisc/include/uapi/asm/msgbuf.h +++ b/arch/parisc/include/uapi/asm/msgbuf.h @@ -10,31 +10,30 @@ * between kernel and user space. 
* * Pad space is left for: - * - 64-bit time_t to solve y2038 problem * - 2 miscellaneous 32-bit values */ struct msqid64_ds { struct ipc64_perm msg_perm; -#if __BITS_PER_LONG != 64 - unsigned int __pad1; -#endif +#if __BITS_PER_LONG == 64 __kernel_time_t msg_stime; /* last msgsnd time */ -#if __BITS_PER_LONG != 64 - unsigned int __pad2; -#endif __kernel_time_t msg_rtime; /* last msgrcv time */ -#if __BITS_PER_LONG != 64 - unsigned int __pad3; -#endif __kernel_time_t msg_ctime; /* last change time */ - unsigned long msg_cbytes; /* current number of bytes on queue */ - unsigned long msg_qnum; /* number of messages in queue */ - unsigned long msg_qbytes; /* max number of bytes on queue */ - __kernel_pid_t msg_lspid; /* pid of last msgsnd */ - __kernel_pid_t msg_lrpid; /* last receive pid */ - unsigned long __unused1; - unsigned long __unused2; +#else + unsigned long msg_stime_high; + unsigned long msg_stime; /* last msgsnd time */ + unsigned long msg_rtime_high; + unsigned long msg_rtime; /* last msgrcv time */ + unsigned long msg_ctime_high; + unsigned long msg_ctime; /* last change time */ +#endif + unsigned long msg_cbytes; /* current number of bytes on queue */ + unsigned long msg_qnum; /* number of messages in queue */ + unsigned long msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + unsigned long __unused1; + unsigned long __unused2; }; #endif /* _PARISC_MSGBUF_H */ diff --git a/arch/parisc/include/uapi/asm/sembuf.h b/arch/parisc/include/uapi/asm/sembuf.h index 746c5d86a9b1..3c31163b1241 100644 --- a/arch/parisc/include/uapi/asm/sembuf.h +++ b/arch/parisc/include/uapi/asm/sembuf.h @@ -10,21 +10,21 @@ * between kernel and user space. * * Pad space is left for: - * - 64-bit time_t to solve y2038 problem * - 2 miscellaneous 32-bit values */ struct semid64_ds { struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ -#if __BITS_PER_LONG != 64 - unsigned int __pad1; -#endif +#if __BITS_PER_LONG == 64 __kernel_time_t sem_otime; /* last semop time */ -#if __BITS_PER_LONG != 64 - unsigned int __pad2; -#endif __kernel_time_t sem_ctime; /* last change time */ - unsigned long sem_nsems; /* no. of semaphores in array */ +#else + unsigned long sem_otime_high; + unsigned long sem_otime; /* last semop time */ + unsigned long sem_ctime_high; + unsigned long sem_ctime; /* last change time */ +#endif + unsigned long sem_nsems; /* no. of semaphores in array */ unsigned long __unused1; unsigned long __unused2; }; diff --git a/arch/parisc/include/uapi/asm/shmbuf.h b/arch/parisc/include/uapi/asm/shmbuf.h index cd4dbce55d0b..c89b3dd8db21 100644 --- a/arch/parisc/include/uapi/asm/shmbuf.h +++ b/arch/parisc/include/uapi/asm/shmbuf.h @@ -10,25 +10,22 @@ * between kernel and user space. 
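The parisc msgbuf/sembuf/shmbuf changes here replace the old padding words with explicit *_high words so a 32-bit ABI can carry 64-bit timestamps past 2038, while the low 32 bits keep the old field name for binary compatibility. A sketch of how such a split value is reassembled; the struct and function names are illustrative, not from the patch:

#include <stdint.h>

struct split_time {
        uint32_t high;  /* bits 63..32 of the 64-bit timestamp */
        uint32_t low;   /* bits 31..0, living in the old field's slot */
};

static int64_t join_time(const struct split_time *t)
{
        return ((int64_t)t->high << 32) | t->low;
}
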
* * Pad space is left for: - * - 64-bit time_t to solve y2038 problem * - 2 miscellaneous 32-bit values */ struct shmid64_ds { struct ipc64_perm shm_perm; /* operation perms */ -#if __BITS_PER_LONG != 64 - unsigned int __pad1; -#endif +#if __BITS_PER_LONG == 64 __kernel_time_t shm_atime; /* last attach time */ -#if __BITS_PER_LONG != 64 - unsigned int __pad2; -#endif __kernel_time_t shm_dtime; /* last detach time */ -#if __BITS_PER_LONG != 64 - unsigned int __pad3; -#endif __kernel_time_t shm_ctime; /* last change time */ -#if __BITS_PER_LONG != 64 +#else + unsigned long shm_atime_high; + unsigned long shm_atime; /* last attach time */ + unsigned long shm_dtime_high; + unsigned long shm_dtime; /* last detach time */ + unsigned long shm_ctime_high; + unsigned long shm_ctime; /* last change time */ unsigned int __pad4; #endif __kernel_size_t shm_segsz; /* size of segment (bytes) */ diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index 91bc0cac03a1..6df07ce4f3c2 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -367,19 +367,6 @@ static int proc_pcxl_dma_show(struct seq_file *m, void *v) return 0; } -static int proc_pcxl_dma_open(struct inode *inode, struct file *file) -{ - return single_open(file, proc_pcxl_dma_show, NULL); -} - -static const struct file_operations proc_pcxl_dma_ops = { - .owner = THIS_MODULE, - .open = proc_pcxl_dma_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static int __init pcxl_dma_init(void) { @@ -397,8 +384,8 @@ pcxl_dma_init(void) "pcxl_dma_init: Unable to create gsc /proc dir entry\n"); else { struct proc_dir_entry* ent; - ent = proc_create("pcxl_dma", 0, proc_gsc_root, - &proc_pcxl_dma_ops); + ent = proc_create_single("pcxl_dma", 0, proc_gsc_root, + proc_pcxl_dma_show); if (!ent) printk(KERN_WARNING "pci-dma.c: Unable to create pcxl_dma /proc entry.\n"); diff --git a/arch/parisc/kernel/pdc_chassis.c b/arch/parisc/kernel/pdc_chassis.c index 3e04242de5a7..28e07482b0f1 100644 --- a/arch/parisc/kernel/pdc_chassis.c +++ b/arch/parisc/kernel/pdc_chassis.c @@ -266,18 +266,6 @@ static int pdc_chassis_warn_show(struct seq_file *m, void *v) return 0; } -static int pdc_chassis_warn_open(struct inode *inode, struct file *file) -{ - return single_open(file, pdc_chassis_warn_show, NULL); -} - -static const struct file_operations pdc_chassis_warn_fops = { - .open = pdc_chassis_warn_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static int __init pdc_chassis_create_procfs(void) { unsigned long test; @@ -292,7 +280,7 @@ static int __init pdc_chassis_create_procfs(void) printk(KERN_INFO "Enabling PDC chassis warnings support v%s\n", PDC_CHASSIS_VER); - proc_create("chassis", 0400, NULL, &pdc_chassis_warn_fops); + proc_create_single("chassis", 0400, NULL, pdc_chassis_warn_show); return 0; } diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 1a2be6e639b5..7aa1d4d0d444 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -76,8 +76,6 @@ void user_enable_single_step(struct task_struct *task) set_tsk_thread_flag(task, TIF_SINGLESTEP); if (pa_psw(task)->n) { - struct siginfo si; - /* Nullified, just crank over the queue. */ task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1]; task_regs(task)->iasq[0] = task_regs(task)->iasq[1]; @@ -90,11 +88,9 @@ void user_enable_single_step(struct task_struct *task) ptrace_disable(task); /* Don't wake up the task, but let the parent know something happened. 
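The proc_create_single() conversions in the pci-dma.c and pdc_chassis.c hunks above pass a seq_file show callback directly and let procfs supply the open/read/llseek/release plumbing itself. A hedged sketch of the calling convention; the "demo" entry is made up:

#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
        seq_puts(m, "hello\n");
        return 0;
}

/* One call replaces the former _open() wrapper and the whole
 * file_operations table. */
static int __init demo_init(void)
{
        return proc_create_single("demo", 0444, NULL, demo_show)
                ? 0 : -ENOMEM;
}
fs_initcall(demo_init);
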
*/ - si.si_code = TRAP_TRACE; - si.si_addr = (void __user *) (task_regs(task)->iaoq[0] & ~3); - si.si_signo = SIGTRAP; - si.si_errno = 0; - force_sig_info(SIGTRAP, &si, task); + force_sig_fault(SIGTRAP, TRAP_TRACE, + (void __user *) (task_regs(task)->iaoq[0] & ~3), + task); /* notify_parent(task, SIGCHLD); */ return; } diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 0e9675f857a5..8d3a7b80ac42 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -58,11 +58,6 @@ struct proc_dir_entry * proc_runway_root __read_mostly = NULL; struct proc_dir_entry * proc_gsc_root __read_mostly = NULL; struct proc_dir_entry * proc_mckinley_root __read_mostly = NULL; -#if !defined(CONFIG_PA20) && (defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA)) -int parisc_bus_is_phys __read_mostly = 1; /* Assume no IOMMU is present */ -EXPORT_SYMBOL(parisc_bus_is_phys); -#endif - void __init setup_cmdline(char **cmdline_p) { extern unsigned int boot_args[]; diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 71d31274d782..4309ad31a874 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -297,13 +297,8 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err) #define GDB_BREAK_INSN 0x10004 static void handle_gdb_break(struct pt_regs *regs, int wot) { - struct siginfo si; - - si.si_signo = SIGTRAP; - si.si_errno = 0; - si.si_code = wot; - si.si_addr = (void __user *) (regs->iaoq[0] & ~3); - force_sig_info(SIGTRAP, &si, current); + force_sig_fault(SIGTRAP, wot, + (void __user *) (regs->iaoq[0] & ~3), current); } static void handle_break(struct pt_regs *regs) @@ -487,7 +482,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) { unsigned long fault_address = 0; unsigned long fault_space = 0; - struct siginfo si; + int si_code; if (code == 1) pdc_console_restart(); /* switch back to pdc if HPMC */ @@ -571,7 +566,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) case 8: /* Illegal instruction trap */ die_if_kernel("Illegal instruction", regs, code); - si.si_code = ILL_ILLOPC; + si_code = ILL_ILLOPC; goto give_sigill; case 9: @@ -582,7 +577,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) case 10: /* Privileged operation trap */ die_if_kernel("Privileged operation", regs, code); - si.si_code = ILL_PRVOPC; + si_code = ILL_PRVOPC; goto give_sigill; case 11: @@ -605,20 +600,16 @@ void notrace handle_interruption(int code, struct pt_regs *regs) } die_if_kernel("Privileged register usage", regs, code); - si.si_code = ILL_PRVREG; + si_code = ILL_PRVREG; give_sigill: - si.si_signo = SIGILL; - si.si_errno = 0; - si.si_addr = (void __user *) regs->iaoq[0]; - force_sig_info(SIGILL, &si, current); + force_sig_fault(SIGILL, si_code, + (void __user *) regs->iaoq[0], current); return; case 12: /* Overflow Trap, let the userland signal handler do the cleanup */ - si.si_signo = SIGFPE; - si.si_code = FPE_INTOVF; - si.si_addr = (void __user *) regs->iaoq[0]; - force_sig_info(SIGFPE, &si, current); + force_sig_fault(SIGFPE, FPE_INTOVF, + (void __user *) regs->iaoq[0], current); return; case 13: @@ -626,13 +617,11 @@ void notrace handle_interruption(int code, struct pt_regs *regs) The condition succeeds in an instruction which traps on condition */ if(user_mode(regs)){ - si.si_signo = SIGFPE; /* Let userspace app figure it out from the insn pointed * to by si_addr. 
*/ - si.si_code = FPE_CONDTRAP; - si.si_addr = (void __user *) regs->iaoq[0]; - force_sig_info(SIGFPE, &si, current); + force_sig_fault(SIGFPE, FPE_CONDTRAP, + (void __user *) regs->iaoq[0], current); return; } /* The kernel doesn't want to handle condition codes */ @@ -741,14 +730,10 @@ void notrace handle_interruption(int code, struct pt_regs *regs) return; die_if_kernel("Protection id trap", regs, code); - si.si_code = SEGV_MAPERR; - si.si_signo = SIGSEGV; - si.si_errno = 0; - if (code == 7) - si.si_addr = (void __user *) regs->iaoq[0]; - else - si.si_addr = (void __user *) regs->ior; - force_sig_info(SIGSEGV, &si, current); + force_sig_fault(SIGSEGV, SEGV_MAPERR, + (code == 7)? + ((void __user *) regs->iaoq[0]) : + ((void __user *) regs->ior), current); return; case 28: @@ -762,11 +747,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs) "handle_interruption() pid=%d command='%s'\n", task_pid_nr(current), current->comm); /* SIGBUS, for lack of a better one. */ - si.si_signo = SIGBUS; - si.si_code = BUS_OBJERR; - si.si_errno = 0; - si.si_addr = (void __user *) regs->ior; - force_sig_info(SIGBUS, &si, current); + force_sig_fault(SIGBUS, BUS_OBJERR, + (void __user *)regs->ior, current); return; } pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); @@ -781,11 +763,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs) "User fault %d on space 0x%08lx, pid=%d command='%s'\n", code, fault_space, task_pid_nr(current), current->comm); - si.si_signo = SIGSEGV; - si.si_errno = 0; - si.si_code = SEGV_MAPERR; - si.si_addr = (void __user *) regs->ior; - force_sig_info(SIGSEGV, &si, current); + force_sig_fault(SIGSEGV, SEGV_MAPERR, + (void __user *)regs->ior, current); return; } } diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index e36f7b75ab07..932bfc0b7cd8 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -452,7 +452,6 @@ void handle_unaligned(struct pt_regs *regs) unsigned long newbase = R1(regs->iir)?regs->gr[R1(regs->iir)]:0; int modify = 0; int ret = ERR_NOTHANDLED; - struct siginfo si; register int flop=0; /* true if this is a flop */ __inc_irq_stat(irq_unaligned_count); @@ -690,21 +689,15 @@ void handle_unaligned(struct pt_regs *regs) if (ret == ERR_PAGEFAULT) { - si.si_signo = SIGSEGV; - si.si_errno = 0; - si.si_code = SEGV_MAPERR; - si.si_addr = (void __user *)regs->ior; - force_sig_info(SIGSEGV, &si, current); + force_sig_fault(SIGSEGV, SEGV_MAPERR, + (void __user *)regs->ior, current); } else { force_sigbus: /* couldn't handle it ... 
*/ - si.si_signo = SIGBUS; - si.si_errno = 0; - si.si_code = BUS_ADRALN; - si.si_addr = (void __user *)regs->ior; - force_sig_info(SIGBUS, &si, current); + force_sig_fault(SIGBUS, BUS_ADRALN, + (void __user *)regs->ior, current); } return; diff --git a/arch/parisc/math-emu/driver.c b/arch/parisc/math-emu/driver.c index 2fb59d2e2b29..0590e05571d1 100644 --- a/arch/parisc/math-emu/driver.c +++ b/arch/parisc/math-emu/driver.c @@ -81,7 +81,6 @@ int handle_fpe(struct pt_regs *regs) { extern void printbinary(unsigned long x, int nbits); - struct siginfo si; unsigned int orig_sw, sw; int signalcode; /* need an intermediate copy of float regs because FPU emulation @@ -117,11 +116,8 @@ handle_fpe(struct pt_regs *regs) memcpy(regs->fr, frcopy, sizeof regs->fr); if (signalcode != 0) { - si.si_signo = signalcode >> 24; - si.si_errno = 0; - si.si_code = signalcode & 0xffffff; - si.si_addr = (void __user *) regs->iaoq[0]; - force_sig_info(si.si_signo, &si, current); + force_sig_fault(signalcode >> 24, signalcode & 0xffffff, + (void __user *) regs->iaoq[0], current); return -1; } diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index e247edbca68e..a80117980fc2 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -353,23 +353,22 @@ bad_area: up_read(&mm->mmap_sem); if (user_mode(regs)) { - struct siginfo si; - unsigned int lsb = 0; + int signo, si_code; switch (code) { case 15: /* Data TLB miss fault/Data page fault */ /* send SIGSEGV when outside of vma */ if (!vma || address < vma->vm_start || address >= vma->vm_end) { - si.si_signo = SIGSEGV; - si.si_code = SEGV_MAPERR; + signo = SIGSEGV; + si_code = SEGV_MAPERR; break; } /* send SIGSEGV for wrong permissions */ if ((vma->vm_flags & acc_type) != acc_type) { - si.si_signo = SIGSEGV; - si.si_code = SEGV_ACCERR; + signo = SIGSEGV; + si_code = SEGV_ACCERR; break; } @@ -377,43 +376,40 @@ bad_area: /* fall through */ case 17: /* NA data TLB miss / page fault */ case 18: /* Unaligned access - PCXS only */ - si.si_signo = SIGBUS; - si.si_code = (code == 18) ? BUS_ADRALN : BUS_ADRERR; + signo = SIGBUS; + si_code = (code == 18) ? BUS_ADRALN : BUS_ADRERR; break; case 16: /* Non-access instruction TLB miss fault */ case 26: /* PCXL: Data memory access rights trap */ default: - si.si_signo = SIGSEGV; - si.si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR; + signo = SIGSEGV; + si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR; break; } - #ifdef CONFIG_MEMORY_FAILURE if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { + unsigned int lsb = 0; printk(KERN_ERR "MCE: Killing %s:%d due to hardware memory corruption fault at %08lx\n", tsk->comm, tsk->pid, address); - si.si_signo = SIGBUS; - si.si_code = BUS_MCEERR_AR; + /* + * Either small page or large page may be poisoned. + * In other words, VM_FAULT_HWPOISON_LARGE and + * VM_FAULT_HWPOISON are mutually exclusive. + */ + if (fault & VM_FAULT_HWPOISON_LARGE) + lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); + else if (fault & VM_FAULT_HWPOISON) + lsb = PAGE_SHIFT; + + force_sig_mceerr(BUS_MCEERR_AR, (void __user *) address, + lsb, current); + return; } #endif + show_signal_msg(regs, code, address, tsk, vma); - /* - * Either small page or large page may be poisoned. - * In other words, VM_FAULT_HWPOISON_LARGE and - * VM_FAULT_HWPOISON are mutually exclusive. 
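In the math-emu/driver.c hunk above, the FPU emulator hands back a single int with the signal number packed into the top byte and the si_code in the low 24 bits, which the new force_sig_fault() call unpacks inline. Illustrative helpers for that encoding; these macro names are mine, not the kernel's:

#define PACK_SIGCODE(sig, code) (((sig) << 24) | ((code) & 0xffffff))
#define SIGCODE_SIG(sc)         ((sc) >> 24)
#define SIGCODE_CODE(sc)        ((sc) & 0xffffff)

/* PACK_SIGCODE(SIGFPE, FPE_FLTDIV) round-trips:
 * SIGCODE_SIG() yields SIGFPE, SIGCODE_CODE() yields FPE_FLTDIV. */
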
- */ - if (fault & VM_FAULT_HWPOISON_LARGE) - lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); - else if (fault & VM_FAULT_HWPOISON) - lsb = PAGE_SHIFT; - else - show_signal_msg(regs, code, address, tsk, vma); - si.si_addr_lsb = lsb; - - si.si_errno = 0; - si.si_addr = (void __user *) address; - force_sig_info(si.si_signo, &si, current); + force_sig_fault(signo, si_code, (void __user *) address, current); return; } diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index c32a181a7cbb..8f959df2de7a 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -13,12 +13,6 @@ config 64BIT bool default y if PPC64 -config ARCH_PHYS_ADDR_T_64BIT - def_bool PPC64 || PHYS_64BIT - -config ARCH_DMA_ADDR_T_64BIT - def_bool ARCH_PHYS_ADDR_T_64BIT - config MMU bool default y @@ -141,6 +135,7 @@ config PPC select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_PHYS_TO_DMA select ARCH_HAS_PMEM_API if PPC64 + select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_MEMBARRIER_CALLBACKS select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE select ARCH_HAS_SG_CHAIN @@ -162,6 +157,7 @@ config PPC select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN + select DYNAMIC_FTRACE if FUNCTION_TRACER select EDAC_ATOMIC_SCRUB select EDAC_SUPPORT select GENERIC_ATOMIC64 if PPC32 @@ -187,7 +183,6 @@ config PPC select HAVE_CONTEXT_TRACKING if PPC64 select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_STACKOVERFLOW - select HAVE_DMA_API_DEBUG select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL select HAVE_EBPF_JIT if PPC64 @@ -205,6 +200,7 @@ config PPC select HAVE_KPROBES select HAVE_KPROBES_ON_FTRACE select HAVE_KRETPROBES + select HAVE_LD_DEAD_CODE_DATA_ELIMINATION select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP @@ -220,18 +216,22 @@ config PPC select HAVE_PERF_USER_STACK_DUMP select HAVE_RCU_TABLE_FREE if SMP select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RELIABLE_STACKTRACE if PPC64 && CPU_LITTLE_ENDIAN select HAVE_SYSCALL_TRACEPOINTS select HAVE_VIRT_CPU_ACCOUNTING select HAVE_IRQ_TIME_ACCOUNTING + select IOMMU_HELPER if PPC64 select IRQ_DOMAIN select IRQ_FORCED_THREADING select MODULES_USE_ELF_RELA + select NEED_SG_DMA_LENGTH select NO_BOOTMEM select OF select OF_EARLY_FLATTREE select OF_RESERVED_MEM select OLD_SIGACTION if PPC32 select OLD_SIGSUSPEND + select RTC_LIB select SPARSE_IRQ select SYSCTL_EXCEPTION_TRACE select VIRT_TO_BUS if !PPC64 @@ -478,19 +478,6 @@ config MPROFILE_KERNEL depends on PPC64 && CPU_LITTLE_ENDIAN def_bool !DISABLE_MPROFILE_KERNEL -config IOMMU_HELPER - def_bool PPC64 - -config SWIOTLB - bool "SWIOTLB support" - default n - select IOMMU_HELPER - ---help--- - Support for IO bounce buffering for systems without an IOMMU. - This allows us to DMA to the full physical address space on - platforms where the size of a physical address is larger - than the bus address. Not all platforms support this. - config HOTPLUG_CPU bool "Support for enabling/disabling CPUs" depends on SMP && (PPC_PSERIES || \ @@ -883,7 +870,7 @@ config PPC_MEM_KEYS page-based protections, but without requiring modification of the page tables when an application changes protection domains. - For details, see Documentation/vm/protection-keys.txt + For details, see Documentation/vm/protection-keys.rst If unsure, say y. 
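As the PPC_MEM_KEYS help text above describes, protection keys let an application change a whole protection domain's access rights without touching the page tables again. A hedged userspace sketch of that model, assuming the glibc pkey_*() wrappers are available; none of this code is part of the patch:

#define _GNU_SOURCE
#include <sys/mman.h>

/* Tag a buffer with a key once; afterwards, write access for every
 * page carrying that key can be revoked in one register update,
 * with no further mprotect() calls. */
int deny_writes(void *buf, size_t len)
{
        int pkey = pkey_alloc(0, 0);

        if (pkey < 0)
                return -1;
        if (pkey_mprotect(buf, len, PROT_READ | PROT_WRITE, pkey) < 0)
                return -1;
        return pkey_set(pkey, PKEY_DISABLE_WRITE);
}
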
@@ -913,9 +900,6 @@ config ZONE_DMA config NEED_DMA_MAP_STATE def_bool (PPC64 || NOT_COHERENT_CACHE) -config NEED_SG_DMA_LENGTH - def_bool y - config GENERIC_ISA_DMA bool depends on ISA_DMA_API diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 95813df90801..9b52e42e581b 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -17,17 +17,18 @@ HAS_BIARCH := $(call cc-option-yn, -m32) # Set default 32 bits cross compilers for vdso and boot wrapper CROSS32_COMPILE ?= -CROSS32CC := $(CROSS32_COMPILE)gcc -CROSS32AR := $(CROSS32_COMPILE)ar - ifeq ($(HAS_BIARCH),y) ifeq ($(CROSS32_COMPILE),) -CROSS32CC := $(CC) -m32 -KBUILD_ARFLAGS += --target=elf32-powerpc +ifdef CONFIG_PPC32 +# These options will be overridden by any -mcpu option that the CPU +# or platform code sets later on the command line, but they are needed +# to set a sane 32-bit cpu target for the 64-bit cross compiler which +# may default to the wrong ISA. +KBUILD_CFLAGS += -mcpu=powerpc +KBUILD_AFLAGS += -mcpu=powerpc +endif endif endif - -export CROSS32CC CROSS32AR ifeq ($(CROSS_COMPILE),) KBUILD_DEFCONFIG := $(shell uname -m)_defconfig @@ -74,13 +75,15 @@ endif endif ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) -override LD += -EL +KBUILD_CFLAGS += -mlittle-endian +LDFLAGS += -EL LDEMULATION := lppc GNUTARGET := powerpcle MULTIPLEWORD := -mno-multiple KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-save-toc-indirect) else -override LD += -EB +KBUILD_CFLAGS += $(call cc-option,-mbig-endian) +LDFLAGS += -EB LDEMULATION := ppc GNUTARGET := powerpc MULTIPLEWORD := -mmultiple @@ -93,19 +96,19 @@ aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1) aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2 endif -cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian -cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) ifneq ($(cc-name),clang) cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align endif +cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) +cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian ifeq ($(HAS_BIARCH),y) -override AS += -a$(BITS) -override LD += -m elf$(BITS)$(LDEMULATION) -override CC += -m$(BITS) +KBUILD_CFLAGS += -m$(BITS) +KBUILD_AFLAGS += -m$(BITS) -Wl,-a$(BITS) +LDFLAGS += -m elf$(BITS)$(LDEMULATION) KBUILD_ARFLAGS += --target=elf$(BITS)-$(GNUTARGET) endif @@ -178,6 +181,7 @@ CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6) CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7) CFLAGS-$(CONFIG_POWER8_CPU) += $(call cc-option,-mcpu=power8) CFLAGS-$(CONFIG_POWER9_CPU) += $(call cc-option,-mcpu=power9) +CFLAGS-$(CONFIG_PPC_8xx) += $(call cc-option,-mcpu=860) # Altivec option not allowed with e500mc64 in GCC. 
ifeq ($(CONFIG_ALTIVEC),y) diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 26d5d2a5b8e9..deea20c334df 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -23,19 +23,23 @@ all: $(obj)/zImage compress-$(CONFIG_KERNEL_GZIP) := CONFIG_KERNEL_GZIP compress-$(CONFIG_KERNEL_XZ) := CONFIG_KERNEL_XZ +ifdef CROSS32_COMPILE + BOOTCC := $(CROSS32_COMPILE)gcc + BOOTAR := $(CROSS32_COMPILE)ar +else + BOOTCC := $(CC) + BOOTAR := $(AR) +endif + BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -Os -msoft-float -pipe \ -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \ -D$(compress-y) -BOOTCC := $(CC) ifdef CONFIG_PPC64_BOOT_WRAPPER BOOTCFLAGS += -m64 else BOOTCFLAGS += -m32 -ifdef CROSS32_COMPILE - BOOTCC := $(CROSS32_COMPILE)gcc -endif endif BOOTCFLAGS += -isystem $(shell $(BOOTCC) -print-file-name=include) @@ -49,6 +53,8 @@ endif BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc +BOOTARFLAGS := -cr$(KBUILD_ARFLAGS) + ifdef CONFIG_DEBUG_INFO BOOTCFLAGS += -g endif @@ -120,7 +126,7 @@ src-wlib-$(CONFIG_40x) += 4xx.c planetcore.c src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c src-wlib-$(CONFIG_PPC_8xx) += mpc8xx.c planetcore.c fsl-soc.c src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c -src-wlib-$(CONFIG_EMBEDDED6xx) += mpsc.c mv64x60.c mv64x60_i2c.c ugecon.c fsl-soc.c +src-wlib-$(CONFIG_EMBEDDED6xx) += ugecon.c fsl-soc.c src-wlib-$(CONFIG_XILINX_VIRTEX) += uartlite.c src-wlib-$(CONFIG_CPM) += cpm-serial.c @@ -143,8 +149,8 @@ src-plat-$(CONFIG_PPC_82xx) += cuboot-pq2.c fixed-head.S ep8248e.c cuboot-824x.c src-plat-$(CONFIG_PPC_83xx) += cuboot-83xx.c fixed-head.S redboot-83xx.c src-plat-$(CONFIG_FSL_SOC_BOOKE) += cuboot-85xx.c cuboot-85xx-cpm2.c src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \ - cuboot-c2k.c gamecube-head.S \ - gamecube.c wii-head.S wii.c holly.c \ + gamecube-head.S gamecube.c \ + wii-head.S wii.c holly.c \ fixed-head.S mvme5100.c src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c @@ -202,7 +208,7 @@ quiet_cmd_bootas = BOOTAS $@ cmd_bootas = $(BOOTCC) -Wp,-MD,$(depfile) $(BOOTAFLAGS) -c -o $@ $< quiet_cmd_bootar = BOOTAR $@ - cmd_bootar = $(CROSS32AR) -cr$(KBUILD_ARFLAGS) $@.$$$$ $(filter-out FORCE,$^); mv $@.$$$$ $@ + cmd_bootar = $(BOOTAR) $(BOOTARFLAGS) $@.$$$$ $(filter-out FORCE,$^); mv $@.$$$$ $@ $(obj-libfdt): $(obj)/%.o: $(srctree)/scripts/dtc/libfdt/%.c FORCE $(call if_changed_dep,bootcc) @@ -339,7 +345,6 @@ image-$(CONFIG_MVME7100) += dtbImage.mvme7100 # Board ports in arch/powerpc/platform/embedded6xx/Kconfig image-$(CONFIG_STORCENTER) += cuImage.storcenter image-$(CONFIG_MPC7448HPC2) += cuImage.mpc7448hpc2 -image-$(CONFIG_PPC_C2K) += cuImage.c2k image-$(CONFIG_GAMECUBE) += dtbImage.gamecube image-$(CONFIG_WII) += dtbImage.wii image-$(CONFIG_MVME5100) += dtbImage.mvme5100 diff --git a/arch/powerpc/boot/cuboot-c2k.c b/arch/powerpc/boot/cuboot-c2k.c deleted file mode 100644 index 9309c51f1d65..000000000000 --- a/arch/powerpc/boot/cuboot-c2k.c +++ /dev/null @@ -1,189 +0,0 @@ -/* - * GEFanuc C2K platform code. - * - * Author: Remi Machet <rmachet@slac.stanford.edu> - * - * Originated from prpmc2800.c - * - * 2008 (c) Stanford University - * 2007 (c) MontaVista, Software, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#include "types.h" -#include "stdio.h" -#include "io.h" -#include "ops.h" -#include "elf.h" -#include "mv64x60.h" -#include "cuboot.h" -#include "ppcboot.h" - -static u8 *bridge_base; - -static void c2k_bridge_setup(u32 mem_size) -{ - u32 i, v[30], enables, acc_bits; - u32 pci_base_hi, pci_base_lo, size, buf[2]; - unsigned long cpu_base; - int rc; - void *devp, *mv64x60_devp; - u8 *bridge_pbase, is_coherent; - struct mv64x60_cpu2pci_win *tbl; - int bus; - - bridge_pbase = mv64x60_get_bridge_pbase(); - is_coherent = mv64x60_is_coherent(); - - if (is_coherent) - acc_bits = MV64x60_PCI_ACC_CNTL_SNOOP_WB - | MV64x60_PCI_ACC_CNTL_SWAP_NONE - | MV64x60_PCI_ACC_CNTL_MBURST_32_BYTES - | MV64x60_PCI_ACC_CNTL_RDSIZE_32_BYTES; - else - acc_bits = MV64x60_PCI_ACC_CNTL_SNOOP_NONE - | MV64x60_PCI_ACC_CNTL_SWAP_NONE - | MV64x60_PCI_ACC_CNTL_MBURST_128_BYTES - | MV64x60_PCI_ACC_CNTL_RDSIZE_256_BYTES; - - mv64x60_config_ctlr_windows(bridge_base, bridge_pbase, is_coherent); - mv64x60_devp = find_node_by_compatible(NULL, "marvell,mv64360"); - if (mv64x60_devp == NULL) - fatal("Error: Missing marvell,mv64360 device tree node\n\r"); - - enables = in_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE)); - enables |= 0x007ffe00; /* Disable all cpu->pci windows */ - out_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE), enables); - - /* Get the cpu -> pci i/o & mem mappings from the device tree */ - devp = NULL; - for (bus = 0; ; bus++) { - char name[] = "pci "; - - name[strlen(name)-1] = bus+'0'; - - devp = find_node_by_alias(name); - if (devp == NULL) - break; - - if (bus >= 2) - fatal("Error: Only 2 PCI controllers are supported at" \ - " this time.\n"); - - mv64x60_config_pci_windows(bridge_base, bridge_pbase, bus, 0, - mem_size, acc_bits); - - rc = getprop(devp, "ranges", v, sizeof(v)); - if (rc == 0) - fatal("Error: Can't find marvell,mv64360-pci ranges" - " property\n\r"); - - /* Get the cpu -> pci i/o & mem mappings from the device tree */ - - for (i = 0; i < rc; i += 6) { - switch (v[i] & 0xff000000) { - case 0x01000000: /* PCI I/O Space */ - tbl = mv64x60_cpu2pci_io; - break; - case 0x02000000: /* PCI MEM Space */ - tbl = mv64x60_cpu2pci_mem; - break; - default: - continue; - } - - pci_base_hi = v[i+1]; - pci_base_lo = v[i+2]; - cpu_base = v[i+3]; - size = v[i+5]; - - buf[0] = cpu_base; - buf[1] = size; - - if (!dt_xlate_addr(devp, buf, sizeof(buf), &cpu_base)) - fatal("Error: Can't translate PCI address " \ - "0x%x\n\r", (u32)cpu_base); - - mv64x60_config_cpu2pci_window(bridge_base, bus, - pci_base_hi, pci_base_lo, cpu_base, size, tbl); - } - - enables &= ~(3<<(9+bus*5)); /* Enable cpu->pci<bus> i/o, - cpu->pci<bus> mem0 */ - out_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE), - enables); - }; -} - -static void c2k_fixups(void) -{ - u32 mem_size; - - mem_size = mv64x60_get_mem_size(bridge_base); - c2k_bridge_setup(mem_size); /* Do necessary bridge setup */ -} - -#define MV64x60_MPP_CNTL_0 0xf000 -#define MV64x60_MPP_CNTL_2 0xf008 -#define MV64x60_GPP_IO_CNTL 0xf100 -#define MV64x60_GPP_LEVEL_CNTL 0xf110 -#define MV64x60_GPP_VALUE_SET 0xf118 - -static void c2k_reset(void) -{ - u32 temp; - - udelay(5000000); - - if (bridge_base != 0) { - temp = in_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_0)); - temp &= 0xFFFF0FFF; - out_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_0), temp); - - temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL)); - temp |= 0x00000004; - out_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL), temp); - - temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL)); - 
temp |= 0x00000004; - out_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL), temp); - - temp = in_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_2)); - temp &= 0xFFFF0FFF; - out_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_2), temp); - - temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL)); - temp |= 0x00080000; - out_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL), temp); - - temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL)); - temp |= 0x00080000; - out_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL), temp); - - out_le32((u32 *)(bridge_base + MV64x60_GPP_VALUE_SET), - 0x00080004); - } - - for (;;); -} - -static bd_t bd; - -void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7) -{ - CUBOOT_INIT(); - - fdt_init(_dtb_start); - - bridge_base = mv64x60_get_bridge_base(); - - platform_ops.fixups = c2k_fixups; - platform_ops.exit = c2k_reset; - - if (serial_console_init() < 0) - exit(); -} diff --git a/arch/powerpc/boot/dts/c2k.dts b/arch/powerpc/boot/dts/c2k.dts deleted file mode 100644 index c5beb72d18b7..000000000000 --- a/arch/powerpc/boot/dts/c2k.dts +++ /dev/null @@ -1,366 +0,0 @@ -/* Device Tree Source for GEFanuc C2K - * - * Author: Remi Machet <rmachet@slac.stanford.edu> - * - * Originated from prpmc2800.dts - * - * 2008 (c) Stanford University - * 2007 (c) MontaVista, Software, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -/dts-v1/; - -/ { - #address-cells = <1>; - #size-cells = <1>; - model = "C2K"; - compatible = "GEFanuc,C2K"; - coherency-off; - - aliases { - pci0 = &PCI0; - pci1 = &PCI1; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu@0 { - device_type = "cpu"; - compatible = "PowerPC,7447"; - reg = <0>; - clock-frequency = <996000000>; /* 996 MHz */ - bus-frequency = <166666667>; /* 166.6666 MHz */ - timebase-frequency = <41666667>; /* 166.6666/4 MHz */ - i-cache-line-size = <32>; - d-cache-line-size = <32>; - i-cache-size = <32768>; - d-cache-size = <32768>; - }; - }; - - memory { - device_type = "memory"; - reg = <0x00000000 0x40000000>; /* 1GB */ - }; - - system-controller@d8000000 { /* Marvell Discovery */ - #address-cells = <1>; - #size-cells = <1>; - model = "mv64460"; - compatible = "marvell,mv64360"; - clock-frequency = <166666667>; /* 166.66... 
MHz */ - reg = <0xd8000000 0x00010000>; - virtual-reg = <0xd8000000>; - ranges = <0xd4000000 0xd4000000 0x01000000 /* PCI 0 I/O Space */ - 0x80000000 0x80000000 0x08000000 /* PCI 0 MEM Space */ - 0xd0000000 0xd0000000 0x01000000 /* PCI 1 I/O Space */ - 0xa0000000 0xa0000000 0x08000000 /* PCI 1 MEM Space */ - 0xd8100000 0xd8100000 0x00010000 /* FPGA */ - 0xd8110000 0xd8110000 0x00010000 /* FPGA USARTs */ - 0xf8000000 0xf8000000 0x08000000 /* User FLASH */ - 0x00000000 0xd8000000 0x00010000 /* Bridge's regs */ - 0xd8140000 0xd8140000 0x00040000>; /* Integrated SRAM */ - - mdio@2000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "marvell,mv64360-mdio"; - reg = <0x2000 4>; - PHY0: ethernet-phy@0 { - interrupts = <76>; /* GPP 12 */ - interrupt-parent = <&PIC>; - reg = <0>; - }; - PHY1: ethernet-phy@1 { - interrupts = <76>; /* GPP 12 */ - interrupt-parent = <&PIC>; - reg = <1>; - }; - PHY2: ethernet-phy@2 { - interrupts = <76>; /* GPP 12 */ - interrupt-parent = <&PIC>; - reg = <2>; - }; - }; - - ethernet-group@2000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "marvell,mv64360-eth-group"; - reg = <0x2000 0x2000>; - ethernet@0 { - device_type = "network"; - compatible = "marvell,mv64360-eth"; - reg = <0>; - interrupts = <32>; - interrupt-parent = <&PIC>; - phy = <&PHY0>; - local-mac-address = [ 00 00 00 00 00 00 ]; - }; - ethernet@1 { - device_type = "network"; - compatible = "marvell,mv64360-eth"; - reg = <1>; - interrupts = <33>; - interrupt-parent = <&PIC>; - phy = <&PHY1>; - local-mac-address = [ 00 00 00 00 00 00 ]; - }; - ethernet@2 { - device_type = "network"; - compatible = "marvell,mv64360-eth"; - reg = <2>; - interrupts = <34>; - interrupt-parent = <&PIC>; - phy = <&PHY2>; - local-mac-address = [ 00 00 00 00 00 00 ]; - }; - }; - - SDMA0: sdma@4000 { - compatible = "marvell,mv64360-sdma"; - reg = <0x4000 0xc18>; - virtual-reg = <0xd8004000>; - interrupt-base = <0>; - interrupts = <36>; - interrupt-parent = <&PIC>; - }; - - SDMA1: sdma@6000 { - compatible = "marvell,mv64360-sdma"; - reg = <0x6000 0xc18>; - virtual-reg = <0xd8006000>; - interrupt-base = <0>; - interrupts = <38>; - interrupt-parent = <&PIC>; - }; - - BRG0: brg@b200 { - compatible = "marvell,mv64360-brg"; - reg = <0xb200 0x8>; - clock-src = <8>; - clock-frequency = <133333333>; - current-speed = <115200>; - }; - - BRG1: brg@b208 { - compatible = "marvell,mv64360-brg"; - reg = <0xb208 0x8>; - clock-src = <8>; - clock-frequency = <133333333>; - current-speed = <115200>; - }; - - CUNIT: cunit@f200 { - reg = <0xf200 0x200>; - }; - - MPSCROUTING: mpscrouting@b400 { - reg = <0xb400 0xc>; - }; - - MPSCINTR: mpscintr@b800 { - reg = <0xb800 0x100>; - virtual-reg = <0xd800b800>; - }; - - MPSC0: mpsc@8000 { - compatible = "marvell,mv64360-mpsc"; - reg = <0x8000 0x38>; - virtual-reg = <0xd8008000>; - sdma = <&SDMA0>; - brg = <&BRG0>; - cunit = <&CUNIT>; - mpscrouting = <&MPSCROUTING>; - mpscintr = <&MPSCINTR>; - cell-index = <0>; - interrupts = <40>; - interrupt-parent = <&PIC>; - }; - - MPSC1: mpsc@9000 { - compatible = "marvell,mv64360-mpsc"; - reg = <0x9000 0x38>; - virtual-reg = <0xd8009000>; - sdma = <&SDMA1>; - brg = <&BRG1>; - cunit = <&CUNIT>; - mpscrouting = <&MPSCROUTING>; - mpscintr = <&MPSCINTR>; - cell-index = <1>; - interrupts = <42>; - interrupt-parent = <&PIC>; - }; - - wdt@b410 { /* watchdog timer */ - compatible = "marvell,mv64360-wdt"; - reg = <0xb410 0x8>; - }; - - i2c@c000 { - compatible = "marvell,mv64360-i2c"; - reg = <0xc000 0x20>; - virtual-reg = <0xd800c000>; - interrupts = <37>; 
- interrupt-parent = <&PIC>; - }; - - PIC: pic { - #interrupt-cells = <1>; - #address-cells = <0>; - compatible = "marvell,mv64360-pic"; - reg = <0x0000 0x88>; - interrupt-controller; - }; - - mpp@f000 { - compatible = "marvell,mv64360-mpp"; - reg = <0xf000 0x10>; - }; - - gpp@f100 { - compatible = "marvell,mv64360-gpp"; - reg = <0xf100 0x20>; - }; - - PCI0: pci@80000000 { - #address-cells = <3>; - #size-cells = <2>; - #interrupt-cells = <1>; - device_type = "pci"; - compatible = "marvell,mv64360-pci"; - reg = <0x0cf8 0x8>; - ranges = <0x01000000 0x0 0x00000000 0xd4000000 0x0 0x01000000 - 0x02000000 0x0 0x80000000 0x80000000 0x0 0x08000000>; - bus-range = <0 255>; - clock-frequency = <66000000>; - interrupt-pci-iack = <0x0c34>; - interrupt-parent = <&PIC>; - interrupt-map-mask = <0x0000 0x0 0x0 0x7>; - interrupt-map = < - /* Only one interrupt line for PMC0 slot (INTA) */ - 0x0000 0 0 1 &PIC 88 - >; - }; - - - PCI1: pci@a0000000 { - #address-cells = <3>; - #size-cells = <2>; - #interrupt-cells = <1>; - device_type = "pci"; - compatible = "marvell,mv64360-pci"; - reg = <0x0c78 0x8>; - ranges = <0x01000000 0x0 0x00000000 0xd0000000 0x0 0x01000000 - 0x02000000 0x0 0x80000000 0xa0000000 0x0 0x08000000>; - bus-range = <0 255>; - clock-frequency = <66000000>; - interrupt-pci-iack = <0x0cb4>; - interrupt-parent = <&PIC>; - interrupt-map-mask = <0xf800 0x00 0x00 0x7>; - interrupt-map = < - /* IDSEL 0x01: PMC1 ? */ - 0x0800 0 0 1 &PIC 88 - /* IDSEL 0x02: cPCI bridge */ - 0x1000 0 0 1 &PIC 88 - /* IDSEL 0x03: USB controller */ - 0x1800 0 0 1 &PIC 91 - /* IDSEL 0x04: SATA controller */ - 0x2000 0 0 1 &PIC 95 - >; - }; - - cpu-error@70 { - compatible = "marvell,mv64360-cpu-error"; - reg = <0x0070 0x10 0x0128 0x28>; - interrupts = <3>; - interrupt-parent = <&PIC>; - }; - - sram-ctrl@380 { - compatible = "marvell,mv64360-sram-ctrl"; - reg = <0x0380 0x80>; - interrupts = <13>; - interrupt-parent = <&PIC>; - }; - - pci-error@1d40 { - compatible = "marvell,mv64360-pci-error"; - reg = <0x1d40 0x40 0x0c28 0x4>; - interrupts = <12>; - interrupt-parent = <&PIC>; - }; - - pci-error@1dc0 { - compatible = "marvell,mv64360-pci-error"; - reg = <0x1dc0 0x40 0x0ca8 0x4>; - interrupts = <16>; - interrupt-parent = <&PIC>; - }; - - mem-ctrl@1400 { - compatible = "marvell,mv64360-mem-ctrl"; - reg = <0x1400 0x60>; - interrupts = <17>; - interrupt-parent = <&PIC>; - }; - /* Devices attached to the device controller */ - devicebus@45c { - #address-cells = <2>; - #size-cells = <1>; - compatible = "marvell,mv64306-devctrl"; - reg = <0x45C 0x88>; - interrupts = <1>; - interrupt-parent = <&PIC>; - ranges = <0 0 0xd8100000 0x10000 - 2 0 0xd8110000 0x10000 - 4 0 0xf8000000 0x8000000>; - fpga@0,0 { - compatible = "sbs,fpga-c2k"; - reg = <0 0 0x10000>; - }; - fpga_usart@2,0 { - compatible = "sbs,fpga_usart-c2k"; - reg = <2 0 0x10000>; - }; - nor_flash@4,0 { - compatible = "cfi-flash"; - reg = <4 0 0x8000000>; /* 128MB */ - bank-width = <4>; - device-width = <1>; - #address-cells = <1>; - #size-cells = <1>; - partition@0 { - label = "boot"; - reg = <0x00000000 0x00080000>; - }; - partition@40000 { - label = "kernel"; - reg = <0x00080000 0x00400000>; - }; - partition@440000 { - label = "initrd"; - reg = <0x00480000 0x00B80000>; - }; - partition@1000000 { - label = "rootfs"; - reg = <0x01000000 0x06800000>; - }; - partition@7800000 { - label = "recovery"; - reg = <0x07800000 0x00800000>; - read-only; - }; - }; - }; - }; - chosen { - stdout-path = &MPSC0; - }; -}; diff --git a/arch/powerpc/boot/dts/fsl/t104xqds.dtsi 
b/arch/powerpc/boot/dts/fsl/t104xqds.dtsi index 2fd4cbe7098f..615479732252 100644 --- a/arch/powerpc/boot/dts/fsl/t104xqds.dtsi +++ b/arch/powerpc/boot/dts/fsl/t104xqds.dtsi @@ -269,7 +269,7 @@ i2c@118000 { pca9547@77 { - compatible = "philips,pca9547"; + compatible = "nxp,pca9547"; reg = <0x77>; }; rtc@68 { diff --git a/arch/powerpc/boot/dts/sbc8349.dts b/arch/powerpc/boot/dts/sbc8349.dts deleted file mode 100644 index fc89e00b765c..000000000000 --- a/arch/powerpc/boot/dts/sbc8349.dts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * SBC8349E Device Tree Source - * - * Copyright 2007 Wind River Inc. - * - * Paul Gortmaker (see MAINTAINERS for contact information) - * - * -based largely on the Freescale MPC834x_MDS dts. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -/dts-v1/; - -/ { - model = "SBC8349E"; - compatible = "SBC834xE"; - #address-cells = <1>; - #size-cells = <1>; - - aliases { - ethernet0 = &enet0; - ethernet1 = &enet1; - serial0 = &serial0; - serial1 = &serial1; - pci0 = &pci0; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,8349@0 { - device_type = "cpu"; - reg = <0x0>; - d-cache-line-size = <32>; - i-cache-line-size = <32>; - d-cache-size = <32768>; - i-cache-size = <32768>; - timebase-frequency = <0>; // from bootloader - bus-frequency = <0>; // from bootloader - clock-frequency = <0>; // from bootloader - }; - }; - - memory { - device_type = "memory"; - reg = <0x00000000 0x10000000>; // 256MB at 0 - }; - - soc8349@e0000000 { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - ranges = <0x0 0xe0000000 0x00100000>; - reg = <0xe0000000 0x00000200>; - bus-frequency = <0>; - - wdt@200 { - compatible = "mpc83xx_wdt"; - reg = <0x200 0x100>; - }; - - i2c@3000 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <0>; - compatible = "fsl-i2c"; - reg = <0x3000 0x100>; - interrupts = <14 0x8>; - interrupt-parent = <&ipic>; - dfsrr; - }; - - i2c@3100 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <1>; - compatible = "fsl-i2c"; - reg = <0x3100 0x100>; - interrupts = <15 0x8>; - interrupt-parent = <&ipic>; - dfsrr; - }; - - spi@7000 { - cell-index = <0>; - compatible = "fsl,spi"; - reg = <0x7000 0x1000>; - interrupts = <16 0x8>; - interrupt-parent = <&ipic>; - mode = "cpu"; - }; - - dma@82a8 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,mpc8349-dma", "fsl,elo-dma"; - reg = <0x82a8 4>; - ranges = <0 0x8100 0x1a8>; - interrupt-parent = <&ipic>; - interrupts = <71 8>; - cell-index = <0>; - dma-channel@0 { - compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel"; - reg = <0 0x80>; - cell-index = <0>; - interrupt-parent = <&ipic>; - interrupts = <71 8>; - }; - dma-channel@80 { - compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel"; - reg = <0x80 0x80>; - cell-index = <1>; - interrupt-parent = <&ipic>; - interrupts = <71 8>; - }; - dma-channel@100 { - compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel"; - reg = <0x100 0x80>; - cell-index = <2>; - interrupt-parent = <&ipic>; - interrupts = <71 8>; - }; - dma-channel@180 { - compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel"; - reg = <0x180 0x28>; - cell-index = <3>; - interrupt-parent = <&ipic>; - interrupts = <71 8>; - }; - }; - - /* phy type (ULPI or SERIAL) are only types supported for MPH */ - /* port = 0 or 1 
*/ - usb@22000 { - compatible = "fsl-usb2-mph"; - reg = <0x22000 0x1000>; - #address-cells = <1>; - #size-cells = <0>; - interrupt-parent = <&ipic>; - interrupts = <39 0x8>; - phy_type = "ulpi"; - port0; - }; - - enet0: ethernet@24000 { - #address-cells = <1>; - #size-cells = <1>; - cell-index = <0>; - device_type = "network"; - model = "TSEC"; - compatible = "gianfar"; - reg = <0x24000 0x1000>; - ranges = <0x0 0x24000 0x1000>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <32 0x8 33 0x8 34 0x8>; - interrupt-parent = <&ipic>; - tbi-handle = <&tbi0>; - phy-handle = <&phy0>; - linux,network-index = <0>; - - mdio@520 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,gianfar-mdio"; - reg = <0x520 0x20>; - - phy0: ethernet-phy@19 { - interrupt-parent = <&ipic>; - interrupts = <20 0x8>; - reg = <0x19>; - }; - - phy1: ethernet-phy@1a { - interrupt-parent = <&ipic>; - interrupts = <21 0x8>; - reg = <0x1a>; - }; - - tbi0: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; - }; - - enet1: ethernet@25000 { - #address-cells = <1>; - #size-cells = <1>; - cell-index = <1>; - device_type = "network"; - model = "TSEC"; - compatible = "gianfar"; - reg = <0x25000 0x1000>; - ranges = <0x0 0x25000 0x1000>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <35 0x8 36 0x8 37 0x8>; - interrupt-parent = <&ipic>; - tbi-handle = <&tbi1>; - phy-handle = <&phy1>; - linux,network-index = <1>; - - mdio@520 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,gianfar-tbi"; - reg = <0x520 0x20>; - - tbi1: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; - }; - - serial0: serial@4500 { - cell-index = <0>; - device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; - reg = <0x4500 0x100>; - clock-frequency = <0>; - interrupts = <9 0x8>; - interrupt-parent = <&ipic>; - }; - - serial1: serial@4600 { - cell-index = <1>; - device_type = "serial"; - compatible = "fsl,ns16550", "ns16550"; - reg = <0x4600 0x100>; - clock-frequency = <0>; - interrupts = <10 0x8>; - interrupt-parent = <&ipic>; - }; - - crypto@30000 { - compatible = "fsl,sec2.0"; - reg = <0x30000 0x10000>; - interrupts = <11 0x8>; - interrupt-parent = <&ipic>; - fsl,num-channels = <4>; - fsl,channel-fifo-len = <24>; - fsl,exec-units-mask = <0x7e>; - fsl,descriptor-types-mask = <0x01010ebf>; - }; - - /* IPIC - * interrupts cell = <intr #, sense> - * sense values match linux IORESOURCE_IRQ_* defines: - * sense == 8: Level, low assertion - * sense == 2: Edge, high-to-low change - */ - ipic: pic@700 { - interrupt-controller; - #address-cells = <0>; - #interrupt-cells = <2>; - reg = <0x700 0x100>; - device_type = "ipic"; - }; - }; - - localbus@e0005000 { - #address-cells = <2>; - #size-cells = <1>; - compatible = "fsl,mpc8349-localbus", "simple-bus"; - reg = <0xe0005000 0x1000>; - interrupts = <77 0x8>; - interrupt-parent = <&ipic>; - ranges = <0x0 0x0 0xff800000 0x00800000 /* 8MB Flash */ - 0x1 0x0 0xf8000000 0x00002000 /* 8KB EEPROM */ - 0x2 0x0 0x10000000 0x04000000 /* 64MB SDRAM */ - 0x3 0x0 0x10000000 0x04000000>; /* 64MB SDRAM */ - - flash@0,0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "intel,28F640J3A", "cfi-flash"; - reg = <0x0 0x0 0x800000>; - bank-width = <2>; - device-width = <1>; - - partition@0 { - label = "u-boot"; - reg = <0x00000000 0x00040000>; - read-only; - }; - - partition@40000 { - label = "user"; - reg = <0x00040000 0x006c0000>; - }; - - partition@700000 { - label = "legacy u-boot"; - reg = <0x00700000 0x00100000>; - read-only; - }; - - 
}; - }; - - pci0: pci@e0008500 { - interrupt-map-mask = <0xf800 0x0 0x0 0x7>; - interrupt-map = < - - /* IDSEL 0x11 */ - 0x8800 0x0 0x0 0x1 &ipic 48 0x8 - 0x8800 0x0 0x0 0x2 &ipic 17 0x8 - 0x8800 0x0 0x0 0x3 &ipic 18 0x8 - 0x8800 0x0 0x0 0x4 &ipic 19 0x8>; - - interrupt-parent = <&ipic>; - interrupts = <0x42 0x8>; - bus-range = <0 0>; - ranges = <0x02000000 0x0 0x90000000 0x90000000 0x0 0x10000000 - 0x42000000 0x0 0x80000000 0x80000000 0x0 0x10000000 - 0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>; - clock-frequency = <66666666>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <0xe0008500 0x100 /* internal registers */ - 0xe0008300 0x8>; /* config space access registers */ - compatible = "fsl,mpc8349-pci"; - device_type = "pci"; - }; -}; diff --git a/arch/powerpc/boot/mpc8xx.c b/arch/powerpc/boot/mpc8xx.c index add55a7f184f..c9bd9285c548 100644 --- a/arch/powerpc/boot/mpc8xx.c +++ b/arch/powerpc/boot/mpc8xx.c @@ -24,7 +24,7 @@ u32 mpc885_get_clock(u32 crystal) { u32 *immr; u32 plprcr; - int mfi, mfn, mfd, pdf, div; + int mfi, mfn, mfd, pdf; u32 ret; immr = fsl_get_immr(); @@ -43,7 +43,6 @@ u32 mpc885_get_clock(u32 crystal) } pdf = (plprcr >> 1) & 0xf; - div = (plprcr >> 20) & 3; mfd = (plprcr >> 22) & 0x1f; mfn = (plprcr >> 27) & 0x1f; diff --git a/arch/powerpc/boot/mpsc.c b/arch/powerpc/boot/mpsc.c deleted file mode 100644 index 425ad88cce8d..000000000000 --- a/arch/powerpc/boot/mpsc.c +++ /dev/null @@ -1,169 +0,0 @@ -/* - * MPSC/UART driver for the Marvell mv64360, mv64460, ... - * - * Author: Mark A. Greer <mgreer@mvista.com> - * - * 2007 (c) MontaVista Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. 
- */ - -#include <stdarg.h> -#include <stddef.h> -#include "types.h" -#include "string.h" -#include "stdio.h" -#include "io.h" -#include "ops.h" - - -#define MPSC_CHR_1 0x000c - -#define MPSC_CHR_2 0x0010 -#define MPSC_CHR_2_TA (1<<7) -#define MPSC_CHR_2_TCS (1<<9) -#define MPSC_CHR_2_RA (1<<23) -#define MPSC_CHR_2_CRD (1<<25) -#define MPSC_CHR_2_EH (1<<31) - -#define MPSC_CHR_4 0x0018 -#define MPSC_CHR_4_Z (1<<29) - -#define MPSC_CHR_5 0x001c -#define MPSC_CHR_5_CTL1_INTR (1<<12) -#define MPSC_CHR_5_CTL1_VALID (1<<15) - -#define MPSC_CHR_10 0x0030 - -#define MPSC_INTR_CAUSE 0x0000 -#define MPSC_INTR_CAUSE_RCC (1<<6) -#define MPSC_INTR_MASK 0x0080 - -#define SDMA_SDCM 0x0008 -#define SDMA_SDCM_AR (1<<15) -#define SDMA_SDCM_AT (1<<31) - -static volatile char *mpsc_base; -static volatile char *mpscintr_base; -static u32 chr1, chr2; - -static int mpsc_open(void) -{ - chr1 = in_le32((u32 *)(mpsc_base + MPSC_CHR_1)) & 0x00ff0000; - chr2 = in_le32((u32 *)(mpsc_base + MPSC_CHR_2)) & ~(MPSC_CHR_2_TA - | MPSC_CHR_2_TCS | MPSC_CHR_2_RA | MPSC_CHR_2_CRD - | MPSC_CHR_2_EH); - out_le32((u32 *)(mpsc_base + MPSC_CHR_4), MPSC_CHR_4_Z); - out_le32((u32 *)(mpsc_base + MPSC_CHR_5), - MPSC_CHR_5_CTL1_INTR | MPSC_CHR_5_CTL1_VALID); - out_le32((u32 *)(mpsc_base + MPSC_CHR_2), chr2 | MPSC_CHR_2_EH); - return 0; -} - -static void mpsc_putc(unsigned char c) -{ - while (in_le32((u32 *)(mpsc_base + MPSC_CHR_2)) & MPSC_CHR_2_TCS); - - out_le32((u32 *)(mpsc_base + MPSC_CHR_1), chr1 | c); - out_le32((u32 *)(mpsc_base + MPSC_CHR_2), chr2 | MPSC_CHR_2_TCS); -} - -static unsigned char mpsc_getc(void) -{ - u32 cause = 0; - unsigned char c; - - while (!(cause & MPSC_INTR_CAUSE_RCC)) - cause = in_le32((u32 *)(mpscintr_base + MPSC_INTR_CAUSE)); - - c = in_8((u8 *)(mpsc_base + MPSC_CHR_10 + 2)); - out_8((u8 *)(mpsc_base + MPSC_CHR_10 + 2), c); - out_le32((u32 *)(mpscintr_base + MPSC_INTR_CAUSE), - cause & ~MPSC_INTR_CAUSE_RCC); - - return c; -} - -static u8 mpsc_tstc(void) -{ - return (u8)((in_le32((u32 *)(mpscintr_base + MPSC_INTR_CAUSE)) - & MPSC_INTR_CAUSE_RCC) != 0); -} - -static void mpsc_stop_dma(volatile char *sdma_base) -{ - out_le32((u32 *)(mpsc_base + MPSC_CHR_2),MPSC_CHR_2_TA | MPSC_CHR_2_RA); - out_le32((u32 *)(sdma_base + SDMA_SDCM), SDMA_SDCM_AR | SDMA_SDCM_AT); - - while ((in_le32((u32 *)(sdma_base + SDMA_SDCM)) - & (SDMA_SDCM_AR | SDMA_SDCM_AT)) != 0) - udelay(100); -} - -static volatile char *mpsc_get_virtreg_of_phandle(void *devp, char *prop) -{ - void *v; - int n; - - n = getprop(devp, prop, &v, sizeof(v)); - if (n != sizeof(v)) - goto err_out; - - devp = find_node_by_linuxphandle((u32)v); - if (devp == NULL) - goto err_out; - - n = getprop(devp, "virtual-reg", &v, sizeof(v)); - if (n == sizeof(v)) - return v; - -err_out: - return NULL; -} - -int mpsc_console_init(void *devp, struct serial_console_data *scdp) -{ - void *v; - int n, reg_set; - volatile char *sdma_base; - - n = getprop(devp, "virtual-reg", &v, sizeof(v)); - if (n != sizeof(v)) - goto err_out; - mpsc_base = v; - - sdma_base = mpsc_get_virtreg_of_phandle(devp, "sdma"); - if (sdma_base == NULL) - goto err_out; - - mpscintr_base = mpsc_get_virtreg_of_phandle(devp, "mpscintr"); - if (mpscintr_base == NULL) - goto err_out; - - n = getprop(devp, "cell-index", &v, sizeof(v)); - if (n != sizeof(v)) - goto err_out; - reg_set = (int)v; - - mpscintr_base += (reg_set == 0) ? 
0x4 : 0xc; - - /* Make sure the mpsc ctlrs are shutdown */ - out_le32((u32 *)(mpscintr_base + MPSC_INTR_CAUSE), 0); - out_le32((u32 *)(mpscintr_base + MPSC_INTR_CAUSE), 0); - out_le32((u32 *)(mpscintr_base + MPSC_INTR_MASK), 0); - out_le32((u32 *)(mpscintr_base + MPSC_INTR_MASK), 0); - - mpsc_stop_dma(sdma_base); - - scdp->open = mpsc_open; - scdp->putc = mpsc_putc; - scdp->getc = mpsc_getc; - scdp->tstc = mpsc_tstc; - scdp->close = NULL; - - return 0; - -err_out: - return -1; -} diff --git a/arch/powerpc/boot/mv64x60.c b/arch/powerpc/boot/mv64x60.c deleted file mode 100644 index d9bb302b91d2..000000000000 --- a/arch/powerpc/boot/mv64x60.c +++ /dev/null @@ -1,581 +0,0 @@ -/* - * Marvell hostbridge routines - * - * Author: Mark A. Greer <source@mvista.com> - * - * 2004, 2005, 2007 (c) MontaVista Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. - */ - -#include <stdarg.h> -#include <stddef.h> -#include "types.h" -#include "elf.h" -#include "page.h" -#include "string.h" -#include "stdio.h" -#include "io.h" -#include "ops.h" -#include "mv64x60.h" - -#define PCI_DEVFN(slot,func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) - -#define MV64x60_CPU2MEM_WINDOWS 4 -#define MV64x60_CPU2MEM_0_BASE 0x0008 -#define MV64x60_CPU2MEM_0_SIZE 0x0010 -#define MV64x60_CPU2MEM_1_BASE 0x0208 -#define MV64x60_CPU2MEM_1_SIZE 0x0210 -#define MV64x60_CPU2MEM_2_BASE 0x0018 -#define MV64x60_CPU2MEM_2_SIZE 0x0020 -#define MV64x60_CPU2MEM_3_BASE 0x0218 -#define MV64x60_CPU2MEM_3_SIZE 0x0220 - -#define MV64x60_ENET2MEM_BAR_ENABLE 0x2290 -#define MV64x60_ENET2MEM_0_BASE 0x2200 -#define MV64x60_ENET2MEM_0_SIZE 0x2204 -#define MV64x60_ENET2MEM_1_BASE 0x2208 -#define MV64x60_ENET2MEM_1_SIZE 0x220c -#define MV64x60_ENET2MEM_2_BASE 0x2210 -#define MV64x60_ENET2MEM_2_SIZE 0x2214 -#define MV64x60_ENET2MEM_3_BASE 0x2218 -#define MV64x60_ENET2MEM_3_SIZE 0x221c -#define MV64x60_ENET2MEM_4_BASE 0x2220 -#define MV64x60_ENET2MEM_4_SIZE 0x2224 -#define MV64x60_ENET2MEM_5_BASE 0x2228 -#define MV64x60_ENET2MEM_5_SIZE 0x222c -#define MV64x60_ENET2MEM_ACC_PROT_0 0x2294 -#define MV64x60_ENET2MEM_ACC_PROT_1 0x2298 -#define MV64x60_ENET2MEM_ACC_PROT_2 0x229c - -#define MV64x60_MPSC2MEM_BAR_ENABLE 0xf250 -#define MV64x60_MPSC2MEM_0_BASE 0xf200 -#define MV64x60_MPSC2MEM_0_SIZE 0xf204 -#define MV64x60_MPSC2MEM_1_BASE 0xf208 -#define MV64x60_MPSC2MEM_1_SIZE 0xf20c -#define MV64x60_MPSC2MEM_2_BASE 0xf210 -#define MV64x60_MPSC2MEM_2_SIZE 0xf214 -#define MV64x60_MPSC2MEM_3_BASE 0xf218 -#define MV64x60_MPSC2MEM_3_SIZE 0xf21c -#define MV64x60_MPSC_0_REMAP 0xf240 -#define MV64x60_MPSC_1_REMAP 0xf244 -#define MV64x60_MPSC2MEM_ACC_PROT_0 0xf254 -#define MV64x60_MPSC2MEM_ACC_PROT_1 0xf258 -#define MV64x60_MPSC2REGS_BASE 0xf25c - -#define MV64x60_IDMA2MEM_BAR_ENABLE 0x0a80 -#define MV64x60_IDMA2MEM_0_BASE 0x0a00 -#define MV64x60_IDMA2MEM_0_SIZE 0x0a04 -#define MV64x60_IDMA2MEM_1_BASE 0x0a08 -#define MV64x60_IDMA2MEM_1_SIZE 0x0a0c -#define MV64x60_IDMA2MEM_2_BASE 0x0a10 -#define MV64x60_IDMA2MEM_2_SIZE 0x0a14 -#define MV64x60_IDMA2MEM_3_BASE 0x0a18 -#define MV64x60_IDMA2MEM_3_SIZE 0x0a1c -#define MV64x60_IDMA2MEM_4_BASE 0x0a20 -#define MV64x60_IDMA2MEM_4_SIZE 0x0a24 -#define MV64x60_IDMA2MEM_5_BASE 0x0a28 -#define MV64x60_IDMA2MEM_5_SIZE 0x0a2c -#define MV64x60_IDMA2MEM_6_BASE 0x0a30 -#define MV64x60_IDMA2MEM_6_SIZE 0x0a34 -#define MV64x60_IDMA2MEM_7_BASE 0x0a38 -#define MV64x60_IDMA2MEM_7_SIZE 0x0a3c 
-#define MV64x60_IDMA2MEM_ACC_PROT_0 0x0a70 -#define MV64x60_IDMA2MEM_ACC_PROT_1 0x0a74 -#define MV64x60_IDMA2MEM_ACC_PROT_2 0x0a78 -#define MV64x60_IDMA2MEM_ACC_PROT_3 0x0a7c - -#define MV64x60_PCI_ACC_CNTL_WINDOWS 6 -#define MV64x60_PCI0_PCI_DECODE_CNTL 0x0d3c -#define MV64x60_PCI1_PCI_DECODE_CNTL 0x0dbc - -#define MV64x60_PCI0_BAR_ENABLE 0x0c3c -#define MV64x60_PCI02MEM_0_SIZE 0x0c08 -#define MV64x60_PCI0_ACC_CNTL_0_BASE_LO 0x1e00 -#define MV64x60_PCI0_ACC_CNTL_0_BASE_HI 0x1e04 -#define MV64x60_PCI0_ACC_CNTL_0_SIZE 0x1e08 -#define MV64x60_PCI0_ACC_CNTL_1_BASE_LO 0x1e10 -#define MV64x60_PCI0_ACC_CNTL_1_BASE_HI 0x1e14 -#define MV64x60_PCI0_ACC_CNTL_1_SIZE 0x1e18 -#define MV64x60_PCI0_ACC_CNTL_2_BASE_LO 0x1e20 -#define MV64x60_PCI0_ACC_CNTL_2_BASE_HI 0x1e24 -#define MV64x60_PCI0_ACC_CNTL_2_SIZE 0x1e28 -#define MV64x60_PCI0_ACC_CNTL_3_BASE_LO 0x1e30 -#define MV64x60_PCI0_ACC_CNTL_3_BASE_HI 0x1e34 -#define MV64x60_PCI0_ACC_CNTL_3_SIZE 0x1e38 -#define MV64x60_PCI0_ACC_CNTL_4_BASE_LO 0x1e40 -#define MV64x60_PCI0_ACC_CNTL_4_BASE_HI 0x1e44 -#define MV64x60_PCI0_ACC_CNTL_4_SIZE 0x1e48 -#define MV64x60_PCI0_ACC_CNTL_5_BASE_LO 0x1e50 -#define MV64x60_PCI0_ACC_CNTL_5_BASE_HI 0x1e54 -#define MV64x60_PCI0_ACC_CNTL_5_SIZE 0x1e58 - -#define MV64x60_PCI1_BAR_ENABLE 0x0cbc -#define MV64x60_PCI12MEM_0_SIZE 0x0c88 -#define MV64x60_PCI1_ACC_CNTL_0_BASE_LO 0x1e80 -#define MV64x60_PCI1_ACC_CNTL_0_BASE_HI 0x1e84 -#define MV64x60_PCI1_ACC_CNTL_0_SIZE 0x1e88 -#define MV64x60_PCI1_ACC_CNTL_1_BASE_LO 0x1e90 -#define MV64x60_PCI1_ACC_CNTL_1_BASE_HI 0x1e94 -#define MV64x60_PCI1_ACC_CNTL_1_SIZE 0x1e98 -#define MV64x60_PCI1_ACC_CNTL_2_BASE_LO 0x1ea0 -#define MV64x60_PCI1_ACC_CNTL_2_BASE_HI 0x1ea4 -#define MV64x60_PCI1_ACC_CNTL_2_SIZE 0x1ea8 -#define MV64x60_PCI1_ACC_CNTL_3_BASE_LO 0x1eb0 -#define MV64x60_PCI1_ACC_CNTL_3_BASE_HI 0x1eb4 -#define MV64x60_PCI1_ACC_CNTL_3_SIZE 0x1eb8 -#define MV64x60_PCI1_ACC_CNTL_4_BASE_LO 0x1ec0 -#define MV64x60_PCI1_ACC_CNTL_4_BASE_HI 0x1ec4 -#define MV64x60_PCI1_ACC_CNTL_4_SIZE 0x1ec8 -#define MV64x60_PCI1_ACC_CNTL_5_BASE_LO 0x1ed0 -#define MV64x60_PCI1_ACC_CNTL_5_BASE_HI 0x1ed4 -#define MV64x60_PCI1_ACC_CNTL_5_SIZE 0x1ed8 - -#define MV64x60_CPU2PCI_SWAP_NONE 0x01000000 - -#define MV64x60_CPU2PCI0_IO_BASE 0x0048 -#define MV64x60_CPU2PCI0_IO_SIZE 0x0050 -#define MV64x60_CPU2PCI0_IO_REMAP 0x00f0 -#define MV64x60_CPU2PCI0_MEM_0_BASE 0x0058 -#define MV64x60_CPU2PCI0_MEM_0_SIZE 0x0060 -#define MV64x60_CPU2PCI0_MEM_0_REMAP_LO 0x00f8 -#define MV64x60_CPU2PCI0_MEM_0_REMAP_HI 0x0320 - -#define MV64x60_CPU2PCI1_IO_BASE 0x0090 -#define MV64x60_CPU2PCI1_IO_SIZE 0x0098 -#define MV64x60_CPU2PCI1_IO_REMAP 0x0108 -#define MV64x60_CPU2PCI1_MEM_0_BASE 0x00a0 -#define MV64x60_CPU2PCI1_MEM_0_SIZE 0x00a8 -#define MV64x60_CPU2PCI1_MEM_0_REMAP_LO 0x0110 -#define MV64x60_CPU2PCI1_MEM_0_REMAP_HI 0x0340 - -struct mv64x60_mem_win { - u32 hi; - u32 lo; - u32 size; -}; - -struct mv64x60_pci_win { - u32 fcn; - u32 hi; - u32 lo; - u32 size; -}; - -/* PCI config access routines */ -struct { - u32 addr; - u32 data; -} static mv64x60_pci_cfgio[2] = { - { /* hose 0 */ - .addr = 0xcf8, - .data = 0xcfc, - }, - { /* hose 1 */ - .addr = 0xc78, - .data = 0xc7c, - } -}; - -u32 mv64x60_cfg_read(u8 *bridge_base, u8 hose, u8 bus, u8 devfn, u8 offset) -{ - out_le32((u32 *)(bridge_base + mv64x60_pci_cfgio[hose].addr), - (1 << 31) | (bus << 16) | (devfn << 8) | offset); - return in_le32((u32 *)(bridge_base + mv64x60_pci_cfgio[hose].data)); -} - -void mv64x60_cfg_write(u8 *bridge_base, u8 hose, u8 bus, u8 devfn, u8 offset, - u32 val) 
-{ - out_le32((u32 *)(bridge_base + mv64x60_pci_cfgio[hose].addr), - (1 << 31) | (bus << 16) | (devfn << 8) | offset); - out_le32((u32 *)(bridge_base + mv64x60_pci_cfgio[hose].data), val); -} - -/* I/O ctlr -> system memory setup */ -static struct mv64x60_mem_win mv64x60_cpu2mem[MV64x60_CPU2MEM_WINDOWS] = { - { - .lo = MV64x60_CPU2MEM_0_BASE, - .size = MV64x60_CPU2MEM_0_SIZE, - }, - { - .lo = MV64x60_CPU2MEM_1_BASE, - .size = MV64x60_CPU2MEM_1_SIZE, - }, - { - .lo = MV64x60_CPU2MEM_2_BASE, - .size = MV64x60_CPU2MEM_2_SIZE, - }, - { - .lo = MV64x60_CPU2MEM_3_BASE, - .size = MV64x60_CPU2MEM_3_SIZE, - }, -}; - -static struct mv64x60_mem_win mv64x60_enet2mem[MV64x60_CPU2MEM_WINDOWS] = { - { - .lo = MV64x60_ENET2MEM_0_BASE, - .size = MV64x60_ENET2MEM_0_SIZE, - }, - { - .lo = MV64x60_ENET2MEM_1_BASE, - .size = MV64x60_ENET2MEM_1_SIZE, - }, - { - .lo = MV64x60_ENET2MEM_2_BASE, - .size = MV64x60_ENET2MEM_2_SIZE, - }, - { - .lo = MV64x60_ENET2MEM_3_BASE, - .size = MV64x60_ENET2MEM_3_SIZE, - }, -}; - -static struct mv64x60_mem_win mv64x60_mpsc2mem[MV64x60_CPU2MEM_WINDOWS] = { - { - .lo = MV64x60_MPSC2MEM_0_BASE, - .size = MV64x60_MPSC2MEM_0_SIZE, - }, - { - .lo = MV64x60_MPSC2MEM_1_BASE, - .size = MV64x60_MPSC2MEM_1_SIZE, - }, - { - .lo = MV64x60_MPSC2MEM_2_BASE, - .size = MV64x60_MPSC2MEM_2_SIZE, - }, - { - .lo = MV64x60_MPSC2MEM_3_BASE, - .size = MV64x60_MPSC2MEM_3_SIZE, - }, -}; - -static struct mv64x60_mem_win mv64x60_idma2mem[MV64x60_CPU2MEM_WINDOWS] = { - { - .lo = MV64x60_IDMA2MEM_0_BASE, - .size = MV64x60_IDMA2MEM_0_SIZE, - }, - { - .lo = MV64x60_IDMA2MEM_1_BASE, - .size = MV64x60_IDMA2MEM_1_SIZE, - }, - { - .lo = MV64x60_IDMA2MEM_2_BASE, - .size = MV64x60_IDMA2MEM_2_SIZE, - }, - { - .lo = MV64x60_IDMA2MEM_3_BASE, - .size = MV64x60_IDMA2MEM_3_SIZE, - }, -}; - -static u32 mv64x60_dram_selects[MV64x60_CPU2MEM_WINDOWS] = {0xe,0xd,0xb,0x7}; - -/* - * ENET, MPSC, and IDMA ctlrs on the MV64x60 have separate windows that - * must be set up so that the respective ctlr can access system memory. - * Configure them to be same as cpu->memory windows. 
- */ -void mv64x60_config_ctlr_windows(u8 *bridge_base, u8 *bridge_pbase, - u8 is_coherent) -{ - u32 i, base, size, enables, prot = 0, snoop_bits = 0; - - /* Disable ctlr->mem windows */ - out_le32((u32 *)(bridge_base + MV64x60_ENET2MEM_BAR_ENABLE), 0x3f); - out_le32((u32 *)(bridge_base + MV64x60_MPSC2MEM_BAR_ENABLE), 0xf); - out_le32((u32 *)(bridge_base + MV64x60_IDMA2MEM_BAR_ENABLE), 0xff); - - if (is_coherent) - snoop_bits = 0x2 << 12; /* Writeback */ - - enables = in_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE)) & 0xf; - - for (i=0; i<MV64x60_CPU2MEM_WINDOWS; i++) { - if (enables & (1 << i)) /* Set means disabled */ - continue; - - base = in_le32((u32 *)(bridge_base + mv64x60_cpu2mem[i].lo)) - << 16; - base |= snoop_bits | (mv64x60_dram_selects[i] << 8); - size = in_le32((u32 *)(bridge_base + mv64x60_cpu2mem[i].size)) - << 16; - prot |= (0x3 << (i << 1)); /* RW access */ - - out_le32((u32 *)(bridge_base + mv64x60_enet2mem[i].lo), base); - out_le32((u32 *)(bridge_base + mv64x60_enet2mem[i].size), size); - out_le32((u32 *)(bridge_base + mv64x60_mpsc2mem[i].lo), base); - out_le32((u32 *)(bridge_base + mv64x60_mpsc2mem[i].size), size); - out_le32((u32 *)(bridge_base + mv64x60_idma2mem[i].lo), base); - out_le32((u32 *)(bridge_base + mv64x60_idma2mem[i].size), size); - } - - out_le32((u32 *)(bridge_base + MV64x60_ENET2MEM_ACC_PROT_0), prot); - out_le32((u32 *)(bridge_base + MV64x60_ENET2MEM_ACC_PROT_1), prot); - out_le32((u32 *)(bridge_base + MV64x60_ENET2MEM_ACC_PROT_2), prot); - out_le32((u32 *)(bridge_base + MV64x60_MPSC2MEM_ACC_PROT_0), prot); - out_le32((u32 *)(bridge_base + MV64x60_MPSC2MEM_ACC_PROT_1), prot); - out_le32((u32 *)(bridge_base + MV64x60_IDMA2MEM_ACC_PROT_0), prot); - out_le32((u32 *)(bridge_base + MV64x60_IDMA2MEM_ACC_PROT_1), prot); - out_le32((u32 *)(bridge_base + MV64x60_IDMA2MEM_ACC_PROT_2), prot); - out_le32((u32 *)(bridge_base + MV64x60_IDMA2MEM_ACC_PROT_3), prot); - - /* Set mpsc->bridge's reg window to the bridge's internal registers. */ - out_le32((u32 *)(bridge_base + MV64x60_MPSC2REGS_BASE), - (u32)bridge_pbase); - - out_le32((u32 *)(bridge_base + MV64x60_ENET2MEM_BAR_ENABLE), enables); - out_le32((u32 *)(bridge_base + MV64x60_MPSC2MEM_BAR_ENABLE), enables); - out_le32((u32 *)(bridge_base + MV64x60_IDMA2MEM_BAR_ENABLE), enables); -} - -/* PCI MEM -> system memory, et. al.
setup */ -static struct mv64x60_pci_win mv64x60_pci2mem[2] = { - { /* hose 0 */ - .fcn = 0, - .hi = 0x14, - .lo = 0x10, - .size = MV64x60_PCI02MEM_0_SIZE, - }, - { /* hose 1 */ - .fcn = 0, - .hi = 0x94, - .lo = 0x90, - .size = MV64x60_PCI12MEM_0_SIZE, - }, -}; - -static struct -mv64x60_mem_win mv64x60_pci_acc[2][MV64x60_PCI_ACC_CNTL_WINDOWS] = { - { /* hose 0 */ - { - .hi = MV64x60_PCI0_ACC_CNTL_0_BASE_HI, - .lo = MV64x60_PCI0_ACC_CNTL_0_BASE_LO, - .size = MV64x60_PCI0_ACC_CNTL_0_SIZE, - }, - { - .hi = MV64x60_PCI0_ACC_CNTL_1_BASE_HI, - .lo = MV64x60_PCI0_ACC_CNTL_1_BASE_LO, - .size = MV64x60_PCI0_ACC_CNTL_1_SIZE, - }, - { - .hi = MV64x60_PCI0_ACC_CNTL_2_BASE_HI, - .lo = MV64x60_PCI0_ACC_CNTL_2_BASE_LO, - .size = MV64x60_PCI0_ACC_CNTL_2_SIZE, - }, - { - .hi = MV64x60_PCI0_ACC_CNTL_3_BASE_HI, - .lo = MV64x60_PCI0_ACC_CNTL_3_BASE_LO, - .size = MV64x60_PCI0_ACC_CNTL_3_SIZE, - }, - }, - { /* hose 1 */ - { - .hi = MV64x60_PCI1_ACC_CNTL_0_BASE_HI, - .lo = MV64x60_PCI1_ACC_CNTL_0_BASE_LO, - .size = MV64x60_PCI1_ACC_CNTL_0_SIZE, - }, - { - .hi = MV64x60_PCI1_ACC_CNTL_1_BASE_HI, - .lo = MV64x60_PCI1_ACC_CNTL_1_BASE_LO, - .size = MV64x60_PCI1_ACC_CNTL_1_SIZE, - }, - { - .hi = MV64x60_PCI1_ACC_CNTL_2_BASE_HI, - .lo = MV64x60_PCI1_ACC_CNTL_2_BASE_LO, - .size = MV64x60_PCI1_ACC_CNTL_2_SIZE, - }, - { - .hi = MV64x60_PCI1_ACC_CNTL_3_BASE_HI, - .lo = MV64x60_PCI1_ACC_CNTL_3_BASE_LO, - .size = MV64x60_PCI1_ACC_CNTL_3_SIZE, - }, - }, -}; - -static struct mv64x60_mem_win mv64x60_pci2reg[2] = { - { - .hi = 0x24, - .lo = 0x20, - .size = 0, - }, - { - .hi = 0xa4, - .lo = 0xa0, - .size = 0, - }, -}; - -/* Only need to use 1 window (per hose) to get access to all of system memory */ -void mv64x60_config_pci_windows(u8 *bridge_base, u8 *bridge_pbase, u8 hose, - u8 bus, u32 mem_size, u32 acc_bits) -{ - u32 i, offset, bar_enable, enables; - - /* Disable all windows but PCI MEM -> Bridge's regs window */ - enables = ~(1 << 9); - bar_enable = hose ? MV64x60_PCI1_BAR_ENABLE : MV64x60_PCI0_BAR_ENABLE; - out_le32((u32 *)(bridge_base + bar_enable), enables); - - for (i=0; i<MV64x60_PCI_ACC_CNTL_WINDOWS; i++) - out_le32((u32 *)(bridge_base + mv64x60_pci_acc[hose][i].lo), 0); - - /* If mem_size is 0, leave windows disabled */ - if (mem_size == 0) - return; - - /* Cause automatic updates of PCI remap regs */ - offset = hose ? 
- MV64x60_PCI1_PCI_DECODE_CNTL : MV64x60_PCI0_PCI_DECODE_CNTL; - i = in_le32((u32 *)(bridge_base + offset)); - out_le32((u32 *)(bridge_base + offset), i & ~0x1); - - mem_size = (mem_size - 1) & 0xfffff000; - - /* Map PCI MEM addr 0 -> System Mem addr 0 */ - mv64x60_cfg_write(bridge_base, hose, bus, - PCI_DEVFN(0, mv64x60_pci2mem[hose].fcn), - mv64x60_pci2mem[hose].hi, 0); - mv64x60_cfg_write(bridge_base, hose, bus, - PCI_DEVFN(0, mv64x60_pci2mem[hose].fcn), - mv64x60_pci2mem[hose].lo, 0); - out_le32((u32 *)(bridge_base + mv64x60_pci2mem[hose].size),mem_size); - - acc_bits |= MV64x60_PCI_ACC_CNTL_ENABLE; - out_le32((u32 *)(bridge_base + mv64x60_pci_acc[hose][0].hi), 0); - out_le32((u32 *)(bridge_base + mv64x60_pci_acc[hose][0].lo), acc_bits); - out_le32((u32 *)(bridge_base + mv64x60_pci_acc[hose][0].size),mem_size); - - /* Set PCI MEM->bridge's reg window to where they are in CPU mem map */ - i = (u32)bridge_base; - i &= 0xffff0000; - i |= (0x2 << 1); - mv64x60_cfg_write(bridge_base, hose, bus, PCI_DEVFN(0,0), - mv64x60_pci2reg[hose].hi, 0); - mv64x60_cfg_write(bridge_base, hose, bus, PCI_DEVFN(0,0), - mv64x60_pci2reg[hose].lo, i); - - enables &= ~0x1; /* Enable PCI MEM -> System Mem window 0 */ - out_le32((u32 *)(bridge_base + bar_enable), enables); -} - -/* CPU -> PCI I/O & MEM setup */ -struct mv64x60_cpu2pci_win mv64x60_cpu2pci_io[2] = { - { /* hose 0 */ - .lo = MV64x60_CPU2PCI0_IO_BASE, - .size = MV64x60_CPU2PCI0_IO_SIZE, - .remap_hi = 0, - .remap_lo = MV64x60_CPU2PCI0_IO_REMAP, - }, - { /* hose 1 */ - .lo = MV64x60_CPU2PCI1_IO_BASE, - .size = MV64x60_CPU2PCI1_IO_SIZE, - .remap_hi = 0, - .remap_lo = MV64x60_CPU2PCI1_IO_REMAP, - }, -}; - -struct mv64x60_cpu2pci_win mv64x60_cpu2pci_mem[2] = { - { /* hose 0 */ - .lo = MV64x60_CPU2PCI0_MEM_0_BASE, - .size = MV64x60_CPU2PCI0_MEM_0_SIZE, - .remap_hi = MV64x60_CPU2PCI0_MEM_0_REMAP_HI, - .remap_lo = MV64x60_CPU2PCI0_MEM_0_REMAP_LO, - }, - { /* hose 1 */ - .lo = MV64x60_CPU2PCI1_MEM_0_BASE, - .size = MV64x60_CPU2PCI1_MEM_0_SIZE, - .remap_hi = MV64x60_CPU2PCI1_MEM_0_REMAP_HI, - .remap_lo = MV64x60_CPU2PCI1_MEM_0_REMAP_LO, - }, -}; - -/* Only need to set up 1 window to pci mem space */ -void mv64x60_config_cpu2pci_window(u8 *bridge_base, u8 hose, u32 pci_base_hi, - u32 pci_base_lo, u32 cpu_base, u32 size, - struct mv64x60_cpu2pci_win *offset_tbl) -{ - cpu_base >>= 16; - cpu_base |= MV64x60_CPU2PCI_SWAP_NONE; - out_le32((u32 *)(bridge_base + offset_tbl[hose].lo), cpu_base); - - if (offset_tbl[hose].remap_hi != 0) - out_le32((u32 *)(bridge_base + offset_tbl[hose].remap_hi), - pci_base_hi); - out_le32((u32 *)(bridge_base + offset_tbl[hose].remap_lo), - pci_base_lo >> 16); - - size = (size - 1) >> 16; - out_le32((u32 *)(bridge_base + offset_tbl[hose].size), size); -} - -/* Read mem ctlr to get the amount of mem in system */ -u32 mv64x60_get_mem_size(u8 *bridge_base) -{ - u32 enables, i, v; - u32 mem = 0; - - enables = in_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE)) & 0xf; - - for (i=0; i<MV64x60_CPU2MEM_WINDOWS; i++) - if (!(enables & (1<<i))) { - v = in_le32((u32*)(bridge_base - + mv64x60_cpu2mem[i].size)); - v = ((v & 0xffff) + 1) << 16; - mem += v; - } - - return mem; -} - -/* Get physical address of bridge's registers */ -u8 *mv64x60_get_bridge_pbase(void) -{ - u32 v[2]; - void *devp; - - devp = find_node_by_compatible(NULL, "marvell,mv64360"); - if (devp == NULL) - goto err_out; - if (getprop(devp, "reg", v, sizeof(v)) != sizeof(v)) - goto err_out; - - return (u8 *)v[0]; - -err_out: - return 0; -} - -/* Get virtual address of bridge's 
registers */ -u8 *mv64x60_get_bridge_base(void) -{ - u32 v; - void *devp; - - devp = find_node_by_compatible(NULL, "marvell,mv64360"); - if (devp == NULL) - goto err_out; - if (getprop(devp, "virtual-reg", &v, sizeof(v)) != sizeof(v)) - goto err_out; - - return (u8 *)v; - -err_out: - return 0; -} - -u8 mv64x60_is_coherent(void) -{ - u32 v; - void *devp; - - devp = finddevice("/"); - if (devp == NULL) - return 1; /* Assume coherency on */ - - if (getprop(devp, "coherency-off", &v, sizeof(v)) < 0) - return 1; /* Coherency on */ - else - return 0; -} diff --git a/arch/powerpc/boot/mv64x60.h b/arch/powerpc/boot/mv64x60.h deleted file mode 100644 index b827105e6e54..000000000000 --- a/arch/powerpc/boot/mv64x60.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Author: Mark A. Greer <source@mvista.com> - * - * 2007 (c) MontaVista Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. - */ - -#ifndef _PPC_BOOT_MV64x60_H_ -#define _PPC_BOOT_MV64x60_H_ - -#define MV64x60_CPU_BAR_ENABLE 0x0278 - -#define MV64x60_PCI_ACC_CNTL_ENABLE (1<<0) -#define MV64x60_PCI_ACC_CNTL_REQ64 (1<<1) -#define MV64x60_PCI_ACC_CNTL_SNOOP_NONE 0x00000000 -#define MV64x60_PCI_ACC_CNTL_SNOOP_WT 0x00000004 -#define MV64x60_PCI_ACC_CNTL_SNOOP_WB 0x00000008 -#define MV64x60_PCI_ACC_CNTL_SNOOP_MASK 0x0000000c -#define MV64x60_PCI_ACC_CNTL_ACCPROT (1<<4) -#define MV64x60_PCI_ACC_CNTL_WRPROT (1<<5) -#define MV64x60_PCI_ACC_CNTL_SWAP_BYTE 0x00000000 -#define MV64x60_PCI_ACC_CNTL_SWAP_NONE 0x00000040 -#define MV64x60_PCI_ACC_CNTL_SWAP_BYTE_WORD 0x00000080 -#define MV64x60_PCI_ACC_CNTL_SWAP_WORD 0x000000c0 -#define MV64x60_PCI_ACC_CNTL_SWAP_MASK 0x000000c0 -#define MV64x60_PCI_ACC_CNTL_MBURST_32_BYTES 0x00000000 -#define MV64x60_PCI_ACC_CNTL_MBURST_64_BYTES 0x00000100 -#define MV64x60_PCI_ACC_CNTL_MBURST_128_BYTES 0x00000200 -#define MV64x60_PCI_ACC_CNTL_MBURST_MASK 0x00000300 -#define MV64x60_PCI_ACC_CNTL_RDSIZE_32_BYTES 0x00000000 -#define MV64x60_PCI_ACC_CNTL_RDSIZE_64_BYTES 0x00000400 -#define MV64x60_PCI_ACC_CNTL_RDSIZE_128_BYTES 0x00000800 -#define MV64x60_PCI_ACC_CNTL_RDSIZE_256_BYTES 0x00000c00 -#define MV64x60_PCI_ACC_CNTL_RDSIZE_MASK 0x00000c00 - -struct mv64x60_cpu2pci_win { - u32 lo; - u32 size; - u32 remap_hi; - u32 remap_lo; -}; - -extern struct mv64x60_cpu2pci_win mv64x60_cpu2pci_io[2]; -extern struct mv64x60_cpu2pci_win mv64x60_cpu2pci_mem[2]; - -u32 mv64x60_cfg_read(u8 *bridge_base, u8 hose, u8 bus, u8 devfn, - u8 offset); -void mv64x60_cfg_write(u8 *bridge_base, u8 hose, u8 bus, u8 devfn, - u8 offset, u32 val); - -void mv64x60_config_ctlr_windows(u8 *bridge_base, u8 *bridge_pbase, - u8 is_coherent); -void mv64x60_config_pci_windows(u8 *bridge_base, u8 *bridge_pbase, u8 hose, - u8 bus, u32 mem_size, u32 acc_bits); -void mv64x60_config_cpu2pci_window(u8 *bridge_base, u8 hose, u32 pci_base_hi, - u32 pci_base_lo, u32 cpu_base, u32 size, - struct mv64x60_cpu2pci_win *offset_tbl); -u32 mv64x60_get_mem_size(u8 *bridge_base); -u8 *mv64x60_get_bridge_pbase(void); -u8 *mv64x60_get_bridge_base(void); -u8 mv64x60_is_coherent(void); - -int mv64x60_i2c_open(void); -int mv64x60_i2c_read(u32 devaddr, u8 *buf, u32 offset, u32 offset_size, - u32 count); -void mv64x60_i2c_close(void); - -#endif /* _PPC_BOOT_MV64x60_H_ */ diff --git a/arch/powerpc/boot/mv64x60_i2c.c b/arch/powerpc/boot/mv64x60_i2c.c deleted file mode 100644 index 52a3212b6638..000000000000 --- a/arch/powerpc/boot/mv64x60_i2c.c +++ 
/dev/null @@ -1,204 +0,0 @@ -/* - * Bootloader version of the i2c driver for the MV64x60. - * - * Author: Dale Farnsworth <dfarnsworth@mvista.com> - * Maintained by: Mark A. Greer <mgreer@mvista.com> - * - * 2003, 2007 (c) MontaVista, Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program is - * licensed "as is" without any warranty of any kind, whether express or - * implied. - */ - -#include <stdarg.h> -#include <stddef.h> -#include "types.h" -#include "elf.h" -#include "page.h" -#include "string.h" -#include "stdio.h" -#include "io.h" -#include "ops.h" -#include "mv64x60.h" - -/* Register defines */ -#define MV64x60_I2C_REG_SLAVE_ADDR 0x00 -#define MV64x60_I2C_REG_DATA 0x04 -#define MV64x60_I2C_REG_CONTROL 0x08 -#define MV64x60_I2C_REG_STATUS 0x0c -#define MV64x60_I2C_REG_BAUD 0x0c -#define MV64x60_I2C_REG_EXT_SLAVE_ADDR 0x10 -#define MV64x60_I2C_REG_SOFT_RESET 0x1c - -#define MV64x60_I2C_CONTROL_ACK 0x04 -#define MV64x60_I2C_CONTROL_IFLG 0x08 -#define MV64x60_I2C_CONTROL_STOP 0x10 -#define MV64x60_I2C_CONTROL_START 0x20 -#define MV64x60_I2C_CONTROL_TWSIEN 0x40 -#define MV64x60_I2C_CONTROL_INTEN 0x80 - -#define MV64x60_I2C_STATUS_BUS_ERR 0x00 -#define MV64x60_I2C_STATUS_MAST_START 0x08 -#define MV64x60_I2C_STATUS_MAST_REPEAT_START 0x10 -#define MV64x60_I2C_STATUS_MAST_WR_ADDR_ACK 0x18 -#define MV64x60_I2C_STATUS_MAST_WR_ADDR_NO_ACK 0x20 -#define MV64x60_I2C_STATUS_MAST_WR_ACK 0x28 -#define MV64x60_I2C_STATUS_MAST_WR_NO_ACK 0x30 -#define MV64x60_I2C_STATUS_MAST_LOST_ARB 0x38 -#define MV64x60_I2C_STATUS_MAST_RD_ADDR_ACK 0x40 -#define MV64x60_I2C_STATUS_MAST_RD_ADDR_NO_ACK 0x48 -#define MV64x60_I2C_STATUS_MAST_RD_DATA_ACK 0x50 -#define MV64x60_I2C_STATUS_MAST_RD_DATA_NO_ACK 0x58 -#define MV64x60_I2C_STATUS_MAST_WR_ADDR_2_ACK 0xd0 -#define MV64x60_I2C_STATUS_MAST_WR_ADDR_2_NO_ACK 0xd8 -#define MV64x60_I2C_STATUS_MAST_RD_ADDR_2_ACK 0xe0 -#define MV64x60_I2C_STATUS_MAST_RD_ADDR_2_NO_ACK 0xe8 -#define MV64x60_I2C_STATUS_NO_STATUS 0xf8 - -static u8 *ctlr_base; - -static int mv64x60_i2c_wait_for_status(int wanted) -{ - int i; - int status; - - for (i=0; i<1000; i++) { - udelay(10); - status = in_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_STATUS)) - & 0xff; - if (status == wanted) - return status; - } - return -status; -} - -static int mv64x60_i2c_control(int control, int status) -{ - out_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_CONTROL), control & 0xff); - return mv64x60_i2c_wait_for_status(status); -} - -static int mv64x60_i2c_read_byte(int control, int status) -{ - out_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_CONTROL), control & 0xff); - if (mv64x60_i2c_wait_for_status(status) < 0) - return -1; - return in_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_DATA)) & 0xff; -} - -static int mv64x60_i2c_write_byte(int data, int control, int status) -{ - out_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_DATA), data & 0xff); - out_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_CONTROL), control & 0xff); - return mv64x60_i2c_wait_for_status(status); -} - -int mv64x60_i2c_read(u32 devaddr, u8 *buf, u32 offset, u32 offset_size, - u32 count) -{ - int i; - int data; - int control; - int status; - - if (ctlr_base == NULL) - return -1; - - /* send reset */ - out_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_SOFT_RESET), 0); - out_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_SLAVE_ADDR), 0); - out_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_EXT_SLAVE_ADDR), 0); - out_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_BAUD), (4 << 3) | 0x4); - - if 
(mv64x60_i2c_control(MV64x60_I2C_CONTROL_TWSIEN, - MV64x60_I2C_STATUS_NO_STATUS) < 0) - return -1; - - /* send start */ - control = MV64x60_I2C_CONTROL_START | MV64x60_I2C_CONTROL_TWSIEN; - status = MV64x60_I2C_STATUS_MAST_START; - if (mv64x60_i2c_control(control, status) < 0) - return -1; - - /* select device for writing */ - data = devaddr & ~0x1; - control = MV64x60_I2C_CONTROL_TWSIEN; - status = MV64x60_I2C_STATUS_MAST_WR_ADDR_ACK; - if (mv64x60_i2c_write_byte(data, control, status) < 0) - return -1; - - /* send offset of data */ - control = MV64x60_I2C_CONTROL_TWSIEN; - status = MV64x60_I2C_STATUS_MAST_WR_ACK; - if (offset_size > 1) { - if (mv64x60_i2c_write_byte(offset >> 8, control, status) < 0) - return -1; - } - if (mv64x60_i2c_write_byte(offset, control, status) < 0) - return -1; - - /* resend start */ - control = MV64x60_I2C_CONTROL_START | MV64x60_I2C_CONTROL_TWSIEN; - status = MV64x60_I2C_STATUS_MAST_REPEAT_START; - if (mv64x60_i2c_control(control, status) < 0) - return -1; - - /* select device for reading */ - data = devaddr | 0x1; - control = MV64x60_I2C_CONTROL_TWSIEN; - status = MV64x60_I2C_STATUS_MAST_RD_ADDR_ACK; - if (mv64x60_i2c_write_byte(data, control, status) < 0) - return -1; - - /* read all but last byte of data */ - control = MV64x60_I2C_CONTROL_ACK | MV64x60_I2C_CONTROL_TWSIEN; - status = MV64x60_I2C_STATUS_MAST_RD_DATA_ACK; - - for (i=1; i<count; i++) { - data = mv64x60_i2c_read_byte(control, status); - if (data < 0) { - printf("errors on iteration %d\n", i); - return -1; - } - *buf++ = data; - } - - /* read last byte of data */ - control = MV64x60_I2C_CONTROL_TWSIEN; - status = MV64x60_I2C_STATUS_MAST_RD_DATA_NO_ACK; - data = mv64x60_i2c_read_byte(control, status); - if (data < 0) - return -1; - *buf++ = data; - - /* send stop */ - control = MV64x60_I2C_CONTROL_STOP | MV64x60_I2C_CONTROL_TWSIEN; - status = MV64x60_I2C_STATUS_NO_STATUS; - if (mv64x60_i2c_control(control, status) < 0) - return -1; - - return count; -} - -int mv64x60_i2c_open(void) -{ - u32 v; - void *devp; - - devp = find_node_by_compatible(NULL, "marvell,mv64360-i2c"); - if (devp == NULL) - goto err_out; - if (getprop(devp, "virtual-reg", &v, sizeof(v)) != sizeof(v)) - goto err_out; - - ctlr_base = (u8 *)v; - return 0; - -err_out: - return -1; -} - -void mv64x60_i2c_close(void) -{ - ctlr_base = NULL; -} diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h index fad1862f4b2d..cd043726ed88 100644 --- a/arch/powerpc/boot/ops.h +++ b/arch/powerpc/boot/ops.h @@ -86,7 +86,6 @@ void start(void); void fdt_init(void *blob); int serial_console_init(void); int ns16550_console_init(void *devp, struct serial_console_data *scdp); -int mpsc_console_init(void *devp, struct serial_console_data *scdp); int cpm_console_init(void *devp, struct serial_console_data *scdp); int mpc5200_psc_console_init(void *devp, struct serial_console_data *scdp); int uartlite_console_init(void *devp, struct serial_console_data *scdp); diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c index 88955095ec07..48e3743faedf 100644 --- a/arch/powerpc/boot/serial.c +++ b/arch/powerpc/boot/serial.c @@ -120,10 +120,6 @@ int serial_console_init(void) if (dt_is_compatible(devp, "ns16550") || dt_is_compatible(devp, "pnpPNP,501")) rc = ns16550_console_init(devp, &serial_cd); -#ifdef CONFIG_EMBEDDED6xx - else if (dt_is_compatible(devp, "marvell,mv64360-mpsc")) - rc = mpsc_console_init(devp, &serial_cd); -#endif #ifdef CONFIG_CPM else if (dt_is_compatible(devp, "fsl,cpm1-scc-uart") || dt_is_compatible(devp, 
"fsl,cpm1-smc-uart") || diff --git a/arch/powerpc/configs/83xx/sbc834x_defconfig b/arch/powerpc/configs/83xx/sbc834x_defconfig deleted file mode 100644 index 7d74699334da..000000000000 --- a/arch/powerpc/configs/83xx/sbc834x_defconfig +++ /dev/null @@ -1,74 +0,0 @@ -CONFIG_SYSVIPC=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -# CONFIG_KALLSYMS is not set -CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -# CONFIG_PPC_CHRP is not set -# CONFIG_PPC_PMAC is not set -CONFIG_PPC_83xx=y -CONFIG_SBC834x=y -CONFIG_GEN_RTC=y -CONFIG_PCI=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=m -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_SYN_COOKIES=y -# CONFIG_IPV6 is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -# CONFIG_FW_LOADER is not set -CONFIG_MTD=y -CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_CFI=y -CONFIG_MTD_CFI_INTELEXT=y -CONFIG_MTD_PHYSMAP_OF=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=32768 -CONFIG_SCSI=y -# CONFIG_SCSI_PROC_FS is not set -CONFIG_BLK_DEV_SD=y -# CONFIG_SCSI_LOWLEVEL is not set -CONFIG_NETDEVICES=y -CONFIG_GIANFAR=y -CONFIG_BROADCOM_PHY=y -# CONFIG_INPUT_KEYBOARD is not set -# CONFIG_INPUT_MOUSE is not set -# CONFIG_SERIO is not set -# CONFIG_VT is not set -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -# CONFIG_SERIAL_8250_PCI is not set -CONFIG_SERIAL_8250_NR_UARTS=2 -CONFIG_SERIAL_8250_RUNTIME_UARTS=2 -# CONFIG_HW_RANDOM is not set -CONFIG_I2C=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MPC=y -CONFIG_WATCHDOG=y -# CONFIG_USB_HID is not set -CONFIG_USB=y -CONFIG_USB_MON=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_FSL=y -CONFIG_USB_STORAGE=y -CONFIG_EXT2_FS=y -CONFIG_EXT4_FS=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_NFS_FS=y -CONFIG_NFS_V4=y -CONFIG_ROOT_NFS=y -CONFIG_CRYPTO_ECB=m -CONFIG_CRYPTO_PCBC=m -# CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig deleted file mode 100644 index 6c1196b0f81e..000000000000 --- a/arch/powerpc/configs/c2k_defconfig +++ /dev/null @@ -1,389 +0,0 @@ -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_AUDIT=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_PROFILING=y -CONFIG_OPROFILE=m -CONFIG_KPROBES=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_OSF_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_SGI_PARTITION=y -CONFIG_SUN_PARTITION=y -# CONFIG_PPC_CHRP is not set -# CONFIG_PPC_PMAC is not set -CONFIG_EMBEDDED6xx=y -CONFIG_PPC_C2K=y -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=m -CONFIG_CPU_FREQ_GOV_ONDEMAND=m -CONFIG_GEN_RTC=y -CONFIG_HIGHMEM=y -CONFIG_PREEMPT_VOLUNTARY=y -CONFIG_BINFMT_MISC=y -CONFIG_PM=y -CONFIG_PCI_MSI=y -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_SHPC=m -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=y -CONFIG_NET_KEY=m -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_NET_IPIP=m -CONFIG_IP_MROUTE=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_IPCOMP=m -CONFIG_INET6_AH=m 
-CONFIG_INET6_ESP=m -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_TUNNEL=m -CONFIG_NETFILTER=y -# CONFIG_NETFILTER_XT_MATCH_SCTP is not set -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_IP6_NF_IPTABLES=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_RT=m -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -CONFIG_BRIDGE_NF_EBTABLES=m -CONFIG_BRIDGE_EBT_BROUTE=m -CONFIG_BRIDGE_EBT_T_FILTER=m -CONFIG_BRIDGE_EBT_T_NAT=m -CONFIG_BRIDGE_EBT_802_3=m -CONFIG_BRIDGE_EBT_AMONG=m -CONFIG_BRIDGE_EBT_ARP=m -CONFIG_BRIDGE_EBT_IP=m -CONFIG_BRIDGE_EBT_LIMIT=m -CONFIG_BRIDGE_EBT_MARK=m -CONFIG_BRIDGE_EBT_PKTTYPE=m -CONFIG_BRIDGE_EBT_STP=m -CONFIG_BRIDGE_EBT_VLAN=m -CONFIG_BRIDGE_EBT_ARPREPLY=m -CONFIG_BRIDGE_EBT_DNAT=m -CONFIG_BRIDGE_EBT_MARK_T=m -CONFIG_BRIDGE_EBT_REDIRECT=m -CONFIG_BRIDGE_EBT_SNAT=m -CONFIG_BRIDGE_EBT_LOG=m -CONFIG_IP_SCTP=m -CONFIG_ATM=m -CONFIG_ATM_CLIP=m -CONFIG_ATM_LANE=m -CONFIG_ATM_BR2684=m -CONFIG_BRIDGE=m -CONFIG_VLAN_8021Q=m -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_CBQ=m -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_ATM=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_CLS_TCINDEX=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -CONFIG_CLS_U32_PERF=y -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m -CONFIG_NET_CLS_IND=y -CONFIG_BT=m -CONFIG_BT_RFCOMM=m -CONFIG_BT_RFCOMM_TTY=y -CONFIG_BT_BNEP=m -CONFIG_BT_BNEP_MC_FILTER=y -CONFIG_BT_BNEP_PROTO_FILTER=y -CONFIG_BT_HIDP=m -CONFIG_BT_HCIUART=m -CONFIG_BT_HCIUART_H4=y -CONFIG_BT_HCIUART_BCSP=y -CONFIG_BT_HCIBCM203X=m -CONFIG_BT_HCIBFUSB=m -CONFIG_BT_HCIVHCI=m -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_MTD=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_CFI=y -CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_COMPLEX_MAPPINGS=y -CONFIG_MTD_PHYSMAP_OF=y -CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=16384 -CONFIG_SCSI=m -CONFIG_BLK_DEV_SD=m -CONFIG_CHR_DEV_ST=m -CONFIG_CHR_DEV_OSST=m -CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y -CONFIG_CHR_DEV_SG=m -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_ISCSI_ATTRS=m -CONFIG_BLK_DEV_3W_XXXX_RAID=m -CONFIG_SCSI_3W_9XXX=m -CONFIG_SCSI_ACARD=m -CONFIG_SCSI_AACRAID=m -CONFIG_SCSI_AIC7XXX=m -CONFIG_AIC7XXX_CMDS_PER_DEVICE=4 -CONFIG_AIC7XXX_RESET_DELAY_MS=15000 -# CONFIG_AIC7XXX_DEBUG_ENABLE is not set -# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set -CONFIG_SCSI_AIC79XX=m -CONFIG_AIC79XX_CMDS_PER_DEVICE=4 -CONFIG_AIC79XX_RESET_DELAY_MS=15000 -# CONFIG_AIC79XX_DEBUG_ENABLE is not set -# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set -CONFIG_SCSI_ARCMSR=m -CONFIG_MEGARAID_NEWGEN=y -CONFIG_MEGARAID_MM=m -CONFIG_MEGARAID_MAILBOX=m -CONFIG_MEGARAID_SAS=m -CONFIG_SCSI_GDTH=m -CONFIG_SCSI_IPS=m -CONFIG_SCSI_INITIO=m -CONFIG_SCSI_SYM53C8XX_2=m -CONFIG_SCSI_QLOGIC_1280=m -CONFIG_NETDEVICES=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -CONFIG_NETCONSOLE=m -CONFIG_TUN=m -# CONFIG_ATM_DRIVERS is not set -CONFIG_MV643XX_ETH=y -CONFIG_VITESSE_PHY=y -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_KEYBOARD is not set -# 
CONFIG_INPUT_MOUSE is not set -CONFIG_INPUT_MISC=y -CONFIG_INPUT_UINPUT=m -# CONFIG_SERIO is not set -# CONFIG_LEGACY_PTYS is not set -CONFIG_SERIAL_NONSTANDARD=y -CONFIG_SERIAL_MPSC=y -CONFIG_SERIAL_MPSC_CONSOLE=y -CONFIG_NVRAM=m -CONFIG_RAW_DRIVER=y -CONFIG_MAX_RAW_DEVS=8192 -CONFIG_I2C=m -CONFIG_I2C_CHARDEV=m -CONFIG_I2C_MV64XXX=m -CONFIG_HWMON=m -CONFIG_SENSORS_ADM1021=m -CONFIG_SENSORS_ADM1025=m -CONFIG_SENSORS_ADM1026=m -CONFIG_SENSORS_ADM1031=m -CONFIG_SENSORS_DS1621=m -CONFIG_SENSORS_GL518SM=m -CONFIG_SENSORS_MAX1619=m -CONFIG_SENSORS_LM75=m -CONFIG_SENSORS_LM77=m -CONFIG_SENSORS_LM78=m -CONFIG_SENSORS_LM80=m -CONFIG_SENSORS_LM83=m -CONFIG_SENSORS_LM85=m -CONFIG_SENSORS_LM87=m -CONFIG_SENSORS_LM90=m -CONFIG_SENSORS_PCF8591=m -CONFIG_SENSORS_VIA686A=m -CONFIG_SENSORS_W83781D=m -CONFIG_SENSORS_W83L785TS=m -CONFIG_WATCHDOG=y -CONFIG_SOFT_WATCHDOG=m -CONFIG_PCIPCWATCHDOG=m -CONFIG_WDTPCI=m -CONFIG_USBPCWATCHDOG=m -# CONFIG_VGA_CONSOLE is not set -CONFIG_USB=m -CONFIG_USB_MON=m -CONFIG_USB_EHCI_HCD=m -CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_OHCI_HCD=m -CONFIG_USB_OHCI_HCD_PPC_OF_BE=y -CONFIG_USB_UHCI_HCD=m -CONFIG_USB_ACM=m -CONFIG_USB_PRINTER=m -CONFIG_USB_STORAGE=m -CONFIG_USB_STORAGE_DATAFAB=m -CONFIG_USB_STORAGE_FREECOM=m -CONFIG_USB_STORAGE_ISD200=m -CONFIG_USB_STORAGE_SDDR09=m -CONFIG_USB_STORAGE_SDDR55=m -CONFIG_USB_STORAGE_JUMPSHOT=m -CONFIG_USB_MDC800=m -CONFIG_USB_MICROTEK=m -CONFIG_USB_SERIAL=m -CONFIG_USB_SERIAL_GENERIC=y -CONFIG_USB_SERIAL_BELKIN=m -CONFIG_USB_SERIAL_WHITEHEAT=m -CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m -CONFIG_USB_SERIAL_EMPEG=m -CONFIG_USB_SERIAL_FTDI_SIO=m -CONFIG_USB_SERIAL_VISOR=m -CONFIG_USB_SERIAL_IPAQ=m -CONFIG_USB_SERIAL_IR=m -CONFIG_USB_SERIAL_EDGEPORT=m -CONFIG_USB_SERIAL_EDGEPORT_TI=m -CONFIG_USB_SERIAL_KEYSPAN_PDA=m -CONFIG_USB_SERIAL_KEYSPAN=m -CONFIG_USB_SERIAL_KLSI=m -CONFIG_USB_SERIAL_KOBIL_SCT=m -CONFIG_USB_SERIAL_MCT_U232=m -CONFIG_USB_SERIAL_PL2303=m -CONFIG_USB_SERIAL_SAFE=m -CONFIG_USB_SERIAL_SAFE_PADDED=y -CONFIG_USB_SERIAL_CYBERJACK=m -CONFIG_USB_SERIAL_XIRCOM=m -CONFIG_USB_SERIAL_OMNINET=m -CONFIG_USB_EMI62=m -CONFIG_USB_RIO500=m -CONFIG_USB_LEGOTOWER=m -CONFIG_USB_LCD=m -CONFIG_USB_LED=m -CONFIG_USB_TEST=m -CONFIG_USB_ATM=m -CONFIG_USB_SPEEDTOUCH=m -CONFIG_INFINIBAND=m -CONFIG_INFINIBAND_USER_MAD=m -CONFIG_INFINIBAND_USER_ACCESS=m -CONFIG_INFINIBAND_MTHCA=m -CONFIG_INFINIBAND_IPOIB=m -CONFIG_INFINIBAND_IPOIB_CM=y -CONFIG_INFINIBAND_SRP=m -CONFIG_DMADEVICES=y -CONFIG_EXT4_FS=m -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -CONFIG_QUOTA=y -CONFIG_QFMT_V2=y -CONFIG_AUTOFS4_FS=m -CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_FAT_DEFAULT_IOCHARSET="ascii" -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_HFS_FS=m -CONFIG_HFSPLUS_FS=m -CONFIG_JFFS2_FS=y -CONFIG_CRAMFS=m -CONFIG_VXFS_FS=m -CONFIG_NFS_FS=y -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y -CONFIG_ROOT_NFS=y -CONFIG_CIFS=m -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_POSIX=y -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="utf8" -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m 
-CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_CRC_CCITT=m -CONFIG_CRC_T10DIF=m -CONFIG_DEBUG_INFO=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_STACK_USAGE=y -CONFIG_DEBUG_HIGHMEM=y -CONFIG_DEBUG_STACKOVERFLOW=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEBUG_SPINLOCK=y -CONFIG_BOOTX_TEXT=y -CONFIG_PPC_EARLY_DEBUG=y -CONFIG_SECURITY=y -CONFIG_SECURITY_NETWORK=y -CONFIG_SECURITY_SELINUX=y -CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_DISABLE=y -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig index 9e92aa6a52ba..6ab34e60495f 100644 --- a/arch/powerpc/configs/powernv_defconfig +++ b/arch/powerpc/configs/powernv_defconfig @@ -38,7 +38,9 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_PARTITION_ADVANCED=y +CONFIG_SCOM_DEBUGFS=y CONFIG_OPAL_PRD=y +CONFIG_PPC_MEMTRACE=y # CONFIG_PPC_PSERIES is not set # CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y @@ -54,7 +56,10 @@ CONFIG_NUMA=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_DEFERRED_STRUCT_PAGE_INIT=y CONFIG_PPC_64K_PAGES=y CONFIG_PPC_SUBPAGE_PROT=y CONFIG_SCHED_SMT=y @@ -72,7 +77,13 @@ CONFIG_SYN_COOKIES=y CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m -# CONFIG_IPV6 is not set +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +CONFIG_IPV6_SIT=m CONFIG_NETFILTER=y # CONFIG_NETFILTER_ADVANCED is not set CONFIG_BRIDGE=m @@ -81,33 +92,28 @@ CONFIG_NET_SCHED=y CONFIG_NET_CLS_BPF=m CONFIG_NET_CLS_ACT=y CONFIG_NET_ACT_BPF=m +CONFIG_DNS_RESOLVER=y CONFIG_BPF_JIT=y +# CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_MTD=y CONFIG_MTD_BLOCK=y CONFIG_MTD_POWERNV_FLASH=y -CONFIG_PARPORT=m -CONFIG_PARPORT_PC=m -CONFIG_BLK_DEV_FD=m CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM=m CONFIG_BLK_DEV_RAM_SIZE=65536 -CONFIG_VIRTIO_BLK=m CONFIG_BLK_DEV_NVME=y -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_AMD74XX=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m -CONFIG_BLK_DEV_SR=y +CONFIG_BLK_DEV_SR=m CONFIG_BLK_DEV_SR_VENDOR=y -CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_FC_ATTRS=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_FC_ATTRS=m CONFIG_SCSI_SRP_ATTRS=y CONFIG_SCSI_CXGB3_ISCSI=m CONFIG_SCSI_CXGB4_ISCSI=m @@ -121,7 +127,6 @@ CONFIG_SCSI_IPR=y CONFIG_SCSI_QLA_FC=m CONFIG_SCSI_QLA_ISCSI=m CONFIG_SCSI_LPFC=m -CONFIG_SCSI_VIRTIO=m CONFIG_SCSI_DH=y CONFIG_SCSI_DH_RDAC=m CONFIG_SCSI_DH_ALUA=m @@ -152,16 +157,16 @@ CONFIG_DUMMY=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_VXLAN=m -CONFIG_NETCONSOLE=y +CONFIG_NETCONSOLE=m CONFIG_TUN=m CONFIG_VETH=m 
-CONFIG_VIRTIO_NET=m CONFIG_VORTEX=m CONFIG_ACENIC=m CONFIG_ACENIC_OMIT_TIGON_I=y CONFIG_PCNET32=m CONFIG_TIGON3=y CONFIG_BNX2X=m +# CONFIG_CAVIUM_PTP is not set CONFIG_CHELSIO_T1=m CONFIG_BE2NET=m CONFIG_S2IO=m @@ -172,46 +177,62 @@ CONFIG_IXGB=m CONFIG_IXGBE=m CONFIG_I40E=m CONFIG_MLX4_EN=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_CORE_IPOIB=y CONFIG_MYRI10GE=m CONFIG_QLGE=m CONFIG_NETXEN_NIC=m -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPPOE=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m +CONFIG_USB_NET_DRIVERS=m +# CONFIG_WLAN is not set CONFIG_INPUT_EVDEV=m -CONFIG_INPUT_MISC=y -# CONFIG_SERIO_SERPORT is not set +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO is not set +# CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_PCI=m CONFIG_SERIAL_JSM=m -CONFIG_VIRTIO_CONSOLE=m CONFIG_IPMI_HANDLER=y CONFIG_IPMI_DEVICE_INTERFACE=y CONFIG_IPMI_POWERNV=y -CONFIG_RAW_DRIVER=y -CONFIG_MAX_RAW_DEVS=1024 +# CONFIG_DEVPORT is not set CONFIG_I2C_CHARDEV=y +# CONFIG_PTP_1588_CLOCK is not set CONFIG_DRM=y CONFIG_DRM_AST=y CONFIG_FIRMWARE_EDID=y CONFIG_FB_OF=y -CONFIG_FB_MATROX=y +CONFIG_FB_MATROX=m CONFIG_FB_MATROX_MILLENIUM=y CONFIG_FB_MATROX_MYSTIQUE=y CONFIG_FB_MATROX_G=y -CONFIG_FB_RADEON=y -CONFIG_FB_IBM_GXT4500=y +CONFIG_FB_RADEON=m +CONFIG_FB_IBM_GXT4500=m CONFIG_LCD_PLATFORM=m +CONFIG_BACKLIGHT_GENERIC=m # CONFIG_VGA_CONSOLE is not set CONFIG_LOGO=y -CONFIG_HID_GYRATION=y -CONFIG_HID_PANTHERLORD=y -CONFIG_HID_PETALYNX=y -CONFIG_HID_SAMSUNG=y -CONFIG_HID_SUNPLUS=y +CONFIG_HID_A4TECH=m +CONFIG_HID_APPLE=m +CONFIG_HID_BELKIN=m +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CYPRESS=m +CONFIG_HID_EZKEY=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ITE=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LOGITECH=m +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_PANTHERLORD=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SUNPLUS=m +CONFIG_USB_HID=m CONFIG_USB_HIDDEV=y CONFIG_USB=y CONFIG_USB_MON=m @@ -219,6 +240,7 @@ CONFIG_USB_XHCI_HCD=y CONFIG_USB_EHCI_HCD=y # CONFIG_USB_EHCI_HCD_PPC_OF is not set CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=m CONFIG_USB_STORAGE=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=m @@ -236,8 +258,9 @@ CONFIG_INFINIBAND_SRP=m CONFIG_INFINIBAND_ISER=m CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=y -CONFIG_VIRTIO_PCI=m -CONFIG_VIRTIO_BALLOON=m +# CONFIG_VIRTIO_MENU is not set +CONFIG_LIBNVDIMM=y +# CONFIG_ND_BLK is not set CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -253,12 +276,13 @@ CONFIG_XFS_POSIX_ACL=y CONFIG_BTRFS_FS=m CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_NILFS2_FS=m +CONFIG_FANOTIFY=y CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m CONFIG_OVERLAY_FS=m CONFIG_ISO9660_FS=y CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=y +CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y CONFIG_TMPFS=y @@ -270,9 +294,9 @@ CONFIG_SQUASHFS_XATTR=y CONFIG_SQUASHFS_LZO=y CONFIG_SQUASHFS_XZ=y CONFIG_PSTORE=y -CONFIG_NFS_FS=y +CONFIG_NFS_FS=m CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFSD=m CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y @@ -291,9 +315,7 @@ CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_SOFTLOCKUP_DETECTOR=y CONFIG_HARDLOCKUP_DETECTOR=y CONFIG_LATENCYTOP=y -CONFIG_FTRACE=y CONFIG_FUNCTION_TRACER=y -CONFIG_FUNCTION_GRAPH_TRACER=y CONFIG_SCHED_TRACER=y CONFIG_FTRACE_SYSCALLS=y CONFIG_BLK_DEV_IO_TRACE=y @@ -303,10 +325,10 @@ CONFIG_FTR_FIXUP_SELFTEST=y CONFIG_MSI_BITMAP_SELFTEST=y CONFIG_XMON=y CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_CCM=m 
CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_CRC32C_VPMSUM=m +CONFIG_CRYPTO_CRCT10DIF_VPMSUM=m CONFIG_CRYPTO_MD5_PPC=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_SHA1_PPC=m diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig index 0b0f78823a1b..10940533da71 100644 --- a/arch/powerpc/configs/wii_defconfig +++ b/arch/powerpc/configs/wii_defconfig @@ -49,7 +49,9 @@ CONFIG_BLK_DEV_RAM_COUNT=2 CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_NETDEVICES=y +# CONFIG_ETHERNET is not set CONFIG_B43=y +CONFIG_B43_BUSES_SSB=y CONFIG_B43_SDIO=y # CONFIG_B43_PHY_LP is not set CONFIG_B43_DEBUG=y @@ -57,6 +59,7 @@ CONFIG_INPUT_FF_MEMLESS=m CONFIG_INPUT_JOYDEV=y CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_ATKBD is not set +CONFIG_KEYBOARD_GPIO=y # CONFIG_MOUSE_PS2 is not set CONFIG_INPUT_JOYSTICK=y CONFIG_INPUT_MISC=y @@ -71,6 +74,9 @@ CONFIG_I2C_CHARDEV=y CONFIG_I2C_GPIO=y CONFIG_GPIOLIB=y CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_HLWD=y +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_GPIO=y # CONFIG_HWMON is not set CONFIG_SSB_DEBUG=y CONFIG_FB=y @@ -88,6 +94,14 @@ CONFIG_HID_APPLE=m CONFIG_HID_WACOM=m CONFIG_MMC=y CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_OF_HLWD=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_PANIC=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=y CONFIG_EXT2_FS=y diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index d9713ad62e3c..aa9e785c59c2 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -36,8 +36,7 @@ void kexec_copy_flush(struct kimage *image); /* pseries hcall tracing */ extern struct static_key hcall_tracepoint_key; void __trace_hcall_entry(unsigned long opcode, unsigned long *args); -void __trace_hcall_exit(long opcode, unsigned long retval, - unsigned long *retbuf); +void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf); /* OPAL tracing */ #ifdef HAVE_JUMP_LABEL extern struct static_key opal_tracepoint_key; @@ -81,18 +80,12 @@ void machine_check_exception(struct pt_regs *regs); void emulation_assist_interrupt(struct pt_regs *regs); /* signals, syscalls and interrupts */ -#ifdef CONFIG_PPC64 -int sys_swapcontext(struct ucontext __user *old_ctx, - struct ucontext __user *new_ctx, - long ctx_size, long r6, long r7, long r8, struct pt_regs *regs); -#else long sys_swapcontext(struct ucontext __user *old_ctx, struct ucontext __user *new_ctx, - int ctx_size, int r6, int r7, int r8, struct pt_regs *regs); -int sys_debug_setcontext(struct ucontext __user *ctx, - int ndbg, struct sig_dbg_op __user *dbg, - int r6, int r7, int r8, - struct pt_regs *regs); + long ctx_size); +#ifdef CONFIG_PPC32 +long sys_debug_setcontext(struct ucontext __user *ctx, + int ndbg, struct sig_dbg_op __user *dbg); int ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp); unsigned long __init early_init(unsigned long dt_ptr); @@ -141,4 +134,7 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip); void pnv_power9_force_smt4_catch(void); void pnv_power9_force_smt4_release(void); +void tm_enable(void); +void tm_disable(void); +void tm_abort(uint8_t cause); #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index c7c63959ba91..f67b3f6e36be 100644 --- a/arch/powerpc/include/asm/barrier.h +++ 
b/arch/powerpc/include/asm/barrier.h @@ -76,6 +76,21 @@ do { \ ___p1; \ }) +#ifdef CONFIG_PPC_BOOK3S_64 +/* + * Prevent execution of subsequent instructions until preceding branches have + * been fully resolved and are no longer executing speculatively. + */ +#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; nop + +// This also acts as a compiler barrier due to the memory clobber. +#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory") + +#else /* !CONFIG_PPC_BOOK3S_64 */ +#define barrier_nospec_asm +#define barrier_nospec() +#endif + #include <asm-generic/barrier.h> #endif /* _ASM_POWERPC_BARRIER_H */ diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h index 5073cc75f1c8..6a6673907e45 100644 --- a/arch/powerpc/include/asm/book3s/32/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h @@ -99,6 +99,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) static inline void pgtable_free(void *table, unsigned index_size) { if (!index_size) { + pgtable_page_dtor(virt_to_page(table)); free_page((unsigned long)table); } else { BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE); diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index c615abdce119..02f5acd7ccc4 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -235,15 +235,18 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, } -static inline void __ptep_set_access_flags(struct mm_struct *mm, +static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, pte_t entry, - unsigned long address) + unsigned long address, + int psize) { unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); unsigned long clr = ~pte_val(entry) & _PAGE_RO; pte_update(ptep, clr, set); + + flush_tlb_page(vma, address); } #define __HAVE_ARCH_PTE_SAME diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index 4b5423030d4b..9a3798660cef 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -38,8 +38,12 @@ #define H_PAGE_4K_PFN 0x0 #define H_PAGE_THP_HUGE 0x0 #define H_PAGE_COMBO 0x0 -#define H_PTE_FRAG_NR 0 -#define H_PTE_FRAG_SIZE_SHIFT 0 + +/* 8 bytes per each pte entry */ +#define H_PTE_FRAG_SIZE_SHIFT (H_PTE_INDEX_SIZE + 3) +#define H_PTE_FRAG_NR (PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT) +#define H_PMD_FRAG_SIZE_SHIFT (H_PMD_INDEX_SIZE + 3) +#define H_PMD_FRAG_NR (PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT) /* memory key bits, only 8 keys supported */ #define H_PTE_PKEY_BIT0 0 diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h index cc82745355b3..c81793d47af9 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h @@ -46,6 +46,13 @@ #define H_PTE_FRAG_SIZE_SHIFT (H_PTE_INDEX_SIZE + 3 + 1) #define H_PTE_FRAG_NR (PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT) +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) +#define H_PMD_FRAG_SIZE_SHIFT (H_PMD_INDEX_SIZE + 3 + 1) +#else +#define H_PMD_FRAG_SIZE_SHIFT (H_PMD_INDEX_SIZE + 3) +#endif +#define H_PMD_FRAG_NR (PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT) + #ifndef __ASSEMBLY__ #include <asm/errno.h> diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h index cc8cd656ccfe..0387b155f13d 
100644 --- a/arch/powerpc/include/asm/book3s/64/hash.h +++ b/arch/powerpc/include/asm/book3s/64/hash.h @@ -23,16 +23,6 @@ H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT) #define H_PGTABLE_RANGE (ASM_CONST(1) << H_PGTABLE_EADDR_SIZE) -#if (defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)) && \ - defined(CONFIG_PPC_64K_PAGES) -/* - * only with hash 64k we need to use the second half of pmd page table - * to store pointer to deposited pgtable_t - */ -#define H_PMD_CACHE_INDEX (H_PMD_INDEX_SIZE + 1) -#else -#define H_PMD_CACHE_INDEX H_PMD_INDEX_SIZE -#endif /* * We store the slot details in the second half of page table. * Increase the pud level table so that hugetlb ptes can be stored diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 5094696eecd6..9c8c669a6b6a 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -134,10 +134,11 @@ typedef struct { #ifdef CONFIG_PPC_SUBPAGE_PROT struct subpage_prot_table spt; #endif /* CONFIG_PPC_SUBPAGE_PROT */ -#ifdef CONFIG_PPC_64K_PAGES - /* for 4K PTE fragment support */ + /* + * pagetable fragment support + */ void *pte_frag; -#endif + void *pmd_frag; #ifdef CONFIG_SPAPR_TCE_IOMMU struct list_head iommu_group_mem_list; #endif diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h index 558a159600ad..01ee40f11f3a 100644 --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h @@ -42,7 +42,9 @@ extern struct kmem_cache *pgtable_cache[]; }) extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int); +extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long); extern void pte_fragment_free(unsigned long *, int); +extern void pmd_fragment_free(unsigned long *); extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift); #ifdef CONFIG_SMP extern void __tlb_remove_table(void *_table); @@ -88,8 +90,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) * need to do this for 4k. */ #if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \ - ((H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX) || \ - (H_PGD_INDEX_SIZE == H_PMD_CACHE_INDEX)) + (H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX) memset(pgd, 0, PGD_TABLE_SIZE); #endif return pgd; @@ -124,36 +125,35 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) } static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, - unsigned long address) + unsigned long address) { /* * By now all the pud entries should be none entries. So go * ahead and flush the page walk cache */ flush_tlb_pgtable(tlb, address); - pgtable_free_tlb(tlb, pud, PUD_CACHE_INDEX); + pgtable_free_tlb(tlb, pud, PUD_INDEX); } static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) { - return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), - pgtable_gfp_flags(mm, GFP_KERNEL)); + return pmd_fragment_alloc(mm, addr); } static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { - kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd); + pmd_fragment_free((unsigned long *)pmd); } static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, - unsigned long address) + unsigned long address) { /* * By now all the pud entries should be none entries. 
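All of the H_PTE_FRAG_* and H_PMD_FRAG_* definitions above follow one rule: a table of 2^index entries at 8 bytes per entry occupies 2^(index + 3) bytes, so one page can be carved into PAGE_SIZE >> shift independent fragments. A standalone sketch of that arithmetic, with illustrative values rather than the kernel's real configuration:

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 16;		/* assume 64K pages */
	unsigned long pmd_index_size = 8;	/* assumed index size */
	unsigned long frag_shift = pmd_index_size + 3;	/* 8-byte entries */
	unsigned long frag_nr = (1UL << page_shift) >> frag_shift;

	/* prints: fragment 2048 bytes, 32 fragments per page */
	printf("fragment %lu bytes, %lu fragments per page\n",
	       1UL << frag_shift, frag_nr);
	return 0;
}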
So go * ahead and flush the page walk cache */ flush_tlb_pgtable(tlb, address); - return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX); + return pgtable_free_tlb(tlb, pmd, PMD_INDEX); } static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, @@ -173,31 +173,6 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd) return (pgtable_t)pmd_page_vaddr(pmd); } -#ifdef CONFIG_PPC_4K_PAGES -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) -{ - return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); -} - -static inline pgtable_t pte_alloc_one(struct mm_struct *mm, - unsigned long address) -{ - struct page *page; - pte_t *pte; - - pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT); - if (!pte) - return NULL; - page = virt_to_page(pte); - if (!pgtable_page_ctor(page)) { - __free_page(page); - return NULL; - } - return pte; -} -#else /* if CONFIG_PPC_64K_PAGES */ - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { @@ -209,7 +184,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, { return (pgtable_t)pte_fragment_alloc(mm, address, 0); } -#endif static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) { @@ -229,7 +203,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, * ahead and flush the page walk cache */ flush_tlb_pgtable(tlb, address); - pgtable_free_tlb(tlb, table, 0); + pgtable_free_tlb(tlb, table, PTE_INDEX); } #define check_pgt_cache() do { } while (0) diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 47b5ffc8715d..63cee159022b 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -212,13 +212,13 @@ extern unsigned long __pte_index_size; extern unsigned long __pmd_index_size; extern unsigned long __pud_index_size; extern unsigned long __pgd_index_size; -extern unsigned long __pmd_cache_index; extern unsigned long __pud_cache_index; #define PTE_INDEX_SIZE __pte_index_size #define PMD_INDEX_SIZE __pmd_index_size #define PUD_INDEX_SIZE __pud_index_size #define PGD_INDEX_SIZE __pgd_index_size -#define PMD_CACHE_INDEX __pmd_cache_index +/* pmd table use page table fragments */ +#define PMD_CACHE_INDEX 0 #define PUD_CACHE_INDEX __pud_cache_index /* * Because of use of pte fragments and THP, size of page table @@ -246,6 +246,12 @@ extern unsigned long __pte_frag_size_shift; #define PTE_FRAG_SIZE_SHIFT __pte_frag_size_shift #define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT) +extern unsigned long __pmd_frag_nr; +#define PMD_FRAG_NR __pmd_frag_nr +extern unsigned long __pmd_frag_size_shift; +#define PMD_FRAG_SIZE_SHIFT __pmd_frag_size_shift +#define PMD_FRAG_SIZE (1UL << PMD_FRAG_SIZE_SHIFT) + #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) #define PTRS_PER_PUD (1 << PUD_INDEX_SIZE) @@ -273,6 +279,16 @@ extern unsigned long __pte_frag_size_shift; /* Bits to mask out from a PGD to get to the PUD page */ #define PGD_MASKED_BITS 0xc0000000000000ffUL +/* + * Used as an indicator for rcu callback functions + */ +enum pgtable_index { + PTE_INDEX = 0, + PMD_INDEX, + PUD_INDEX, + PGD_INDEX, +}; + extern unsigned long __vmalloc_start; extern unsigned long __vmalloc_end; #define VMALLOC_START __vmalloc_start @@ -319,9 +335,6 @@ extern unsigned long pci_io_base; /* Advertise special mapping type for AGP */ #define HAVE_PAGE_AGP -/* Advertise support for _PAGE_SPECIAL */ -#define 
__HAVE_ARCH_PTE_SPECIAL - #ifndef __ASSEMBLY__ /* @@ -751,12 +764,14 @@ static inline bool check_pte_access(unsigned long access, unsigned long ptev) * Generic functions with hash/radix callbacks */ -static inline void __ptep_set_access_flags(struct mm_struct *mm, +static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, pte_t entry, - unsigned long address) + unsigned long address, + int psize) { if (radix_enabled()) - return radix__ptep_set_access_flags(mm, ptep, entry, address); + return radix__ptep_set_access_flags(vma, ptep, entry, + address, psize); return hash__ptep_set_access_flags(ptep, entry); } diff --git a/arch/powerpc/include/asm/book3s/64/radix-4k.h b/arch/powerpc/include/asm/book3s/64/radix-4k.h index ca366ec86310..863c3e8286fb 100644 --- a/arch/powerpc/include/asm/book3s/64/radix-4k.h +++ b/arch/powerpc/include/asm/book3s/64/radix-4k.h @@ -15,4 +15,7 @@ #define RADIX_PTE_FRAG_SIZE_SHIFT (RADIX_PTE_INDEX_SIZE + 3) #define RADIX_PTE_FRAG_NR (PAGE_SIZE >> RADIX_PTE_FRAG_SIZE_SHIFT) +#define RADIX_PMD_FRAG_SIZE_SHIFT (RADIX_PMD_INDEX_SIZE + 3) +#define RADIX_PMD_FRAG_NR (PAGE_SIZE >> RADIX_PMD_FRAG_SIZE_SHIFT) + #endif /* _ASM_POWERPC_PGTABLE_RADIX_4K_H */ diff --git a/arch/powerpc/include/asm/book3s/64/radix-64k.h b/arch/powerpc/include/asm/book3s/64/radix-64k.h index 830082496876..ccb78ca9d0c5 100644 --- a/arch/powerpc/include/asm/book3s/64/radix-64k.h +++ b/arch/powerpc/include/asm/book3s/64/radix-64k.h @@ -16,4 +16,8 @@ */ #define RADIX_PTE_FRAG_SIZE_SHIFT (RADIX_PTE_INDEX_SIZE + 3) #define RADIX_PTE_FRAG_NR (PAGE_SIZE >> RADIX_PTE_FRAG_SIZE_SHIFT) + +#define RADIX_PMD_FRAG_SIZE_SHIFT (RADIX_PMD_INDEX_SIZE + 3) +#define RADIX_PMD_FRAG_NR (PAGE_SIZE >> RADIX_PMD_FRAG_SIZE_SHIFT) + #endif /* _ASM_POWERPC_PGTABLE_RADIX_64K_H */ diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index 705193e7192f..ef9f96742ce1 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -124,23 +124,28 @@ extern void radix__mark_rodata_ro(void); extern void radix__mark_initmem_nx(void); #endif +extern void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, + pte_t entry, unsigned long address, + int psize); + static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr, unsigned long set) { - pte_t pte; - unsigned long old_pte, new_pte; - - do { - pte = READ_ONCE(*ptep); - old_pte = pte_val(pte); - new_pte = (old_pte | set) & ~clr; - - } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte))); - - return old_pte; + __be64 old_be, tmp_be; + + __asm__ __volatile__( + "1: ldarx %0,0,%3 # pte_update\n" + " andc %1,%0,%5 \n" + " or %1,%1,%4 \n" + " stdcx. %1,0,%3 \n" + " bne- 1b" + : "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep) + : "r" (ptep), "r" (cpu_to_be64(set)), "r" (cpu_to_be64(clr)) + : "cc" ); + + return be64_to_cpu(old_be); } - static inline unsigned long radix__pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long clr, @@ -176,48 +181,14 @@ static inline pte_t radix__ptep_get_and_clear_full(struct mm_struct *mm, unsigned long old_pte; if (full) { - /* - * If we are trying to clear the pte, we can skip - * the DD1 pte update sequence and batch the tlb flush. The - * tlb flush batching is done by mmu gather code. We - * still keep the cmp_xchg update to make sure we get - * correct R/C bit which might be updated via Nest MMU. 
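The rewritten __radix_pte_update above folds the whole clear-then-set into a single ldarx/stdcx. loop over the big-endian PTE image. Its contract is an ordinary compare-and-swap retry loop; a C11 model of the same semantics, offered as a sketch rather than kernel code:

#include <stdatomic.h>
#include <stdint.h>

/* Atomically replace *ptep with (old & ~clr) | set, retrying on a
 * racing update, and return the previous value. */
static uint64_t radix_pte_update_model(_Atomic uint64_t *ptep,
				       uint64_t clr, uint64_t set)
{
	uint64_t old = atomic_load(ptep);

	while (!atomic_compare_exchange_weak(ptep, &old,
					     (old & ~clr) | set))
		;	/* 'old' is reloaded on failure */
	return old;
}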
- */ - old_pte = __radix_pte_update(ptep, ~0ul, 0); + old_pte = pte_val(*ptep); + *ptep = __pte(0); } else old_pte = radix__pte_update(mm, addr, ptep, ~0ul, 0, 0); return __pte(old_pte); } -/* - * Set the dirty and/or accessed bits atomically in a linux PTE, this - * function doesn't need to invalidate tlb. - */ -static inline void radix__ptep_set_access_flags(struct mm_struct *mm, - pte_t *ptep, pte_t entry, - unsigned long address) -{ - - unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | - _PAGE_RW | _PAGE_EXEC); - - if (cpu_has_feature(CPU_FTR_POWER9_DD1)) { - - unsigned long old_pte, new_pte; - - old_pte = __radix_pte_update(ptep, ~0, 0); - /* - * new value of pte - */ - new_pte = old_pte | set; - radix__flush_tlb_pte_p9_dd1(old_pte, mm, address); - __radix_pte_update(ptep, 0, new_pte); - } else - __radix_pte_update(ptep, 0, set); - asm volatile("ptesync" : : : "memory"); -} - static inline int radix__pte_same(pte_t pte_a, pte_t pte_b) { return ((pte_raw(pte_a) ^ pte_raw(pte_b)) == 0); @@ -232,7 +203,24 @@ static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, int percpu) { *ptep = pte; - asm volatile("ptesync" : : : "memory"); + + /* + * The architecture suggests a ptesync after setting the pte, which + * orders the store that updates the pte with subsequent page table + * walk accesses which may load the pte. Without this it may be + * possible for a subsequent access to result in a spurious fault. + * + * This is not necessary for correctness, because a spurious fault + * is tolerated by the page fault handler, and this store will + * eventually be seen. In testing, there was no noticeable increase + * in user faults on POWER9. Avoiding ptesync here is a significant + * win for things like fork. If a future microarchitecture benefits + * from ptesync, it should probably go into update_mmu_cache, rather + * than set_pte_at (which is used to set ptes unrelated to faults). + + * Spurious faults to the vmalloc region are not tolerated, so there is + * a ptesync in flush_cache_vmap.
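The trade-off this comment describes has a portable analogy: skipping the fence when publishing a translation is safe only if every reader can miss and retry, which is what a spurious user fault amounts to. A hedged C11 sketch of the two publication styles (names invented for illustration):

#include <stdatomic.h>

static _Atomic unsigned long pte_slot;	/* stand-in for a PTE */

/* radix__set_pte_at style: no fence. A racing walker may briefly miss
 * the new value, take a spurious fault, and retry harmlessly. */
static void publish_fault_tolerant(unsigned long pte)
{
	atomic_store_explicit(&pte_slot, pte, memory_order_relaxed);
}

/* flush_cache_vmap style: kernel/vmalloc readers cannot retry, so the
 * store must be ordered before later accesses (the fence stands in
 * for ptesync). */
static void publish_fault_intolerant(unsigned long pte)
{
	atomic_store_explicit(&pte_slot, pte, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
}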
+ */ } static inline int radix__pmd_bad(pmd_t pmd) diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h index 19b45ba6caf9..ef5c3f2994c9 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h @@ -51,4 +51,11 @@ extern void radix__flush_tlb_all(void); extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm, unsigned long address); +extern void radix__flush_tlb_lpid_page(unsigned int lpid, + unsigned long addr, + unsigned long page_size); +extern void radix__flush_pwc_lpid(unsigned int lpid); +extern void radix__local_flush_tlb_lpid(unsigned int lpid); +extern void radix__local_flush_tlb_lpid_guest(unsigned int lpid); + #endif diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h index 0cac17253513..ebf572ea621e 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h @@ -4,7 +4,7 @@ #define MMU_NO_CONTEXT ~0UL - +#include <linux/mm_types.h> #include <asm/book3s/64/tlbflush-hash.h> #include <asm/book3s/64/tlbflush-radix.h> @@ -137,6 +137,16 @@ static inline void flush_all_mm(struct mm_struct *mm) #define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr) #define flush_all_mm(mm) local_flush_all_mm(mm) #endif /* CONFIG_SMP */ + +#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault +static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma, + unsigned long address) +{ + /* See ptep_set_access_flags comment */ + if (atomic_read(&vma->vm_mm->context.copros) > 0) + flush_tlb_page(vma, address); +} + /* * flush the page walk cache for the address */ diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index c1d257aa4c2d..66298461b640 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -9,11 +9,14 @@ #if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX) #define L1_CACHE_SHIFT 4 #define MAX_COPY_PREFETCH 1 +#define IFETCH_ALIGN_SHIFT 2 #elif defined(CONFIG_PPC_E500MC) #define L1_CACHE_SHIFT 6 #define MAX_COPY_PREFETCH 4 +#define IFETCH_ALIGN_SHIFT 3 #elif defined(CONFIG_PPC32) #define MAX_COPY_PREFETCH 4 +#define IFETCH_ALIGN_SHIFT 3 /* 603 fetches 2 insn at a time */ #if defined(CONFIG_PPC_47x) #define L1_CACHE_SHIFT 7 #else diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h index 11843e37d9cf..0d72ec75da63 100644 --- a/arch/powerpc/include/asm/cacheflush.h +++ b/arch/powerpc/include/asm/cacheflush.h @@ -23,9 +23,21 @@ #define flush_cache_range(vma, start, end) do { } while (0) #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) #define flush_icache_page(vma, page) do { } while (0) -#define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0) +#ifdef CONFIG_PPC_BOOK3S_64 +/* + * Book3s has no ptesync after setting a pte, so without this ptesync it's + * possible for a kernel virtual mapping access to return a spurious fault + * if it's accessed right after the pte is set. The page fault handler does + * not expect this type of fault. flush_cache_vmap is not exactly the right + * place to put this, but it seems to work well enough. 
+ */ +#define flush_cache_vmap(start, end) do { asm volatile("ptesync" ::: "memory"); } while (0) +#else +#define flush_cache_vmap(start, end) do { } while (0) +#endif + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page *page); #define flush_dcache_mmap_lock(mapping) do { } while (0) diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h index 842124b199b5..a78a57e5058d 100644 --- a/arch/powerpc/include/asm/checksum.h +++ b/arch/powerpc/include/asm/checksum.h @@ -12,6 +12,8 @@ #ifdef CONFIG_GENERIC_CSUM #include <asm-generic/checksum.h> #else +#include <linux/bitops.h> +#include <linux/in6.h> /* * Computes the checksum of a memory block at src, length len, * and adds in "sum" (32-bit), while copying the block to dst. @@ -55,11 +57,7 @@ static inline __sum16 csum_fold(__wsum sum) static inline u32 from64to32(u64 x) { - /* add up 32-bit and 32-bit for 32+c bit */ - x = (x & 0xffffffff) + (x >> 32); - /* add up carry.. */ - x = (x & 0xffffffff) + (x >> 32); - return (u32)x; + return (x + ror64(x, 32)) >> 32; } static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, @@ -112,7 +110,7 @@ static inline __wsum csum_add(__wsum csum, __wsum addend) #ifdef __powerpc64__ res += (__force u64)addend; - return (__force __wsum) from64to32(res); + return (__force __wsum)((u32)res + (res >> 32)); #else asm("addc %0,%0,%1;" "addze %0,%0;" @@ -214,6 +212,11 @@ static inline __sum16 ip_compute_csum(const void *buff, int len) return csum_fold(csum_partial(buff, len, 0)); } +#define _HAVE_ARCH_IPV6_CSUM +__sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum sum); + #endif #endif /* __KERNEL__ */ #endif diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index 62168e1158f1..85c8af2bb272 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h @@ -17,7 +17,6 @@ typedef u32 compat_size_t; typedef s32 compat_ssize_t; -typedef s32 compat_time_t; typedef s32 compat_clock_t; typedef s32 compat_pid_t; typedef u32 __compat_uid_t; @@ -45,16 +44,6 @@ typedef u32 compat_ulong_t; typedef u64 compat_u64; typedef u32 compat_uptr_t; -struct compat_timespec { - compat_time_t tv_sec; - s32 tv_nsec; -}; - -struct compat_timeval { - compat_time_t tv_sec; - s32 tv_usec; -}; - struct compat_stat { compat_dev_t st_dev; compat_ino_t st_ino; @@ -173,10 +162,10 @@ struct compat_ipc64_perm { struct compat_semid64_ds { struct compat_ipc64_perm sem_perm; - unsigned int __unused1; - compat_time_t sem_otime; - unsigned int __unused2; - compat_time_t sem_ctime; + unsigned int sem_otime_high; + unsigned int sem_otime; + unsigned int sem_ctime_high; + unsigned int sem_ctime; compat_ulong_t sem_nsems; compat_ulong_t __unused3; compat_ulong_t __unused4; @@ -184,12 +173,12 @@ struct compat_semid64_ds { struct compat_msqid64_ds { struct compat_ipc64_perm msg_perm; - unsigned int __unused1; - compat_time_t msg_stime; - unsigned int __unused2; - compat_time_t msg_rtime; - unsigned int __unused3; - compat_time_t msg_ctime; + unsigned int msg_stime_high; + unsigned int msg_stime; + unsigned int msg_rtime_high; + unsigned int msg_rtime; + unsigned int msg_ctime_high; + unsigned int msg_ctime; compat_ulong_t msg_cbytes; compat_ulong_t msg_qnum; compat_ulong_t msg_qbytes; @@ -201,12 +190,12 @@ struct compat_msqid64_ds { struct compat_shmid64_ds { struct compat_ipc64_perm shm_perm; - unsigned int __unused1; - compat_time_t shm_atime; - unsigned 
int __unused2; - compat_time_t shm_dtime; - unsigned int __unused3; - compat_time_t shm_ctime; + unsigned int shm_atime_high; + unsigned int shm_atime; + unsigned int shm_dtime_high; + unsigned int shm_dtime; + unsigned int shm_ctime_high; + unsigned int shm_ctime; unsigned int __unused4; compat_size_t shm_segsz; compat_pid_t shm_cpid; diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 66fcab13c8b4..9c0a3083571b 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -215,6 +215,7 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_P9_TM_HV_ASSIST LONG_ASM_CONST(0x0000100000000000) #define CPU_FTR_P9_TM_XER_SO_BUG LONG_ASM_CONST(0x0000200000000000) #define CPU_FTR_P9_TLBIE_BUG LONG_ASM_CONST(0x0000400000000000) +#define CPU_FTR_P9_TIDR LONG_ASM_CONST(0x0000800000000000) #ifndef __ASSEMBLY__ @@ -462,7 +463,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \ - CPU_FTR_P9_TLBIE_BUG) + CPU_FTR_P9_TLBIE_BUG | CPU_FTR_P9_TIDR) #define CPU_FTRS_POWER9_DD1 ((CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1) & \ (~CPU_FTR_SAO)) #define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9 diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 99b541865d8d..bc4903badb3f 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h @@ -47,9 +47,23 @@ static inline unsigned long cputime_to_usecs(const cputime_t ct) * has to be populated in the new task */ #ifdef CONFIG_PPC64 +#define get_accounting(tsk) (&get_paca()->accounting) static inline void arch_vtime_task_switch(struct task_struct *tsk) { } #else -void arch_vtime_task_switch(struct task_struct *tsk); +#define get_accounting(tsk) (&task_thread_info(tsk)->accounting) +/* + * Called from the context switch with interrupts disabled, to charge all + * accumulated times to the current process, and to prepare accounting on + * the next process. + */ +static inline void arch_vtime_task_switch(struct task_struct *prev) +{ + struct cpu_accounting_data *acct = get_accounting(current); + struct cpu_accounting_data *acct0 = get_accounting(prev); + + acct->starttime = acct0->starttime; + acct->startspurr = acct0->startspurr; +} #endif #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index c2266ca61853..677102baf3cd 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -106,6 +106,9 @@ struct eeh_pe { #define eeh_pe_for_each_dev(pe, edev, tmp) \ list_for_each_entry_safe(edev, tmp, &pe->edevs, list) +#define eeh_for_each_pe(root, pe) \ + for (pe = root; pe; pe = eeh_pe_next(pe, root)) + static inline bool eeh_pe_passed(struct eeh_pe *pe) { return pe ? 
!!atomic_read(&pe->pass_dev_cnt) : false; @@ -262,19 +265,21 @@ static inline bool eeh_state_active(int state) == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); } -typedef void *(*eeh_traverse_func)(void *data, void *flag); +typedef void *(*eeh_edev_traverse_func)(struct eeh_dev *edev, void *flag); +typedef void *(*eeh_pe_traverse_func)(struct eeh_pe *pe, void *flag); void eeh_set_pe_aux_size(int size); int eeh_phb_pe_create(struct pci_controller *phb); struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb); +struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root); struct eeh_pe *eeh_pe_get(struct pci_controller *phb, int pe_no, int config_addr); int eeh_add_to_parent_pe(struct eeh_dev *edev); int eeh_rmv_from_parent_pe(struct eeh_dev *edev); void eeh_pe_update_time_stamp(struct eeh_pe *pe); void *eeh_pe_traverse(struct eeh_pe *root, - eeh_traverse_func fn, void *flag); + eeh_pe_traverse_func fn, void *flag); void *eeh_pe_dev_traverse(struct eeh_pe *root, - eeh_traverse_func fn, void *flag); + eeh_edev_traverse_func fn, void *flag); void eeh_pe_restore_bars(struct eeh_pe *pe); const char *eeh_pe_loc_get(struct eeh_pe *pe); struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe); diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index a9b64df34e2a..fcfd05672b1b 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -211,6 +211,14 @@ label##3: \ FTR_ENTRY_OFFSET 951b-952b; \ .popsection; +#define NOSPEC_BARRIER_FIXUP_SECTION \ +953: \ + .pushsection __barrier_nospec_fixup,"a"; \ + .align 2; \ +954: \ + FTR_ENTRY_OFFSET 953b-954b; \ + .popsection; + #ifndef __ASSEMBLY__ #include <linux/types.h> @@ -219,6 +227,7 @@ extern long stf_barrier_fallback; extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup; extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup; extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; +extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup; void apply_feature_fixups(void); void setup_feature_keys(void); diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index b2dabd06659d..3dfb80b86561 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -48,9 +48,6 @@ #else /* !__ASSEMBLY__ */ extern void _mcount(void); -#ifdef CONFIG_DYNAMIC_FTRACE -# define FTRACE_ADDR ((unsigned long)ftrace_caller) -# define FTRACE_REGS_ADDR FTRACE_ADDR static inline unsigned long ftrace_call_adjust(unsigned long addr) { /* relocation of mcount call site is the same as the address */ @@ -60,15 +57,15 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) struct dyn_arch_ftrace { struct module *mod; }; -#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* __ASSEMBLY__ */ #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS #define ARCH_SUPPORTS_FTRACE_OPS 1 #endif -#endif +#endif /* CONFIG_FUNCTION_TRACER */ -#if defined(CONFIG_FTRACE_SYSCALLS) && !defined(__ASSEMBLY__) +#ifndef __ASSEMBLY__ +#ifdef CONFIG_FTRACE_SYSCALLS /* * Some syscall entry functions on powerpc start with "ppc_" (fork and clone, * for instance) or ppc32_/ppc64_.
We should also match the sys_ variant with @@ -94,7 +91,25 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name (!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) || (!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4)); } -#endif -#endif /* CONFIG_FTRACE_SYSCALLS && !__ASSEMBLY__ */ +#endif /* PPC64_ELF_ABI_v1 */ +#endif /* CONFIG_FTRACE_SYSCALLS */ + +#ifdef CONFIG_PPC64 +#include <asm/paca.h> + +static inline void this_cpu_disable_ftrace(void) +{ + get_paca()->ftrace_enabled = 0; +} + +static inline void this_cpu_enable_ftrace(void) +{ + get_paca()->ftrace_enabled = 1; +} +#else /* CONFIG_PPC64 */ +static inline void this_cpu_disable_ftrace(void) { } +static inline void this_cpu_enable_ftrace(void) { } +#endif /* CONFIG_PPC64 */ +#endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_FTRACE */ diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h index 5986d473722b..f1e9067bd5ac 100644 --- a/arch/powerpc/include/asm/hardirq.h +++ b/arch/powerpc/include/asm/hardirq.h @@ -8,6 +8,7 @@ typedef struct { unsigned int __softirq_pending; unsigned int timer_irqs_event; + unsigned int broadcast_irqs_event; unsigned int timer_irqs_others; unsigned int pmu_irqs; unsigned int mce_exceptions; @@ -25,15 +26,8 @@ typedef struct { DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); #define __ARCH_IRQ_STAT - -#define local_softirq_pending() __this_cpu_read(irq_stat.__softirq_pending) - -#define __ARCH_SET_SOFTIRQ_PENDING #define __ARCH_IRQ_EXIT_IRQS_DISABLED -#define set_softirq_pending(x) __this_cpu_write(irq_stat.__softirq_pending, (x)) -#define or_softirq_pending(x) __this_cpu_or(irq_stat.__softirq_pending, (x)) - static inline void ack_bad_irq(unsigned int irq) { printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq); diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 78540c074d70..3225eb6402cc 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -166,22 +166,9 @@ static inline pte_t huge_pte_wrprotect(pte_t pte) return pte_wrprotect(pte); } -static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep, - pte_t pte, int dirty) -{ -#ifdef HUGETLB_NEED_PRELOAD - /* - * The "return 1" forces a call of update_mmu_cache, which will write a - * TLB entry. Without this, platforms that don't do a write of the TLB - * entry in the TLB miss handler asm will fault ad infinitum. 
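The comment being removed here compresses a subtle contract; a minimal sketch of the HUGETLB_NEED_PRELOAD case it describes (the helper name is hypothetical; the real logic now lives out of line in the hugetlb code):

/* On platforms whose TLB miss handler never writes a TLB entry,
 * report "changed" unconditionally so the generic fault path calls
 * update_mmu_cache(), which preloads the entry. */
static int set_access_flags_with_preload(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;	/* force update_mmu_cache() */
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}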
- */ - ptep_set_access_flags(vma, addr, ptep, pte, dirty); - return 1; -#else - return ptep_set_access_flags(vma, addr, ptep, pte, dirty); -#endif -} +extern int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty); static inline pte_t huge_ptep_get(pte_t *ptep) { @@ -202,7 +189,7 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma, static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, unsigned pdshift) { - return 0; + return NULL; } #endif /* CONFIG_HUGETLB_PAGE */ diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 2e2dddab5d65..662c8347d699 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -279,6 +279,7 @@ #define H_GET_MPP_X 0x314 #define H_SET_MODE 0x31C #define H_CLEAR_HPT 0x358 +#define H_REQUEST_VMC 0x360 #define H_RESIZE_HPT_PREPARE 0x36C #define H_RESIZE_HPT_COMMIT 0x370 #define H_REGISTER_PROC_TBL 0x37C diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 855e17d158b1..e151774cb577 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -55,6 +55,7 @@ extern void replay_system_reset(void); extern void __replay_interrupt(unsigned int vector); extern void timer_interrupt(struct pt_regs *); +extern void timer_broadcast_interrupt(void); extern void performance_monitor_exception(struct pt_regs *regs); extern void WatchdogException(struct pt_regs *regs); extern void unknown_exception(struct pt_regs *regs); @@ -228,8 +229,8 @@ static inline bool arch_irqs_disabled(void) #define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory") #define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory") #else -#define __hard_irq_enable() __mtmsrd(local_paca->kernel_msr | MSR_EE, 1) -#define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1) +#define __hard_irq_enable() __mtmsrd(MSR_EE|MSR_RI, 1) +#define __hard_irq_disable() __mtmsrd(MSR_RI, 1) #endif #define hard_irq_disable() do { \ @@ -237,8 +238,12 @@ static inline bool arch_irqs_disabled(void) __hard_irq_disable(); \ flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED); \ local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \ - if (!arch_irqs_disabled_flags(flags)) \ + if (!arch_irqs_disabled_flags(flags)) { \ + asm ("stdx %%r1, 0, %1 ;" \ + : "=m" (local_paca->saved_r1) \ + : "b" (&local_paca->saved_r1)); \ trace_hardirqs_off(); \ + } \ } while(0) static inline bool lazy_irq_pending(void) diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h index d76cb11be3e3..69f516ecb2fd 100644 --- a/arch/powerpc/include/asm/imc-pmu.h +++ b/arch/powerpc/include/asm/imc-pmu.h @@ -128,4 +128,5 @@ extern int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id); extern void thread_imc_disable(void); extern int get_max_nest_dev(void); +extern void unregister_thread_imc(void); #endif /* __ASM_POWERPC_IMC_PMU_H */ diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index af074923d598..e0331e754568 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -367,6 +367,11 @@ static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr) *(volatile unsigned long __force *)PCI_FIX_ADDR(addr) = v; } +static inline void __raw_writeq_be(unsigned long v, volatile void __iomem *addr) +{ + __raw_writeq((__force unsigned long)cpu_to_be64(v), addr); +} + /* * Real mode versions of the above. 
Those instructions are only supposed * to be used in hypervisor real mode as per the architecture spec. @@ -395,6 +400,11 @@ static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr) : : "r" (val), "r" (paddr) : "memory"); } +static inline void __raw_rm_writeq_be(u64 val, volatile void __iomem *paddr) +{ + __raw_rm_writeq((__force u64)cpu_to_be64(val), paddr); +} + static inline u8 __raw_rm_readb(volatile void __iomem *paddr) { u8 ret; diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index ffe7c71e1132..a47de82fb8e2 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -83,7 +83,7 @@ struct machdep_calls { int (*set_rtc_time)(struct rtc_time *); void (*get_rtc_time)(struct rtc_time *); - unsigned long (*get_boot_time)(void); + time64_t (*get_boot_time)(void); unsigned char (*rtc_read_val)(int addr); void (*rtc_write_val)(int addr, unsigned char val); diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h index cda94a0f5146..e20072972e35 100644 --- a/arch/powerpc/include/asm/mmu-book3e.h +++ b/arch/powerpc/include/asm/mmu-book3e.h @@ -230,10 +230,6 @@ typedef struct { unsigned int id; unsigned int active; unsigned long vdso_base; -#ifdef CONFIG_PPC_64K_PAGES - /* for 4K PTE fragment support */ - void *pte_frag; -#endif } mm_context_t; /* Page size definitions, common between 32 and 64-bit @@ -275,8 +271,6 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) */ #if defined(CONFIG_PPC_4K_PAGES) #define mmu_virtual_psize MMU_PAGE_4K -#elif defined(CONFIG_PPC_64K_PAGES) -#define mmu_virtual_psize MMU_PAGE_64K #else #error Unsupported page size #endif diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 1835ca1505d6..896efa559996 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -250,11 +250,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, #define thread_pkey_regs_restore(new_thread, old_thread) #define thread_pkey_regs_init(thread) -static inline int vma_pkey(struct vm_area_struct *vma) -{ - return 0; -} - static inline u64 pte_to_hpte_pkey_bits(u64 pteflags) { return 0x0UL; diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index 4f6573934792..d8374f984f39 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -50,10 +50,6 @@ struct mod_arch_specific { unsigned int stubs_section; /* Index of stubs section in module */ unsigned int toc_section; /* What section is the TOC? */ bool toc_fixed; /* Have we fixed up .TOC.? */ -#ifdef CONFIG_DYNAMIC_FTRACE - unsigned long toc; - unsigned long tramp; -#endif /* For module function descriptor dereference */ unsigned long start_opd; @@ -62,10 +58,14 @@ struct mod_arch_specific { /* Indices of PLT sections within module. 
*/ unsigned int core_plt_section; unsigned int init_plt_section; +#endif /* powerpc64 */ + #ifdef CONFIG_DYNAMIC_FTRACE unsigned long tramp; +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + unsigned long tramp_regs; +#endif #endif -#endif /* powerpc64 */ /* List of BUG addresses, source line numbers and filenames */ struct list_head bug_list; diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h index e94cede14522..ce1e0aabaa64 100644 --- a/arch/powerpc/include/asm/mpc52xx.h +++ b/arch/powerpc/include/asm/mpc52xx.h @@ -350,14 +350,14 @@ extern struct mpc52xx_suspend mpc52xx_suspend; extern int __init mpc52xx_pm_init(void); extern int mpc52xx_set_wakeup_gpio(u8 pin, u8 level); -#ifdef CONFIG_PPC_LITE5200 -extern int __init lite5200_pm_init(void); - /* lite5200 calls mpc5200 suspend functions, so here they are */ extern int mpc52xx_pm_prepare(void); extern int mpc52xx_pm_enter(suspend_state_t); extern void mpc52xx_pm_finish(void); extern char saved_sram[0x4000]; /* reuse buffer from mpc52xx suspend */ + +#ifdef CONFIG_PPC_LITE5200 +int __init lite5200_pm_init(void); #endif #endif /* CONFIG_PM */ diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h index 9c80939b4d14..0f571e0ebca1 100644 --- a/arch/powerpc/include/asm/nmi.h +++ b/arch/powerpc/include/asm/nmi.h @@ -8,4 +8,10 @@ extern void arch_touch_nmi_watchdog(void); static inline void arch_touch_nmi_watchdog(void) {} #endif +#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_STACKTRACE) +extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask, + bool exclude_self); +#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace +#endif + #endif /* _ASM_NMI_H */ diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h index 29d37bd1f3b3..1707781d2f20 100644 --- a/arch/powerpc/include/asm/nohash/32/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h @@ -100,6 +100,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) static inline void pgtable_free(void *table, unsigned index_size) { if (!index_size) { + pgtable_page_dtor(virt_to_page(table)); free_page((unsigned long)table); } else { BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE); diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 03bbd1149530..7c46a98cc7f4 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -133,7 +133,7 @@ extern int icache_44x_need_flush; #ifndef __ASSEMBLY__ #define pte_clear(mm, addr, ptep) \ - do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0) + do { pte_update(ptep, ~0, 0); } while (0) #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) @@ -146,21 +146,6 @@ static inline void pmd_clear(pmd_t *pmdp) /* - * When flushing the tlb entry for a page, we also need to flush the hash - * table entry. flush_hash_pages is assembler (for speed) in hashtable.S. - */ -extern int flush_hash_pages(unsigned context, unsigned long va, - unsigned long pmdval, int count); - -/* Add an HPTE to the hash table */ -extern void add_hash_page(unsigned context, unsigned long va, - unsigned long pmdval); - -/* Flush an entry from the TLB/hash table */ -extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, - unsigned long address); - -/* * PTE updates. This function is called whenever an existing * valid PTE is updated. This does -not- include set_pte_at() * which nowadays only sets a new PTE. 
@@ -246,12 +231,6 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon { unsigned long old; old = pte_update(ptep, _PAGE_ACCESSED, 0); -#if _PAGE_HASHPTE != 0 - if (old & _PAGE_HASHPTE) { - unsigned long ptephys = __pa(ptep) & PAGE_MASK; - flush_hash_pages(context, addr, ptephys, 1); - } -#endif return (old & _PAGE_ACCESSED) != 0; } #define ptep_test_and_clear_young(__vma, __addr, __ptep) \ @@ -261,7 +240,7 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0)); + return __pte(pte_update(ptep, ~0, 0)); } #define __HAVE_ARCH_PTEP_SET_WRPROTECT @@ -277,19 +256,27 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, } -static inline void __ptep_set_access_flags(struct mm_struct *mm, +static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, pte_t entry, - unsigned long address) + unsigned long address, + int psize) { unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); unsigned long clr = ~pte_val(entry) & (_PAGE_RO | _PAGE_NA); pte_update(ptep, clr, set); + + flush_tlb_page(vma, address); +} + +static inline int pte_young(pte_t pte) +{ + return pte_val(pte) & _PAGE_ACCESSED; } #define __HAVE_ARCH_PTE_SAME -#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0) +#define pte_same(A,B) ((pte_val(A) ^ pte_val(B)) == 0) /* * Note that on Book E processors, the pmd contains the kernel virtual @@ -330,7 +317,7 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm, /* * Encode and decode a swap entry. * Note that the bits we use in a PTE for representing a swap entry - * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used). + * must not include the _PAGE_PRESENT bit. 
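The packing rule stated in this comment shows up in the macros that follow: five low bits of type, offset above them. A userspace model of that encoding (the offset placement is inferred from the __swp_type() definition below, and the PTE-level conversion is assumed to shift the packed value clear of _PAGE_PRESENT):

#include <assert.h>

struct swp { unsigned long val; };

static struct swp swp_entry(unsigned int type, unsigned long offset)
{
	return (struct swp){ .val = (type & 0x1f) | (offset << 5) };
}

static unsigned int swp_type(struct swp e)    { return e.val & 0x1f; }
static unsigned long swp_offset(struct swp e) { return e.val >> 5; }

int main(void)
{
	struct swp e = swp_entry(3, 0x1234);

	assert(swp_type(e) == 3 && swp_offset(e) == 0x1234);
	return 0;
}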
* -- paulus */ #define __swp_type(entry) ((entry).val & 0x1f) diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h index 124f9ac23a1e..bb4b3a4b92a0 100644 --- a/arch/powerpc/include/asm/nohash/32/pte-40x.h +++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h @@ -52,12 +52,9 @@ #define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */ #define _PMD_BAD 0x802 -#define _PMD_SIZE 0x0e0 /* size field, != 0 for large-page PMD entry */ #define _PMD_SIZE_4M 0x0c0 #define _PMD_SIZE_16M 0x0e0 -#define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4)) - /* Until my rework is finished, 40x still needs atomic PTE updates */ #define PTE_ATOMIC_UPDATES 1 diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h index 9721c7867b9c..0e693f322cb2 100644 --- a/arch/powerpc/include/asm/nohash/64/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h @@ -52,8 +52,6 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd); } -#ifndef CONFIG_PPC_64K_PAGES - #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, (unsigned long)PUD) static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) @@ -86,6 +84,18 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, #define pmd_pgtable(pmd) pmd_page(pmd) +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), + pgtable_gfp_flags(mm, GFP_KERNEL)); +} + +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +{ + kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd); +} + + static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { @@ -120,84 +130,47 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) __free_page(ptepage); } -extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift); -#ifdef CONFIG_SMP -extern void __tlb_remove_table(void *_table); -#endif -static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, - unsigned long address) +static inline void pgtable_free(void *table, int shift) { - tlb_flush_pgtable(tlb, address); - pgtable_free_tlb(tlb, page_address(table), 0); + if (!shift) { + pgtable_page_dtor(virt_to_page(table)); + free_page((unsigned long)table); + } else { + BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE); + kmem_cache_free(PGT_CACHE(shift), table); + } } -#else /* if CONFIG_PPC_64K_PAGES */ - -extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int); -extern void pte_fragment_free(unsigned long *, int); -extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift); #ifdef CONFIG_SMP -extern void __tlb_remove_table(void *_table); -#endif - -#define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd) - -static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, - pte_t *pte) +static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) { - pmd_set(pmd, (unsigned long)pte); -} + unsigned long pgf = (unsigned long)table; -static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, - pgtable_t pte_page) -{ - pmd_set(pmd, (unsigned long)pte_page); + BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE); + pgf |= shift; + tlb_remove_table(tlb, (void *)pgf); } -static inline pgtable_t pmd_pgtable(pmd_t pmd) +static inline void __tlb_remove_table(void *_table) { - return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS); -} + void *table = (void 
*)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE); + unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE; -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, - unsigned long address) -{ - return (pte_t *)pte_fragment_alloc(mm, address, 1); + pgtable_free(table, shift); } -static inline pgtable_t pte_alloc_one(struct mm_struct *mm, - unsigned long address) -{ - return (pgtable_t)pte_fragment_alloc(mm, address, 0); -} - -static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) -{ - pte_fragment_free((unsigned long *)pte, 1); -} - -static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) +#else +static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) { - pte_fragment_free((unsigned long *)ptepage, 0); + pgtable_free(table, shift); } +#endif static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, unsigned long address) { tlb_flush_pgtable(tlb, address); - pgtable_free_tlb(tlb, table, 0); -} -#endif /* CONFIG_PPC_64K_PAGES */ - -static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), - pgtable_gfp_flags(mm, GFP_KERNEL)); -} - -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) -{ - kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd); + pgtable_free_tlb(tlb, page_address(table), 0); } #define __pmd_free_tlb(tlb, pmd, addr) \ diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h deleted file mode 100644 index 7210c2818e41..000000000000 --- a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H -#define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H - -#define __ARCH_USE_5LEVEL_HACK -#include <asm-generic/pgtable-nopud.h> - - -#define PTE_INDEX_SIZE 8 -#define PMD_INDEX_SIZE 10 -#define PUD_INDEX_SIZE 0 -#define PGD_INDEX_SIZE 12 - -/* - * we support 32 fragments per PTE page of 64K size - */ -#define PTE_FRAG_NR 32 -/* - * We use a 2K PTE page fragment and another 2K for storing - * real_pte_t hash index - */ -#define PTE_FRAG_SIZE_SHIFT 11 -#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT) - -#ifndef __ASSEMBLY__ -#define PTE_TABLE_SIZE PTE_FRAG_SIZE -#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) -#define PUD_TABLE_SIZE (0) -#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) -#endif /* __ASSEMBLY__ */ - -#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) -#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) -#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) - -/* PMD_SHIFT determines what a second-level page table entry can map */ -#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) -#define PMD_SIZE (1UL << PMD_SHIFT) -#define PMD_MASK (~(PMD_SIZE-1)) - -/* PGDIR_SHIFT determines what a third-level page table entry can map */ -#define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE) -#define PGDIR_SIZE (1UL << PGDIR_SHIFT) -#define PGDIR_MASK (~(PGDIR_SIZE-1)) - -/* - * Bits to mask out from a PMD to get to the PTE page - * PMDs point to PTE table fragments which are PTE_FRAG_SIZE aligned. 
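pgtable_free_tlb() above smuggles the cache index through tlb_remove_table() in the low bits of the page-aligned table pointer, and __tlb_remove_table() peels it back off. A standalone model of the tagging trick (MAX_INDEX stands in for MAX_PGTABLE_INDEX_SIZE):

#include <assert.h>
#include <stdint.h>

#define MAX_INDEX 0xfUL	/* stand-in for MAX_PGTABLE_INDEX_SIZE */

/* The table is page aligned, so its low bits are free to carry the
 * shift. */
static void *pack_table(void *table, unsigned long shift)
{
	assert(((uintptr_t)table & MAX_INDEX) == 0 && shift <= MAX_INDEX);
	return (void *)((uintptr_t)table | shift);
}

/* Recover both pointer and shift, as __tlb_remove_table() does. */
static void unpack_table(void *tagged, void **table, unsigned long *shift)
{
	*table = (void *)((uintptr_t)tagged & ~MAX_INDEX);
	*shift = (uintptr_t)tagged & MAX_INDEX;
}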
- */ -#define PMD_MASKED_BITS (PTE_FRAG_SIZE - 1) -/* Bits to mask out from a PGD/PUD to get to the PMD page */ -#define PUD_MASKED_BITS 0x1ff - -#define pgd_pte(pgd) (pud_pte(((pud_t){ pgd }))) -#define pte_pgd(pte) ((pgd_t)pte_pud(pte)) - -#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H */ diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 5c5f75d005ad..dd0c7236208f 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -6,13 +6,13 @@ * the ppc64 hashed page table. */ -#ifdef CONFIG_PPC_64K_PAGES -#include <asm/nohash/64/pgtable-64k.h> -#else #include <asm/nohash/64/pgtable-4k.h> -#endif #include <asm/barrier.h> +#ifdef CONFIG_PPC_64K_PAGES +#error "Page size not supported" +#endif + #define FIRST_USER_ADDRESS 0UL /* @@ -173,8 +173,6 @@ static inline void pgd_set(pgd_t *pgdp, unsigned long val) /* to find an entry in a kernel page-table-directory */ /* This now only contains the vmalloc pages */ #define pgd_offset_k(address) pgd_offset(&init_mm, address) -extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, unsigned long pte, int huge); /* Atomic PTE updates */ static inline unsigned long pte_update(struct mm_struct *mm, @@ -188,14 +186,12 @@ static inline unsigned long pte_update(struct mm_struct *mm, __asm__ __volatile__( "1: ldarx %0,0,%3 # pte_update\n\ - andi. %1,%0,%6\n\ - bne- 1b \n\ andc %1,%0,%4 \n\ - or %1,%1,%7\n\ + or %1,%1,%6\n\ stdcx. %1,0,%3 \n\ bne- 1b" : "=&r" (old), "=&r" (tmp), "=m" (*ptep) - : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set) + : "r" (ptep), "r" (clr), "m" (*ptep), "r" (set) : "cc" ); #else unsigned long old = pte_val(*ptep); @@ -205,20 +201,20 @@ static inline unsigned long pte_update(struct mm_struct *mm, if (!huge) assert_pte_locked(mm, addr); -#ifdef CONFIG_PPC_BOOK3S_64 - if (old & _PAGE_HASHPTE) - hpte_need_flush(mm, addr, ptep, old, huge); -#endif - return old; } +static inline int pte_young(pte_t pte) +{ + return pte_val(pte) & _PAGE_ACCESSED; +} + static inline int __ptep_test_and_clear_young(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { unsigned long old; - if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) + if (pte_young(*ptep)) return 0; old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); return (old & _PAGE_ACCESSED) != 0; @@ -285,9 +281,10 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, /* Set the dirty and/or accessed bits atomically in a linux PTE, this * function doesn't need to flush the hash entry */ -static inline void __ptep_set_access_flags(struct mm_struct *mm, +static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, pte_t entry, - unsigned long address) + unsigned long address, + int psize) { unsigned long bits = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); @@ -297,22 +294,22 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm, __asm__ __volatile__( "1: ldarx %0,0,%4\n\ - andi. %1,%0,%6\n\ - bne- 1b \n\ or %0,%3,%0\n\ stdcx. 
%0,0,%4\n\ bne- 1b" :"=&r" (old), "=&r" (tmp), "=m" (*ptep) - :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY) + :"r" (bits), "r" (ptep), "m" (*ptep) :"cc"); #else unsigned long old = pte_val(*ptep); *ptep = __pte(old | bits); #endif + + flush_tlb_page(vma, address); } #define __HAVE_ARCH_PTE_SAME -#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) +#define pte_same(A,B) ((pte_val(A) ^ pte_val(B)) == 0) #define pte_ERROR(e) \ pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) @@ -324,11 +321,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm, /* Encode and de-code a swap entry */ #define MAX_SWAPFILES_CHECK() do { \ BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \ - /* \ - * Don't have overlapping bits with _PAGE_HPTEFLAGS \ - * We filter HPTEFLAGS on set_pte. \ - */ \ - BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \ } while (0) /* * on pte we don't need handle RADIX_TREE_EXCEPTIONAL_SHIFT; diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index c56de1e8026f..2160be2e4339 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -17,7 +17,6 @@ static inline int pte_write(pte_t pte) } static inline int pte_read(pte_t pte) { return 1; } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } -static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } @@ -148,70 +147,33 @@ extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, int percpu) { -#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) - /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the - * helper pte_update() which does an atomic update. We need to do that - * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a - * per-CPU PTE such as a kmap_atomic, we do a simple update preserving - * the hash bits instead (ie, same as the non-SMP case) - */ - if (percpu) - *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) - | (pte_val(pte) & ~_PAGE_HASHPTE)); - else - pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); - -#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) /* Second case is 32-bit with 64-bit PTE. In this case, we * can just store as long as we do the two halves in the right order - * with a barrier in between. This is possible because we take care, - * in the hash code, to pre-invalidate if the PTE was already hashed, - * which synchronizes us with any concurrent invalidation. - * In the percpu case, we also fallback to the simple update preserving - * the hash bits + * with a barrier in between. 
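The stw/eieio/stw sequence just below is the "right order with a barrier" the comment promises: store the half that does not carry the valid bit first, fence, then store the half that makes the PTE visible. A portable model (which word holds the present bit is an assumption of this sketch):

#include <stdatomic.h>
#include <stdint.h>

static void set_pte64_on_32bit(uint32_t *word0, uint32_t *word1,
			       uint64_t pte)
{
	*word0 = (uint32_t)(pte >> 32);	/* half without the present bit */
	atomic_thread_fence(memory_order_release);	/* eieio analog */
	*word1 = (uint32_t)pte;		/* half with the present bit, last */
}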
+ * In the percpu case, we also fallback to the simple update */ - if (percpu) { - *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) - | (pte_val(pte) & ~_PAGE_HASHPTE)); + if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) { + __asm__ __volatile__("\ + stw%U0%X0 %2,%0\n\ + eieio\n\ + stw%U0%X0 %L2,%1" + : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) + : "r" (pte) : "memory"); return; } -#if _PAGE_HASHPTE != 0 - if (pte_val(*ptep) & _PAGE_HASHPTE) - flush_hash_entry(mm, ptep, addr); -#endif - __asm__ __volatile__("\ - stw%U0%X0 %2,%0\n\ - eieio\n\ - stw%U0%X0 %L2,%1" - : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) - : "r" (pte) : "memory"); - -#elif defined(CONFIG_PPC_STD_MMU_32) - /* Third case is 32-bit hash table in UP mode, we need to preserve - * the _PAGE_HASHPTE bit since we may not have invalidated the previous - * translation in the hash yet (done in a subsequent flush_tlb_xxx()) - * and see we need to keep track that this PTE needs invalidating - */ - *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) - | (pte_val(pte) & ~_PAGE_HASHPTE)); - -#else /* Anything else just stores the PTE normally. That covers all 64-bit * cases, and 32-bit non-hash with 32-bit PTEs. */ *ptep = pte; -#ifdef CONFIG_PPC_BOOK3E_64 /* * With hardware tablewalk, a sync is needed to ensure that * subsequent accesses see the PTE we just wrote. Unlike userspace * mappings, we can't tolerate spurious faults, so make sure * the new PTE will be seen the first time. */ - if (is_kernel_addr(addr)) + if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr)) mb(); -#endif -#endif } diff --git a/arch/powerpc/include/asm/nohash/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-book3e.h index ccee8eb509bb..12730b81cd98 100644 --- a/arch/powerpc/include/asm/nohash/pte-book3e.h +++ b/arch/powerpc/include/asm/nohash/pte-book3e.h @@ -57,14 +57,8 @@ #define _PAGE_USER (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */ #define _PAGE_PRIVILEGED (_PAGE_BAP_SR) -#define _PAGE_HASHPTE 0 -#define _PAGE_BUSY 0 - #define _PAGE_SPECIAL _PAGE_SW0 -/* Flags to be preserved on PTE modifications */ -#define _PAGE_HPTEFLAGS _PAGE_BUSY - /* Base page size */ #ifdef CONFIG_PPC_64K_PAGES #define _PAGE_PSIZE _PAGE_PSIZE_64K diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h index d886a5b7ff21..3bab299eda49 100644 --- a/arch/powerpc/include/asm/opal-api.h +++ b/arch/powerpc/include/asm/opal-api.h @@ -201,13 +201,21 @@ #define OPAL_SET_POWER_SHIFT_RATIO 155 #define OPAL_SENSOR_GROUP_CLEAR 156 #define OPAL_PCI_SET_P2P 157 +#define OPAL_QUIESCE 158 #define OPAL_NPU_SPA_SETUP 159 #define OPAL_NPU_SPA_CLEAR_CACHE 160 #define OPAL_NPU_TL_SET 161 +#define OPAL_SENSOR_READ_U64 162 #define OPAL_PCI_GET_PBCQ_TUNNEL_BAR 164 #define OPAL_PCI_SET_PBCQ_TUNNEL_BAR 165 #define OPAL_LAST 165 +#define QUIESCE_HOLD 1 /* Spin all calls at entry */ +#define QUIESCE_REJECT 2 /* Fail all calls with OPAL_BUSY */ +#define QUIESCE_LOCK_BREAK 3 /* Set to ignore locks. 
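These QUIESCE_* modes pair with the opal_quiesce() wrapper declared further down in opal.h. A hypothetical crash-path sequence (the -1 "all CPUs" convention and the overall flow are assumptions for illustration, not taken from this patch):

/* Ask OPAL to reject new calls; if a CPU died inside OPAL holding a
 * lock, escalate to lock-breaking; resume after the dump is taken. */
static void quiesce_for_dump(void)
{
	if (opal_quiesce(QUIESCE_REJECT, -1) != 0)
		opal_quiesce(QUIESCE_LOCK_BREAK, -1);

	/* ... capture crash state ... */

	opal_quiesce(QUIESCE_RESUME, -1);
}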
*/ +#define QUIESCE_RESUME 4 /* Un-quiesce */ +#define QUIESCE_RESUME_FAST_REBOOT 5 /* Un-quiesce, fast reboot */ + /* Device tree flags */ /* diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 03e1a920491e..e1b2910c6e81 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -201,6 +201,7 @@ int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer, int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer, uint64_t length); int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data); +int64_t opal_sensor_read_u64(u32 sensor_hndl, int token, __be64 *sensor_data); int64_t opal_handle_hmi(void); int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end); int64_t opal_unregister_dump_region(uint32_t id); @@ -293,6 +294,7 @@ int opal_set_power_shift_ratio(u32 handle, int token, u32 psr); int opal_sensor_group_clear(u32 group_hndl, int token); s64 opal_signal_system_reset(s32 cpu); +s64 opal_quiesce(u64 shutdown_type, s32 cpu); /* Internal functions */ extern int early_init_dt_scan_opal(unsigned long node, const char *uname, @@ -323,9 +325,10 @@ extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg); extern int opal_async_wait_response_interruptible(uint64_t token, struct opal_msg *msg); extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data); +extern int opal_get_sensor_data_u64(u32 sensor_hndl, u64 *sensor_data); struct rtc_time; -extern unsigned long opal_get_boot_time(void); +extern time64_t opal_get_boot_time(void); extern void opal_nvram_init(void); extern void opal_flash_update_init(void); extern void opal_flash_update_print_message(void); diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 3f109a3e3edb..6d34bd71139d 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -161,7 +161,7 @@ struct paca_struct { struct task_struct *__current; /* Pointer to current */ u64 kstack; /* Saved Kernel stack addr */ u64 stab_rr; /* stab/slb round-robin counter */ - u64 saved_r1; /* r1 save for RTAS calls or PM */ + u64 saved_r1; /* r1 save for RTAS calls or PM or EE=0 */ u64 saved_msr; /* MSR saved here by enter_rtas */ u16 trap_save; /* Used when bad stack is encountered */ u8 irq_soft_mask; /* mask for irq soft masking */ @@ -222,6 +222,7 @@ struct paca_struct { u8 hmi_event_available; /* HMI event is available */ u8 hmi_p9_special_emu; /* HMI P9 special emulation */ #endif + u8 ftrace_enabled; /* Hard disable ftrace */ /* Stuff for accurate time accounting */ struct cpu_accounting_data accounting; diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index dec9ce5ba8af..db7be0779d55 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -39,6 +39,7 @@ #ifndef __ASSEMBLY__ #ifdef CONFIG_HUGETLB_PAGE +extern bool hugetlb_disabled; extern unsigned int HPAGE_SHIFT; #else #define HPAGE_SHIFT PAGE_SHIFT diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 401c62aad5e4..2af9ded80540 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -92,24 +92,6 @@ extern int pci_mmap_legacy_page_range(struct pci_bus *bus, #define HAVE_PCI_LEGACY 1 -#ifdef CONFIG_PPC64 - -/* The PCI address space does not equal the physical memory address - * space (we have an IOMMU). The IDE and SCSI device layers use - * this boolean for bounce buffer decisions. 
- */ -#define PCI_DMA_BUS_IS_PHYS (0) - -#else /* 32-bit */ - -/* The PCI address space does equal the physical memory - * address space (no IOMMU). The IDE and SCSI device layers use - * this boolean for bounce buffer decisions. - */ -#define PCI_DMA_BUS_IS_PHYS (1) - -#endif /* CONFIG_PPC64 */ - extern void pcibios_claim_one_bus(struct pci_bus *b); extern void pcibios_finish_adding_to_bus(struct pci_bus *bus); diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index ab7d2d996be4..14c79a7dc855 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -8,6 +8,7 @@ #include <asm/processor.h> /* For TASK_SIZE */ #include <asm/mmu.h> #include <asm/page.h> +#include <asm/tlbflush.h> struct mm_struct; diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h index 0409c80c32c0..5ba80cffb505 100644 --- a/arch/powerpc/include/asm/pkeys.h +++ b/arch/powerpc/include/asm/pkeys.h @@ -15,19 +15,6 @@ DECLARE_STATIC_KEY_TRUE(pkey_disabled); extern int pkeys_total; /* total pkeys as per device tree */ extern u32 initial_allocation_mask; /* bits set for reserved keys */ -/* - * Define these here temporarily so we're not dependent on patching linux/mm.h. - * Once it's updated we can drop these. - */ -#ifndef VM_PKEY_BIT0 -# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0 -# define VM_PKEY_BIT0 VM_HIGH_ARCH_0 -# define VM_PKEY_BIT1 VM_HIGH_ARCH_1 -# define VM_PKEY_BIT2 VM_HIGH_ARCH_2 -# define VM_PKEY_BIT3 VM_HIGH_ARCH_3 -# define VM_PKEY_BIT4 VM_HIGH_ARCH_4 -#endif - #define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | \ VM_PKEY_BIT3 | VM_PKEY_BIT4) diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 96c1a46acbd0..cff5a411e595 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -39,10 +39,10 @@ static inline long extended_cede_processor(unsigned long latency_hint) set_cede_latency_hint(latency_hint); rc = cede_processor(); -#ifdef CONFIG_TRACE_IRQFLAGS - /* Ensure that H_CEDE returns with IRQs on */ - if (WARN_ON(!(mfmsr() & MSR_EE))) - __hard_irq_enable(); +#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG + /* Ensure that H_CEDE returns with IRQs on */ + if (WARN_ON(!(mfmsr() & MSR_EE))) + __hard_irq_enable(); #endif set_cede_latency_hint(old_latency_hint); diff --git a/arch/powerpc/include/asm/pmac_pfunc.h b/arch/powerpc/include/asm/pmac_pfunc.h index 73bd8f28f2a8..cee4e9f5b8cf 100644 --- a/arch/powerpc/include/asm/pmac_pfunc.h +++ b/arch/powerpc/include/asm/pmac_pfunc.h @@ -245,6 +245,7 @@ extern void pmf_put_function(struct pmf_function *func); extern int pmf_call_one(struct pmf_function *func, struct pmf_args *args); +int pmac_pfunc_base_install(void); /* Suspend/resume code called by via-pmu directly for now */ extern void pmac_pfunc_base_suspend(void); diff --git a/arch/powerpc/include/asm/pnv-ocxl.h b/arch/powerpc/include/asm/pnv-ocxl.h index f6945d3bc971..208b5503f4ed 100644 --- a/arch/powerpc/include/asm/pnv-ocxl.h +++ b/arch/powerpc/include/asm/pnv-ocxl.h @@ -28,7 +28,7 @@ extern int pnv_ocxl_map_xsl_regs(struct pci_dev *dev, void __iomem **dsisr, extern int pnv_ocxl_spa_setup(struct pci_dev *dev, void *spa_mem, int PE_mask, void **platform_data); extern void pnv_ocxl_spa_release(void *platform_data); -extern int pnv_ocxl_spa_remove_pe(void *platform_data, int pe_handle); +extern int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle); extern int pnv_ocxl_alloc_xive_irq(u32 *irq, 
u64 *trigger_addr); extern void pnv_ocxl_free_xive_irq(u32 irq); diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 18883b8a6dac..4436887bc415 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -162,6 +162,7 @@ /* VMX Vector Store Instructions */ #define OP_31_XOP_STVX 231 +#define OP_31 31 #define OP_LWZ 32 #define OP_STFS 52 #define OP_STFSU 53 diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 13f7f4c0e1ea..75ece56dcd62 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -80,10 +80,8 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) #else #define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base) #define REST_GPR(n, base) lwz n,GPR0+4*(n)(base) -#define SAVE_NVGPRS(base) SAVE_GPR(13, base); SAVE_8GPRS(14, base); \ - SAVE_10GPRS(22, base) -#define REST_NVGPRS(base) REST_GPR(13, base); REST_8GPRS(14, base); \ - REST_10GPRS(22, base) +#define SAVE_NVGPRS(base) stmw 13, GPR0+4*13(base) +#define REST_NVGPRS(base) lmw 13, GPR0+4*13(base) #endif #define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base) diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index c4b36a494a63..5debe337ea9d 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -249,7 +249,7 @@ struct thread_struct { unsigned long ksp_vsid; #endif struct pt_regs *regs; /* Pointer to saved register state */ - mm_segment_t fs; /* for get_fs() validation */ + mm_segment_t addr_limit; /* for get_fs() validation */ #ifdef CONFIG_BOOKE /* BookE base exception scratch space; align on cacheline */ unsigned long normsave[8] ____cacheline_aligned; @@ -264,10 +264,6 @@ struct thread_struct { struct thread_fp_state *fp_save_area; int fpexc_mode; /* floating-point exception mode */ unsigned int align_ctl; /* alignment handling control */ -#ifdef CONFIG_PPC64 - unsigned long start_tb; /* Start purr when proc switched in */ - unsigned long accum_tb; /* Total accumulated purr for process */ -#endif #ifdef CONFIG_HAVE_HW_BREAKPOINT struct perf_event *ptrace_bps[HBP_NUM]; /* @@ -383,7 +379,7 @@ struct thread_struct { #define INIT_THREAD { \ .ksp = INIT_SP, \ .ksp_limit = INIT_SP_LIMIT, \ - .fs = KERNEL_DS, \ + .addr_limit = KERNEL_DS, \ .pgdir = swapper_pg_dir, \ .fpexc_mode = MSR_FE0 | MSR_FE1, \ SPEFSCR_INIT \ @@ -392,7 +388,7 @@ struct thread_struct { #define INIT_THREAD { \ .ksp = INIT_SP, \ .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ - .fs = KERNEL_DS, \ + .addr_limit = KERNEL_DS, \ .fpexc_mode = 0, \ .ppr = INIT_PPR, \ .fscr = FSCR_TAR | FSCR_EBB \ diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h index c4a72c7a8c83..bef56141a549 100644 --- a/arch/powerpc/include/asm/pte-common.h +++ b/arch/powerpc/include/asm/pte-common.h @@ -60,10 +60,6 @@ #ifndef _PMD_PRESENT_MASK #define _PMD_PRESENT_MASK _PMD_PRESENT #endif -#ifndef _PMD_SIZE -#define _PMD_SIZE 0 -#define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE() -#endif #ifndef _PMD_USER #define _PMD_USER 0 #endif @@ -88,11 +84,7 @@ #define _PTE_NONE_MASK _PAGE_HPTEFLAGS #endif -/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a - * kernel without large page PMD support - */ #ifndef __ASSEMBLY__ -extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); /* * Don't just check for any non zero bits in __PAGE_USER, since for book3e @@ -216,9 +208,6 @@ static inline bool 
pte_user(pte_t pte)
 #define PAGE_AGP (PAGE_KERNEL_NC)
 #define HAVE_PAGE_AGP
 
-/* Advertise support for _PAGE_SPECIAL */
-#define __HAVE_ARCH_PTE_SPECIAL
-
 #ifndef _PAGE_READ
 /* if not defined, we should not find _PAGE_WRITE too */
 #define _PAGE_READ 0
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index cb0f272ce123..75c5b2cd9d66 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -146,6 +146,12 @@
 #define MSR_64BIT 0
 #endif
 
+/* Condition Register related */
+#define CR0_SHIFT 28
+#define CR0_MASK 0xF
+#define CR0_TBEGIN_FAILURE (0x2 << 28) /* 0b0010 */
+
+
 /* Power Management - Processor Stop Status and Control Register Fields */
 #define PSSCR_RL_MASK 0x0000000F /* Requested Level */
 #define PSSCR_MTL_MASK 0x000000F0 /* Maximum Transition Level */
@@ -239,13 +245,27 @@
 #define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */
 #define SPRN_TEXASR 0x82 /* Transaction EXception & Summary */
 #define SPRN_TEXASRU 0x83 /* '' '' '' Upper 32 */
-#define TEXASR_ABORT __MASK(63-31) /* terminated by tabort or treclaim */
-#define TEXASR_SUSP __MASK(63-32) /* tx failed in suspended state */
-#define TEXASR_HV __MASK(63-34) /* MSR[HV] when failure occurred */
-#define TEXASR_PR __MASK(63-35) /* MSR[PR] when failure occurred */
-#define TEXASR_FS __MASK(63-36) /* TEXASR Failure Summary */
-#define TEXASR_EXACT __MASK(63-37) /* TFIAR value is exact */
+
+#define TEXASR_FC_LG (63 - 7) /* Failure Code */
+#define TEXASR_AB_LG (63 - 31) /* Abort */
+#define TEXASR_SU_LG (63 - 32) /* Suspend */
+#define TEXASR_HV_LG (63 - 34) /* Hypervisor state */
+#define TEXASR_PR_LG (63 - 35) /* Privilege level */
+#define TEXASR_FS_LG (63 - 36) /* failure summary */
+#define TEXASR_EX_LG (63 - 37) /* TFIAR exact bit */
+#define TEXASR_ROT_LG (63 - 38) /* ROT bit */
+
+#define TEXASR_ABORT __MASK(TEXASR_AB_LG) /* terminated by tabort or treclaim */
+#define TEXASR_SUSP __MASK(TEXASR_SU_LG) /* tx failed in suspended state */
+#define TEXASR_HV __MASK(TEXASR_HV_LG) /* MSR[HV] when failure occurred */
+#define TEXASR_PR __MASK(TEXASR_PR_LG) /* MSR[PR] when failure occurred */
+#define TEXASR_FS __MASK(TEXASR_FS_LG) /* TEXASR Failure Summary */
+#define TEXASR_EXACT __MASK(TEXASR_EX_LG) /* TFIAR value is exact */
+#define TEXASR_ROT __MASK(TEXASR_ROT_LG)
+#define TEXASR_FC (ASM_CONST(0xFF) << TEXASR_FC_LG)
+
 #define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */
+
 #define SPRN_TIDR 144 /* Thread ID register */
 #define SPRN_CTRLF 0x088
 #define SPRN_CTRLT 0x098
diff --git a/arch/powerpc/include/asm/rheap.h b/arch/powerpc/include/asm/rheap.h
index 172381769cfc..8e83703d6736 100644
--- a/arch/powerpc/include/asm/rheap.h
+++ b/arch/powerpc/include/asm/rheap.h
@@ -83,6 +83,9 @@ extern int rh_get_stats(rh_info_t * info, int what, int max_stats,
 /* Simple dump of remote heap info */
 extern void rh_dump(rh_info_t * info);
 
+/* Simple dump of remote info block */
+void rh_dump_blk(rh_info_t *info, rh_block_t *blk);
+
 /* Set owner of taken block */
 extern int rh_set_owner(rh_info_t * info, unsigned long start,
 	const char *owner);
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index ec9dd79398ee..71e393c46a49 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -361,7 +361,7 @@ extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
 extern int rtas_ibm_suspend_me(u64 handle);
 
 struct rtc_time;
-extern unsigned long rtas_get_boot_time(void);
+extern time64_t rtas_get_boot_time(void);
 extern void rtas_get_rtc_time(struct rtc_time *rtc_time);
 extern int rtas_set_rtc_time(struct rtc_time *rtc_time);
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 27fa52ed6d00..8721fd004291 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -52,6 +52,15 @@ enum l1d_flush_type {
 
 void setup_rfi_flush(enum l1d_flush_type, bool enable);
 void do_rfi_flush_fixups(enum l1d_flush_type types);
+void setup_barrier_nospec(void);
+void do_barrier_nospec_fixups(bool enable);
+extern bool barrier_nospec_enabled;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
+#else
+static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
+#endif
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index cfecfee1194b..29ffaabdf75b 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -58,6 +58,7 @@ struct smp_ops_t {
 
 extern void smp_flush_nmi_ipi(u64 delay_us);
 extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
+extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
 extern void smp_send_debugger_break(void);
 extern void start_secondary_resume(void);
 extern void smp_generic_give_timebase(void);
diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h
index ab9d849644d0..4547891a684b 100644
--- a/arch/powerpc/include/asm/sstep.h
+++ b/arch/powerpc/include/asm/sstep.h
@@ -97,6 +97,8 @@ enum instruction_type {
 #define SIZE(n) ((n) << 12)
 #define GETSIZE(w) ((w) >> 12)
 
+#define GETTYPE(t) ((t) & INSTR_TYPE_MASK)
+
 #define MKOP(t, f, s) ((t) | (f) | SIZE(s))
 
 struct instruction_op {
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index be8c9fa23983..5b03d8a82409 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -94,6 +94,5 @@ static inline void clear_task_ebb(struct task_struct *t)
 extern int set_thread_uses_vas(void);
 
 extern int set_thread_tidr(struct task_struct *t);
-extern void clear_thread_tidr(struct task_struct *t);
 
 #endif /* _ASM_POWERPC_SWITCH_TO_H */
diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h
index 1b90a3516a35..398171fdcd9f 100644
--- a/arch/powerpc/include/asm/syscalls.h
+++ b/arch/powerpc/include/asm/syscalls.h
@@ -16,7 +16,7 @@ asmlinkage long sys_mmap2(unsigned long addr, size_t len,
 	unsigned long prot, unsigned long flags,
 	unsigned long fd, unsigned long pgoff);
 asmlinkage long ppc64_personality(unsigned long personality);
-asmlinkage int ppc_rtas(struct rtas_args __user *uargs);
+asmlinkage long sys_rtas(struct rtas_args __user *uargs);
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_SYSCALLS_H */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index d61f9c96d916..79a3b47e4839 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -147,7 +147,7 @@ SYSCALL_SPU(setfsuid)
 SYSCALL_SPU(setfsgid)
 SYSCALL_SPU(llseek)
 COMPAT_SYS_SPU(getdents)
-SYSX_SPU(sys_select,ppc32_select,sys_select)
+COMPAT_SPU_NEW(select)
 SYSCALL_SPU(flock)
 SYSCALL_SPU(msync)
 COMPAT_SYS_SPU(readv)
@@ -245,7 +245,7 @@ SYSCALL_SPU(epoll_create)
 SYSCALL_SPU(epoll_ctl)
 SYSCALL_SPU(epoll_wait)
 SYSCALL_SPU(remap_file_pages)
-SYSX_SPU(sys_timer_create,compat_sys_timer_create,sys_timer_create)
+COMPAT_SYS_SPU(timer_create)
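
The setup.h hunk above introduces the barrier_nospec plumbing (setup_barrier_nospec(), do_barrier_nospec_fixups(), and a Book3S-64-only range variant); the uaccess.h hunks further down then drop barrier_nospec() between each user-pointer bounds check and the dependent load, and entry_64.S gains barrier_nospec_asm before indexing the syscall table for the same reason. A minimal user-space model of the pattern being guarded, with barrier_nospec() reduced to a plain compiler barrier rather than the kernel's patched-in instruction sequence:

#include <stddef.h>
#include <stdio.h>

/* Stand-in only: the kernel patches in a real speculation barrier. */
#define barrier_nospec() __asm__ __volatile__("" ::: "memory")

static int table[16];

/* Bounds-check-then-load: the Spectre-v1 shape that get_user() must guard. */
static int load_checked(size_t idx, size_t nr, int *out)
{
	if (idx >= nr)
		return -1;
	/*
	 * The CPU may speculate past the check above and issue the
	 * dependent load with a hostile idx, leaving a measurable
	 * cache footprint.  The barrier keeps the load from running
	 * until the bounds check has resolved.
	 */
	barrier_nospec();
	*out = table[idx];
	return 0;
}

int main(void)
{
	int v;

	if (!load_checked(3, 16, &v))
		printf("table[3] = %d\n", v);
	return 0;
}
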
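The sstep.h hunk above also completes the accessors for the packed instruction_op type word: MKOP() ORs together a type, flag bits, and a size shifted up by 12, while GETTYPE() and GETSIZE() unpack the two ends (align.c below switches from open-coding the mask to GETTYPE()). A self-contained sketch of the encoding; the INSTR_TYPE_MASK value and the type/flag constants here are illustrative assumptions, not values taken from this patch:

#include <assert.h>
#include <stdio.h>

#define INSTR_TYPE_MASK 0x1f /* assumed width of the type field */
#define SIZE(n) ((n) << 12)
#define GETSIZE(w) ((w) >> 12)
#define GETTYPE(t) ((t) & INSTR_TYPE_MASK)
#define MKOP(t, f, s) ((t) | (f) | SIZE(s))

enum { LOAD = 1, STORE = 2 }; /* stand-in instruction types */
#define SIGNEXT 0x20 /* stand-in flag bit */

int main(void)
{
	unsigned int op = MKOP(LOAD, SIGNEXT, 8); /* 8-byte sign-extending load */

	assert(GETTYPE(op) == LOAD); /* low bits hold the type */
	assert(GETSIZE(op) == 8); /* high bits hold the size */
	assert(op & SIGNEXT); /* middle bits hold flags */
	printf("op=%#x type=%u size=%u\n", op, GETTYPE(op), GETSIZE(op));
	return 0;
}
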
COMPAT_SYS_SPU(timer_settime) COMPAT_SYS_SPU(timer_gettime) SYSCALL_SPU(timer_getoverrun) @@ -260,7 +260,7 @@ COMPAT_SYS_SPU(utimes) COMPAT_SYS_SPU(statfs64) COMPAT_SYS_SPU(fstatfs64) SYSX(sys_ni_syscall,ppc_fadvise64_64,ppc_fadvise64_64) -PPC_SYS_SPU(rtas) +SYSCALL_SPU(rtas) OLDSYS(debug_setcontext) SYSCALL(ni_syscall) COMPAT_SYS(migrate_pages) diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 5964145db03d..f308dfeb2746 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -79,8 +79,7 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ -#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling - TIF_NEED_RESCHED */ +#define TIF_FSCHECK 3 /* Check FS is USER_DS on return */ #define TIF_32BIT 4 /* 32 bit binary */ #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ #define TIF_PATCH_PENDING 6 /* pending live patching update */ @@ -99,6 +98,7 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src #if defined(CONFIG_PPC64) #define TIF_ELF2ABI 18 /* function descriptors must die! */ #endif +#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */ /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) @@ -118,13 +118,15 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE) #define _TIF_NOHZ (1<<TIF_NOHZ) +#define _TIF_FSCHECK (1<<TIF_FSCHECK) #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \ _TIF_NOHZ) #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ - _TIF_RESTORE_TM | _TIF_PATCH_PENDING) + _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \ + _TIF_FSCHECK) #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) /* Bits in local_flags */ diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index db546c034905..b80d492ceb29 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -26,9 +26,6 @@ extern unsigned long tb_ticks_per_usec; extern unsigned long tb_ticks_per_sec; extern struct clock_event_device decrementer_clockevent; -struct rtc_time; -extern void to_tm(int tim, struct rtc_time * tm); -extern void tick_broadcast_ipi_handler(void); extern void generic_calibrate_decr(void); extern void hdec_interrupt(struct pt_regs *regs); @@ -196,14 +193,6 @@ extern u64 mulhdu(u64, u64); extern void div128_by_32(u64 dividend_high, u64 dividend_low, unsigned divisor, struct div_result *dr); -/* Used to store Processor Utilization register (purr) values */ - -struct cpu_usage { - u64 current_tb; /* Holds the current purr register values */ -}; - -DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array); - extern void secondary_cpu_time_init(void); extern void __init time_init(void); diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h index a7eabff27a0f..9138baccebb0 100644 --- a/arch/powerpc/include/asm/tlb.h +++ b/arch/powerpc/include/asm/tlb.h @@ -76,6 +76,19 @@ static inline int mm_is_thread_local(struct mm_struct *mm) return false; return cpumask_test_cpu(smp_processor_id(), 
mm_cpumask(mm)); } +static inline void mm_reset_thread_local(struct mm_struct *mm) +{ + WARN_ON(atomic_read(&mm->context.copros) > 0); + /* + * It's possible for mm_access to take a reference on mm_users to + * access the remote mm from another thread, but it's not allowed + * to set mm_cpumask, so mm_users may be > 1 here. + */ + WARN_ON(current->mm != mm); + atomic_set(&mm->context.active_cpus, 1); + cpumask_clear(mm_cpumask(mm)); + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); +} #else /* CONFIG_PPC_BOOK3S_64 */ static inline int mm_is_thread_local(struct mm_struct *mm) { diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h index b1658c97047c..e94f6db5e367 100644 --- a/arch/powerpc/include/asm/tm.h +++ b/arch/powerpc/include/asm/tm.h @@ -10,12 +10,10 @@ #ifndef __ASSEMBLY__ -extern void tm_enable(void); extern void tm_reclaim(struct thread_struct *thread, uint8_t cause); extern void tm_reclaim_current(uint8_t cause); extern void tm_recheckpoint(struct thread_struct *thread); -extern void tm_abort(uint8_t cause); extern void tm_save_sprs(struct thread_struct *thread); extern void tm_restore_sprs(struct thread_struct *thread); diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h index 33f3b479138b..d018e8602694 100644 --- a/arch/powerpc/include/asm/trace.h +++ b/arch/powerpc/include/asm/trace.h @@ -81,8 +81,7 @@ TRACE_EVENT_FN_COND(hcall_entry, TRACE_EVENT_FN_COND(hcall_exit, - TP_PROTO(unsigned long opcode, unsigned long retval, - unsigned long *retbuf), + TP_PROTO(unsigned long opcode, long retval, unsigned long *retbuf), TP_ARGS(opcode, retval, retbuf), @@ -90,7 +89,7 @@ TRACE_EVENT_FN_COND(hcall_exit, TP_STRUCT__entry( __field(unsigned long, opcode) - __field(unsigned long, retval) + __field(long, retval) ), TP_fast_assign( @@ -98,7 +97,7 @@ TRACE_EVENT_FN_COND(hcall_exit, __entry->retval = retval; ), - TP_printk("opcode=%lu retval=%lu", __entry->opcode, __entry->retval), + TP_printk("opcode=%lu retval=%ld", __entry->opcode, __entry->retval), hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc ); diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index a62ee663b2c8..468653ce844c 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -30,8 +30,14 @@ #endif #define get_ds() (KERNEL_DS) -#define get_fs() (current->thread.fs) -#define set_fs(val) (current->thread.fs = (val)) +#define get_fs() (current->thread.addr_limit) + +static inline void set_fs(mm_segment_t fs) +{ + current->thread.addr_limit = fs; + /* On user-mode return check addr_limit (fs) is correct */ + set_thread_flag(TIF_FSCHECK); +} #define segment_eq(a, b) ((a).seg == (b).seg) @@ -252,6 +258,7 @@ do { \ __chk_user_ptr(ptr); \ if (!is_kernel_addr((unsigned long)__gu_addr)) \ might_fault(); \ + barrier_nospec(); \ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ (x) = (__typeof__(*(ptr)))__gu_val; \ __gu_err; \ @@ -263,8 +270,10 @@ do { \ unsigned long __gu_val = 0; \ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ might_fault(); \ - if (access_ok(VERIFY_READ, __gu_addr, (size))) \ + if (access_ok(VERIFY_READ, __gu_addr, (size))) { \ + barrier_nospec(); \ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + } \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) @@ -275,6 +284,7 @@ do { \ unsigned long __gu_val; \ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ __chk_user_ptr(ptr); \ + barrier_nospec(); \ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ 
(x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ @@ -302,15 +312,19 @@ static inline unsigned long raw_copy_from_user(void *to, switch (n) { case 1: + barrier_nospec(); __get_user_size(*(u8 *)to, from, 1, ret); break; case 2: + barrier_nospec(); __get_user_size(*(u16 *)to, from, 2, ret); break; case 4: + barrier_nospec(); __get_user_size(*(u32 *)to, from, 4, ret); break; case 8: + barrier_nospec(); __get_user_size(*(u64 *)to, from, 8, ret); break; } @@ -318,6 +332,7 @@ static inline unsigned long raw_copy_from_user(void *to, return 0; } + barrier_nospec(); return __copy_tofrom_user((__force void __user *)to, from, n); } diff --git a/arch/powerpc/include/asm/xive-regs.h b/arch/powerpc/include/asm/xive-regs.h index fa4288822b68..6de989f8defd 100644 --- a/arch/powerpc/include/asm/xive-regs.h +++ b/arch/powerpc/include/asm/xive-regs.h @@ -123,10 +123,4 @@ #define TM_QW3_NSR_I PPC_BIT8(2) #define TM_QW3_NSR_GRP_LVL PPC_BIT8(3,7) -/* Utilities to manipulate these (originaly from OPAL) */ -#define MASK_TO_LSH(m) (__builtin_ffsl(m) - 1) -#define GETFIELD(m, v) (((v) & (m)) >> MASK_TO_LSH(m)) -#define SETFIELD(m, v, val) \ - (((v) & ~(m)) | ((((typeof(v))(val)) << MASK_TO_LSH(m)) & (m))) - #endif /* _ASM_POWERPC_XIVE_REGS_H */ diff --git a/arch/powerpc/include/asm/xmon.h b/arch/powerpc/include/asm/xmon.h index eb42a0c6e1d9..30ff69bd8f43 100644 --- a/arch/powerpc/include/asm/xmon.h +++ b/arch/powerpc/include/asm/xmon.h @@ -29,7 +29,7 @@ static inline void xmon_register_spus(struct list_head *list) { }; extern int cpus_are_in_xmon(void); #endif -extern void xmon_printf(const char *format, ...); +extern __printf(1, 2) void xmon_printf(const char *format, ...); #endif /* __KERNEL __ */ #endif /* __ASM_POWERPC_XMON_H */ diff --git a/arch/powerpc/include/asm/xor.h b/arch/powerpc/include/asm/xor.h index a36c2069d8ed..7d6dc503349d 100644 --- a/arch/powerpc/include/asm/xor.h +++ b/arch/powerpc/include/asm/xor.h @@ -24,17 +24,7 @@ #include <asm/cputable.h> #include <asm/cpu_has_feature.h> - -void xor_altivec_2(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in); -void xor_altivec_3(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in, unsigned long *v3_in); -void xor_altivec_4(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in, unsigned long *v3_in, - unsigned long *v4_in); -void xor_altivec_5(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in, unsigned long *v3_in, - unsigned long *v4_in, unsigned long *v5_in); +#include <asm/xor_altivec.h> static struct xor_block_template xor_block_altivec = { .name = "altivec", diff --git a/arch/powerpc/include/asm/xor_altivec.h b/arch/powerpc/include/asm/xor_altivec.h new file mode 100644 index 000000000000..6ca923510b59 --- /dev/null +++ b/arch/powerpc/include/asm/xor_altivec.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_XOR_ALTIVEC_H +#define _ASM_POWERPC_XOR_ALTIVEC_H + +#ifdef CONFIG_ALTIVEC + +void xor_altivec_2(unsigned long bytes, unsigned long *v1_in, + unsigned long *v2_in); +void xor_altivec_3(unsigned long bytes, unsigned long *v1_in, + unsigned long *v2_in, unsigned long *v3_in); +void xor_altivec_4(unsigned long bytes, unsigned long *v1_in, + unsigned long *v2_in, unsigned long *v3_in, + unsigned long *v4_in); +void xor_altivec_5(unsigned long bytes, unsigned long *v1_in, + unsigned long *v2_in, unsigned long *v3_in, + unsigned long *v4_in, unsigned long *v5_in); + +#endif +#endif /* _ASM_POWERPC_XOR_ALTIVEC_H */ diff --git 
a/arch/powerpc/include/uapi/asm/msgbuf.h b/arch/powerpc/include/uapi/asm/msgbuf.h index 65beb0942500..2b1b37797a47 100644 --- a/arch/powerpc/include/uapi/asm/msgbuf.h +++ b/arch/powerpc/include/uapi/asm/msgbuf.h @@ -10,18 +10,18 @@ struct msqid64_ds { struct ipc64_perm msg_perm; -#ifndef __powerpc64__ - unsigned int __unused1; -#endif +#ifdef __powerpc64__ __kernel_time_t msg_stime; /* last msgsnd time */ -#ifndef __powerpc64__ - unsigned int __unused2; -#endif __kernel_time_t msg_rtime; /* last msgrcv time */ -#ifndef __powerpc64__ - unsigned int __unused3; -#endif __kernel_time_t msg_ctime; /* last change time */ +#else + unsigned long msg_stime_high; + unsigned long msg_stime; /* last msgsnd time */ + unsigned long msg_rtime_high; + unsigned long msg_rtime; /* last msgrcv time */ + unsigned long msg_ctime_high; + unsigned long msg_ctime; /* last change time */ +#endif unsigned long msg_cbytes; /* current number of bytes on queue */ unsigned long msg_qnum; /* number of messages in queue */ unsigned long msg_qbytes; /* max number of bytes on queue */ diff --git a/arch/powerpc/include/uapi/asm/sembuf.h b/arch/powerpc/include/uapi/asm/sembuf.h index 8f393d60f02d..3f60946f77e3 100644 --- a/arch/powerpc/include/uapi/asm/sembuf.h +++ b/arch/powerpc/include/uapi/asm/sembuf.h @@ -15,20 +15,20 @@ * between kernel and user space. * * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values + * - 2 miscellaneous 32/64-bit values */ struct semid64_ds { struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ #ifndef __powerpc64__ - unsigned long __unused1; -#endif + unsigned long sem_otime_high; + unsigned long sem_otime; /* last semop time */ + unsigned long sem_ctime_high; + unsigned long sem_ctime; /* last change time */ +#else __kernel_time_t sem_otime; /* last semop time */ -#ifndef __powerpc64__ - unsigned long __unused2; -#endif __kernel_time_t sem_ctime; /* last change time */ +#endif unsigned long sem_nsems; /* no. of semaphores in array */ unsigned long __unused3; unsigned long __unused4; diff --git a/arch/powerpc/include/uapi/asm/shmbuf.h b/arch/powerpc/include/uapi/asm/shmbuf.h index deb1c3e503d3..b591c4d7e4c5 100644 --- a/arch/powerpc/include/uapi/asm/shmbuf.h +++ b/arch/powerpc/include/uapi/asm/shmbuf.h @@ -16,25 +16,22 @@ * between kernel and user space. 
* * Pad space is left for: - * - 64-bit time_t to solve y2038 problem * - 2 miscellaneous 32-bit values */ struct shmid64_ds { struct ipc64_perm shm_perm; /* operation perms */ -#ifndef __powerpc64__ - unsigned long __unused1; -#endif +#ifdef __powerpc64__ __kernel_time_t shm_atime; /* last attach time */ -#ifndef __powerpc64__ - unsigned long __unused2; -#endif __kernel_time_t shm_dtime; /* last detach time */ -#ifndef __powerpc64__ - unsigned long __unused3; -#endif __kernel_time_t shm_ctime; /* last change time */ -#ifndef __powerpc64__ +#else + unsigned long shm_atime_high; + unsigned long shm_atime; /* last attach time */ + unsigned long shm_dtime_high; + unsigned long shm_dtime; /* last detach time */ + unsigned long shm_ctime_high; + unsigned long shm_ctime; /* last change time */ unsigned long __unused4; #endif size_t shm_segsz; /* size of segment (bytes) */ diff --git a/arch/powerpc/include/uapi/asm/siginfo.h b/arch/powerpc/include/uapi/asm/siginfo.h index 9f142451a01f..1d51d9b88221 100644 --- a/arch/powerpc/include/uapi/asm/siginfo.h +++ b/arch/powerpc/include/uapi/asm/siginfo.h @@ -15,19 +15,4 @@ #include <asm-generic/siginfo.h> -/* - * SIGFPE si_codes - */ -#ifdef __KERNEL__ -#define FPE_FIXME 0 /* Broken dup of SI_USER */ -#endif /* __KERNEL__ */ - -/* - * SIGTRAP si_codes - */ -#ifdef __KERNEL__ -#define TRAP_FIXME 0 /* Broken dup of SI_USER */ -#endif /* __KERNEL__ */ - - #endif /* _ASM_POWERPC_SIGINFO_H */ diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 3e6c0744c174..11550a3d1ac2 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -339,7 +339,7 @@ int fix_alignment(struct pt_regs *regs) if (r < 0) return -EINVAL; - type = op.type & INSTR_TYPE_MASK; + type = GETTYPE(op.type); if (!OP_IS_LOAD_STORE(type)) { if (op.type != CACHEOP + DCBZ) return -EINVAL; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 373dc1d6ef44..9fc9e0977009 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -13,6 +13,7 @@ * 2 of the License, or (at your option) any later version. 
*/ +#include <linux/compat.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> @@ -42,7 +43,6 @@ #include <asm/paca.h> #include <asm/lppaca.h> #include <asm/cache.h> -#include <asm/compat.h> #include <asm/mmu.h> #include <asm/hvcall.h> #include <asm/xics.h> @@ -180,6 +180,7 @@ int main(void) OFFSET(PACAKMSR, paca_struct, kernel_msr); OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask); OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened); + OFFSET(PACA_FTRACE_ENABLED, paca_struct, ftrace_enabled); #ifdef CONFIG_PPC_BOOK3S OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id); #ifdef CONFIG_PPC_MM_SLICES diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 6537cba1a758..b2072d5bbf2b 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -157,20 +157,20 @@ void btext_map(void) /* By default, we are no longer mapped */ boot_text_mapped = 0; - if (dispDeviceBase == 0) + if (!dispDeviceBase) return; base = ((unsigned long) dispDeviceBase) & 0xFFFFF000UL; offset = ((unsigned long) dispDeviceBase) - base; size = dispDeviceRowBytes * dispDeviceRect[3] + offset + dispDeviceRect[0]; vbase = __ioremap(base, size, pgprot_val(pgprot_noncached_wc(__pgprot(0)))); - if (vbase == 0) + if (!vbase) return; logicalDisplayBase = vbase + offset; boot_text_mapped = 1; } -int btext_initialize(struct device_node *np) +static int btext_initialize(struct device_node *np) { unsigned int width, height, depth, pitch; unsigned long address = 0; @@ -270,7 +270,7 @@ static unsigned char * calc_base(int x, int y) unsigned char *base; base = logicalDisplayBase; - if (base == 0) + if (!base) base = dispDeviceBase; base += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3); base += (y + dispDeviceRect[1]) * dispDeviceRowBytes; @@ -281,7 +281,7 @@ static unsigned char * calc_base(int x, int y) void btext_update_display(unsigned long phys, int width, int height, int depth, int pitch) { - if (dispDeviceBase == 0) + if (!dispDeviceBase) return; /* check it's the same frame buffer (within 256MB) */ diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index da20569de9d4..155170d70324 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -235,8 +235,6 @@ static inline dma_addr_t dma_nommu_map_page(struct device *dev, enum dma_data_direction dir, unsigned long attrs) { - BUG_ON(dir == DMA_NONE); - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) __dma_sync_page(page, offset, size, dir); @@ -309,8 +307,6 @@ int dma_set_coherent_mask(struct device *dev, u64 mask) } EXPORT_SYMBOL(dma_set_coherent_mask); -#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) - int dma_set_mask(struct device *dev, u64 dma_mask) { if (ppc_md.dma_set_mask) @@ -361,7 +357,6 @@ EXPORT_SYMBOL_GPL(dma_get_required_mask); static int __init dma_init(void) { - dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); #ifdef CONFIG_PCI dma_debug_add_bus(&pci_bus_type); #endif diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index c904477abaf3..4be1c0de9406 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -717,6 +717,7 @@ static __init void cpufeatures_cpu_quirks(void) if ((version & 0xffff0000) == 0x004e0000) { cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR); cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG; + cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR; } /* diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index bc640e4c5ca5..5746809cfaad 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c 
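
The eeh.c, eeh_driver.c, and eeh_pe.c changes that follow retype the EEH traversal callbacks: instead of every callback receiving void *data and casting it back, eeh_pe_traverse() and eeh_pe_dev_traverse() now take eeh_pe_traverse_func and eeh_edev_traverse_func callbacks that are handed a typed struct eeh_pe * or struct eeh_dev * directly. A reduced sketch of the shape of that conversion, using simplified stand-in types rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

struct eeh_dev { unsigned int pe_config_addr; };

/* Old style: callbacks took void * and cast it back themselves. */
typedef void *(*eeh_traverse_func)(void *data, void *flag);

static void *dump_dev_old(void *data, void *flag)
{
	struct eeh_dev *edev = data; /* boilerplate cast in every callback */
	(void)flag;
	printf("PE#%x\n", edev->pe_config_addr);
	return NULL;
}

/* New style: the traversal helper owns the type, callbacks stay clean. */
typedef void *(*eeh_edev_traverse_func)(struct eeh_dev *edev, void *flag);

static void *dump_dev_new(struct eeh_dev *edev, void *flag)
{
	(void)flag;
	printf("PE#%x\n", edev->pe_config_addr);
	return NULL;
}

/* A non-NULL return stops the walk early, mirroring the kernel helpers. */
static void traverse(struct eeh_dev *devs, size_t n,
		     eeh_edev_traverse_func fn, void *flag)
{
	for (size_t i = 0; i < n; i++)
		if (fn(&devs[i], flag))
			return;
}

int main(void)
{
	struct eeh_dev devs[] = { { 0x1 }, { 0x2 } };

	dump_dev_old(&devs[0], NULL); /* old calling convention */
	traverse(devs, 2, dump_dev_new, NULL); /* new, type-checked */
	return 0;
}

Besides removing a cast at the top of every callback, the typed function pointers let the compiler reject a PE callback passed where a device callback is expected, which the old void * signatures silently allowed.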
@@ -263,9 +263,8 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) return n; } -static void *eeh_dump_pe_log(void *data, void *flag) +static void *eeh_dump_pe_log(struct eeh_pe *pe, void *flag) { - struct eeh_pe *pe = data; struct eeh_dev *edev, *tmp; size_t *plen = flag; @@ -542,8 +541,12 @@ int eeh_dev_check_failure(struct eeh_dev *edev) /* Frozen parent PE ? */ ret = eeh_ops->get_state(parent_pe, NULL); - if (ret > 0 && !eeh_state_active(ret)) + if (ret > 0 && !eeh_state_active(ret)) { pe = parent_pe; + pr_err("EEH: Failure of PHB#%x-PE#%x will be handled at parent PHB#%x-PE#%x.\n", + pe->phb->global_number, pe->addr, + pe->phb->global_number, parent_pe->addr); + } /* Next parent level */ parent_pe = parent_pe->parent; @@ -686,9 +689,9 @@ int eeh_pci_enable(struct eeh_pe *pe, int function) return rc; } -static void *eeh_disable_and_save_dev_state(void *data, void *userdata) +static void *eeh_disable_and_save_dev_state(struct eeh_dev *edev, + void *userdata) { - struct eeh_dev *edev = data; struct pci_dev *pdev = eeh_dev_to_pci_dev(edev); struct pci_dev *dev = userdata; @@ -714,9 +717,8 @@ static void *eeh_disable_and_save_dev_state(void *data, void *userdata) return NULL; } -static void *eeh_restore_dev_state(void *data, void *userdata) +static void *eeh_restore_dev_state(struct eeh_dev *edev, void *userdata) { - struct eeh_dev *edev = data; struct pci_dn *pdn = eeh_dev_to_pdn(edev); struct pci_dev *pdev = eeh_dev_to_pci_dev(edev); struct pci_dev *dev = userdata; @@ -856,11 +858,10 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat * the indicated device and its children so that the bunch of the * devices could be reset properly. */ -static void *eeh_set_dev_freset(void *data, void *flag) +static void *eeh_set_dev_freset(struct eeh_dev *edev, void *flag) { struct pci_dev *dev; unsigned int *freset = (unsigned int *)flag; - struct eeh_dev *edev = (struct eeh_dev *)data; dev = eeh_dev_to_pci_dev(edev); if (dev) @@ -1775,18 +1776,6 @@ static int proc_eeh_show(struct seq_file *m, void *v) return 0; } -static int proc_eeh_open(struct inode *inode, struct file *file) -{ - return single_open(file, proc_eeh_show, NULL); -} - -static const struct file_operations proc_eeh_operations = { - .open = proc_eeh_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - #ifdef CONFIG_DEBUG_FS static int eeh_enable_dbgfs_set(void *data, u64 val) { @@ -1828,7 +1817,7 @@ DEFINE_SIMPLE_ATTRIBUTE(eeh_freeze_dbgfs_ops, eeh_freeze_dbgfs_get, static int __init eeh_init_proc(void) { if (machine_is(pseries) || machine_is(powernv)) { - proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations); + proc_create_single("powerpc/eeh", 0, NULL, proc_eeh_show); #ifdef CONFIG_DEBUG_FS debugfs_create_file("eeh_enable", 0600, powerpc_debugfs_root, NULL, diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index b8a329f04814..67619b4b3f96 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -39,18 +39,82 @@ struct eeh_rmv_data { int removed; }; -/** - * eeh_pcid_name - Retrieve name of PCI device driver - * @pdev: PCI device - * - * This routine is used to retrieve the name of PCI device driver - * if that's valid. 
- */ -static inline const char *eeh_pcid_name(struct pci_dev *pdev) +static int eeh_result_priority(enum pci_ers_result result) +{ + switch (result) { + case PCI_ERS_RESULT_NONE: + return 1; + case PCI_ERS_RESULT_NO_AER_DRIVER: + return 2; + case PCI_ERS_RESULT_RECOVERED: + return 3; + case PCI_ERS_RESULT_CAN_RECOVER: + return 4; + case PCI_ERS_RESULT_DISCONNECT: + return 5; + case PCI_ERS_RESULT_NEED_RESET: + return 6; + default: + WARN_ONCE(1, "Unknown pci_ers_result value: %d\n", (int)result); + return 0; + } +}; + +const char *pci_ers_result_name(enum pci_ers_result result) +{ + switch (result) { + case PCI_ERS_RESULT_NONE: + return "none"; + case PCI_ERS_RESULT_CAN_RECOVER: + return "can recover"; + case PCI_ERS_RESULT_NEED_RESET: + return "need reset"; + case PCI_ERS_RESULT_DISCONNECT: + return "disconnect"; + case PCI_ERS_RESULT_RECOVERED: + return "recovered"; + case PCI_ERS_RESULT_NO_AER_DRIVER: + return "no AER driver"; + default: + WARN_ONCE(1, "Unknown result type: %d\n", (int)result); + return "unknown"; + } +}; + +static __printf(2, 3) void eeh_edev_info(const struct eeh_dev *edev, + const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + printk(KERN_INFO "EEH: PE#%x (PCI %s): %pV\n", edev->pe_config_addr, + edev->pdev ? dev_name(&edev->pdev->dev) : "none", &vaf); + + va_end(args); +} + +static enum pci_ers_result pci_ers_merge_result(enum pci_ers_result old, + enum pci_ers_result new) +{ + if (eeh_result_priority(new) > eeh_result_priority(old)) + return new; + return old; +} + +static bool eeh_dev_removed(struct eeh_dev *edev) { - if (pdev && pdev->dev.driver) - return pdev->dev.driver->name; - return ""; + return !edev || (edev->mode & EEH_DEV_REMOVED); +} + +static bool eeh_edev_actionable(struct eeh_dev *edev) +{ + return (edev->pdev && !eeh_dev_removed(edev) && + !eeh_pe_passed(edev->pe)); } /** @@ -98,22 +162,20 @@ static inline void eeh_pcid_put(struct pci_dev *pdev) * do real work because EEH should freeze DMA transfers for those PCI * devices encountering EEH errors, which includes MSI or MSI-X. */ -static void eeh_disable_irq(struct pci_dev *dev) +static void eeh_disable_irq(struct eeh_dev *edev) { - struct eeh_dev *edev = pci_dev_to_eeh_dev(dev); - /* Don't disable MSI and MSI-X interrupts. They are * effectively disabled by the DMA Stopped state * when an EEH error occurs. */ - if (dev->msi_enabled || dev->msix_enabled) + if (edev->pdev->msi_enabled || edev->pdev->msix_enabled) return; - if (!irq_has_action(dev->irq)) + if (!irq_has_action(edev->pdev->irq)) return; edev->mode |= EEH_DEV_IRQ_DISABLED; - disable_irq_nosync(dev->irq); + disable_irq_nosync(edev->pdev->irq); } /** @@ -123,10 +185,8 @@ static void eeh_disable_irq(struct pci_dev *dev) * This routine must be called to enable interrupt while failed * device could be resumed. */ -static void eeh_enable_irq(struct pci_dev *dev) +static void eeh_enable_irq(struct eeh_dev *edev) { - struct eeh_dev *edev = pci_dev_to_eeh_dev(dev); - if ((edev->mode) & EEH_DEV_IRQ_DISABLED) { edev->mode &= ~EEH_DEV_IRQ_DISABLED; /* @@ -149,23 +209,13 @@ static void eeh_enable_irq(struct pci_dev *dev) * * tglx */ - if (irqd_irq_disabled(irq_get_irq_data(dev->irq))) - enable_irq(dev->irq); + if (irqd_irq_disabled(irq_get_irq_data(edev->pdev->irq))) + enable_irq(edev->pdev->irq); } } -static bool eeh_dev_removed(struct eeh_dev *edev) -{ - /* EEH device removed ? 
*/ - if (!edev || (edev->mode & EEH_DEV_REMOVED)) - return true; - - return false; -} - -static void *eeh_dev_save_state(void *data, void *userdata) +static void *eeh_dev_save_state(struct eeh_dev *edev, void *userdata) { - struct eeh_dev *edev = data; struct pci_dev *pdev; if (!edev) @@ -189,144 +239,155 @@ static void *eeh_dev_save_state(void *data, void *userdata) return NULL; } -/** - * eeh_report_error - Report pci error to each device driver - * @data: eeh device - * @userdata: return value - * - * Report an EEH error to each device driver, collect up and - * merge the device driver responses. Cumulative response - * passed back in "userdata". - */ -static void *eeh_report_error(void *data, void *userdata) +static void eeh_set_channel_state(struct eeh_pe *root, enum pci_channel_state s) { - struct eeh_dev *edev = (struct eeh_dev *)data; - struct pci_dev *dev = eeh_dev_to_pci_dev(edev); - enum pci_ers_result rc, *res = userdata; - struct pci_driver *driver; + struct eeh_pe *pe; + struct eeh_dev *edev, *tmp; - if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe)) - return NULL; + eeh_for_each_pe(root, pe) + eeh_pe_for_each_dev(pe, edev, tmp) + if (eeh_edev_actionable(edev)) + edev->pdev->error_state = s; +} - device_lock(&dev->dev); - dev->error_state = pci_channel_io_frozen; +static void eeh_set_irq_state(struct eeh_pe *root, bool enable) +{ + struct eeh_pe *pe; + struct eeh_dev *edev, *tmp; - driver = eeh_pcid_get(dev); - if (!driver) goto out_no_dev; + eeh_for_each_pe(root, pe) { + eeh_pe_for_each_dev(pe, edev, tmp) { + if (!eeh_edev_actionable(edev)) + continue; - eeh_disable_irq(dev); + if (!eeh_pcid_get(edev->pdev)) + continue; - if (!driver->err_handler || - !driver->err_handler->error_detected) - goto out; + if (enable) + eeh_enable_irq(edev); + else + eeh_disable_irq(edev); - rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen); + eeh_pcid_put(edev->pdev); + } + } +} - /* A driver that needs a reset trumps all others */ - if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; - if (*res == PCI_ERS_RESULT_NONE) *res = rc; +typedef enum pci_ers_result (*eeh_report_fn)(struct eeh_dev *, + struct pci_driver *); +static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn, + enum pci_ers_result *result) +{ + struct pci_driver *driver; + enum pci_ers_result new_result; + + device_lock(&edev->pdev->dev); + if (eeh_edev_actionable(edev)) { + driver = eeh_pcid_get(edev->pdev); + + if (!driver) + eeh_edev_info(edev, "no driver"); + else if (!driver->err_handler) + eeh_edev_info(edev, "driver not EEH aware"); + else if (edev->mode & EEH_DEV_NO_HANDLER) + eeh_edev_info(edev, "driver bound too late"); + else { + new_result = fn(edev, driver); + eeh_edev_info(edev, "%s driver reports: '%s'", + driver->name, + pci_ers_result_name(new_result)); + if (result) + *result = pci_ers_merge_result(*result, + new_result); + } + if (driver) + eeh_pcid_put(edev->pdev); + } else { + eeh_edev_info(edev, "not actionable (%d,%d,%d)", !!edev->pdev, + !eeh_dev_removed(edev), !eeh_pe_passed(edev->pe)); + } + device_unlock(&edev->pdev->dev); +} - edev->in_error = true; - pci_uevent_ers(dev, PCI_ERS_RESULT_NONE); +static void eeh_pe_report(const char *name, struct eeh_pe *root, + eeh_report_fn fn, enum pci_ers_result *result) +{ + struct eeh_pe *pe; + struct eeh_dev *edev, *tmp; -out: - eeh_pcid_put(dev); -out_no_dev: - device_unlock(&dev->dev); - return NULL; + pr_info("EEH: Beginning: '%s'\n", name); + eeh_for_each_pe(root, pe) eeh_pe_for_each_dev(pe, edev, tmp) + 
eeh_pe_report_edev(edev, fn, result); + if (result) + pr_info("EEH: Finished:'%s' with aggregate recovery state:'%s'\n", + name, pci_ers_result_name(*result)); + else + pr_info("EEH: Finished:'%s'", name); } /** - * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled - * @data: eeh device - * @userdata: return value + * eeh_report_error - Report pci error to each device driver + * @edev: eeh device + * @driver: device's PCI driver * - * Tells each device driver that IO ports, MMIO and config space I/O - * are now enabled. Collects up and merges the device driver responses. - * Cumulative response passed back in "userdata". + * Report an EEH error to each device driver. */ -static void *eeh_report_mmio_enabled(void *data, void *userdata) +static enum pci_ers_result eeh_report_error(struct eeh_dev *edev, + struct pci_driver *driver) { - struct eeh_dev *edev = (struct eeh_dev *)data; - struct pci_dev *dev = eeh_dev_to_pci_dev(edev); - enum pci_ers_result rc, *res = userdata; - struct pci_driver *driver; - - if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe)) - return NULL; - - device_lock(&dev->dev); - driver = eeh_pcid_get(dev); - if (!driver) goto out_no_dev; + enum pci_ers_result rc; + struct pci_dev *dev = edev->pdev; - if (!driver->err_handler || - !driver->err_handler->mmio_enabled || - (edev->mode & EEH_DEV_NO_HANDLER)) - goto out; + if (!driver->err_handler->error_detected) + return PCI_ERS_RESULT_NONE; - rc = driver->err_handler->mmio_enabled(dev); + eeh_edev_info(edev, "Invoking %s->error_detected(IO frozen)", + driver->name); + rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen); - /* A driver that needs a reset trumps all others */ - if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; - if (*res == PCI_ERS_RESULT_NONE) *res = rc; + edev->in_error = true; + pci_uevent_ers(dev, PCI_ERS_RESULT_NONE); + return rc; +} -out: - eeh_pcid_put(dev); -out_no_dev: - device_unlock(&dev->dev); - return NULL; +/** + * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled + * @edev: eeh device + * @driver: device's PCI driver + * + * Tells each device driver that IO ports, MMIO and config space I/O + * are now enabled. + */ +static enum pci_ers_result eeh_report_mmio_enabled(struct eeh_dev *edev, + struct pci_driver *driver) +{ + if (!driver->err_handler->mmio_enabled) + return PCI_ERS_RESULT_NONE; + eeh_edev_info(edev, "Invoking %s->mmio_enabled()", driver->name); + return driver->err_handler->mmio_enabled(edev->pdev); } /** * eeh_report_reset - Tell device that slot has been reset - * @data: eeh device - * @userdata: return value + * @edev: eeh device + * @driver: device's PCI driver * * This routine must be called while EEH tries to reset particular * PCI device so that the associated PCI device driver could take * some actions, usually to save data the driver needs so that the * driver can work again while the device is recovered. 
*/ -static void *eeh_report_reset(void *data, void *userdata) +static enum pci_ers_result eeh_report_reset(struct eeh_dev *edev, + struct pci_driver *driver) { - struct eeh_dev *edev = (struct eeh_dev *)data; - struct pci_dev *dev = eeh_dev_to_pci_dev(edev); - enum pci_ers_result rc, *res = userdata; - struct pci_driver *driver; - - if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe)) - return NULL; - - device_lock(&dev->dev); - dev->error_state = pci_channel_io_normal; - - driver = eeh_pcid_get(dev); - if (!driver) goto out_no_dev; - - eeh_enable_irq(dev); - - if (!driver->err_handler || - !driver->err_handler->slot_reset || - (edev->mode & EEH_DEV_NO_HANDLER) || - (!edev->in_error)) - goto out; - - rc = driver->err_handler->slot_reset(dev); - if ((*res == PCI_ERS_RESULT_NONE) || - (*res == PCI_ERS_RESULT_RECOVERED)) *res = rc; - if (*res == PCI_ERS_RESULT_DISCONNECT && - rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; - -out: - eeh_pcid_put(dev); -out_no_dev: - device_unlock(&dev->dev); - return NULL; + if (!driver->err_handler->slot_reset || !edev->in_error) + return PCI_ERS_RESULT_NONE; + eeh_edev_info(edev, "Invoking %s->slot_reset()", driver->name); + return driver->err_handler->slot_reset(edev->pdev); } -static void *eeh_dev_restore_state(void *data, void *userdata) +static void *eeh_dev_restore_state(struct eeh_dev *edev, void *userdata) { - struct eeh_dev *edev = data; struct pci_dev *pdev; if (!edev) @@ -355,91 +416,53 @@ static void *eeh_dev_restore_state(void *data, void *userdata) /** * eeh_report_resume - Tell device to resume normal operations - * @data: eeh device - * @userdata: return value + * @edev: eeh device + * @driver: device's PCI driver * * This routine must be called to notify the device driver that it * could resume so that the device driver can do some initialization * to make the recovered device work again. */ -static void *eeh_report_resume(void *data, void *userdata) +static enum pci_ers_result eeh_report_resume(struct eeh_dev *edev, + struct pci_driver *driver) { - struct eeh_dev *edev = (struct eeh_dev *)data; - struct pci_dev *dev = eeh_dev_to_pci_dev(edev); - bool was_in_error; - struct pci_driver *driver; + if (!driver->err_handler->resume || !edev->in_error) + return PCI_ERS_RESULT_NONE; - if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe)) - return NULL; - - device_lock(&dev->dev); - dev->error_state = pci_channel_io_normal; - - driver = eeh_pcid_get(dev); - if (!driver) goto out_no_dev; - - was_in_error = edev->in_error; - edev->in_error = false; - eeh_enable_irq(dev); - - if (!driver->err_handler || - !driver->err_handler->resume || - (edev->mode & EEH_DEV_NO_HANDLER) || !was_in_error) { - edev->mode &= ~EEH_DEV_NO_HANDLER; - goto out; - } - - driver->err_handler->resume(dev); + eeh_edev_info(edev, "Invoking %s->resume()", driver->name); + driver->err_handler->resume(edev->pdev); - pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED); -out: - eeh_pcid_put(dev); + pci_uevent_ers(edev->pdev, PCI_ERS_RESULT_RECOVERED); #ifdef CONFIG_PCI_IOV if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev)) eeh_ops->notify_resume(eeh_dev_to_pdn(edev)); #endif -out_no_dev: - device_unlock(&dev->dev); - return NULL; + return PCI_ERS_RESULT_NONE; } /** * eeh_report_failure - Tell device driver that device is dead. - * @data: eeh device - * @userdata: return value + * @edev: eeh device + * @driver: device's PCI driver * * This informs the device driver that the device is permanently * dead, and that no further recovery attempts will be made on it. 
*/ -static void *eeh_report_failure(void *data, void *userdata) +static enum pci_ers_result eeh_report_failure(struct eeh_dev *edev, + struct pci_driver *driver) { - struct eeh_dev *edev = (struct eeh_dev *)data; - struct pci_dev *dev = eeh_dev_to_pci_dev(edev); - struct pci_driver *driver; + enum pci_ers_result rc; - if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe)) - return NULL; - - device_lock(&dev->dev); - dev->error_state = pci_channel_io_perm_failure; - - driver = eeh_pcid_get(dev); - if (!driver) goto out_no_dev; + if (!driver->err_handler->error_detected) + return PCI_ERS_RESULT_NONE; - eeh_disable_irq(dev); + eeh_edev_info(edev, "Invoking %s->error_detected(permanent failure)", + driver->name); + rc = driver->err_handler->error_detected(edev->pdev, + pci_channel_io_perm_failure); - if (!driver->err_handler || - !driver->err_handler->error_detected) - goto out; - - driver->err_handler->error_detected(dev, pci_channel_io_perm_failure); - - pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT); -out: - eeh_pcid_put(dev); -out_no_dev: - device_unlock(&dev->dev); - return NULL; + pci_uevent_ers(edev->pdev, PCI_ERS_RESULT_DISCONNECT); + return rc; } static void *eeh_add_virt_device(void *data, void *userdata) @@ -458,9 +481,11 @@ static void *eeh_add_virt_device(void *data, void *userdata) driver = eeh_pcid_get(dev); if (driver) { - eeh_pcid_put(dev); - if (driver->err_handler) + if (driver->err_handler) { + eeh_pcid_put(dev); return NULL; + } + eeh_pcid_put(dev); } #ifdef CONFIG_PCI_IOV @@ -469,10 +494,9 @@ static void *eeh_add_virt_device(void *data, void *userdata) return NULL; } -static void *eeh_rmv_device(void *data, void *userdata) +static void *eeh_rmv_device(struct eeh_dev *edev, void *userdata) { struct pci_driver *driver; - struct eeh_dev *edev = (struct eeh_dev *)data; struct pci_dev *dev = eeh_dev_to_pci_dev(edev); struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata; int *removed = rmv_data ? &rmv_data->removed : NULL; @@ -497,17 +521,19 @@ static void *eeh_rmv_device(void *data, void *userdata) if (eeh_dev_removed(edev)) return NULL; - driver = eeh_pcid_get(dev); - if (driver) { - eeh_pcid_put(dev); - if (removed && - eeh_pe_passed(edev->pe)) - return NULL; - if (removed && - driver->err_handler && - driver->err_handler->error_detected && - driver->err_handler->slot_reset) + if (removed) { + if (eeh_pe_passed(edev->pe)) return NULL; + driver = eeh_pcid_get(dev); + if (driver) { + if (driver->err_handler && + driver->err_handler->error_detected && + driver->err_handler->slot_reset) { + eeh_pcid_put(dev); + return NULL; + } + eeh_pcid_put(dev); + } } /* Remove it from PCI subsystem */ @@ -542,9 +568,8 @@ static void *eeh_rmv_device(void *data, void *userdata) return NULL; } -static void *eeh_pe_detach_dev(void *data, void *userdata) +static void *eeh_pe_detach_dev(struct eeh_pe *pe, void *userdata) { - struct eeh_pe *pe = (struct eeh_pe *)data; struct eeh_dev *edev, *tmp; eeh_pe_for_each_dev(pe, edev, tmp) { @@ -565,9 +590,8 @@ static void *eeh_pe_detach_dev(void *data, void *userdata) * PE reset (for 3 times), we try to clear the frozen state * for 3 times as well. 
*/ -static void *__eeh_clear_pe_frozen_state(void *data, void *flag) +static void *__eeh_clear_pe_frozen_state(struct eeh_pe *pe, void *flag) { - struct eeh_pe *pe = (struct eeh_pe *)data; bool clear_sw_state = *(bool *)flag; int i, rc = 1; @@ -762,6 +786,7 @@ void eeh_handle_normal_event(struct eeh_pe *pe) { struct pci_bus *bus; struct eeh_dev *edev, *tmp; + struct eeh_pe *tmp_pe; int rc = 0; enum pci_ers_result result = PCI_ERS_RESULT_NONE; struct eeh_rmv_data rmv_data = {LIST_HEAD_INIT(rmv_data.edev_list), 0}; @@ -778,14 +803,13 @@ void eeh_handle_normal_event(struct eeh_pe *pe) eeh_pe_update_time_stamp(pe); pe->freeze_count++; if (pe->freeze_count > eeh_max_freezes) { - pr_err("EEH: PHB#%x-PE#%x has failed %d times in the\n" - "last hour and has been permanently disabled.\n", + pr_err("EEH: PHB#%x-PE#%x has failed %d times in the last hour and has been permanently disabled.\n", pe->phb->global_number, pe->addr, pe->freeze_count); goto hard_fail; } - pr_warn("EEH: This PCI device has failed %d times in the last hour\n", - pe->freeze_count); + pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n", + pe->freeze_count, eeh_max_freezes); /* Walk the various device drivers attached to this slot through * a reset sequence, giving each an opportunity to do what it needs @@ -798,7 +822,10 @@ void eeh_handle_normal_event(struct eeh_pe *pe) * hotplug for this case. */ pr_info("EEH: Notify device drivers to shutdown\n"); - eeh_pe_dev_traverse(pe, eeh_report_error, &result); + eeh_set_channel_state(pe, pci_channel_io_frozen); + eeh_set_irq_state(pe, false); + eeh_pe_report("error_detected(IO frozen)", pe, eeh_report_error, + &result); if ((pe->type & EEH_PE_PHB) && result != PCI_ERS_RESULT_NONE && result != PCI_ERS_RESULT_NEED_RESET) @@ -845,7 +872,8 @@ void eeh_handle_normal_event(struct eeh_pe *pe) result = PCI_ERS_RESULT_NEED_RESET; } else { pr_info("EEH: Notify device drivers to resume I/O\n"); - eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result); + eeh_pe_report("mmio_enabled", pe, + eeh_report_mmio_enabled, &result); } } @@ -888,7 +916,9 @@ void eeh_handle_normal_event(struct eeh_pe *pe) pr_info("EEH: Notify device drivers " "the completion of reset\n"); result = PCI_ERS_RESULT_NONE; - eeh_pe_dev_traverse(pe, eeh_report_reset, &result); + eeh_set_channel_state(pe, pci_channel_io_normal); + eeh_set_irq_state(pe, true); + eeh_pe_report("slot_reset", pe, eeh_report_reset, &result); } /* All devices should claim they have recovered by now. */ @@ -909,8 +939,17 @@ void eeh_handle_normal_event(struct eeh_pe *pe) /* Tell all device drivers that they can resume operations */ pr_info("EEH: Notify device driver to resume\n"); - eeh_pe_dev_traverse(pe, eeh_report_resume, NULL); + eeh_set_channel_state(pe, pci_channel_io_normal); + eeh_set_irq_state(pe, true); + eeh_pe_report("resume", pe, eeh_report_resume, NULL); + eeh_for_each_pe(pe, tmp_pe) { + eeh_pe_for_each_dev(tmp_pe, edev, tmp) { + edev->mode &= ~EEH_DEV_NO_HANDLER; + edev->in_error = false; + } + } + pr_info("EEH: Recovery successful.\n"); goto final; hard_fail: @@ -926,7 +965,10 @@ hard_fail: eeh_slot_error_detail(pe, EEH_LOG_PERM); /* Notify all devices that they're about to go down. 
*/ - eeh_pe_dev_traverse(pe, eeh_report_failure, NULL); + eeh_set_channel_state(pe, pci_channel_io_perm_failure); + eeh_set_irq_state(pe, false); + eeh_pe_report("error_detected(permanent failure)", pe, + eeh_report_failure, NULL); /* Mark the PE to be removed permanently */ eeh_pe_state_mark(pe, EEH_PE_REMOVED); @@ -1035,7 +1077,9 @@ void eeh_handle_special_event(void) /* Notify all devices to be down */ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); - eeh_pe_dev_traverse(pe, + eeh_set_channel_state(pe, pci_channel_io_perm_failure); + eeh_pe_report( + "error_detected(permanent failure)", pe, eeh_report_failure, NULL); bus = eeh_pe_bus_get(phb_pe); if (!bus) { diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index ee5a67d57aab..1b238ecc553e 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -142,8 +142,7 @@ struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb) * The function is used to retrieve the next PE in the * hierarchy PE tree. */ -static struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, - struct eeh_pe *root) +struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root) { struct list_head *next = pe->child_list.next; @@ -173,12 +172,12 @@ static struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, * to be traversed. */ void *eeh_pe_traverse(struct eeh_pe *root, - eeh_traverse_func fn, void *flag) + eeh_pe_traverse_func fn, void *flag) { struct eeh_pe *pe; void *ret; - for (pe = root; pe; pe = eeh_pe_next(pe, root)) { + eeh_for_each_pe(root, pe) { ret = fn(pe, flag); if (ret) return ret; } @@ -196,7 +195,7 @@ void *eeh_pe_traverse(struct eeh_pe *root, * PE and its child PEs. */ void *eeh_pe_dev_traverse(struct eeh_pe *root, - eeh_traverse_func fn, void *flag) + eeh_edev_traverse_func fn, void *flag) { struct eeh_pe *pe; struct eeh_dev *edev, *tmp; @@ -209,7 +208,7 @@ void *eeh_pe_dev_traverse(struct eeh_pe *root, } /* Traverse root PE */ - for (pe = root; pe; pe = eeh_pe_next(pe, root)) { + eeh_for_each_pe(root, pe) { eeh_pe_for_each_dev(pe, edev, tmp) { ret = fn(edev, flag); if (ret) @@ -235,9 +234,8 @@ struct eeh_pe_get_flag { int config_addr; }; -static void *__eeh_pe_get(void *data, void *flag) +static void *__eeh_pe_get(struct eeh_pe *pe, void *flag) { - struct eeh_pe *pe = (struct eeh_pe *)data; struct eeh_pe_get_flag *tmp = (struct eeh_pe_get_flag *) flag; /* Unexpected PHB PE */ @@ -551,9 +549,8 @@ void eeh_pe_update_time_stamp(struct eeh_pe *pe) * PE. Also, the associated PCI devices will be put into IO frozen * state as well. */ -static void *__eeh_pe_state_mark(void *data, void *flag) +static void *__eeh_pe_state_mark(struct eeh_pe *pe, void *flag) { - struct eeh_pe *pe = (struct eeh_pe *)data; int state = *((int *)flag); struct eeh_dev *edev, *tmp; struct pci_dev *pdev; @@ -595,9 +592,8 @@ void eeh_pe_state_mark(struct eeh_pe *pe, int state) } EXPORT_SYMBOL_GPL(eeh_pe_state_mark); -static void *__eeh_pe_dev_mode_mark(void *data, void *flag) +static void *__eeh_pe_dev_mode_mark(struct eeh_dev *edev, void *flag) { - struct eeh_dev *edev = data; int mode = *((int *)flag); edev->mode |= mode; @@ -625,9 +621,8 @@ void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode) * given PE. Besides, we also clear the check count of the PE * as well. 
*/ -static void *__eeh_pe_state_clear(void *data, void *flag) +static void *__eeh_pe_state_clear(struct eeh_pe *pe, void *flag) { - struct eeh_pe *pe = (struct eeh_pe *)data; int state = *((int *)flag); struct eeh_dev *edev, *tmp; struct pci_dev *pdev; @@ -858,9 +853,8 @@ static void eeh_restore_device_bars(struct eeh_dev *edev) * the expansion ROM base address, the latency timer, and etc. * from the saved values in the device node. */ -static void *eeh_restore_one_device_bars(void *data, void *flag) +static void *eeh_restore_one_device_bars(struct eeh_dev *edev, void *flag) { - struct eeh_dev *edev = (struct eeh_dev *)data; struct pci_dn *pdn = eeh_dev_to_pdn(edev); /* Do special restore for bridges */ diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 51695608c68b..b10e01021214 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -36,6 +36,7 @@ #include <asm/context_tracking.h> #include <asm/tm.h> #include <asm/ppc-opcode.h> +#include <asm/barrier.h> #include <asm/export.h> #ifdef CONFIG_PPC_BOOK3S #include <asm/exception-64s.h> @@ -178,6 +179,15 @@ system_call: /* label this so stack traces look sane */ clrldi r8,r8,32 15: slwi r0,r0,4 + + barrier_nospec_asm + /* + * Prevent the load of the handler below (based on the user-passed + * system call number) being speculatively executed until the test + * against NR_syscalls and branch to .Lsyscall_enosys above has + * committed. + */ + ldx r12,r11,r0 /* Fetch system call handler [ptr] */ mtctr r12 bctrl /* Call handler */ @@ -596,6 +606,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) * actually hit this code path. */ + isync slbie r6 slbie r6 /* Workaround POWER5 < DD2.1 issue */ slbmte r7,r0 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index f283958129f2..285c6465324a 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1501,6 +1501,7 @@ masked_##_H##interrupt: \ xori r10,r10,MSR_EE; /* clear MSR_EE */ \ mtspr SPRN_##_H##SRR1,r10; \ 2: mtcrf 0x80,r9; \ + std r1,PACAR1(r13); \ ld r9,PACA_EXGEN+EX_R9(r13); \ ld r10,PACA_EXGEN+EX_R10(r13); \ ld r11,PACA_EXGEN+EX_R11(r13); \ diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 3c2c2688918f..07e8396d472b 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -335,6 +335,26 @@ static unsigned long get_fadump_area_size(void) return size; } +static void __init fadump_reserve_crash_area(unsigned long base, + unsigned long size) +{ + struct memblock_region *reg; + unsigned long mstart, mend, msize; + + for_each_memblock(memory, reg) { + mstart = max_t(unsigned long, base, reg->base); + mend = reg->base + reg->size; + mend = min(base + size, mend); + + if (mstart < mend) { + msize = mend - mstart; + memblock_reserve(mstart, msize); + pr_info("Reserved %ldMB of memory at %#016lx for saving crash dump\n", + (msize >> 20), mstart); + } + } +} + int __init fadump_reserve_mem(void) { unsigned long base, size, memory_boundary; @@ -380,7 +400,16 @@ int __init fadump_reserve_mem(void) memory_boundary = memblock_end_of_DRAM(); if (fw_dump.dump_active) { - printk(KERN_INFO "Firmware-assisted dump is active.\n"); + pr_info("Firmware-assisted dump is active.\n"); + +#ifdef CONFIG_HUGETLB_PAGE + /* + * FADump capture kernel doesn't care much about hugepages. + * In fact, handling hugepages in capture kernel is asking for + * trouble. So, disable HugeTLB support when fadump is active. 
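Backing up to fadump_reserve_crash_area() just above: instead of one blanket memblock_reserve() over the whole window, the crash window is now intersected with each memblock region, so holes in the physical map are not reserved. The clamping is plain interval intersection; a stand-alone restatement under an illustrative helper name:

#include <linux/kernel.h>

/*
 * Sketch of the clamping in fadump_reserve_crash_area(): keep only the
 * part of a region [rbase, rbase + rsize) that overlaps the crash
 * window [base, base + size). Returns the overlap length, 0 if disjoint.
 */
static unsigned long crash_overlap(unsigned long base, unsigned long size,
				   unsigned long rbase, unsigned long rsize,
				   unsigned long *mstart)
{
	unsigned long start = max_t(unsigned long, base, rbase);
	unsigned long end = min(base + size, rbase + rsize);

	if (start >= end)
		return 0;
	*mstart = start;
	return end - start;
}
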
+ */ + hugetlb_disabled = true; +#endif /* * If last boot has crashed then reserve all the memory * above boot_memory_size so that we don't touch it until @@ -389,11 +418,7 @@ int __init fadump_reserve_mem(void) */ base = fw_dump.boot_memory_size; size = memory_boundary - base; - memblock_reserve(base, size); - printk(KERN_INFO "Reserved %ldMB of memory at %ldMB " - "for saving crash dump\n", - (unsigned long)(size >> 20), - (unsigned long)(base >> 20)); + fadump_reserve_crash_area(base, size); fw_dump.fadumphdr_addr = be64_to_cpu(fdm_active->rmr_region.destination_address) + @@ -1155,6 +1180,9 @@ void fadump_cleanup(void) init_fadump_mem_struct(&fdm, be64_to_cpu(fdm_active->cpu_state_data.destination_address)); fadump_invalidate_dump(&fdm); + } else if (fw_dump.dump_registered) { + /* Un-register Firmware-assisted dump if it was registered. */ + fadump_unregister_dump(&fdm); } } diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index d8670a37d70c..6cab07e76732 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -913,7 +913,7 @@ start_here: tovirt(r6,r6) lis r5, abatron_pteptrs@h ori r5, r5, abatron_pteptrs@l - stw r5, 0xf0(r0) /* Must match your Abatron config file */ + stw r5, 0xf0(0) /* Must match your Abatron config file */ tophys(r5,r5) stw r6, 0(r5) diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 4c1012b80d3b..80547dad37da 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -178,8 +178,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) if (cpu_has_feature(CPU_FTR_DAWR)) { length_max = 512 ; /* 64 doublewords */ /* DAWR region can't cross 512 boundary */ - if ((bp->attr.bp_addr >> 10) != - ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10)) + if ((bp->attr.bp_addr >> 9) != + ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9)) return -EINVAL; } if (info->len > diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 061aa0f47bb1..0682fef1f385 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -89,7 +89,7 @@ atomic_t ppc_n_lost_interrupts; #ifdef CONFIG_TAU_INT extern int tau_initialized; -extern int tau_interrupts(int); +u32 tau_interrupts(unsigned long cpu); #endif #endif /* CONFIG_PPC32 */ @@ -508,6 +508,11 @@ int arch_show_interrupts(struct seq_file *p, int prec) seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event); seq_printf(p, " Local timer interrupts for timer event device\n"); + seq_printf(p, "%*s: ", prec, "BCT"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event); + seq_printf(p, " Broadcast timer interrupts for timer event device\n"); + seq_printf(p, "%*s: ", prec, "LOC"); for_each_online_cpu(j) seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others); @@ -567,6 +572,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu) { u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event; + sum += per_cpu(irq_stat, cpu).broadcast_irqs_event; sum += per_cpu(irq_stat, cpu).pmu_irqs; sum += per_cpu(irq_stat, cpu).mce_exceptions; sum += per_cpu(irq_stat, cpu).spurious_irqs; diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 9ad37f827a97..683b5b3805bd 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -25,6 +25,7 @@ #include <linux/kvm_para.h> #include <linux/slab.h> #include <linux/of.h> +#include <linux/pagemap.h> #include <asm/reg.h> #include <asm/sections.h> @@ -672,14 +673,13 @@ static void 
kvm_use_magic_page(void) { u32 *p; u32 *start, *end; - u32 tmp; u32 features; /* Tell the host to map the magic page to -4096 on all CPUs */ on_each_cpu(kvm_map_magic_page, &features, 1); /* Quick self-test to see if the mapping works */ - if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) { + if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) { kvm_patching_worked = false; return; } diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 2694d078741d..936c7e2d421e 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -98,12 +98,14 @@ void machine_kexec(struct kimage *image) int save_ftrace_enabled; save_ftrace_enabled = __ftrace_enabled_save(); + this_cpu_disable_ftrace(); if (ppc_md.machine_kexec) ppc_md.machine_kexec(image); else default_machine_kexec(image); + this_cpu_enable_ftrace(); __ftrace_enabled_restore(save_ftrace_enabled); /* Fall back to normal restart if we're still alive. */ diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 1044bf15d5ed..a0f6f45005bd 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -231,16 +231,16 @@ static void kexec_prepare_cpus(void) /* we are sure every CPU has IRQs off at this point */ kexec_all_irq_disabled = 1; - /* after we tell the others to go down */ - if (ppc_md.kexec_cpu_down) - ppc_md.kexec_cpu_down(0, 0); - /* * Before removing MMU mappings make sure all CPUs have entered real * mode: */ kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE); + /* after we tell the others to go down */ + if (ppc_md.kexec_cpu_down) + ppc_md.kexec_cpu_down(0, 0); + put_cpu(); } diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S index 384357cb8bc0..0b196cdcd15d 100644 --- a/arch/powerpc/kernel/misc.S +++ b/arch/powerpc/kernel/misc.S @@ -25,23 +25,12 @@ /* * Returns (address we are running at) - (address we were linked at) * for use before the text and data are mapped to KERNELBASE. - */ - -_GLOBAL(reloc_offset) - mflr r0 - bl 1f -1: mflr r3 - PPC_LL r4,(2f-1b)(r3) - subf r3,r4,r3 - mtlr r0 - blr - .align 3 -2: PPC_LONG 1b - -/* * add_reloc_offset(x) returns x + reloc_offset(). */ + +_GLOBAL(reloc_offset) + li r3, 0 _GLOBAL(add_reloc_offset) mflr r0 bl 1f @@ -60,6 +49,10 @@ _GLOBAL(setjmp) PPC_STL r0,0(r3) PPC_STL r1,SZL(r3) PPC_STL r2,2*SZL(r3) +#ifdef CONFIG_PPC32 + mfcr r12 + stmw r12, 3*SZL(r3) +#else mfcr r0 PPC_STL r0,3*SZL(r3) PPC_STL r13,4*SZL(r3) @@ -81,14 +74,16 @@ _GLOBAL(setjmp) PPC_STL r29,20*SZL(r3) PPC_STL r30,21*SZL(r3) PPC_STL r31,22*SZL(r3) +#endif li r3,0 blr _GLOBAL(longjmp) - PPC_LCMPI r4,0 - bne 1f - li r4,1 -1: PPC_LL r13,4*SZL(r3) +#ifdef CONFIG_PPC32 + lmw r12, 3*SZL(r3) + mtcrf 0x38, r12 +#else + PPC_LL r13,4*SZL(r3) PPC_LL r14,5*SZL(r3) PPC_LL r15,6*SZL(r3) PPC_LL r16,7*SZL(r3) @@ -109,11 +104,14 @@ _GLOBAL(longjmp) PPC_LL r31,22*SZL(r3) PPC_LL r0,3*SZL(r3) mtcrf 0x38,r0 +#endif PPC_LL r0,0(r3) PPC_LL r1,SZL(r3) PPC_LL r2,2*SZL(r3) mtlr r0 - mr r3,r4 + mr.
r3, r4 + bnelr + li r3, 1 blr _GLOBAL(current_stack_pointer) diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index 3f7ba0f5bf29..1b3c6835e730 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -72,6 +72,12 @@ int module_finalize(const Elf_Ehdr *hdr, do_feature_fixups(powerpc_firmware_features, (void *)sect->sh_addr, (void *)sect->sh_addr + sect->sh_size); + + sect = find_section(hdr, sechdrs, "__spec_barrier_fixup"); + if (sect != NULL) + do_barrier_nospec_fixups_range(barrier_nospec_enabled, + (void *)sect->sh_addr, + (void *)sect->sh_addr + sect->sh_size); #endif sect = find_section(hdr, sechdrs, "__lwsync_fixup"); diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c index 5a7a78f12562..88d83771f462 100644 --- a/arch/powerpc/kernel/module_32.c +++ b/arch/powerpc/kernel/module_32.c @@ -109,12 +109,12 @@ static unsigned long get_plt_size(const Elf32_Ehdr *hdr, for (i = 1; i < hdr->e_shnum; i++) { /* If it's called *.init*, and we're not init, we're not interested */ - if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0) + if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != NULL) != is_init) continue; /* We don't want to look at debug sections. */ - if (strstr(secstrings + sechdrs[i].sh_name, ".debug") != 0) + if (strstr(secstrings + sechdrs[i].sh_name, ".debug")) continue; if (sechdrs[i].sh_type == SHT_RELA) { diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index a2636c250b7b..1b7419579820 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -280,6 +280,10 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr, #ifdef CONFIG_DYNAMIC_FTRACE /* make the trampoline to the ftrace_caller */ relocs++; +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + /* an additional one for ftrace_regs_caller */ + relocs++; +#endif #endif pr_debug("Looks like a total of %lu stubs, max\n", relocs); @@ -463,8 +467,11 @@ static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs, } #ifdef CC_USING_MPROFILE_KERNEL -static bool is_early_mcount_callsite(u32 *instruction) +static bool is_mprofile_mcount_callsite(const char *name, u32 *instruction) { + if (strcmp("_mcount", name)) + return false; + /* * Check if this is one of the -mprofile-kernel sequences. */ @@ -496,8 +503,7 @@ static void squash_toc_save_inst(const char *name, unsigned long addr) #else static void squash_toc_save_inst(const char *name, unsigned long addr) { } -/* without -mprofile-kernel, mcount calls are never early */ -static bool is_early_mcount_callsite(u32 *instruction) +static bool is_mprofile_mcount_callsite(const char *name, u32 *instruction) { return false; } @@ -505,11 +511,11 @@ static bool is_early_mcount_callsite(u32 *instruction) /* We expect a noop next: if it is, replace it with instruction to restore r2. 
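To make the comment above concrete, this is the rewrite restore_r2() performs at an ordinary call site, shown as a sketch with raw encodings (the kernel itself uses the PPC_INST_* macros for these):

/*
 * Sketch only: rewrite the nop after 'bl <stub>' into a TOC restore.
 * 0x60000000 is nop; 0xe8410018 is 'ld r2,24(r1)', the ELFv2 TOC save
 * slot (ELFv1 keeps the TOC at 40(r1), i.e. 0xe8410028).
 */
static int patch_toc_restore(u32 *insn)
{
	if (*insn != 0x60000000)
		return 0;		/* not a nop: caller reports -ENOEXEC */
	*insn = 0xe8410018;
	return 1;
}
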
*/ -static int restore_r2(u32 *instruction, struct module *me) +static int restore_r2(const char *name, u32 *instruction, struct module *me) { u32 *prev_insn = instruction - 1; - if (is_early_mcount_callsite(prev_insn)) + if (is_mprofile_mcount_callsite(name, prev_insn)) return 1; /* @@ -650,7 +656,8 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, value = stub_for_addr(sechdrs, value, me); if (!value) return -ENOENT; - if (!restore_r2((u32 *)location + 1, me)) + if (!restore_r2(strtab + sym->st_name, + (u32 *)location + 1, me)) return -ENOEXEC; squash_toc_save_inst(strtab + sym->st_name, value); @@ -762,7 +769,8 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, * via the paca (in r13). The target (ftrace_caller()) is responsible for * saving and restoring the toc before returning. */ -static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs, struct module *me) +static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs, + struct module *me, unsigned long addr) { struct ppc64_stub_entry *entry; unsigned int i, num_stubs; @@ -789,9 +797,10 @@ static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs, struct module memcpy(entry->jump, stub_insns, sizeof(stub_insns)); /* Stub uses address relative to kernel toc (from the paca) */ - reladdr = (unsigned long)ftrace_caller - kernel_toc_addr(); + reladdr = addr - kernel_toc_addr(); if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) { - pr_err("%s: Address of ftrace_caller out of range of kernel_toc.\n", me->name); + pr_err("%s: Address of %ps out of range of kernel_toc.\n", + me->name, (void *)addr); return 0; } @@ -799,22 +808,29 @@ static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs, struct module entry->jump[2] |= PPC_LO(reladdr); /* Even though we don't use funcdata in the stub, it's needed elsewhere.
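On the immediates patched just above: PPC_HA() and PPC_LO() split the TOC-relative offset across the stub's addis/addi pair, which is why reladdr must fit in a signed 32-bit range. A worked sketch of the split:

/*
 * Worked example of the patching above. addi sign-extends its operand,
 * so the high half is rounded up whenever the low half has its top bit
 * set; as a result (ha << 16) + (s16)lo == reladdr.
 */
static void split_toc_offset(long reladdr, unsigned short *ha,
			     unsigned short *lo)
{
	*lo = reladdr & 0xffff;			/* PPC_LO() */
	*ha = (reladdr + 0x8000) >> 16;		/* PPC_HA() */
}
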
*/ - entry->funcdata = func_desc((unsigned long)ftrace_caller); + entry->funcdata = func_desc(addr); entry->magic = STUB_MAGIC; return (unsigned long)entry; } #else -static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs, struct module *me) +static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs, + struct module *me, unsigned long addr) { - return stub_for_addr(sechdrs, (unsigned long)ftrace_caller, me); + return stub_for_addr(sechdrs, addr, me); } #endif int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs) { - mod->arch.toc = my_r2(sechdrs, mod); - mod->arch.tramp = create_ftrace_stub(sechdrs, mod); + mod->arch.tramp = create_ftrace_stub(sechdrs, mod, + (unsigned long)ftrace_caller); +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + mod->arch.tramp_regs = create_ftrace_stub(sechdrs, mod, + (unsigned long)ftrace_regs_caller); + if (!mod->arch.tramp_regs) + return -ENOENT; +#endif if (!mod->arch.tramp) return -ENOENT; diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index ba681dac7b46..22e9d281324d 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -1030,7 +1030,7 @@ loff_t __init nvram_create_partition(const char *name, int sig, return -ENOSPC; /* Create our OS partition */ - new_part = kmalloc(sizeof(*new_part), GFP_KERNEL); + new_part = kzalloc(sizeof(*new_part), GFP_KERNEL); if (!new_part) { pr_err("%s: kmalloc failed\n", __func__); return -ENOMEM; @@ -1039,7 +1039,7 @@ loff_t __init nvram_create_partition(const char *name, int sig, new_part->index = free_part->index; new_part->header.signature = sig; new_part->header.length = size; - strncpy(new_part->header.name, name, 12); + memcpy(new_part->header.name, name, strnlen(name, sizeof(new_part->header.name))); new_part->header.checksum = nvram_checksum(&new_part->header); rc = nvram_write_header(new_part); diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 85ad2f78b889..4f861055a852 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -11,11 +11,13 @@ #include <linux/sched.h> #include <linux/errno.h> #include <linux/bootmem.h> +#include <linux/syscalls.h> #include <linux/irq.h> #include <linux/list.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/export.h> +#include <linux/syscalls.h> #include <asm/processor.h> #include <asm/io.h> @@ -283,7 +285,11 @@ pci_bus_to_hose(int bus) * Note that the returned IO or memory base is a physical address */ -long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Wattribute-alias" +SYSCALL_DEFINE3(pciconfig_iobase, long, which, + unsigned long, bus, unsigned long, devfn) { struct pci_controller* hose; long result = -EOPNOTSUPP; @@ -307,5 +313,4 @@ long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn) return result; } - - +#pragma GCC diagnostic pop diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 15ce0306b092..812171c09f42 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -203,8 +203,11 @@ void pcibios_setup_phb_io_space(struct pci_controller *hose) #define IOBASE_ISA_IO 3 #define IOBASE_ISA_MEM 4 -long sys_pciconfig_iobase(long which, unsigned long in_bus, - unsigned long in_devfn) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Wattribute-alias" 
+SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, in_bus, + unsigned long, in_devfn) { struct pci_controller* hose; struct pci_bus *tmp_bus, *bus = NULL; @@ -256,6 +259,7 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus, return -EOPNOTSUPP; } +#pragma GCC diagnostic pop #ifdef CONFIG_NUMA int pcibus_to_node(struct pci_bus *bus) diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S index 1b1787d52896..8afbe213d729 100644 --- a/arch/powerpc/kernel/ppc_save_regs.S +++ b/arch/powerpc/kernel/ppc_save_regs.S @@ -25,6 +25,9 @@ */ _GLOBAL(ppc_save_regs) PPC_STL r0,0*SZL(r3) +#ifdef CONFIG_PPC32 + stmw r2, 2*SZL(r3) +#else PPC_STL r2,2*SZL(r3) PPC_STL r3,3*SZL(r3) PPC_STL r4,4*SZL(r3) @@ -55,6 +58,7 @@ _GLOBAL(ppc_save_regs) PPC_STL r29,29*SZL(r3) PPC_STL r30,30*SZL(r3) PPC_STL r31,31*SZL(r3) +#endif /* go up one stack frame for SP */ PPC_LL r4,0(r1) PPC_STL r4,1*SZL(r3) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 1237f13fed51..9ef4aea9fffe 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -154,6 +154,7 @@ unsigned long msr_check_and_set(unsigned long bits) return newmsr; } +EXPORT_SYMBOL_GPL(msr_check_and_set); void __msr_check_and_clear(unsigned long bits) { @@ -632,6 +633,7 @@ void do_break (struct pt_regs *regs, unsigned long address, hw_breakpoint_disable(); /* Deliver the signal to userspace */ + clear_siginfo(&info); info.si_signo = SIGTRAP; info.si_errno = 0; info.si_code = TRAP_HWBKPT; @@ -845,10 +847,6 @@ bool ppc_breakpoint_available(void) } EXPORT_SYMBOL_GPL(ppc_breakpoint_available); -#ifdef CONFIG_PPC64 -DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array); -#endif - static inline bool hw_brk_match(struct arch_hw_breakpoint *a, struct arch_hw_breakpoint *b) { @@ -1154,7 +1152,7 @@ static inline void restore_sprs(struct thread_struct *old_thread, mtspr(SPRN_TAR, new_thread->tar); } - if (cpu_has_feature(CPU_FTR_ARCH_300) && + if (cpu_has_feature(CPU_FTR_P9_TIDR) && old_thread->tidr != new_thread->tidr) mtspr(SPRN_TIDR, new_thread->tidr); #endif @@ -1181,20 +1179,6 @@ struct task_struct *__switch_to(struct task_struct *prev, WARN_ON(!irqs_disabled()); -#ifdef CONFIG_PPC64 - /* - * Collect processor utilization data per process - */ - if (firmware_has_feature(FW_FEATURE_SPLPAR)) { - struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array); - long unsigned start_tb, current_tb; - start_tb = old_thread->start_tb; - cu->current_tb = current_tb = mfspr(SPRN_PURR); - old_thread->accum_tb += (current_tb - start_tb); - new_thread->start_tb = current_tb; - } -#endif /* CONFIG_PPC64 */ - #ifdef CONFIG_PPC_BOOK3S_64 batch = this_cpu_ptr(&ppc64_tlb_batch); if (batch->active) { @@ -1437,7 +1421,7 @@ void show_regs(struct pt_regs * regs) pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); #endif #ifdef CONFIG_PPC64 - pr_cont("SOFTE: %ld ", regs->softe); + pr_cont("IRQMASK: %lx ", regs->softe); #endif #ifdef CONFIG_PPC_TRANSACTIONAL_MEM if (MSR_TM_ACTIVE(regs->msr)) @@ -1496,104 +1480,42 @@ int set_thread_uses_vas(void) } #ifdef CONFIG_PPC64 -static DEFINE_SPINLOCK(vas_thread_id_lock); -static DEFINE_IDA(vas_thread_ida); - -/* - * We need to assign a unique thread id to each thread in a process. +/** + * Assign a TIDR (thread ID) for task @t and set it in the thread + * structure. For now, we only support setting TIDR for 'current' task. 
* - * This thread id, referred to as TIDR, and separate from the Linux's tgid, - * is intended to be used to direct an ASB_Notify from the hardware to the - * thread, when a suitable event occurs in the system. + * Since the TID value is a truncated form of its PID, it is possible + * (but unlikely) for 2 threads to have the same TID. In the unlikely event + * that 2 threads share the same TID and are waiting, one of the following + * cases will happen: * - * One such event is a "paste" instruction in the context of Fast Thread - * Wakeup (aka Core-to-core wake up in the Virtual Accelerator Switchboard - * (VAS) in POWER9. + * 1. The correct thread is running, the wrong thread is not + * In this situation, the correct thread is woken and proceeds to pass its + * condition check. * - * To get a unique TIDR per process we could simply reuse task_pid_nr() but - * the problem is that task_pid_nr() is not yet available copy_thread() is - * called. Fixing that would require changing more intrusive arch-neutral - * code in code path in copy_process()?. + * 2. Neither thread is running + * In this situation, neither thread will be woken. When scheduled, the waiting + * threads will execute either a wait, which will return immediately, followed + * by a condition check, which will pass for the correct thread and fail + * for the wrong thread, or they will execute the condition check immediately. * - * Further, to assign unique TIDRs within each process, we need an atomic - * field (or an IDR) in task_struct, which again intrudes into the arch- - * neutral code. So try to assign globally unique TIDRs for now. + * 3. The wrong thread is running, the correct thread is not + * The wrong thread will be woken, but will fail its condition check and + * re-execute wait. The correct thread, when scheduled, will execute either + * its condition check (which will pass), or wait, which returns immediately + * when called the first time after the thread is scheduled, followed by its + * condition check (which will pass). * - * NOTE: TIDR 0 indicates that the thread does not need a TIDR value. - * For now, only threads that expect to be notified by the VAS - * hardware need a TIDR value and we assign values > 0 for those. - */ -#define MAX_THREAD_CONTEXT ((1 << 16) - 1) -static int assign_thread_tidr(void) -{ - int index; - int err; - unsigned long flags; - -again: - if (!ida_pre_get(&vas_thread_ida, GFP_KERNEL)) - return -ENOMEM; - - spin_lock_irqsave(&vas_thread_id_lock, flags); - err = ida_get_new_above(&vas_thread_ida, 1, &index); - spin_unlock_irqrestore(&vas_thread_id_lock, flags); - - if (err == -EAGAIN) - goto again; - else if (err) - return err; - - if (index > MAX_THREAD_CONTEXT) { - spin_lock_irqsave(&vas_thread_id_lock, flags); - ida_remove(&vas_thread_ida, index); - spin_unlock_irqrestore(&vas_thread_id_lock, flags); - return -ENOMEM; - } - - return index; -} - -static void free_thread_tidr(int id) -{ - unsigned long flags; - - spin_lock_irqsave(&vas_thread_id_lock, flags); - ida_remove(&vas_thread_ida, id); - spin_unlock_irqrestore(&vas_thread_id_lock, flags); -} - -/* - * Clear any TIDR value assigned to this thread.
- */ -void clear_thread_tidr(struct task_struct *t) -{ - if (!t->thread.tidr) - return; - - if (!cpu_has_feature(CPU_FTR_ARCH_300)) { - WARN_ON_ONCE(1); - return; - } - - mtspr(SPRN_TIDR, 0); - free_thread_tidr(t->thread.tidr); - t->thread.tidr = 0; -} - -void arch_release_task_struct(struct task_struct *t) -{ - clear_thread_tidr(t); -} - -/* - * Assign a unique TIDR (thread id) for task @t and set it in the thread - * structure. For now, we only support setting TIDR for 'current' task. + * 4. Both threads are running + * Both threads will be woken. The wrong thread will fail its condition check + * and execute another wait, while the correct thread will pass its condition + * check. + * + * @t: the task to set the thread ID for */ int set_thread_tidr(struct task_struct *t) { - int rc; - - if (!cpu_has_feature(CPU_FTR_ARCH_300)) + if (!cpu_has_feature(CPU_FTR_P9_TIDR)) return -EINVAL; if (t != current) @@ -1602,11 +1524,7 @@ int set_thread_tidr(struct task_struct *t) if (t->thread.tidr) return 0; - rc = assign_thread_tidr(); - if (rc < 0) - return rc; - - t->thread.tidr = rc; + t->thread.tidr = (u16)task_pid_nr(t); mtspr(SPRN_TIDR, t->thread.tidr); return 0; diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 9dbed488aba1..05e7fb47a7a4 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -332,25 +332,10 @@ static int __init early_init_dt_scan_cpus(unsigned long node, * NOTE: This must match the parsing done in smp_setup_cpu_maps. */ for (i = 0; i < nthreads; i++) { - /* - * version 2 of the kexec param format adds the phys cpuid of - * booted proc. - */ - if (fdt_version(initial_boot_params) >= 2) { - if (be32_to_cpu(intserv[i]) == - fdt_boot_cpuid_phys(initial_boot_params)) { - found = boot_cpu_count; - found_thread = i; - } - } else { - /* - * Check if it's the boot-cpu, set it's hw index now, - * unfortunately this format did not support booting - * off secondary threads. - */ - if (of_get_flat_dt_prop(node, - "linux,boot-cpu", NULL) != NULL) - found = boot_cpu_count; + if (be32_to_cpu(intserv[i]) == + fdt_boot_cpuid_phys(initial_boot_params)) { + found = boot_cpu_count; + found_thread = i; } #ifdef CONFIG_SMP /* logical cpu id is always 0 on UP kernels */ diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index f9d6befb55a6..5425dd3d6a9f 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -103,7 +103,7 @@ int of_workarounds; #ifdef DEBUG_PROM #define prom_debug(x...) prom_printf(x) #else -#define prom_debug(x...) +#define prom_debug(x...) do { } while (0) #endif @@ -301,6 +301,10 @@ static void __init prom_print(const char *msg) } +/* + * Both prom_print_hex & prom_print_dec take an unsigned long as input so that + * we do not need __udivdi3 or __umoddi3 on 32-bit builds. + */ static void __init prom_print_hex(unsigned long val) { int i, nibbles = sizeof(val)*2; @@ -334,12 +338,14 @@ static void __init prom_print_dec(unsigned long val) call_prom("write", 3, 1, prom.stdout, buf+i, size); } +__printf(1, 2) static void __init prom_printf(const char *format, ...) { const char *p, *q, *s; va_list args; unsigned long v; long vs; + int n = 0; va_start(args, format); for (p = format; *p != 0; p = q) { @@ -358,6 +364,10 @@ static void __init prom_printf(const char *format, ...) ++q; if (*q == 0) break; + while (*q == 'l') { + ++q; + ++n; + } switch (*q) { case 's': ++q; s = va_arg(args, const char *); prom_print(s);
break; case 'x': ++q; - v = va_arg(args, unsigned long); + switch (n) { + case 0: + v = va_arg(args, unsigned int); + break; + case 1: + v = va_arg(args, unsigned long); + break; + case 2: + default: + v = va_arg(args, unsigned long long); + break; + } prom_print_hex(v); break; - case 'd': + case 'u': ++q; - vs = va_arg(args, int); - if (vs < 0) { - prom_print("-"); - vs = -vs; + switch (n) { + case 0: + v = va_arg(args, unsigned int); + break; + case 1: + v = va_arg(args, unsigned long); + break; + case 2: + default: + v = va_arg(args, unsigned long long); + break; } - prom_print_dec(vs); + prom_print_dec(v); break; - case 'l': + case 'd': ++q; - if (*q == 0) + switch (n) { + case 0: + vs = va_arg(args, int); break; - else if (*q == 'x') { - ++q; - v = va_arg(args, unsigned long); - prom_print_hex(v); - } else if (*q == 'u') { /* '%lu' */ - ++q; - v = va_arg(args, unsigned long); - prom_print_dec(v); - } else if (*q == 'd') { /* %ld */ - ++q; + case 1: vs = va_arg(args, long); - if (vs < 0) { - prom_print("-"); - vs = -vs; - } - prom_print_dec(vs); + break; + case 2: + default: + vs = va_arg(args, long long); + break; } + if (vs < 0) { + prom_print("-"); + vs = -vs; + } + prom_print_dec(vs); break; } } @@ -1160,7 +1186,7 @@ static void __init prom_send_capabilities(void) */ cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()); - prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n", + prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n", cores, NR_CPUS); ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores); @@ -1242,7 +1268,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align) if (align) base = _ALIGN_UP(base, align); - prom_debug("alloc_up(%x, %x)\n", size, align); + prom_debug("%s(%lx, %lx)\n", __func__, size, align); if (ram_top == 0) prom_panic("alloc_up() called with mem not initialized\n"); @@ -1253,7 +1279,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align) for(; (base + size) <= alloc_top; base = _ALIGN_UP(base + 0x100000, align)) { - prom_debug(" trying: 0x%x\n\r", base); + prom_debug(" trying: 0x%lx\n\r", base); addr = (unsigned long)prom_claim(base, size, 0); if (addr != PROM_ERROR && addr != 0) break; @@ -1265,12 +1291,12 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align) return 0; alloc_bottom = addr + size; - prom_debug(" -> %x\n", addr); - prom_debug(" alloc_bottom : %x\n", alloc_bottom); - prom_debug(" alloc_top : %x\n", alloc_top); - prom_debug(" alloc_top_hi : %x\n", alloc_top_high); - prom_debug(" rmo_top : %x\n", rmo_top); - prom_debug(" ram_top : %x\n", ram_top); + prom_debug(" -> %lx\n", addr); + prom_debug(" alloc_bottom : %lx\n", alloc_bottom); + prom_debug(" alloc_top : %lx\n", alloc_top); + prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); + prom_debug(" rmo_top : %lx\n", rmo_top); + prom_debug(" ram_top : %lx\n", ram_top); return addr; } @@ -1285,7 +1311,7 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align, { unsigned long base, addr = 0; - prom_debug("alloc_down(%x, %x, %s)\n", size, align, + prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align, highmem ? 
"(high)" : "(low)"); if (ram_top == 0) prom_panic("alloc_down() called with mem not initialized\n"); @@ -1313,7 +1339,7 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align, base = _ALIGN_DOWN(alloc_top - size, align); for (; base > alloc_bottom; base = _ALIGN_DOWN(base - 0x100000, align)) { - prom_debug(" trying: 0x%x\n\r", base); + prom_debug(" trying: 0x%lx\n\r", base); addr = (unsigned long)prom_claim(base, size, 0); if (addr != PROM_ERROR && addr != 0) break; @@ -1324,12 +1350,12 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align, alloc_top = addr; bail: - prom_debug(" -> %x\n", addr); - prom_debug(" alloc_bottom : %x\n", alloc_bottom); - prom_debug(" alloc_top : %x\n", alloc_top); - prom_debug(" alloc_top_hi : %x\n", alloc_top_high); - prom_debug(" rmo_top : %x\n", rmo_top); - prom_debug(" ram_top : %x\n", ram_top); + prom_debug(" -> %lx\n", addr); + prom_debug(" alloc_bottom : %lx\n", alloc_bottom); + prom_debug(" alloc_top : %lx\n", alloc_top); + prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); + prom_debug(" rmo_top : %lx\n", rmo_top); + prom_debug(" ram_top : %lx\n", ram_top); return addr; } @@ -1455,7 +1481,7 @@ static void __init prom_init_mem(void) if (size == 0) continue; - prom_debug(" %x %x\n", base, size); + prom_debug(" %lx %lx\n", base, size); if (base == 0 && (of_platform & PLATFORM_LPAR)) rmo_top = size; if ((base + size) > ram_top) @@ -1475,12 +1501,12 @@ static void __init prom_init_mem(void) if (prom_memory_limit) { if (prom_memory_limit <= alloc_bottom) { - prom_printf("Ignoring mem=%x <= alloc_bottom.\n", - prom_memory_limit); + prom_printf("Ignoring mem=%lx <= alloc_bottom.\n", + prom_memory_limit); prom_memory_limit = 0; } else if (prom_memory_limit >= ram_top) { - prom_printf("Ignoring mem=%x >= ram_top.\n", - prom_memory_limit); + prom_printf("Ignoring mem=%lx >= ram_top.\n", + prom_memory_limit); prom_memory_limit = 0; } else { ram_top = prom_memory_limit; @@ -1512,12 +1538,13 @@ static void __init prom_init_mem(void) alloc_bottom = PAGE_ALIGN(prom_initrd_end); prom_printf("memory layout at init:\n"); - prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit); - prom_printf(" alloc_bottom : %x\n", alloc_bottom); - prom_printf(" alloc_top : %x\n", alloc_top); - prom_printf(" alloc_top_hi : %x\n", alloc_top_high); - prom_printf(" rmo_top : %x\n", rmo_top); - prom_printf(" ram_top : %x\n", ram_top); + prom_printf(" memory_limit : %lx (16 MB aligned)\n", + prom_memory_limit); + prom_printf(" alloc_bottom : %lx\n", alloc_bottom); + prom_printf(" alloc_top : %lx\n", alloc_top); + prom_printf(" alloc_top_hi : %lx\n", alloc_top_high); + prom_printf(" rmo_top : %lx\n", rmo_top); + prom_printf(" ram_top : %lx\n", ram_top); } static void __init prom_close_stdin(void) @@ -1578,7 +1605,7 @@ static void __init prom_instantiate_opal(void) return; } - prom_printf("instantiating opal at 0x%x...", base); + prom_printf("instantiating opal at 0x%llx...", base); if (call_prom_ret("call-method", 4, 3, rets, ADDR("load-opal-runtime"), @@ -1594,10 +1621,10 @@ static void __init prom_instantiate_opal(void) reserve_mem(base, size); - prom_debug("opal base = 0x%x\n", base); - prom_debug("opal align = 0x%x\n", align); - prom_debug("opal entry = 0x%x\n", entry); - prom_debug("opal size = 0x%x\n", (long)size); + prom_debug("opal base = 0x%llx\n", base); + prom_debug("opal align = 0x%llx\n", align); + prom_debug("opal entry = 0x%llx\n", entry); + prom_debug("opal size = 0x%llx\n", size); prom_setprop(opal_node, 
"/ibm,opal", "opal-base-address", &base, sizeof(base)); @@ -1674,7 +1701,7 @@ static void __init prom_instantiate_rtas(void) prom_debug("rtas base = 0x%x\n", base); prom_debug("rtas entry = 0x%x\n", entry); - prom_debug("rtas size = 0x%x\n", (long)size); + prom_debug("rtas size = 0x%x\n", size); prom_debug("prom_instantiate_rtas: end...\n"); } @@ -1732,7 +1759,7 @@ static void __init prom_instantiate_sml(void) if (base == 0) prom_panic("Could not allocate memory for sml\n"); - prom_printf("instantiating sml at 0x%x...", base); + prom_printf("instantiating sml at 0x%llx...", base); memset((void *)base, 0, size); @@ -1751,8 +1778,8 @@ static void __init prom_instantiate_sml(void) prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size", &size, sizeof(size)); - prom_debug("sml base = 0x%x\n", base); - prom_debug("sml size = 0x%x\n", (long)size); + prom_debug("sml base = 0x%llx\n", base); + prom_debug("sml size = 0x%x\n", size); prom_debug("prom_instantiate_sml: end...\n"); } @@ -1845,7 +1872,7 @@ static void __init prom_initialize_tce_table(void) prom_debug("TCE table: %s\n", path); prom_debug("\tnode = 0x%x\n", node); - prom_debug("\tbase = 0x%x\n", base); + prom_debug("\tbase = 0x%llx\n", base); prom_debug("\tsize = 0x%x\n", minsize); /* Initialize the table to have a one-to-one mapping @@ -1932,12 +1959,12 @@ static void __init prom_hold_cpus(void) } prom_debug("prom_hold_cpus: start...\n"); - prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop); - prom_debug(" 1) *spinloop = 0x%x\n", *spinloop); - prom_debug(" 1) acknowledge = 0x%x\n", + prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop); + prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop); + prom_debug(" 1) acknowledge = 0x%lx\n", (unsigned long)acknowledge); - prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge); - prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold); + prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge); + prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold); /* Set the common spinloop variable, so all of the secondary cpus * will block when they are awakened from their OF spinloop. @@ -1965,7 +1992,7 @@ static void __init prom_hold_cpus(void) prom_getprop(node, "reg", ®, sizeof(reg)); cpu_no = be32_to_cpu(reg); - prom_debug("cpu hw idx = %lu\n", cpu_no); + prom_debug("cpu hw idx = %u\n", cpu_no); /* Init the acknowledge var which will be reset by * the secondary cpu when it awakens from its OF @@ -1975,7 +2002,7 @@ static void __init prom_hold_cpus(void) if (cpu_no != prom.cpu) { /* Primary Thread of non-boot cpu or any thread */ - prom_printf("starting cpu hw idx %lu... ", cpu_no); + prom_printf("starting cpu hw idx %u... 
", cpu_no); call_prom("start-cpu", 3, 0, node, secondary_hold, cpu_no); @@ -1986,11 +2013,11 @@ static void __init prom_hold_cpus(void) if (*acknowledge == cpu_no) prom_printf("done\n"); else - prom_printf("failed: %x\n", *acknowledge); + prom_printf("failed: %lx\n", *acknowledge); } #ifdef CONFIG_SMP else - prom_printf("boot cpu hw idx %lu\n", cpu_no); + prom_printf("boot cpu hw idx %u\n", cpu_no); #endif /* CONFIG_SMP */ } @@ -2268,7 +2295,7 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end, while ((*mem_start + needed) > *mem_end) { unsigned long room, chunk; - prom_debug("Chunk exhausted, claiming more at %x...\n", + prom_debug("Chunk exhausted, claiming more at %lx...\n", alloc_bottom); room = alloc_top - alloc_bottom; if (room > DEVTREE_CHUNK_SIZE) @@ -2494,7 +2521,7 @@ static void __init flatten_device_tree(void) room = alloc_top - alloc_bottom - 0x4000; if (room > DEVTREE_CHUNK_SIZE) room = DEVTREE_CHUNK_SIZE; - prom_debug("starting device tree allocs at %x\n", alloc_bottom); + prom_debug("starting device tree allocs at %lx\n", alloc_bottom); /* Now try to claim that */ mem_start = (unsigned long)alloc_up(room, PAGE_SIZE); @@ -2557,7 +2584,7 @@ static void __init flatten_device_tree(void) int i; prom_printf("reserved memory map:\n"); for (i = 0; i < mem_reserve_cnt; i++) - prom_printf(" %x - %x\n", + prom_printf(" %llx - %llx\n", be64_to_cpu(mem_reserve_map[i].base), be64_to_cpu(mem_reserve_map[i].size)); } @@ -2567,9 +2594,9 @@ static void __init flatten_device_tree(void) */ mem_reserve_cnt = MEM_RESERVE_MAP_SIZE; - prom_printf("Device tree strings 0x%x -> 0x%x\n", + prom_printf("Device tree strings 0x%lx -> 0x%lx\n", dt_string_start, dt_string_end); - prom_printf("Device tree struct 0x%x -> 0x%x\n", + prom_printf("Device tree struct 0x%lx -> 0x%lx\n", dt_struct_start, dt_struct_end); } @@ -3001,7 +3028,7 @@ static void __init prom_find_boot_cpu(void) prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval)); prom.cpu = be32_to_cpu(rval); - prom_debug("Booting CPU hw index = %lu\n", prom.cpu); + prom_debug("Booting CPU hw index = %d\n", prom.cpu); } static void __init prom_check_initrd(unsigned long r3, unsigned long r4) @@ -3023,8 +3050,8 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4) reserve_mem(prom_initrd_start, prom_initrd_end - prom_initrd_start); - prom_debug("initrd_start=0x%x\n", prom_initrd_start); - prom_debug("initrd_end=0x%x\n", prom_initrd_end); + prom_debug("initrd_start=0x%lx\n", prom_initrd_start); + prom_debug("initrd_end=0x%lx\n", prom_initrd_end); } #endif /* CONFIG_BLK_DEV_INITRD */ } @@ -3277,7 +3304,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, /* Don't print anything after quiesce under OPAL, it crashes OFW */ if (of_platform != PLATFORM_OPAL) { prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase); - prom_debug("->dt_header_start=0x%x\n", hdr); + prom_debug("->dt_header_start=0x%lx\n", hdr); } #ifdef CONFIG_PPC32 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index d23cf632edf0..9667666eb18e 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -2443,6 +2443,7 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, /* Create a new breakpoint request if one doesn't exist already */ hw_breakpoint_init(&attr); attr.bp_addr = hw_brk.address; + attr.bp_len = 8; arch_bp_generic_fields(hw_brk.type, &attr.bp_type); @@ -3081,27 +3082,19 @@ long arch_ptrace(struct task_struct *child, long request, #endif 
/* CONFIG_HAVE_HW_BREAKPOINT */ #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ - if (!access_ok(VERIFY_WRITE, datavp, - sizeof(struct ppc_debug_info))) + if (copy_to_user(datavp, &dbginfo, + sizeof(struct ppc_debug_info))) return -EFAULT; - ret = __copy_to_user(datavp, &dbginfo, - sizeof(struct ppc_debug_info)) ? - -EFAULT : 0; - break; + return 0; } case PPC_PTRACE_SETHWDEBUG: { struct ppc_hw_breakpoint bp_info; - if (!access_ok(VERIFY_READ, datavp, - sizeof(struct ppc_hw_breakpoint))) + if (copy_from_user(&bp_info, datavp, + sizeof(struct ppc_hw_breakpoint))) return -EFAULT; - ret = __copy_from_user(&bp_info, datavp, - sizeof(struct ppc_hw_breakpoint)) ? - -EFAULT : 0; - if (!ret) - ret = ppc_set_hwdebug(child, &bp_info); - break; + return ppc_set_hwdebug(child, &bp_info); } case PPC_PTRACE_DELHWDEBUG: { diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c index fb070d8cad07..487dcd8da4de 100644 --- a/arch/powerpc/kernel/rtas-proc.c +++ b/arch/powerpc/kernel/rtas-proc.c @@ -154,18 +154,6 @@ static ssize_t ppc_rtas_tone_volume_write(struct file *file, static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v); static int ppc_rtas_rmo_buf_show(struct seq_file *m, void *v); -static int sensors_open(struct inode *inode, struct file *file) -{ - return single_open(file, ppc_rtas_sensors_show, NULL); -} - -static const struct file_operations ppc_rtas_sensors_operations = { - .open = sensors_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static int poweron_open(struct inode *inode, struct file *file) { return single_open(file, ppc_rtas_poweron_show, NULL); @@ -231,18 +219,6 @@ static const struct file_operations ppc_rtas_tone_volume_operations = { .release = single_release, }; -static int rmo_buf_open(struct inode *inode, struct file *file) -{ - return single_open(file, ppc_rtas_rmo_buf_show, NULL); -} - -static const struct file_operations ppc_rtas_rmo_buf_ops = { - .open = rmo_buf_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static int ppc_rtas_find_all_sensors(void); static void ppc_rtas_process_sensor(struct seq_file *m, struct individual_sensor *s, int state, int error, const char *loc); @@ -267,20 +243,20 @@ static int __init proc_rtas_init(void) &ppc_rtas_clock_operations); proc_create("powerpc/rtas/poweron", 0644, NULL, &ppc_rtas_poweron_operations); - proc_create("powerpc/rtas/sensors", 0444, NULL, - &ppc_rtas_sensors_operations); + proc_create_single("powerpc/rtas/sensors", 0444, NULL, + ppc_rtas_sensors_show); proc_create("powerpc/rtas/frequency", 0644, NULL, &ppc_rtas_tone_freq_operations); proc_create("powerpc/rtas/volume", 0644, NULL, &ppc_rtas_tone_volume_operations); - proc_create("powerpc/rtas/rmo_buffer", 0400, NULL, - &ppc_rtas_rmo_buf_ops); + proc_create_single("powerpc/rtas/rmo_buffer", 0400, NULL, + ppc_rtas_rmo_buf_show); return 0; } __initcall(proc_rtas_init); -static int parse_number(const char __user *p, size_t count, unsigned long *val) +static int parse_number(const char __user *p, size_t count, u64 *val) { char buf[40]; char *end; @@ -293,7 +269,7 @@ static int parse_number(const char __user *p, size_t count, unsigned long *val) buf[count] = 0; - *val = simple_strtoul(buf, &end, 10); + *val = simple_strtoull(buf, &end, 10); if (*end && *end != '\n') return -EINVAL; @@ -307,17 +283,17 @@ static ssize_t ppc_rtas_poweron_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct rtc_time tm; - unsigned long nowtime; + time64_t nowtime; int 
error = parse_number(buf, count, &nowtime); if (error) return error; power_on_time = nowtime; /* save the time */ - to_tm(nowtime, &tm); + rtc_time64_to_tm(nowtime, &tm); error = rtas_call(rtas_token("set-time-for-power-on"), 7, 1, NULL, - tm.tm_year, tm.tm_mon, tm.tm_mday, + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, 0 /* nano */); if (error) printk(KERN_WARNING "error: setting poweron time returned: %s\n", @@ -373,14 +349,14 @@ static ssize_t ppc_rtas_clock_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct rtc_time tm; - unsigned long nowtime; + time64_t nowtime; int error = parse_number(buf, count, &nowtime); if (error) return error; - to_tm(nowtime, &tm); + rtc_time64_to_tm(nowtime, &tm); error = rtas_call(rtas_token("set-time-of-day"), 7, 1, NULL, - tm.tm_year, tm.tm_mon, tm.tm_mday, + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, 0); if (error) printk(KERN_WARNING "error: setting the clock returned: %s\n", @@ -401,8 +377,8 @@ static int ppc_rtas_clock_show(struct seq_file *m, void *v) unsigned int year, mon, day, hour, min, sec; year = ret[0]; mon = ret[1]; day = ret[2]; hour = ret[3]; min = ret[4]; sec = ret[5]; - seq_printf(m, "%lu\n", - mktime(year, mon, day, hour, min, sec)); + seq_printf(m, "%lld\n", + mktime64(year, mon, day, hour, min, sec)); } return 0; } @@ -528,7 +504,7 @@ static void ppc_rtas_process_sensor(struct seq_file *m, "EPOW power off" }; const char * battery_cyclestate[] = { "None", "In progress", "Requested" }; - const char * battery_charging[] = { "Charging", "Discharching", + const char * battery_charging[] = { "Charging", "Discharging", "No current flow" }; const char * ibm_drconnector[] = { "Empty", "Present", "Unusable", "Exchange" }; @@ -731,7 +707,7 @@ static void get_location_code(struct seq_file *m, struct individual_sensor *s, static ssize_t ppc_rtas_tone_freq_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - unsigned long freq; + u64 freq; int error = parse_number(buf, count, &freq); if (error) return error; @@ -756,7 +732,7 @@ static int ppc_rtas_tone_freq_show(struct seq_file *m, void *v) static ssize_t ppc_rtas_tone_volume_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - unsigned long volume; + u64 volume; int error = parse_number(buf, count, &volume); if (error) return error; diff --git a/arch/powerpc/kernel/rtas-rtc.c b/arch/powerpc/kernel/rtas-rtc.c index 49600985c7ef..a28239b8b0c0 100644 --- a/arch/powerpc/kernel/rtas-rtc.c +++ b/arch/powerpc/kernel/rtas-rtc.c @@ -13,7 +13,7 @@ #define MAX_RTC_WAIT 5000 /* 5 sec */ #define RTAS_CLOCK_BUSY (-2) -unsigned long __init rtas_get_boot_time(void) +time64_t __init rtas_get_boot_time(void) { int ret[8]; int error; @@ -38,7 +38,7 @@ unsigned long __init rtas_get_boot_time(void) return 0; } - return mktime(ret[0], ret[1], ret[2], ret[3], ret[4], ret[5]); + return mktime64(ret[0], ret[1], ret[2], ret[3], ret[4], ret[5]); } /* NOTE: get_rtc_time will get an error if executed in interrupt context diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 3f1c4fcbe0aa..7fb9f83dcde8 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -26,6 +26,7 @@ #include <linux/memblock.h> #include <linux/slab.h> #include <linux/reboot.h> +#include <linux/syscalls.h> #include <asm/prom.h> #include <asm/rtas.h> @@ -1050,7 +1051,10 @@ struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log, } /* We 
assume to be passed big endian arguments */ -asmlinkage int ppc_rtas(struct rtas_args __user *uargs) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Wattribute-alias" +SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) { struct rtas_args args; unsigned long flags; @@ -1136,6 +1140,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs) return 0; } +#pragma GCC diagnostic pop /* * Call early during boot, before mem init, to retrieve the RTAS diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index b98a722da915..a8b277362931 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -10,10 +10,78 @@ #include <asm/debugfs.h> #include <asm/security_features.h> +#include <asm/setup.h> unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; +bool barrier_nospec_enabled; + +static void enable_barrier_nospec(bool enable) +{ + barrier_nospec_enabled = enable; + do_barrier_nospec_fixups(enable); +} + +void setup_barrier_nospec(void) +{ + bool enable; + + /* + * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well. + * But there's a good reason not to. The two flags we check below are + * both enabled by default in the kernel, so if the hcall is not + * functional they will be enabled. + * On a system where the host firmware has been updated (so the ori + * functions as a barrier), but on which the hypervisor (KVM/Qemu) has + * not been updated, we would like to enable the barrier. Dropping the + * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is + * we potentially enable the barrier on systems where the host firmware + * is not updated, but that's harmless as it's a no-op. + */ + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && + security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR); + + enable_barrier_nospec(enable); +} + +#ifdef CONFIG_DEBUG_FS +static int barrier_nospec_set(void *data, u64 val) +{ + switch (val) { + case 0: + case 1: + break; + default: + return -EINVAL; + } + + if (!!val == !!barrier_nospec_enabled) + return 0; + + enable_barrier_nospec(!!val); + + return 0; +} + +static int barrier_nospec_get(void *data, u64 *val) +{ + *val = barrier_nospec_enabled ?
1 : 0; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec, + barrier_nospec_get, barrier_nospec_set, "%llu\n"); + +static __init int barrier_nospec_debugfs_init(void) +{ + debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL, + &fops_barrier_nospec); + return 0; +} +device_initcall(barrier_nospec_debugfs_init); +#endif /* CONFIG_DEBUG_FS */ + ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) { bool thread_priv; @@ -52,6 +120,9 @@ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, c if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) return sprintf(buf, "Not affected\n"); + if (barrier_nospec_enabled) + return sprintf(buf, "Mitigation: __user pointer sanitization\n"); + return sprintf(buf, "Vulnerable\n"); } diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 0af5c11b9e78..62b1a40d8957 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -192,12 +192,6 @@ void machine_halt(void) machine_hang(); } - -#ifdef CONFIG_TAU -extern u32 cpu_temp(unsigned long cpu); -extern u32 cpu_temp_both(unsigned long cpu); -#endif /* CONFIG_TAU */ - #ifdef CONFIG_SMP DEFINE_PER_CPU(unsigned int, cpu_pvr); #endif diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h index d144df54ad40..c6a592b67386 100644 --- a/arch/powerpc/kernel/setup.h +++ b/arch/powerpc/kernel/setup.h @@ -62,4 +62,10 @@ void kvm_cma_reserve(void); static inline void kvm_cma_reserve(void) { }; #endif +#ifdef CONFIG_TAU +u32 cpu_temp(unsigned long cpu); +u32 cpu_temp_both(unsigned long cpu); +u32 tau_interrupts(unsigned long cpu); +#endif /* CONFIG_TAU */ + #endif /* __ARCH_POWERPC_KERNEL_SETUP_H */ diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index b78f142a4148..7a7ce8ad455e 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -346,6 +346,13 @@ void __init early_setup(unsigned long dt_ptr) */ cpu_ready_for_interrupts(); + /* + * We enable ftrace here, but since we only support DYNAMIC_FTRACE, it + * will only actually get enabled on the boot cpu much later once + * ftrace itself has been initialized. 
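Returning to the barrier_nospec plumbing in security.c above: the sites are no-ops until do_barrier_nospec_fixups() patches them (to ori 31,31,0 on 64s when the firmware advertises it), and the canonical use is to fence a bounds check on an untrusted value, exactly what barrier_nospec_asm does for the syscall number in entry_64.S. A hedged sketch of the C-side pattern; NR_DEMO and demo_table are illustrative:

#include <asm/barrier.h>

#define NR_DEMO 16			/* illustrative bound */
static long demo_table[NR_DEMO];	/* illustrative table */

static long demo_lookup(unsigned long idx)
{
	if (idx >= NR_DEMO)
		return -EINVAL;
	/*
	 * Stall speculation here so demo_table[idx] cannot be loaded
	 * speculatively with an out-of-bounds idx.
	 */
	barrier_nospec();
	return demo_table[idx];
}
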
+ */ + this_cpu_enable_ftrace(); + DBG(" <- early_setup()\n"); #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index 61db86ecd318..fb932f1202c7 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -15,6 +15,7 @@ #include <linux/key.h> #include <linux/context_tracking.h> #include <linux/livepatch.h> +#include <linux/syscalls.h> #include <asm/hw_breakpoint.h> #include <linux/uaccess.h> #include <asm/unistd.h> @@ -150,6 +151,9 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) { user_exit(); + /* Check valid addr_limit, TIF check is done there */ + addr_limit_user_check(); + if (thread_info_flags & _TIF_UPROBE) uprobe_notify_resume(regs); diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h index a6467f843acf..800433685888 100644 --- a/arch/powerpc/kernel/signal.h +++ b/arch/powerpc/kernel/signal.h @@ -49,10 +49,8 @@ extern int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, #else /* CONFIG_PPC64 */ -extern long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, - struct pt_regs *regs); -extern long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, - struct pt_regs *regs); +extern long sys_rt_sigreturn(void); +extern long sys_sigreturn(void); static inline int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct task_struct *tsk) diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 492f03451877..5eedbb282d42 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -25,9 +25,10 @@ #include <linux/errno.h> #include <linux/elf.h> #include <linux/ptrace.h> +#include <linux/pagemap.h> #include <linux/ratelimit.h> -#ifdef CONFIG_PPC64 #include <linux/syscalls.h> +#ifdef CONFIG_PPC64 #include <linux/compat.h> #else #include <linux/wait.h> @@ -57,10 +58,6 @@ #ifdef CONFIG_PPC64 -#define sys_rt_sigreturn compat_sys_rt_sigreturn -#define sys_swapcontext compat_sys_swapcontext -#define sys_sigreturn compat_sys_sigreturn - #define old_sigaction old_sigaction32 #define sigcontext sigcontext32 #define mcontext mcontext32 @@ -1041,11 +1038,18 @@ static int do_setcontext_tm(struct ucontext __user *ucp, } #endif -long sys_swapcontext(struct ucontext __user *old_ctx, - struct ucontext __user *new_ctx, - int ctx_size, int r6, int r7, int r8, struct pt_regs *regs) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Wattribute-alias" +#ifdef CONFIG_PPC64 +COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, + struct ucontext __user *, new_ctx, int, ctx_size) +#else +SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, + struct ucontext __user *, new_ctx, long, ctx_size) +#endif { - unsigned char tmp __maybe_unused; + struct pt_regs *regs = current_pt_regs(); int ctx_has_vsx_region = 0; #ifdef CONFIG_PPC64 @@ -1109,9 +1113,8 @@ long sys_swapcontext(struct ucontext __user *old_ctx, } if (new_ctx == NULL) return 0; - if (!access_ok(VERIFY_READ, new_ctx, ctx_size) - || __get_user(tmp, (u8 __user *) new_ctx) - || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1)) + if (!access_ok(VERIFY_READ, new_ctx, ctx_size) || + fault_in_pages_readable((u8 __user *)new_ctx, ctx_size)) return -EFAULT; /* @@ -1131,11 +1134,16 @@ long sys_swapcontext(struct ucontext __user *old_ctx, set_thread_flag(TIF_RESTOREALL); return 0; } +#pragma GCC diagnostic pop -long sys_rt_sigreturn(int r3, int r4, int r5, int r6, 
int r7, int r8, - struct pt_regs *regs) +#ifdef CONFIG_PPC64 +COMPAT_SYSCALL_DEFINE0(rt_sigreturn) +#else +SYSCALL_DEFINE0(rt_sigreturn) +#endif { struct rt_sigframe __user *rt_sf; + struct pt_regs *regs = current_pt_regs(); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM struct ucontext __user *uc_transact; unsigned long msr_hi; @@ -1223,15 +1231,16 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, return 0; } +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Wattribute-alias" #ifdef CONFIG_PPC32 -int sys_debug_setcontext(struct ucontext __user *ctx, - int ndbg, struct sig_dbg_op __user *dbg, - int r6, int r7, int r8, - struct pt_regs *regs) +SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx, + int, ndbg, struct sig_dbg_op __user *, dbg) { + struct pt_regs *regs = current_pt_regs(); struct sig_dbg_op op; int i; - unsigned char tmp __maybe_unused; unsigned long new_msr = regs->msr; #ifdef CONFIG_PPC_ADV_DEBUG_REGS unsigned long new_dbcr0 = current->thread.debug.dbcr0; @@ -1287,9 +1296,8 @@ int sys_debug_setcontext(struct ucontext __user *ctx, current->thread.debug.dbcr0 = new_dbcr0; #endif - if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx)) - || __get_user(tmp, (u8 __user *) ctx) - || __get_user(tmp, (u8 __user *) (ctx + 1) - 1)) + if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx)) || + fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx))) return -EFAULT; /* @@ -1329,6 +1337,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, return 0; } #endif +#pragma GCC diagnostic pop /* * OK, we're invoking a handler @@ -1419,9 +1428,13 @@ badframe: /* * Do a signal return; undo the signal stack. */ -long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, - struct pt_regs *regs) +#ifdef CONFIG_PPC64 +COMPAT_SYSCALL_DEFINE0(sigreturn) +#else +SYSCALL_DEFINE0(sigreturn) +#endif { + struct pt_regs *regs = current_pt_regs(); struct sigframe __user *sf; struct sigcontext __user *sc; struct sigcontext sigctx; diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 720117690822..d42b60020389 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -24,6 +24,7 @@ #include <linux/elf.h> #include <linux/ptrace.h> #include <linux/ratelimit.h> +#include <linux/syscalls.h> #include <asm/sigcontext.h> #include <asm/ucontext.h> @@ -624,17 +625,17 @@ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp) /* * Handle {get,set,swap}_context operations */ -int sys_swapcontext(struct ucontext __user *old_ctx, - struct ucontext __user *new_ctx, - long ctx_size, long r6, long r7, long r8, struct pt_regs *regs) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Wattribute-alias" +SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, + struct ucontext __user *, new_ctx, long, ctx_size) { unsigned char tmp; sigset_t set; unsigned long new_msr = 0; int ctx_has_vsx_region = 0; - BUG_ON(regs != current->thread.regs); - if (new_ctx && get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR])) return -EFAULT; @@ -692,24 +693,22 @@ int sys_swapcontext(struct ucontext __user *old_ctx, set_thread_flag(TIF_RESTOREALL); return 0; } +#pragma GCC diagnostic pop /* * Do a signal return; undo the signal stack. 
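A note on the pragma triplets used throughout these syscall conversions: SYSCALL_DEFINEx and COMPAT_SYSCALL_DEFINEx emit an alias between two functions whose prototypes deliberately differ, which GCC 8 reports under -Wattribute-alias; -Wpragmas is suppressed first so older compilers do not warn about the then-unknown option. A minimal sketch of the offending construct, with hypothetical names:

/*
 * Roughly what SYSCALL_DEFINEx expands to. GCC 8 emits: "warning:
 * 'sys_demo' alias between functions of incompatible types
 * 'long int(int)' and 'long int(long int)' [-Wattribute-alias]".
 */
long __se_sys_demo(long a) { return a; }
long sys_demo(int a) __attribute__((alias("__se_sys_demo")));
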
*/ -int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7, unsigned long r8, - struct pt_regs *regs) +SYSCALL_DEFINE0(rt_sigreturn) { + struct pt_regs *regs = current_pt_regs(); struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1]; sigset_t set; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM unsigned long msr; #endif - BUG_ON(current->thread.regs != regs); - /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 9ca7148b5881..5eadfffabe35 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -59,6 +59,7 @@ #include <asm/kexec.h> #include <asm/asm-prototypes.h> #include <asm/cpu_has_feature.h> +#include <asm/ftrace.h> #ifdef DEBUG #include <asm/udbg.h> @@ -155,11 +156,13 @@ static irqreturn_t reschedule_action(int irq, void *data) return IRQ_HANDLED; } +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST static irqreturn_t tick_broadcast_ipi_action(int irq, void *data) { - tick_broadcast_ipi_handler(); + timer_broadcast_interrupt(); return IRQ_HANDLED; } +#endif #ifdef CONFIG_NMI_IPI static irqreturn_t nmi_ipi_action(int irq, void *data) @@ -172,7 +175,9 @@ static irqreturn_t nmi_ipi_action(int irq, void *data) static irq_handler_t smp_ipi_action[] = { [PPC_MSG_CALL_FUNCTION] = call_function_action, [PPC_MSG_RESCHEDULE] = reschedule_action, +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST [PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action, +#endif #ifdef CONFIG_NMI_IPI [PPC_MSG_NMI_IPI] = nmi_ipi_action, #endif @@ -186,8 +191,12 @@ static irq_handler_t smp_ipi_action[] = { const char *smp_ipi_name[] = { [PPC_MSG_CALL_FUNCTION] = "ipi call function", [PPC_MSG_RESCHEDULE] = "ipi reschedule", +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST [PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast", +#endif +#ifdef CONFIG_NMI_IPI [PPC_MSG_NMI_IPI] = "nmi ipi", +#endif }; /* optional function to request ipi, for controllers with >= 4 ipis */ @@ -277,8 +286,10 @@ irqreturn_t smp_ipi_demux_relaxed(void) generic_smp_call_function_interrupt(); if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE)) scheduler_ipi(); +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST)) - tick_broadcast_ipi_handler(); + timer_broadcast_interrupt(); +#endif #ifdef CONFIG_NMI_IPI if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI)) nmi_ipi_action(0, NULL); @@ -419,9 +430,9 @@ out: return ret; } -static void do_smp_send_nmi_ipi(int cpu) +static void do_smp_send_nmi_ipi(int cpu, bool safe) { - if (smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu)) + if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu)) return; if (cpu >= 0) { @@ -461,7 +472,7 @@ void smp_flush_nmi_ipi(u64 delay_us) * - delay_us > 0 is the delay before giving up waiting for targets to * enter the handler, == 0 specifies indefinite delay. 
*/ -int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) +int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe) { unsigned long flags; int me = raw_smp_processor_id(); @@ -494,7 +505,7 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) nmi_ipi_busy_count++; nmi_ipi_unlock(); - do_smp_send_nmi_ipi(cpu); + do_smp_send_nmi_ipi(cpu, safe); while (!cpumask_empty(&nmi_ipi_pending_mask)) { udelay(1); @@ -516,6 +527,16 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) return ret; } + +int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) +{ + return __smp_send_nmi_ipi(cpu, fn, delay_us, false); +} + +int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) +{ + return __smp_send_nmi_ipi(cpu, fn, delay_us, true); +} #endif /* CONFIG_NMI_IPI */ #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST @@ -559,7 +580,7 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) * entire NMI dance and waiting for * cpus to clear pending mask, etc. */ - do_smp_send_nmi_ipi(cpu); + do_smp_send_nmi_ipi(cpu, false); } } } @@ -1066,6 +1087,9 @@ void start_secondary(void *unused) local_irq_enable(); + /* We can enable ftrace for secondary cpus now */ + this_cpu_enable_ftrace(); + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); BUG(); @@ -1162,6 +1186,8 @@ int __cpu_disable(void) if (!smp_ops->cpu_disable) return -ENOSYS; + this_cpu_disable_ftrace(); + err = smp_ops->cpu_disable(); if (err) return err; @@ -1180,6 +1206,12 @@ void __cpu_die(unsigned int cpu) void cpu_die(void) { + /* + * Disable on the down path. This will be re-enabled by + * start_secondary() via start_secondary_resume() below + */ + this_cpu_disable_ftrace(); + if (ppc_md.cpu_die) ppc_md.cpu_die(); diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c index d534ed901538..07e97f289c52 100644 --- a/arch/powerpc/kernel/stacktrace.c +++ b/arch/powerpc/kernel/stacktrace.c @@ -1,21 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 + /* - * Stack trace utility + * Stack trace utility functions etc. * * Copyright 2008 Christoph Hellwig, IBM Corp. - * - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. + * Copyright 2018 SUSE Linux GmbH + * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp. */ #include <linux/export.h> +#include <linux/kallsyms.h> +#include <linux/module.h> +#include <linux/nmi.h> #include <linux/sched.h> #include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/stacktrace.h> #include <asm/ptrace.h> #include <asm/processor.h> +#include <linux/ftrace.h> +#include <asm/kprobes.h> + +#include <asm/paca.h> /* * Save stack-backtrace addresses into a stack_trace buffer. @@ -76,3 +82,164 @@ save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) save_context_stack(trace, regs->gpr[1], current, 0); } EXPORT_SYMBOL_GPL(save_stack_trace_regs); + +#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE +int +save_stack_trace_tsk_reliable(struct task_struct *tsk, + struct stack_trace *trace) +{ + unsigned long sp; + unsigned long stack_page = (unsigned long)task_stack_page(tsk); + unsigned long stack_end; + int graph_idx = 0; + + /* + * The last frame (unwinding first) may not yet have saved + * its LR onto the stack. 
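The reliable unwinder that follows walks the standard powerpc back chain: word 0 of every frame holds the caller's stack pointer, and the caller's saved LR lives at a fixed slot (STACK_FRAME_LR_SAVE). Stripped of its validity checks, the core of the loop below reduces to roughly:

	/* Condensed model of the walk below; the alignment, exception-frame
	 * and kretprobe checks of the real function are elided. */
	static int walk_reliable(unsigned long sp, unsigned long stack_end,
				 struct stack_trace *trace)
	{
		while (sp != stack_end) {
			unsigned long *frame = (unsigned long *)sp;
			unsigned long newsp = frame[0];			/* back chain */
			unsigned long ip = frame[STACK_FRAME_LR_SAVE];	/* saved LR */

			if (newsp <= sp || !__kernel_text_address(ip))
				return 1;	/* trace cannot be trusted */
			trace->entries[trace->nr_entries++] = ip;
			sp = newsp;
		}
		return 0;
	}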
+ */ + int firstframe = 1; + + if (tsk == current) + sp = current_stack_pointer(); + else + sp = tsk->thread.ksp; + + stack_end = stack_page + THREAD_SIZE; + if (!is_idle_task(tsk)) { + /* + * For user tasks, this is the SP value loaded on + * kernel entry, see "PACAKSAVE(r13)" in _switch() and + * system_call_common()/EXCEPTION_PROLOG_COMMON(). + * + * Likewise for non-swapper kernel threads, + * this also happens to be the top of the stack + * as setup by copy_thread(). + * + * Note that stack backlinks are not properly setup by + * copy_thread() and thus, a forked task() will have + * an unreliable stack trace until it's been + * _switch()'ed to for the first time. + */ + stack_end -= STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); + } else { + /* + * idle tasks have a custom stack layout, + * c.f. cpu_idle_thread_init(). + */ + stack_end -= STACK_FRAME_OVERHEAD; + } + + if (sp < stack_page + sizeof(struct thread_struct) || + sp > stack_end - STACK_FRAME_MIN_SIZE) { + return 1; + } + + for (;;) { + unsigned long *stack = (unsigned long *) sp; + unsigned long newsp, ip; + + /* sanity check: ABI requires SP to be aligned 16 bytes. */ + if (sp & 0xF) + return 1; + + /* Mark stacktraces with exception frames as unreliable. */ + if (sp <= stack_end - STACK_INT_FRAME_SIZE && + stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { + return 1; + } + + newsp = stack[0]; + /* Stack grows downwards; unwinder may only go up. */ + if (newsp <= sp) + return 1; + + if (newsp != stack_end && + newsp > stack_end - STACK_FRAME_MIN_SIZE) { + return 1; /* invalid backlink, too far up. */ + } + + /* Examine the saved LR: it must point into kernel code. */ + ip = stack[STACK_FRAME_LR_SAVE]; + if (!firstframe && !__kernel_text_address(ip)) + return 1; + firstframe = 0; + + /* + * FIXME: IMHO these tests do not belong in + * arch-dependent code, they are generic. + */ + ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, NULL); +#ifdef CONFIG_KPROBES + /* + * Mark stacktraces with kretprobed functions on them + * as unreliable. + */ + if (ip == (unsigned long)kretprobe_trampoline) + return 1; +#endif + + if (!trace->skip) + trace->entries[trace->nr_entries++] = ip; + else + trace->skip--; + + if (newsp == stack_end) + break; + + if (trace->nr_entries >= trace->max_entries) + return -E2BIG; + + sp = newsp; + } + return 0; +} +EXPORT_SYMBOL_GPL(save_stack_trace_tsk_reliable); +#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */ + +#ifdef CONFIG_PPC_BOOK3S_64 +static void handle_backtrace_ipi(struct pt_regs *regs) +{ + nmi_cpu_backtrace(regs); +} + +static void raise_backtrace_ipi(cpumask_t *mask) +{ + unsigned int cpu; + + for_each_cpu(cpu, mask) { + if (cpu == smp_processor_id()) + handle_backtrace_ipi(NULL); + else + smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC); + } + + for_each_cpu(cpu, mask) { + struct paca_struct *p = paca_ptrs[cpu]; + + cpumask_clear_cpu(cpu, mask); + + pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu); + if (!virt_addr_valid(p)) { + pr_warn("paca pointer appears corrupt? (%px)\n", p); + continue; + } + + pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d", + p->irq_soft_mask, p->in_mce, p->in_nmi); + + if (virt_addr_valid(p->__current)) + pr_cont(" current: %d (%s)\n", p->__current->pid, + p->__current->comm); + else + pr_cont(" current pointer corrupt? 
(%px)\n", p->__current); + + pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1); + show_stack(p->__current, (unsigned long *)p->saved_r1); + } +} + +void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) +{ + nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi); +} +#endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index c11c73373691..bdf58ba1a94b 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -52,15 +52,6 @@ #include <asm/syscalls.h> #include <asm/switch_to.h> - -asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp, - compat_ulong_t __user *outp, compat_ulong_t __user *exp, - compat_uptr_t tvp_x) -{ - /* sign extend n */ - return compat_sys_select((int)n, inp, outp, exp, compat_ptr(tvp_x)); -} - unsigned long compat_sys_mmap2(unsigned long addr, size_t len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index 466216506eb2..083fa06962fd 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -62,6 +62,9 @@ out: return ret; } +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Wattribute-alias" SYSCALL_DEFINE6(mmap2, unsigned long, addr, size_t, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, pgoff) @@ -75,6 +78,7 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len, { return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT); } +#pragma GCC diagnostic pop #ifdef CONFIG_PPC32 /* diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index 7ccb7f81f8db..919a32746ede 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S @@ -35,7 +35,7 @@ #endif #define SYSCALL_SPU(func) SYSCALL(func) #define COMPAT_SYS_SPU(func) COMPAT_SYS(func) -#define PPC_SYS_SPU(func) PPC_SYS(func) +#define COMPAT_SPU_NEW(func) COMPAT_SYS(func) #define SYSX_SPU(f, f3264, f32) SYSX(f, f3264, f32) .section .rodata,"a" diff --git a/arch/powerpc/kernel/systbl_chk.c b/arch/powerpc/kernel/systbl_chk.c index 55323a620cfe..4653258722ac 100644 --- a/arch/powerpc/kernel/systbl_chk.c +++ b/arch/powerpc/kernel/systbl_chk.c @@ -31,7 +31,7 @@ #define SYSCALL_SPU(func) SYSCALL(func) #define COMPAT_SYS_SPU(func) COMPAT_SYS(func) -#define PPC_SYS_SPU(func) PPC_SYS(func) +#define COMPAT_SPU_NEW(func) COMPAT_SYS(_new##func) #define SYSX_SPU(f, f3264, f32) SYSX(f, f3264, f32) /* Just insert a marker for ni_syscalls */ diff --git a/arch/powerpc/kernel/systbl_chk.sh b/arch/powerpc/kernel/systbl_chk.sh index 31b6e7c358ca..f2e356c2a345 100644 --- a/arch/powerpc/kernel/systbl_chk.sh +++ b/arch/powerpc/kernel/systbl_chk.sh @@ -16,7 +16,7 @@ awk 'BEGIN { num = -1; } # Ignore the beginning of the file /^START_TABLE/ { num = 0; next; } /^END_TABLE/ { if (num != $2) { - printf "NR_syscalls (%s) is not one more than the last syscall (%s)\n", + printf "Error: NR_syscalls (%s) is not one more than the last syscall (%s)\n", $2, num - 1; exit(1); } @@ -25,7 +25,7 @@ awk 'BEGIN { num = -1; } # Ignore the beginning of the file { if (num == -1) next; if (($1 != -1) && ($1 != num)) { - printf "Syscall %s out of order (expected %s)\n", + printf "Error: Syscall %s out of order (expected %s)\n", $1, num; exit(1); }; diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c index 
8cdd852aedd1..e2ab8a111b69 100644 --- a/arch/powerpc/kernel/tau_6xx.c +++ b/arch/powerpc/kernel/tau_6xx.c @@ -27,6 +27,9 @@ #include <asm/cache.h> #include <asm/8xx_immap.h> #include <asm/machdep.h> +#include <asm/asm-prototypes.h> + +#include "setup.h" static struct tau_temp { @@ -50,7 +53,7 @@ struct timer_list tau_timer; #define shrink_timer 2*HZ /* period between shrinking the window */ #define min_window 2 /* minimum window size, degrees C */ -void set_thresholds(unsigned long cpu) +static void set_thresholds(unsigned long cpu) { #ifdef CONFIG_TAU_INT /* @@ -70,7 +73,7 @@ void set_thresholds(unsigned long cpu) #endif } -void TAUupdate(int cpu) +static void TAUupdate(int cpu) { unsigned thrm; @@ -205,7 +208,7 @@ static void tau_timeout_smp(struct timer_list *unused) int tau_initialized = 0; -void __init TAU_init_smp(void * info) +static void __init TAU_init_smp(void *info) { unsigned long cpu = smp_processor_id(); @@ -217,7 +220,7 @@ void __init TAU_init_smp(void * info) set_thresholds(cpu); } -int __init TAU_init(void) +static int __init TAU_init(void) { /* We assume in SMP that if one CPU has TAU support, they * all have it --BenH @@ -259,12 +262,12 @@ u32 cpu_temp_both(unsigned long cpu) return ((tau[cpu].high << 16) | tau[cpu].low); } -int cpu_temp(unsigned long cpu) +u32 cpu_temp(unsigned long cpu) { return ((tau[cpu].high + tau[cpu].low) / 2); } -int tau_interrupts(unsigned long cpu) +u32 tau_interrupts(unsigned long cpu) { return (tau[cpu].interrupts); } diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 360e71d455cc..70f145e02487 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -163,12 +163,6 @@ EXPORT_SYMBOL(__cputime_usec_factor); void (*dtl_consumer)(struct dtl_entry *, u64); #endif -#ifdef CONFIG_PPC64 -#define get_accounting(tsk) (&get_paca()->accounting) -#else -#define get_accounting(tsk) (&task_thread_info(tsk)->accounting) -#endif - static void calc_cputime_factors(void) { struct div_result res; @@ -421,21 +415,6 @@ void vtime_flush(struct task_struct *tsk) acct->softirq_time = 0; } -#ifdef CONFIG_PPC32 -/* - * Called from the context switch with interrupts disabled, to charge all - * accumulated times to the current process, and to prepare accounting on - * the next process. - */ -void arch_vtime_task_switch(struct task_struct *prev) -{ - struct cpu_accounting_data *acct = get_accounting(current); - - acct->starttime = get_accounting(prev)->starttime; - acct->startspurr = get_accounting(prev)->startspurr; -} -#endif /* CONFIG_PPC32 */ - #else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #define calc_cputime_factors() #endif @@ -513,6 +492,35 @@ static inline void clear_irq_work_pending(void) "i" (offsetof(struct paca_struct, irq_work_pending))); } +void arch_irq_work_raise(void) +{ + preempt_disable(); + set_irq_work_pending_flag(); + /* + * Non-NMI code running with interrupts disabled will replay + * irq_happened before it re-enables interrupts, so set the + * decrementer there instead of causing a hardware exception + * which would immediately hit the masked interrupt handler + * and have the net effect of setting the decrementer in + * irq_happened. + * + * NMI interrupts cannot check this when they return, so the + * decrementer hardware exception is raised, which will fire + * when interrupts are next enabled. + * + * BookE does not support this yet, it must audit all NMI + * interrupt handlers to ensure they call nmi_enter() so this + * check would be correct. 
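arch_irq_work_raise() here is the powerpc backend for the generic irq_work facility; its only job is to guarantee that a self-interrupt fires soon. For context, a minimal irq_work user looks roughly like this (illustrative, not code from this patch; static initialization shown for brevity, init_irq_work() also works):

	#include <linux/irq_work.h>

	static void my_work_fn(struct irq_work *work)
	{
		/* Runs later from the irq_work interrupt, hard-irq context. */
	}

	static struct irq_work my_work = { .func = my_work_fn };

	/* Queueing is NMI-safe: it sets the pending flag and then calls
	 * arch_irq_work_raise(), as defined above. */
	void kick_from_nmi(void)
	{
		irq_work_queue(&my_work);
	}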
+ */ + if (IS_ENABLED(CONFIG_BOOKE) || !irqs_disabled() || in_nmi()) { + set_dec(1); + } else { + hard_irq_disable(); + local_paca->irq_happened |= PACA_IRQ_DEC; + } + preempt_enable(); +} + #else /* 32-bit */ DEFINE_PER_CPU(u8, irq_work_pending); @@ -521,8 +529,6 @@ DEFINE_PER_CPU(u8, irq_work_pending); #define test_irq_work_pending() __this_cpu_read(irq_work_pending) #define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0) -#endif /* 32 vs 64 bit */ - void arch_irq_work_raise(void) { preempt_disable(); @@ -531,6 +537,8 @@ void arch_irq_work_raise(void) preempt_enable(); } +#endif /* 32 vs 64 bit */ + #else /* CONFIG_IRQ_WORK */ #define test_irq_work_pending() 0 @@ -538,60 +546,16 @@ void arch_irq_work_raise(void) #endif /* CONFIG_IRQ_WORK */ -static void __timer_interrupt(void) -{ - struct pt_regs *regs = get_irq_regs(); - u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); - struct clock_event_device *evt = this_cpu_ptr(&decrementers); - u64 now; - - trace_timer_interrupt_entry(regs); - - if (test_irq_work_pending()) { - clear_irq_work_pending(); - irq_work_run(); - } - - now = get_tb_or_rtc(); - if (now >= *next_tb) { - *next_tb = ~(u64)0; - if (evt->event_handler) - evt->event_handler(evt); - __this_cpu_inc(irq_stat.timer_irqs_event); - } else { - now = *next_tb - now; - if (now <= decrementer_max) - set_dec(now); - /* We may have raced with new irq work */ - if (test_irq_work_pending()) - set_dec(1); - __this_cpu_inc(irq_stat.timer_irqs_others); - } - -#ifdef CONFIG_PPC64 - /* collect purr register values often, for accurate calculations */ - if (firmware_has_feature(FW_FEATURE_SPLPAR)) { - struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array); - cu->current_tb = mfspr(SPRN_PURR); - } -#endif - - trace_timer_interrupt_exit(regs); -} - /* * timer_interrupt - gets called when the decrementer overflows, * with interrupts disabled. */ -void timer_interrupt(struct pt_regs * regs) +void timer_interrupt(struct pt_regs *regs) { - struct pt_regs *old_regs; + struct clock_event_device *evt = this_cpu_ptr(&decrementers); u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); - - /* Ensure a positive value is written to the decrementer, or else - * some CPUs will continue to take decrementer exceptions. - */ - set_dec(decrementer_max); + struct pt_regs *old_regs; + u64 now; /* Some implementations of hotplug will get timer interrupts while * offline, just ignore these and we also need to set @@ -599,11 +563,23 @@ void timer_interrupt(struct pt_regs * regs) * don't replay timer interrupt when return, otherwise we'll trap * here infinitely :( */ - if (!cpu_online(smp_processor_id())) { + if (unlikely(!cpu_online(smp_processor_id()))) { *next_tb = ~(u64)0; + set_dec(decrementer_max); return; } + /* Ensure a positive value is written to the decrementer, or else + * some CPUs will continue to take decrementer exceptions. When the + * PPC_WATCHDOG (decrementer based) is configured, keep this at most + * 31 bits, which is about 4 seconds on most systems, which gives + * the watchdog a chance of catching timer interrupt hard lockups. 
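The 31-bit cap applied just below (set_dec(0x7fffffff) when PPC_WATCHDOG is configured) bounds how long the decrementer can sit armed. Since the decrementer counts down at the timebase frequency, the worst case is easy to estimate; 512 MHz is the common POWER timebase and is used here purely as an example:

	/* 0x7fffffff ticks / 512000000 ticks-per-second ≈ 4.19 seconds,
	 * short enough for the SMP watchdog heartbeat to notice a stuck
	 * timer interrupt within a few seconds. */
	static u64 watchdog_worst_case_ns(void)
	{
		return div_u64((u64)0x7fffffff * NSEC_PER_SEC,
			       (u32)tb_ticks_per_sec);
	}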
+ */ + if (IS_ENABLED(CONFIG_PPC_WATCHDOG)) + set_dec(0x7fffffff); + else + set_dec(decrementer_max); + /* Conditionally hard-enable interrupts now that the DEC has been * bumped to its maximum value */ @@ -617,13 +593,46 @@ void timer_interrupt(struct pt_regs * regs) old_regs = set_irq_regs(regs); irq_enter(); + trace_timer_interrupt_entry(regs); + + if (test_irq_work_pending()) { + clear_irq_work_pending(); + irq_work_run(); + } + + now = get_tb_or_rtc(); + if (now >= *next_tb) { + *next_tb = ~(u64)0; + if (evt->event_handler) + evt->event_handler(evt); + __this_cpu_inc(irq_stat.timer_irqs_event); + } else { + now = *next_tb - now; + if (now <= decrementer_max) + set_dec(now); + /* We may have raced with new irq work */ + if (test_irq_work_pending()) + set_dec(1); + __this_cpu_inc(irq_stat.timer_irqs_others); + } - __timer_interrupt(); + trace_timer_interrupt_exit(regs); irq_exit(); set_irq_regs(old_regs); } EXPORT_SYMBOL(timer_interrupt); +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST +void timer_broadcast_interrupt(void) +{ + u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); + + *next_tb = ~(u64)0; + tick_receive_broadcast(); + __this_cpu_inc(irq_stat.broadcast_irqs_event); +} +#endif + /* * Hypervisor decrementer interrupts shouldn't occur but are sometimes * left pending on exit from a KVM guest. We don't need to do anything @@ -781,21 +790,19 @@ void __init generic_calibrate_decr(void) } } -int update_persistent_clock(struct timespec now) +int update_persistent_clock64(struct timespec64 now) { struct rtc_time tm; if (!ppc_md.set_rtc_time) return -ENODEV; - to_tm(now.tv_sec + 1 + timezone_offset, &tm); - tm.tm_year -= 1900; - tm.tm_mon -= 1; + rtc_time64_to_tm(now.tv_sec + 1 + timezone_offset, &tm); return ppc_md.set_rtc_time(&tm); } -static void __read_persistent_clock(struct timespec *ts) +static void __read_persistent_clock(struct timespec64 *ts) { struct rtc_time tm; static int first = 1; @@ -819,11 +826,10 @@ static void __read_persistent_clock(struct timespec *ts) } ppc_md.get_rtc_time(&tm); - ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, - tm.tm_hour, tm.tm_min, tm.tm_sec); + ts->tv_sec = rtc_tm_to_time64(&tm); } -void read_persistent_clock(struct timespec *ts) +void read_persistent_clock64(struct timespec64 *ts) { __read_persistent_clock(ts); @@ -971,15 +977,6 @@ static int decrementer_shutdown(struct clock_event_device *dev) return 0; } -/* Interrupt handler for the timer broadcast IPI */ -void tick_broadcast_ipi_handler(void) -{ - u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); - - *next_tb = get_tb_or_rtc(); - __timer_interrupt(); -} - static void register_decrementer_clockevent(int cpu) { struct clock_event_device *dec = &per_cpu(decrementers, cpu); @@ -1141,56 +1138,6 @@ void __init time_init(void) #endif } - -#define FEBRUARY 2 -#define STARTOFTIME 1970 -#define SECDAY 86400L -#define SECYR (SECDAY * 365) -#define leapyear(year) ((year) % 4 == 0 && \ - ((year) % 100 != 0 || (year) % 400 == 0)) -#define days_in_year(a) (leapyear(a) ? 
366 : 365) -#define days_in_month(a) (month_days[(a) - 1]) - -static int month_days[12] = { - 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 -}; - -void to_tm(int tim, struct rtc_time * tm) -{ - register int i; - register long hms, day; - - day = tim / SECDAY; - hms = tim % SECDAY; - - /* Hours, minutes, seconds are easy */ - tm->tm_hour = hms / 3600; - tm->tm_min = (hms % 3600) / 60; - tm->tm_sec = (hms % 3600) % 60; - - /* Number of years in days */ - for (i = STARTOFTIME; day >= days_in_year(i); i++) - day -= days_in_year(i); - tm->tm_year = i; - - /* Number of months in days left */ - if (leapyear(tm->tm_year)) - days_in_month(FEBRUARY) = 29; - for (i = 1; day >= days_in_month(i); i++) - day -= days_in_month(i); - days_in_month(FEBRUARY) = 28; - tm->tm_mon = i; - - /* Days are what is left over (+1) from all that. */ - tm->tm_mday = day + 1; - - /* - * No-one uses the day of the week. - */ - tm->tm_wday = -1; -} -EXPORT_SYMBOL(to_tm); - /* * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit * result. diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index b92ac8e711db..ff12f47a96b6 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -12,6 +12,7 @@ #include <asm/ptrace.h> #include <asm/reg.h> #include <asm/bug.h> +#include <asm/export.h> #ifdef CONFIG_VSX /* See fpu.S, this is borrowed from there */ @@ -55,6 +56,16 @@ _GLOBAL(tm_enable) or r4, r4, r3 mtmsrd r4 1: blr +EXPORT_SYMBOL_GPL(tm_enable); + +_GLOBAL(tm_disable) + mfmsr r4 + li r3, MSR_TM >> 32 + sldi r3, r3, 32 + andc r4, r4, r3 + mtmsrd r4 + blr +EXPORT_SYMBOL_GPL(tm_disable); _GLOBAL(tm_save_sprs) mfspr r0, SPRN_TFHAR @@ -78,6 +89,7 @@ _GLOBAL(tm_restore_sprs) _GLOBAL(tm_abort) TABORT(R3) blr +EXPORT_SYMBOL_GPL(tm_abort); /* void tm_reclaim(struct thread_struct *thread, * uint8_t cause) diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 4741fe112f05..c076a32093fd 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -357,6 +357,8 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned int op[2]; void *ip = (void *)rec->ip; + unsigned long entry, ptr, tramp; + struct module *mod = rec->arch.mod; /* read where this goes */ if (probe_kernel_read(op, ip, sizeof(op))) @@ -368,19 +370,44 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return -EINVAL; } - /* If we never set up a trampoline to ftrace_caller, then bail */ - if (!rec->arch.mod->arch.tramp) { + /* If we never set up ftrace trampoline(s), then bail */ +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + if (!mod->arch.tramp || !mod->arch.tramp_regs) { +#else + if (!mod->arch.tramp) { +#endif pr_err("No ftrace trampoline\n"); return -EINVAL; } +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + if (rec->flags & FTRACE_FL_REGS) + tramp = mod->arch.tramp_regs; + else +#endif + tramp = mod->arch.tramp; + + if (module_trampoline_target(mod, tramp, &ptr)) { + pr_err("Failed to get trampoline target\n"); + return -EFAULT; + } + + pr_devel("trampoline target %lx", ptr); + + entry = ppc_global_function_entry((void *)addr); + /* This should match what was called */ + if (ptr != entry) { + pr_err("addr %lx does not match expected %lx\n", ptr, entry); + return -EINVAL; + } + /* Ensure branch is within 24 bits */ - if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { + if (!create_branch(ip, tramp, BRANCH_SET_LINK)) { pr_err("Branch out of range\n"); return -EINVAL; } - if (patch_branch(ip, rec->arch.mod->arch.tramp, 
BRANCH_SET_LINK)) { + if (patch_branch(ip, tramp, BRANCH_SET_LINK)) { pr_err("REL24 out of range!\n"); return -EINVAL; } @@ -388,14 +415,6 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return 0; } -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS -int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, - unsigned long addr) -{ - return ftrace_make_call(rec, addr); -} -#endif - #else /* !CONFIG_PPC64: */ static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) @@ -472,53 +491,158 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) #endif /* CONFIG_MODULES */ } -int ftrace_update_ftrace_func(ftrace_func_t func) +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +#ifdef CONFIG_MODULES +static int +__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr) { - unsigned long ip = (unsigned long)(&ftrace_call); - unsigned int old, new; - int ret; + unsigned int op; + unsigned long ip = rec->ip; + unsigned long entry, ptr, tramp; + struct module *mod = rec->arch.mod; - old = *(unsigned int *)&ftrace_call; - new = ftrace_call_replace(ip, (unsigned long)func, 1); - ret = ftrace_modify_code(ip, old, new); + /* If we never set up ftrace trampolines, then bail */ + if (!mod->arch.tramp || !mod->arch.tramp_regs) { + pr_err("No ftrace trampoline\n"); + return -EINVAL; + } - return ret; -} + /* read where this goes */ + if (probe_kernel_read(&op, (void *)ip, sizeof(int))) { + pr_err("Fetching opcode failed.\n"); + return -EFAULT; + } -static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable) -{ - unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR; - int ret; + /* Make sure that this is still a 24-bit jump */ + if (!is_bl_op(op)) { + pr_err("Not expected bl: opcode is %x\n", op); + return -EINVAL; + } + + /* let's find where the pointer goes */ + tramp = find_bl_target(ip, op); + entry = ppc_global_function_entry((void *)old_addr); + + pr_devel("ip:%lx jumps to %lx", ip, tramp); - ret = ftrace_update_record(rec, enable); + if (tramp != entry) { + /* old_addr is not within range, so we must have used a trampoline */ + if (module_trampoline_target(mod, tramp, &ptr)) { + pr_err("Failed to get trampoline target\n"); + return -EFAULT; + } + + pr_devel("trampoline target %lx", ptr); + + /* This should match what was called */ + if (ptr != entry) { + pr_err("addr %lx does not match expected %lx\n", ptr, entry); + return -EINVAL; + } + } + + /* The new target may be within range */ + if (test_24bit_addr(ip, addr)) { + /* within range */ + if (patch_branch((unsigned int *)ip, addr, BRANCH_SET_LINK)) { + pr_err("REL24 out of range!\n"); + return -EINVAL; + } - switch (ret) { - case FTRACE_UPDATE_IGNORE: return 0; - case FTRACE_UPDATE_MAKE_CALL: - return ftrace_make_call(rec, ftrace_addr); - case FTRACE_UPDATE_MAKE_NOP: - return ftrace_make_nop(NULL, rec, ftrace_addr); + } + + if (rec->flags & FTRACE_FL_REGS) + tramp = mod->arch.tramp_regs; + else + tramp = mod->arch.tramp; + + if (module_trampoline_target(mod, tramp, &ptr)) { + pr_err("Failed to get trampoline target\n"); + return -EFAULT; + } + + pr_devel("trampoline target %lx", ptr); + + entry = ppc_global_function_entry((void *)addr); + /* This should match what was called */ + if (ptr != entry) { + pr_err("addr %lx does not match expected %lx\n", ptr, entry); + return -EINVAL; + } + + /* Ensure branch is within 24 bits */ + if (!create_branch((unsigned int *)ip, tramp, BRANCH_SET_LINK)) { + pr_err("Branch out of range\n"); + return -EINVAL; + } + + if 
(patch_branch((unsigned int *)ip, tramp, BRANCH_SET_LINK)) { + pr_err("REL24 out of range!\n"); + return -EINVAL; } return 0; } +#endif -void ftrace_replace_code(int enable) +int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr) { - struct ftrace_rec_iter *iter; - struct dyn_ftrace *rec; + unsigned long ip = rec->ip; + unsigned int old, new; + + /* + * If the calling address is more that 24 bits away, + * then we had to use a trampoline to make the call. + * Otherwise just update the call site. + */ + if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) { + /* within range */ + old = ftrace_call_replace(ip, old_addr, 1); + new = ftrace_call_replace(ip, addr, 1); + return ftrace_modify_code(ip, old, new); + } + +#ifdef CONFIG_MODULES + /* + * Out of range jumps are called from modules. + */ + if (!rec->arch.mod) { + pr_err("No module loaded\n"); + return -EINVAL; + } + + return __ftrace_modify_call(rec, old_addr, addr); +#else + /* We should not get here without modules */ + return -EINVAL; +#endif /* CONFIG_MODULES */ +} +#endif + +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + unsigned long ip = (unsigned long)(&ftrace_call); + unsigned int old, new; int ret; - for (iter = ftrace_rec_iter_start(); iter; - iter = ftrace_rec_iter_next(iter)) { - rec = ftrace_rec_iter_record(iter); - ret = __ftrace_replace_code(rec, enable); - if (ret) { - ftrace_bug(ret, rec); - return; - } + old = *(unsigned int *)&ftrace_call; + new = ftrace_call_replace(ip, (unsigned long)func, 1); + ret = ftrace_modify_code(ip, old, new); + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + /* Also update the regs callback function */ + if (!ret) { + ip = (unsigned long)(&ftrace_regs_call); + old = *(unsigned int *)&ftrace_regs_call; + new = ftrace_call_replace(ip, (unsigned long)func, 1); + ret = ftrace_modify_code(ip, old, new); } +#endif + + return ret; } /* @@ -538,7 +662,6 @@ int __init ftrace_dyn_arch_init(void) #ifdef CONFIG_FUNCTION_GRAPH_TRACER -#ifdef CONFIG_DYNAMIC_FTRACE extern void ftrace_graph_call(void); extern void ftrace_graph_stub(void); @@ -567,7 +690,6 @@ int ftrace_disable_ftrace_graph_caller(void) return ftrace_modify_code(ip, old, new); } -#endif /* CONFIG_DYNAMIC_FTRACE */ /* * Hook the return address and push it in the stack of return addrs diff --git a/arch/powerpc/kernel/trace/ftrace_32.S b/arch/powerpc/kernel/trace/ftrace_32.S index afef2c076282..2c29098f630f 100644 --- a/arch/powerpc/kernel/trace/ftrace_32.S +++ b/arch/powerpc/kernel/trace/ftrace_32.S @@ -14,7 +14,6 @@ #include <asm/ftrace.h> #include <asm/export.h> -#ifdef CONFIG_DYNAMIC_FTRACE _GLOBAL(mcount) _GLOBAL(_mcount) /* @@ -47,26 +46,7 @@ _GLOBAL(ftrace_graph_stub) MCOUNT_RESTORE_FRAME /* old link register ends up in ctr reg */ bctr -#else -_GLOBAL(mcount) -_GLOBAL(_mcount) - - MCOUNT_SAVE_FRAME - subi r3, r3, MCOUNT_INSN_SIZE - LOAD_REG_ADDR(r5, ftrace_trace_function) - lwz r5,0(r5) - - mtctr r5 - bctrl - nop - -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - b ftrace_graph_caller -#endif - MCOUNT_RESTORE_FRAME - bctr -#endif EXPORT_SYMBOL(_mcount) _GLOBAL(ftrace_stub) diff --git a/arch/powerpc/kernel/trace/ftrace_64.S b/arch/powerpc/kernel/trace/ftrace_64.S index e5ccea19821e..e25f77c10a72 100644 --- a/arch/powerpc/kernel/trace/ftrace_64.S +++ b/arch/powerpc/kernel/trace/ftrace_64.S @@ -14,7 +14,6 @@ #include <asm/ppc-opcode.h> #include <asm/export.h> -#ifdef CONFIG_DYNAMIC_FTRACE _GLOBAL(mcount) _GLOBAL(_mcount) EXPORT_SYMBOL(_mcount) @@ -23,34 +22,6 @@ EXPORT_SYMBOL(_mcount) mtlr r0 bctr 
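The ftrace_modify_call()/ftrace_regs_call plumbing above is the architecture half of CONFIG_DYNAMIC_FTRACE_WITH_REGS; the generic half is an ftrace_ops that asks for a register snapshot. For illustration (standard ftrace API of this era, not code from the patch):

	#include <linux/ftrace.h>

	static void notrace my_trace_fn(unsigned long ip, unsigned long parent_ip,
					struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* With SAVE_REGS, 'regs' is the full frame captured by
		 * ftrace_regs_caller(); consumers such as kprobes-on-ftrace
		 * and livepatch may even rewrite regs->nip here. */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_trace_fn,
		.flags	= FTRACE_OPS_FL_SAVE_REGS,
	};

	/* register_ftrace_function(&my_ops); then routes patched call sites
	 * through the regs-saving trampoline set up above. */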
-#else /* CONFIG_DYNAMIC_FTRACE */ -_GLOBAL_TOC(_mcount) -EXPORT_SYMBOL(_mcount) - /* Taken from output of objdump from lib64/glibc */ - mflr r3 - ld r11, 0(r1) - stdu r1, -112(r1) - std r3, 128(r1) - ld r4, 16(r11) - - subi r3, r3, MCOUNT_INSN_SIZE - LOAD_REG_ADDR(r5,ftrace_trace_function) - ld r5,0(r5) - ld r5,0(r5) - mtctr r5 - bctrl - nop - -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - b ftrace_graph_caller -#endif - ld r0, 128(r1) - mtlr r0 - addi r1, r1, 112 -_GLOBAL(ftrace_stub) - blr -#endif /* CONFIG_DYNAMIC_FTRACE */ - #ifdef CONFIG_FUNCTION_GRAPH_TRACER _GLOBAL(return_to_handler) /* need to save return values */ diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S index 3f3e81852422..9a5b5a513604 100644 --- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S +++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S @@ -17,11 +17,10 @@ #include <asm/bug.h> #include <asm/ptrace.h> -#ifdef CONFIG_DYNAMIC_FTRACE /* * - * ftrace_caller() is the function that replaces _mcount() when ftrace is - * active. + * ftrace_caller()/ftrace_regs_caller() is the function that replaces _mcount() + * when ftrace is active. * * We arrive here after a function A calls function B, and we are the trace * function for B. When we enter r1 points to A's stack frame, B has not yet @@ -37,7 +36,7 @@ * Our job is to save the register state into a struct pt_regs (on the stack) * and then arrange for the ftrace function to be called. */ -_GLOBAL(ftrace_caller) +_GLOBAL(ftrace_regs_caller) /* Save the original return address in A's stack frame */ std r0,LRSAVE(r1) @@ -47,6 +46,12 @@ _GLOBAL(ftrace_caller) /* Save all gprs to pt_regs */ SAVE_GPR(0, r1) SAVE_10GPRS(2, r1) + + /* Ok to continue? */ + lbz r3, PACA_FTRACE_ENABLED(r13) + cmpdi r3, 0 + beq ftrace_no_trace + SAVE_10GPRS(12, r1) SAVE_10GPRS(22, r1) @@ -94,8 +99,8 @@ _GLOBAL(ftrace_caller) addi r6, r1 ,STACK_FRAME_OVERHEAD /* ftrace_call(r3, r4, r5, r6) */ -.globl ftrace_call -ftrace_call: +.globl ftrace_regs_call +ftrace_regs_call: bl ftrace_stub nop @@ -156,6 +161,7 @@ ftrace_call: bne- livepatch_handler #endif +ftrace_caller_common: #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call ftrace_graph_call: @@ -168,6 +174,74 @@ _GLOBAL(ftrace_graph_stub) _GLOBAL(ftrace_stub) blr +ftrace_no_trace: + mflr r3 + mtctr r3 + REST_GPR(3, r1) + addi r1, r1, SWITCH_FRAME_SIZE + mtlr r0 + bctr + +_GLOBAL(ftrace_caller) + /* Save the original return address in A's stack frame */ + std r0, LRSAVE(r1) + + /* Create our stack frame + pt_regs */ + stdu r1, -SWITCH_FRAME_SIZE(r1) + + /* Save all gprs to pt_regs */ + SAVE_8GPRS(3, r1) + + lbz r3, PACA_FTRACE_ENABLED(r13) + cmpdi r3, 0 + beq ftrace_no_trace + + /* Get the _mcount() call site out of LR */ + mflr r7 + std r7, _NIP(r1) + + /* Save callee's TOC in the ABI compliant location */ + std r2, 24(r1) + ld r2, PACATOC(r13) /* get kernel TOC in r2 */ + + addis r3, r2, function_trace_op@toc@ha + addi r3, r3, function_trace_op@toc@l + ld r5, 0(r3) + + /* Calculate ip from nip-4 into r3 for call below */ + subi r3, r7, MCOUNT_INSN_SIZE + + /* Put the original return address in r4 as parent_ip */ + mr r4, r0 + + /* Set pt_regs to NULL */ + li r6, 0 + + /* ftrace_call(r3, r4, r5, r6) */ +.globl ftrace_call +ftrace_call: + bl ftrace_stub + nop + + ld r3, _NIP(r1) + mtctr r3 + + /* Restore gprs */ + REST_8GPRS(3,r1) + + /* Restore callee's TOC */ + ld r2, 24(r1) + + /* Pop our stack frame */ + addi r1, r1, SWITCH_FRAME_SIZE + + /* Reload original LR */ + ld r0, LRSAVE(r1) + 
mtlr r0 + + /* Handle function_graph or go back */ + b ftrace_caller_common + #ifdef CONFIG_LIVEPATCH /* * This function runs in the mcount context, between two functions. As @@ -236,8 +310,6 @@ livepatch_handler: blr #endif /* CONFIG_LIVEPATCH */ -#endif /* CONFIG_DYNAMIC_FTRACE */ - #ifdef CONFIG_FUNCTION_GRAPH_TRACER _GLOBAL(ftrace_graph_caller) stdu r1, -112(r1) diff --git a/arch/powerpc/kernel/trace/ftrace_64_pg.S b/arch/powerpc/kernel/trace/ftrace_64_pg.S index f095358da96e..4c515c4023de 100644 --- a/arch/powerpc/kernel/trace/ftrace_64_pg.S +++ b/arch/powerpc/kernel/trace/ftrace_64_pg.S @@ -14,8 +14,11 @@ #include <asm/ppc-opcode.h> #include <asm/export.h> -#ifdef CONFIG_DYNAMIC_FTRACE _GLOBAL_TOC(ftrace_caller) + lbz r3, PACA_FTRACE_ENABLED(r13) + cmpdi r3, 0 + beqlr + /* Taken from output of objdump from lib64/glibc */ mflr r3 ld r11, 0(r1) @@ -39,7 +42,6 @@ _GLOBAL(ftrace_graph_stub) _GLOBAL(ftrace_stub) blr -#endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER _GLOBAL(ftrace_graph_caller) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 0904492e7032..0e17dcb48720 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -296,7 +296,6 @@ NOKPROBE_SYMBOL(die); void user_single_step_siginfo(struct task_struct *tsk, struct pt_regs *regs, siginfo_t *info) { - memset(info, 0, sizeof(*info)); info->si_signo = SIGTRAP; info->si_code = TRAP_TRACE; info->si_addr = (void __user *)regs->nip; @@ -334,7 +333,7 @@ void _exception_pkey(int signr, struct pt_regs *regs, int code, */ thread_pkey_regs_save(¤t->thread); - memset(&info, 0, sizeof(info)); + clear_siginfo(&info); info.si_signo = signr; info.si_code = code; info.si_addr = (void __user *) addr; @@ -970,7 +969,7 @@ void unknown_exception(struct pt_regs *regs) printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", regs->nip, regs->msr, regs->trap); - _exception(SIGTRAP, regs, TRAP_FIXME, 0); + _exception(SIGTRAP, regs, TRAP_UNK, 0); exception_exit(prev_state); } @@ -992,7 +991,7 @@ bail: void RunModeException(struct pt_regs *regs) { - _exception(SIGTRAP, regs, TRAP_FIXME, 0); + _exception(SIGTRAP, regs, TRAP_UNK, 0); } void single_step_exception(struct pt_regs *regs) @@ -1032,7 +1031,7 @@ static void emulate_single_step(struct pt_regs *regs) static inline int __parse_fpscr(unsigned long fpscr) { - int ret = FPE_FIXME; + int ret = FPE_FLTUNK; /* Invalid operation */ if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX)) @@ -1973,7 +1972,7 @@ void SPEFloatingPointException(struct pt_regs *regs) extern int do_spe_mathemu(struct pt_regs *regs); unsigned long spefscr; int fpexc_mode; - int code = FPE_FIXME; + int code = FPE_FLTUNK; int err; flush_spe_to_thread(current); @@ -2042,7 +2041,7 @@ void SPEFloatingPointRoundException(struct pt_regs *regs) printk(KERN_ERR "unrecognized spe instruction " "in %s at %lx\n", current->comm, regs->nip); } else { - _exception(SIGFPE, regs, FPE_FIXME, regs->nip); + _exception(SIGFPE, regs, FPE_FLTUNK, regs->nip); return; } } diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile index b8c434d1d459..50112d4473bb 100644 --- a/arch/powerpc/kernel/vdso32/Makefile +++ b/arch/powerpc/kernel/vdso32/Makefile @@ -8,8 +8,15 @@ obj-vdso32 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o \ # Build rules -ifeq ($(CONFIG_PPC32),y) -CROSS32CC := $(CC) +ifdef CROSS32_COMPILE + VDSOCC := $(CROSS32_COMPILE)gcc +else + VDSOCC := $(CC) +endif + +CC32FLAGS := +ifdef CONFIG_PPC64 +CC32FLAGS += -m32 endif targets := $(obj-vdso32) 
vdso32.so vdso32.so.dbg @@ -45,9 +52,9 @@ $(obj-vdso32): %.o: %.S FORCE # actual build commands quiet_cmd_vdso32ld = VDSO32L $@ - cmd_vdso32ld = $(CROSS32CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) + cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) quiet_cmd_vdso32as = VDSO32A $@ - cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $< + cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) -c -o $@ $< # install commands for the unstripped file quiet_cmd_vdso_install = INSTALL $@ diff --git a/arch/powerpc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c index 8812085883fd..4acd3fb2b38e 100644 --- a/arch/powerpc/kernel/vecemu.c +++ b/arch/powerpc/kernel/vecemu.c @@ -8,6 +8,7 @@ #include <linux/sched.h> #include <asm/ptrace.h> #include <asm/processor.h> +#include <asm/switch_to.h> #include <linux/uaccess.h> /* Functions in vector.S */ diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index b8d82678f8b4..5baac79df97e 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -89,7 +89,7 @@ SECTIONS */ .text BLOCK(0) : AT(ADDR(.text) - LOAD_OFFSET) { #ifdef CONFIG_LD_HEAD_STUB_CATCH - *(.linker_stub_catch); + KEEP(*(.linker_stub_catch)); . = . ; #endif @@ -98,7 +98,7 @@ SECTIONS ALIGN_FUNCTION(); #endif /* careful! __ftr_alt_* sections need to be close to .text */ - *(.text.hot .text .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text); + *(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text); SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT @@ -153,6 +153,13 @@ SECTIONS *(__rfi_flush_fixup) __stop___rfi_flush_fixup = .; } + + . = ALIGN(8); + __spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) { + __start___barrier_nospec_fixup = .; + *(__barrier_nospec_fixup) + __stop___barrier_nospec_fixup = .; + } #endif EXCEPTION_TABLE(0) @@ -184,10 +191,10 @@ SECTIONS .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { INIT_DATA __vtop_table_begin = .; - *(.vtop_fixup); + KEEP(*(.vtop_fixup)); __vtop_table_end = .; __ptov_table_begin = .; - *(.ptov_fixup); + KEEP(*(.ptov_fixup)); __ptov_table_end = .; } @@ -208,26 +215,26 @@ SECTIONS . = ALIGN(8); __ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) { __start___ftr_fixup = .; - *(__ftr_fixup) + KEEP(*(__ftr_fixup)) __stop___ftr_fixup = .; } . = ALIGN(8); __mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) { __start___mmu_ftr_fixup = .; - *(__mmu_ftr_fixup) + KEEP(*(__mmu_ftr_fixup)) __stop___mmu_ftr_fixup = .; } . = ALIGN(8); __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) { __start___lwsync_fixup = .; - *(__lwsync_fixup) + KEEP(*(__lwsync_fixup)) __stop___lwsync_fixup = .; } #ifdef CONFIG_PPC64 . = ALIGN(8); __fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) { __start___fw_ftr_fixup = .; - *(__fw_ftr_fixup) + KEEP(*(__fw_ftr_fixup)) __stop___fw_ftr_fixup = .; } #endif @@ -240,7 +247,7 @@ SECTIONS . = ALIGN(8); .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) { __machine_desc_start = . ; - *(.machine.desc) + KEEP(*(.machine.desc)) __machine_desc_end = . 
; } #ifdef CONFIG_RELOCATABLE @@ -288,7 +295,7 @@ SECTIONS .data : AT(ADDR(.data) - LOAD_OFFSET) { DATA_DATA *(.data.rel*) - *(.sdata) + *(SDATA_MAIN) *(.sdata2) *(.got.plt) *(.got) *(.plt) @@ -303,7 +310,7 @@ SECTIONS .opd : AT(ADDR(.opd) - LOAD_OFFSET) { __start_opd = .; - *(.opd) + KEEP(*(.opd)) __end_opd = .; } diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c index 6256dc3b0087..1d82274f7e9f 100644 --- a/arch/powerpc/kernel/watchdog.c +++ b/arch/powerpc/kernel/watchdog.c @@ -64,7 +64,7 @@ * means the CPU(s) with their bit still set in the pending mask have had * their heartbeat stop, and action is taken. * - * Some platforms implement true NMI IPIs, which can by used by the SMP + * Some platforms implement true NMI IPIs, which can be used by the SMP * watchdog to detect an unresponsive CPU and pull it out of its stuck * state with the NMI IPI, to get crash/debug data from it. This way the * SMP watchdog can detect hardware interrupts off lockups. @@ -111,7 +111,13 @@ static inline void wd_smp_unlock(unsigned long *flags) static void wd_lockup_ipi(struct pt_regs *regs) { - pr_emerg("CPU %d Hard LOCKUP\n", raw_smp_processor_id()); + int cpu = raw_smp_processor_id(); + u64 tb = get_tb(); + + pr_emerg("CPU %d Hard LOCKUP\n", cpu); + pr_emerg("CPU %d TB:%lld, last heartbeat TB:%lld (%lldms ago)\n", + cpu, tb, per_cpu(wd_timer_tb, cpu), + tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000); print_modules(); print_irqtrace_events(current); if (regs) @@ -154,6 +160,9 @@ static void watchdog_smp_panic(int cpu, u64 tb) pr_emerg("CPU %d detected hard LOCKUP on other CPUs %*pbl\n", cpu, cpumask_pr_args(&wd_smp_cpus_pending)); + pr_emerg("CPU %d TB:%lld, last SMP heartbeat TB:%lld (%lldms ago)\n", + cpu, tb, wd_smp_last_reset_tb, + tb_to_ns(tb - wd_smp_last_reset_tb) / 1000000); if (!sysctl_hardlockup_all_cpu_backtrace) { /* @@ -194,10 +203,19 @@ static void wd_smp_clear_cpu_pending(int cpu, u64 tb) { if (!cpumask_test_cpu(cpu, &wd_smp_cpus_pending)) { if (unlikely(cpumask_test_cpu(cpu, &wd_smp_cpus_stuck))) { + struct pt_regs *regs = get_irq_regs(); unsigned long flags; - pr_emerg("CPU %d became unstuck\n", cpu); wd_smp_lock(&flags); + + pr_emerg("CPU %d became unstuck TB:%lld\n", + cpu, tb); + print_irqtrace_events(current); + if (regs) + show_regs(regs); + else + dump_stack(); + cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck); wd_smp_unlock(&flags); } @@ -245,8 +263,6 @@ void soft_nmi_interrupt(struct pt_regs *regs) tb = get_tb(); if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) { - per_cpu(wd_timer_tb, cpu) = tb; - wd_smp_lock(&flags); if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) { wd_smp_unlock(&flags); @@ -254,7 +270,11 @@ void soft_nmi_interrupt(struct pt_regs *regs) } set_cpu_stuck(cpu, tb); - pr_emerg("CPU %d self-detected hard LOCKUP @ %pS\n", cpu, (void *)regs->nip); + pr_emerg("CPU %d self-detected hard LOCKUP @ %pS\n", + cpu, (void *)regs->nip); + pr_emerg("CPU %d TB:%lld, last heartbeat TB:%lld (%lldms ago)\n", + cpu, tb, per_cpu(wd_timer_tb, cpu), + tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000); print_modules(); print_irqtrace_events(current); show_regs(regs); diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 361f42c8c73e..481da8f93fa4 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -200,6 +200,7 @@ void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr, } static struct kmem_cache *kvm_pte_cache; +static struct kmem_cache 
*kvm_pmd_cache; static pte_t *kvmppc_pte_alloc(void) { @@ -217,6 +218,16 @@ static inline int pmd_is_leaf(pmd_t pmd) return !!(pmd_val(pmd) & _PAGE_PTE); } +static pmd_t *kvmppc_pmd_alloc(void) +{ + return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL); +} + +static void kvmppc_pmd_free(pmd_t *pmdp) +{ + kmem_cache_free(kvm_pmd_cache, pmdp); +} + static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, unsigned int level, unsigned long mmu_seq) { @@ -239,7 +250,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, if (pud && pud_present(*pud) && !pud_huge(*pud)) pmd = pmd_offset(pud, gpa); else if (level <= 1) - new_pmd = pmd_alloc_one(kvm->mm, gpa); + new_pmd = kvmppc_pmd_alloc(); if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd))) new_ptep = kvmppc_pte_alloc(); @@ -382,7 +393,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, if (new_pud) pud_free(kvm->mm, new_pud); if (new_pmd) - pmd_free(kvm->mm, new_pmd); + kvmppc_pmd_free(new_pmd); if (new_ptep) kvmppc_pte_free(new_ptep); return ret; @@ -758,7 +769,7 @@ void kvmppc_free_radix(struct kvm *kvm) kvmppc_pte_free(pte); pmd_clear(pmd); } - pmd_free(kvm->mm, pmd_offset(pud, 0)); + kvmppc_pmd_free(pmd_offset(pud, 0)); pud_clear(pud); } pud_free(kvm->mm, pud_offset(pgd, 0)); @@ -770,20 +781,35 @@ void kvmppc_free_radix(struct kvm *kvm) static void pte_ctor(void *addr) { - memset(addr, 0, PTE_TABLE_SIZE); + memset(addr, 0, RADIX_PTE_TABLE_SIZE); +} + +static void pmd_ctor(void *addr) +{ + memset(addr, 0, RADIX_PMD_TABLE_SIZE); } int kvmppc_radix_init(void) { - unsigned long size = sizeof(void *) << PTE_INDEX_SIZE; + unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE; kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor); if (!kvm_pte_cache) return -ENOMEM; + + size = sizeof(void *) << RADIX_PMD_INDEX_SIZE; + + kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor); + if (!kvm_pmd_cache) { + kmem_cache_destroy(kvm_pte_cache); + return -ENOMEM; + } + return 0; } void kvmppc_radix_exit(void) { kmem_cache_destroy(kvm_pte_cache); + kmem_cache_destroy(kvm_pmd_cache); } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 9963f65c212b..cb6d2313b19f 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2912,8 +2912,12 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) srcu_idx = srcu_read_lock(&vc->kvm->srcu); + this_cpu_disable_ftrace(); + trap = __kvmppc_vcore_entry(); + this_cpu_enable_ftrace(); + srcu_read_unlock(&vc->kvm->srcu, srcu_idx); trace_hardirqs_off(); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 07ca1b2a7966..b97d261d3b89 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -342,6 +342,9 @@ kvm_start_guest: ld r2,PACATOC(r13) + li r0,0 + stb r0,PACA_FTRACE_ENABLED(r13) + li r0,KVM_HWTHREAD_IN_KVM stb r0,HSTATE_HWTHREAD_STATE(r13) diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 653901042ad7..d0ca13ad8231 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -26,13 +26,14 @@ obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \ memcpy_power7.o obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \ - string_64.o memcpy_64.o memcmp_64.o pmem.o + memcpy_64.o pmem.o obj64-$(CONFIG_SMP) += locks.o obj64-$(CONFIG_ALTIVEC) += vmx-helper.o obj64-$(CONFIG_KPROBES_SANITY_TEST) += 
test_emulate_step.o -obj-y += checksum_$(BITS).o checksum_wrappers.o +obj-y += checksum_$(BITS).o checksum_wrappers.o \ + string_$(BITS).o memcmp_$(BITS).o obj-y += sstep.o ldstfp.o quad.o obj64-y += quad.o diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S index 9a671c774b22..aa224069f93a 100644 --- a/arch/powerpc/lib/checksum_32.S +++ b/arch/powerpc/lib/checksum_32.S @@ -47,16 +47,25 @@ _GLOBAL(__csum_partial) bdnz 2b 21: srwi. r6,r4,4 /* # blocks of 4 words to do */ beq 3f + lwz r0,4(r3) mtctr r6 -22: lwz r0,4(r3) lwz r6,8(r3) + adde r5,r5,r0 lwz r7,12(r3) + adde r5,r5,r6 lwzu r8,16(r3) + adde r5,r5,r7 + bdz 23f +22: lwz r0,4(r3) + adde r5,r5,r8 + lwz r6,8(r3) adde r5,r5,r0 + lwz r7,12(r3) adde r5,r5,r6 + lwzu r8,16(r3) adde r5,r5,r7 - adde r5,r5,r8 bdnz 22b +23: adde r5,r5,r8 3: andi. r0,r4,2 beq+ 4f lhz r0,4(r3) @@ -293,3 +302,36 @@ dst_error: EX_TABLE(51b, dst_error); EXPORT_SYMBOL(csum_partial_copy_generic) + +/* + * __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + * const struct in6_addr *daddr, + * __u32 len, __u8 proto, __wsum sum) + */ + +_GLOBAL(csum_ipv6_magic) + lwz r8, 0(r3) + lwz r9, 4(r3) + addc r0, r7, r8 + lwz r10, 8(r3) + adde r0, r0, r9 + lwz r11, 12(r3) + adde r0, r0, r10 + lwz r8, 0(r4) + adde r0, r0, r11 + lwz r9, 4(r4) + adde r0, r0, r8 + lwz r10, 8(r4) + adde r0, r0, r9 + lwz r11, 12(r4) + adde r0, r0, r10 + add r5, r5, r6 /* assumption: len + proto doesn't carry */ + adde r0, r0, r11 + adde r0, r0, r5 + addze r0, r0 + rotlwi r3, r0, 16 + add r3, r0, r3 + not r3, r3 + rlwinm r3, r3, 16, 16, 31 + blr +EXPORT_SYMBOL(csum_ipv6_magic) diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S index d7f1a966136e..886ed94b9c13 100644 --- a/arch/powerpc/lib/checksum_64.S +++ b/arch/powerpc/lib/checksum_64.S @@ -429,3 +429,31 @@ dstnr; stb r6,0(r4) stw r6,0(r8) blr EXPORT_SYMBOL(csum_partial_copy_generic) + +/* + * __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + * const struct in6_addr *daddr, + * __u32 len, __u8 proto, __wsum sum) + */ + +_GLOBAL(csum_ipv6_magic) + ld r8, 0(r3) + ld r9, 8(r3) + add r5, r5, r6 + addc r0, r8, r9 + ld r10, 0(r4) + ld r11, 8(r4) + adde r0, r0, r10 + add r5, r5, r7 + adde r0, r0, r11 + adde r0, r0, r5 + addze r0, r0 + rotldi r3, r0, 32 /* fold two 32 bit halves together */ + add r3, r0, r3 + srdi r0, r3, 32 + rotlwi r3, r0, 16 /* fold two 16 bit halves together */ + add r3, r0, r3 + not r3, r3 + rlwinm r3, r3, 16, 16, 31 + blr +EXPORT_SYMBOL(csum_ipv6_magic) diff --git a/arch/powerpc/lib/feature-fixups-test.S b/arch/powerpc/lib/feature-fixups-test.S index f4613118132e..f16cec989506 100644 --- a/arch/powerpc/lib/feature-fixups-test.S +++ b/arch/powerpc/lib/feature-fixups-test.S @@ -167,16 +167,52 @@ globl(ftr_fixup_test6_expected) blt 2b b 3f b 1b -2: or 1,1,1 +3: or 1,1,1 + or 2,2,2 + or 3,3,3 + +globl(ftr_fixup_test7) + or 1,1,1 +BEGIN_FTR_SECTION + or 2,2,2 + or 2,2,2 or 2,2,2 -3: or 3,3,3 + or 2,2,2 + or 2,2,2 + or 2,2,2 + or 2,2,2 +FTR_SECTION_ELSE +2: b 3f +3: or 5,5,5 + beq 3b + b 1f + or 6,6,6 + b 2b + bdnz 3b +1: +ALT_FTR_SECTION_END(0, 1) + or 1,1,1 + or 1,1,1 + +globl(end_ftr_fixup_test7) + nop +globl(ftr_fixup_test7_expected) + or 1,1,1 +2: b 3f +3: or 5,5,5 + beq 3b + b 1f + or 6,6,6 + b 2b + bdnz 3b +1: or 1,1,1 #if 0 /* Test that if we have a larger else case the assembler spots it and * reports an error. #if 0'ed so as not to break the build normally. 
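The csum_ipv6_magic() routines added above compute the IPv6 pseudo-header checksum entirely with carrying adds. A C model of the 64-bit version (a sketch only: the kernel's __wsum/__sum16 typing and byte-order details are glossed over, and each address is viewed as two 64-bit words):

	#include <linux/types.h>

	static u16 csum_ipv6_magic_model(const u64 *saddr, const u64 *daddr,
					 u32 len, u8 proto, u32 sum)
	{
		unsigned __int128 acc = (unsigned __int128)saddr[0] + saddr[1] +
					daddr[0] + daddr[1] + sum + len + proto;
		u64 v = (u64)acc + (u64)(acc >> 64);	/* end-around carry */

		if (v < (u64)acc)
			v++;
		while (v >> 16)				/* fold 64 -> 16 bits */
			v = (v & 0xffff) + (v >> 16);
		return (u16)~v;
	}

The rotldi/rotlwi sequences at the end of the assembly are essentially this folding, done branch-free.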
*/ -ftr_fixup_test7: +ftr_fixup_test_too_big: or 1,1,1 BEGIN_FTR_SECTION or 2,2,2 diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index e1bcdc32a851..8b69f868298c 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -277,6 +277,43 @@ void do_rfi_flush_fixups(enum l1d_flush_type types) (types & L1D_FLUSH_MTTRIG) ? "mttrig type" : "unknown"); } + +void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end) +{ + unsigned int instr, *dest; + long *start, *end; + int i; + + start = fixup_start; + end = fixup_end; + + instr = 0x60000000; /* nop */ + + if (enable) { + pr_info("barrier-nospec: using ORI speculation barrier\n"); + instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */ + } + + for (i = 0; start < end; start++, i++) { + dest = (void *)start + *start; + + pr_devel("patching dest %lx\n", (unsigned long)dest); + patch_instruction(dest, instr); + } + + printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); +} + +void do_barrier_nospec_fixups(bool enable) +{ + void *start, *end; + + start = PTRRELOC(&__start___barrier_nospec_fixup), + end = PTRRELOC(&__stop___barrier_nospec_fixup); + + do_barrier_nospec_fixups_range(enable, start, end); +} + #endif /* CONFIG_PPC_BOOK3S_64 */ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) @@ -400,7 +437,7 @@ static void test_basic_patching(void) extern unsigned int end_ftr_fixup_test1[]; extern unsigned int ftr_fixup_test1_orig[]; extern unsigned int ftr_fixup_test1_expected[]; - int size = end_ftr_fixup_test1 - ftr_fixup_test1; + int size = 4 * (end_ftr_fixup_test1 - ftr_fixup_test1); fixup.value = fixup.mask = 8; fixup.start_off = calc_offset(&fixup, ftr_fixup_test1 + 1); @@ -432,7 +469,7 @@ static void test_alternative_patching(void) extern unsigned int ftr_fixup_test2_orig[]; extern unsigned int ftr_fixup_test2_alt[]; extern unsigned int ftr_fixup_test2_expected[]; - int size = end_ftr_fixup_test2 - ftr_fixup_test2; + int size = 4 * (end_ftr_fixup_test2 - ftr_fixup_test2); fixup.value = fixup.mask = 0xF; fixup.start_off = calc_offset(&fixup, ftr_fixup_test2 + 1); @@ -464,7 +501,7 @@ static void test_alternative_case_too_big(void) extern unsigned int end_ftr_fixup_test3[]; extern unsigned int ftr_fixup_test3_orig[]; extern unsigned int ftr_fixup_test3_alt[]; - int size = end_ftr_fixup_test3 - ftr_fixup_test3; + int size = 4 * (end_ftr_fixup_test3 - ftr_fixup_test3); fixup.value = fixup.mask = 0xC; fixup.start_off = calc_offset(&fixup, ftr_fixup_test3 + 1); @@ -491,7 +528,7 @@ static void test_alternative_case_too_small(void) extern unsigned int ftr_fixup_test4_orig[]; extern unsigned int ftr_fixup_test4_alt[]; extern unsigned int ftr_fixup_test4_expected[]; - int size = end_ftr_fixup_test4 - ftr_fixup_test4; + int size = 4 * (end_ftr_fixup_test4 - ftr_fixup_test4); unsigned long flag; /* Check a high-bit flag */ @@ -525,7 +562,7 @@ static void test_alternative_case_with_branch(void) extern unsigned int ftr_fixup_test5[]; extern unsigned int end_ftr_fixup_test5[]; extern unsigned int ftr_fixup_test5_expected[]; - int size = end_ftr_fixup_test5 - ftr_fixup_test5; + int size = 4 * (end_ftr_fixup_test5 - ftr_fixup_test5); check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0); } @@ -535,11 +572,21 @@ static void test_alternative_case_with_external_branch(void) extern unsigned int ftr_fixup_test6[]; extern unsigned int end_ftr_fixup_test6[]; extern unsigned int ftr_fixup_test6_expected[]; - int size = 
end_ftr_fixup_test6 - ftr_fixup_test6; + int size = 4 * (end_ftr_fixup_test6 - ftr_fixup_test6); check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0); } +static void test_alternative_case_with_branch_to_end(void) +{ + extern unsigned int ftr_fixup_test7[]; + extern unsigned int end_ftr_fixup_test7[]; + extern unsigned int ftr_fixup_test7_expected[]; + int size = 4 * (end_ftr_fixup_test7 - ftr_fixup_test7); + + check(memcmp(ftr_fixup_test7, ftr_fixup_test7_expected, size) == 0); +} + static void test_cpu_macros(void) { extern u8 ftr_fixup_test_FTR_macros[]; @@ -595,6 +642,7 @@ static int __init test_feature_fixups(void) test_alternative_case_too_small(); test_alternative_case_with_branch(); test_alternative_case_with_external_branch(); + test_alternative_case_with_branch_to_end(); test_cpu_macros(); test_fw_macros(); test_lwsync_macros(); diff --git a/arch/powerpc/lib/memcmp_32.S b/arch/powerpc/lib/memcmp_32.S new file mode 100644 index 000000000000..5010e376f7b8 --- /dev/null +++ b/arch/powerpc/lib/memcmp_32.S @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * memcmp for PowerPC32 + * + * Copyright (C) 1996 Paul Mackerras. + * + */ + +#include <asm/ppc_asm.h> +#include <asm/export.h> + + .text + +_GLOBAL(memcmp) + srawi. r7, r5, 2 /* Divide len by 4 */ + mr r6, r3 + beq- 3f + mtctr r7 + li r7, 0 +1: lwzx r3, r6, r7 + lwzx r0, r4, r7 + addi r7, r7, 4 + cmplw cr0, r3, r0 + bdnzt eq, 1b + bne 5f +3: andi. r3, r5, 3 + beqlr + cmplwi cr1, r3, 2 + blt- cr1, 4f + lhzx r3, r6, r7 + lhzx r0, r4, r7 + addi r7, r7, 2 + subf. r3, r0, r3 + beqlr cr1 + bnelr +4: lbzx r3, r6, r7 + lbzx r0, r4, r7 + subf. r3, r0, r3 + blr +5: li r3, 1 + bgtlr + li r3, -1 + blr +EXPORT_SYMBOL(memcmp) diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 34d68f1b1b40..d81568f783e5 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -1065,9 +1065,10 @@ static nokprobe_inline void do_popcnt(const struct pt_regs *regs, { unsigned long long out = v1; - out -= (out >> 1) & 0x5555555555555555; - out = (0x3333333333333333 & out) + (0x3333333333333333 & (out >> 2)); - out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0f; + out -= (out >> 1) & 0x5555555555555555ULL; + out = (0x3333333333333333ULL & out) + + (0x3333333333333333ULL & (out >> 2)); + out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL; if (size == 8) { /* popcntb */ op->val = out; @@ -1076,7 +1077,7 @@ static nokprobe_inline void do_popcnt(const struct pt_regs *regs, out += out >> 8; out += out >> 16; if (size == 32) { /* popcntw */ - op->val = out & 0x0000003f0000003f; + op->val = out & 0x0000003f0000003fULL; return; } @@ -1114,7 +1115,7 @@ static nokprobe_inline void do_prty(const struct pt_regs *regs, res ^= res >> 16; if (size == 32) { /* prtyw */ - op->val = res & 0x0000000100000001; + op->val = res & 0x0000000100000001ULL; return; } @@ -2544,6 +2545,15 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, #endif /* __powerpc64__ */ } + +#ifdef CONFIG_VSX + if ((GETTYPE(op->type) == LOAD_VSX || + GETTYPE(op->type) == STORE_VSX) && + !cpu_has_feature(CPU_FTR_VSX)) { + return -1; + } +#endif /* CONFIG_VSX */ + return 0; logical_done: @@ -2641,7 +2651,7 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op) unsigned long next_pc; next_pc = truncate_if_32bit(regs->msr, regs->nip + 4); - switch (op->type & INSTR_TYPE_MASK) { + switch (GETTYPE(op->type)) { case COMPUTE: if (op->type & SETREG) regs->gpr[op->reg] = op->val; @@ -2739,7 +2749,7 @@ int 
emulate_loadstore(struct pt_regs *regs, struct instruction_op *op) err = 0; size = GETSIZE(op->type); - type = op->type & INSTR_TYPE_MASK; + type = GETTYPE(op->type); cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE); ea = truncate_if_32bit(regs->msr, op->ea); @@ -3001,7 +3011,7 @@ int emulate_step(struct pt_regs *regs, unsigned int instr) } err = 0; - type = op.type & INSTR_TYPE_MASK; + type = GETTYPE(op.type); if (OP_IS_LOAD_STORE(type)) { err = emulate_loadstore(regs, &op); diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S index a787776822d8..4b41970e9ed8 100644 --- a/arch/powerpc/lib/string.S +++ b/arch/powerpc/lib/string.S @@ -8,10 +8,9 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ -#include <asm/processor.h> -#include <asm/errno.h> #include <asm/ppc_asm.h> #include <asm/export.h> +#include <asm/cache.h> .text @@ -23,7 +22,7 @@ _GLOBAL(strncpy) mtctr r5 addi r6,r3,-1 addi r4,r4,-1 - .balign 16 + .balign IFETCH_ALIGN_BYTES 1: lbzu r0,1(r4) cmpwi 0,r0,0 stbu r0,1(r6) @@ -43,7 +42,7 @@ _GLOBAL(strncmp) mtctr r5 addi r5,r3,-1 addi r4,r4,-1 - .balign 16 + .balign IFETCH_ALIGN_BYTES 1: lbzu r3,1(r5) cmpwi 1,r3,0 lbzu r0,1(r4) @@ -55,29 +54,12 @@ _GLOBAL(strncmp) blr EXPORT_SYMBOL(strncmp) -#ifdef CONFIG_PPC32 -_GLOBAL(memcmp) - PPC_LCMPI 0,r5,0 - beq- 2f - mtctr r5 - addi r6,r3,-1 - addi r4,r4,-1 -1: lbzu r3,1(r6) - lbzu r0,1(r4) - subf. r3,r0,r3 - bdnzt 2,1b - blr -2: li r3,0 - blr -EXPORT_SYMBOL(memcmp) -#endif - _GLOBAL(memchr) PPC_LCMPI 0,r5,0 beq- 2f mtctr r5 addi r3,r3,-1 - .balign 16 + .balign IFETCH_ALIGN_BYTES 1: lbzu r0,1(r3) cmpw 0,r0,r4 bdnzf 2,1b @@ -85,47 +67,3 @@ _GLOBAL(memchr) 2: li r3,0 blr EXPORT_SYMBOL(memchr) - -#ifdef CONFIG_PPC32 -_GLOBAL(__clear_user) - addi r6,r3,-4 - li r3,0 - li r5,0 - cmplwi 0,r4,4 - blt 7f - /* clear a single word */ -11: stwu r5,4(r6) - beqlr - /* clear word sized chunks */ - andi. r0,r6,3 - add r4,r0,r4 - subf r6,r0,r6 - srwi r0,r4,2 - andi. r4,r4,3 - mtctr r0 - bdz 7f -1: stwu r5,4(r6) - bdnz 1b - /* clear byte sized chunks */ -7: cmpwi 0,r4,0 - beqlr - mtctr r4 - addi r6,r6,3 -8: stbu r5,1(r6) - bdnz 8b - blr -90: mr r3,r4 - blr -91: mfctr r3 - slwi r3,r3,2 - add r3,r3,r4 - blr -92: mfctr r3 - blr - - EX_TABLE(11b, 90b) - EX_TABLE(1b, 91b) - EX_TABLE(8b, 92b) - -EXPORT_SYMBOL(__clear_user) -#endif diff --git a/arch/powerpc/lib/string_32.S b/arch/powerpc/lib/string_32.S new file mode 100644 index 000000000000..f69a6aab7bfb --- /dev/null +++ b/arch/powerpc/lib/string_32.S @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * String handling functions for PowerPC32 + * + * Copyright (C) 1996 Paul Mackerras. + * + */ + +#include <asm/ppc_asm.h> +#include <asm/export.h> +#include <asm/cache.h> + + .text + +CACHELINE_BYTES = L1_CACHE_BYTES +LG_CACHELINE_BYTES = L1_CACHE_SHIFT +CACHELINE_MASK = (L1_CACHE_BYTES-1) + +_GLOBAL(__clear_user) +/* + * Use dcbz on the complete cache lines in the destination + * to set them to zero. This requires that the destination + * area is cacheable. + */ + cmplwi cr0, r4, 4 + mr r10, r3 + li r3, 0 + blt 7f + +11: stw r3, 0(r10) + beqlr + andi. r0, r10, 3 + add r11, r0, r4 + subf r6, r0, r10 + + clrlwi r7, r6, 32 - LG_CACHELINE_BYTES + add r8, r7, r11 + srwi r9, r8, LG_CACHELINE_BYTES + addic. r9, r9, -1 /* total number of complete cachelines */ + ble 2f + xori r0, r7, CACHELINE_MASK & ~3 + srwi. 
r0, r0, 2 + beq 3f + mtctr r0 +4: stwu r3, 4(r6) + bdnz 4b +3: mtctr r9 + li r7, 4 +10: dcbz r7, r6 + addi r6, r6, CACHELINE_BYTES + bdnz 10b + clrlwi r11, r8, 32 - LG_CACHELINE_BYTES + addi r11, r11, 4 + +2: srwi r0, r11, 2 + mtctr r0 + bdz 6f +1: stwu r3, 4(r6) + bdnz 1b +6: andi. r11, r11, 3 + beqlr + mtctr r11 + addi r6, r6, 3 +8: stbu r3, 1(r6) + bdnz 8b + blr + +7: cmpwi cr0, r4, 0 + beqlr + mtctr r4 + addi r6, r10, -1 +9: stbu r3, 1(r6) + bdnz 9b + blr + +90: mr r3, r4 + blr +91: add r3, r10, r4 + subf r3, r6, r3 + blr + + EX_TABLE(11b, 90b) + EX_TABLE(4b, 91b) + EX_TABLE(10b, 91b) + EX_TABLE(1b, 91b) + EX_TABLE(8b, 91b) + EX_TABLE(9b, 91b) + +EXPORT_SYMBOL(__clear_user) diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index 2534c1447554..6c47daa61614 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -387,10 +387,14 @@ static void __init test_lxvd2x_stxvd2x(void) /* lxvd2x vsr39, r3, r4 */ stepped = emulate_step(&regs, TEST_LXVD2X(39, 3, 4)); - if (stepped == 1) + if (stepped == 1 && cpu_has_feature(CPU_FTR_VSX)) { show_result("lxvd2x", "PASS"); - else - show_result("lxvd2x", "FAIL"); + } else { + if (!cpu_has_feature(CPU_FTR_VSX)) + show_result("lxvd2x", "PASS (!CPU_FTR_VSX)"); + else + show_result("lxvd2x", "FAIL"); + } /*** stxvd2x ***/ @@ -404,10 +408,15 @@ static void __init test_lxvd2x_stxvd2x(void) stepped = emulate_step(&regs, TEST_STXVD2X(39, 3, 4)); if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] && - cached_b[2] == c.b[2] && cached_b[3] == c.b[3]) + cached_b[2] == c.b[2] && cached_b[3] == c.b[3] && + cpu_has_feature(CPU_FTR_VSX)) { show_result("stxvd2x", "PASS"); - else - show_result("stxvd2x", "FAIL"); + } else { + if (!cpu_has_feature(CPU_FTR_VSX)) + show_result("stxvd2x", "PASS (!CPU_FTR_VSX)"); + else + show_result("stxvd2x", "FAIL"); + } } #else static void __init test_lxvd2x_stxvd2x(void) diff --git a/arch/powerpc/lib/xor_vmx_glue.c b/arch/powerpc/lib/xor_vmx_glue.c index 6521fe5e8cef..dab2b6bfcf36 100644 --- a/arch/powerpc/lib/xor_vmx_glue.c +++ b/arch/powerpc/lib/xor_vmx_glue.c @@ -13,6 +13,7 @@ #include <linux/export.h> #include <linux/sched.h> #include <asm/switch_to.h> +#include <asm/xor_altivec.h> #include "xor_vmx.h" void xor_altivec_2(unsigned long bytes, unsigned long *v1_in, diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index c01d627e687a..b1ca7a0974e3 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -22,6 +22,7 @@ #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> +#include <linux/pagemap.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> @@ -66,37 +67,33 @@ static inline bool notify_page_fault(struct pt_regs *regs) } /* - * Check whether the instruction at regs->nip is a store using + * Check whether the instruction inst is a store using * an update addressing form which will update r1.
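 *
 * For example (illustrative, not part of the patch): the common stack-frame
 * prologue store
 *
 *	stwu	r1,-16(r1)
 *
 * has major opcode 37 (OP_STWU) with rA == 1, so it both stores through and
 * updates r1.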
*/ -static bool store_updates_sp(struct pt_regs *regs) +static bool store_updates_sp(unsigned int inst) { - unsigned int inst; - - if (get_user(inst, (unsigned int __user *)regs->nip)) - return false; /* check for 1 in the rA field */ if (((inst >> 16) & 0x1f) != 1) return false; /* check major opcode */ switch (inst >> 26) { - case 37: /* stwu */ - case 39: /* stbu */ - case 45: /* sthu */ - case 53: /* stfsu */ - case 55: /* stfdu */ + case OP_STWU: + case OP_STBU: + case OP_STHU: + case OP_STFSU: + case OP_STFDU: return true; - case 62: /* std or stdu */ + case OP_STD: /* std or stdu */ return (inst & 3) == 1; - case 31: + case OP_31: /* check minor opcode */ switch ((inst >> 1) & 0x3ff) { - case 181: /* stdux */ - case 183: /* stwux */ - case 247: /* stbux */ - case 439: /* sthux */ - case 695: /* stfsux */ - case 759: /* stfdux */ + case OP_31_XOP_STDUX: + case OP_31_XOP_STWUX: + case OP_31_XOP_STBUX: + case OP_31_XOP_STHUX: + case OP_31_XOP_STFSUX: + case OP_31_XOP_STFDUX: return true; } } @@ -168,6 +165,7 @@ static int do_sigbus(struct pt_regs *regs, unsigned long address, return SIGBUS; current->thread.trap_nr = BUS_ADRERR; + clear_siginfo(&info); info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRERR; @@ -234,8 +232,8 @@ static bool bad_kernel_fault(bool is_exec, unsigned long error_code, } static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, - struct vm_area_struct *vma, - bool store_update_sp) + struct vm_area_struct *vma, unsigned int flags, + bool *must_retry) { /* * N.B. The POWER/Open ABI allows programs to access up to @@ -247,6 +245,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, * expand to 1MB without further checks. */ if (address + 0x100000 < vma->vm_end) { + unsigned int __user *nip = (unsigned int __user *)regs->nip; /* get user regs even if this fault is in kernel mode */ struct pt_regs *uregs = current->thread.regs; if (uregs == NULL) @@ -264,8 +263,22 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, * between the last mapped region and the stack will * expand the stack rather than segfaulting. 
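 *
 * Put differently (illustrative summary of the check below): an access more
 * than 2048 bytes below the stack pointer is accepted as stack expansion only
 * when the faulting instruction is an update-form store pushing a new frame,
 * e.g. "stwu r1,-N(r1)"; if that instruction cannot be read without sleeping,
 * the decision is deferred through *must_retry instead.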
*/ - if (address + 2048 < uregs->gpr[1] && !store_update_sp) - return true; + if (address + 2048 >= uregs->gpr[1]) + return false; + + if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) && + access_ok(VERIFY_READ, nip, sizeof(*nip))) { + unsigned int inst; + int res; + + pagefault_disable(); + res = __get_user_inatomic(inst, nip); + pagefault_enable(); + if (!res) + return !store_updates_sp(inst); + *must_retry = true; + } + return true; } return false; } @@ -403,7 +416,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address, int is_user = user_mode(regs); int is_write = page_fault_is_write(error_code); int fault, major = 0; - bool store_update_sp = false; + bool must_retry = false; if (notify_page_fault(regs)) return 0; @@ -454,9 +467,6 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address, * can result in fault, which will cause a deadlock when called with * mmap_sem held */ - if (is_write && is_user) - store_update_sp = store_updates_sp(regs); - if (is_user) flags |= FAULT_FLAG_USER; if (is_write) @@ -503,8 +513,17 @@ retry: return bad_area(regs, address); /* The stack is being expanded, check if it's valid */ - if (unlikely(bad_stack_expansion(regs, address, vma, store_update_sp))) - return bad_area(regs, address); + if (unlikely(bad_stack_expansion(regs, address, vma, flags, + &must_retry))) { + if (!must_retry) + return bad_area(regs, address); + + up_read(&mm->mmap_sem); + if (fault_in_pages_readable((const char __user *)regs->nip, + sizeof(unsigned int))) + return bad_area_nosemaphore(regs, address); + goto retry; + } /* Try to expand it */ if (unlikely(expand_stack(vma, address))) diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 0bd3790d35df..8318716e5075 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -64,6 +64,7 @@ #include <asm/trace.h> #include <asm/ps3.h> #include <asm/pte-walk.h> +#include <asm/asm-prototypes.h> #ifdef DEBUG #define DBG(fmt...) 
udbg_printf(fmt) @@ -572,8 +573,10 @@ static void __init htab_scan_page_sizes(void) } #ifdef CONFIG_HUGETLB_PAGE - /* Reserve 16G huge page memory sections for huge pages */ - of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL); + if (!hugetlb_disabled) { + /* Reserve 16G huge page memory sections for huge pages */ + of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL); + } #endif /* CONFIG_HUGETLB_PAGE */ } @@ -1010,13 +1013,14 @@ void __init hash__early_init_mmu(void) */ __pte_frag_nr = H_PTE_FRAG_NR; __pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT; + __pmd_frag_nr = H_PMD_FRAG_NR; + __pmd_frag_size_shift = H_PMD_FRAG_SIZE_SHIFT; __pte_index_size = H_PTE_INDEX_SIZE; __pmd_index_size = H_PMD_INDEX_SIZE; __pud_index_size = H_PUD_INDEX_SIZE; __pgd_index_size = H_PGD_INDEX_SIZE; __pud_cache_index = H_PUD_CACHE_INDEX; - __pmd_cache_index = H_PMD_CACHE_INDEX; __pte_table_size = H_PTE_TABLE_SIZE; __pmd_table_size = H_PMD_TABLE_SIZE; __pud_table_size = H_PUD_TABLE_SIZE; diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index f1153f8254e3..7c5f479c5c00 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -35,6 +35,8 @@ #define PAGE_SHIFT_16M 24 #define PAGE_SHIFT_16G 34 +bool hugetlb_disabled = false; + unsigned int HPAGE_SHIFT; EXPORT_SYMBOL(HPAGE_SHIFT); @@ -50,7 +52,8 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long s } static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, - unsigned long address, unsigned pdshift, unsigned pshift) + unsigned long address, unsigned int pdshift, + unsigned int pshift, spinlock_t *ptl) { struct kmem_cache *cachep; pte_t *new; @@ -80,8 +83,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, */ smp_wmb(); - spin_lock(&mm->page_table_lock); - + spin_lock(ptl); /* * We have multiple higher-level entries that point to the same * actual pte location. Fill in each as we go and backtrack on error. 
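
For illustration, the locking rule these hugetlbpage.c hunks introduce can be summarised in a small C sketch, modelled on the 64-bit book3s branch of the huge_pte_alloc() hunk below. The helper name hugepd_lockptr() is invented here; pmd_lockptr()/pud_lockptr() and the shift comparisons are the ones the diff itself uses:

	/*
	 * Sketch: return the split page-table lock guarding the level where
	 * the hugepd entry lives; PGD-level entries keep using the mm-wide
	 * page_table_lock, exactly as __hugepte_alloc() now expects via its
	 * new ptl argument.
	 */
	static spinlock_t *hugepd_lockptr(struct mm_struct *mm,
					  unsigned int pshift,
					  pud_t *pu, pmd_t *pm)
	{
		if (pshift > PUD_SHIFT)
			return &mm->page_table_lock;	/* entry in the PGD */
		if (pshift > PMD_SHIFT)
			return pud_lockptr(mm, pu);	/* entry in a PUD page */
		return pmd_lockptr(mm, pm);		/* entry in a PMD page */
	}
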
@@ -111,7 +113,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, *hpdp = __hugepd(0); kmem_cache_free(cachep, new); } - spin_unlock(&mm->page_table_lock); + spin_unlock(ptl); return 0; } @@ -136,6 +138,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz hugepd_t *hpdp = NULL; unsigned pshift = __ffs(sz); unsigned pdshift = PGDIR_SHIFT; + spinlock_t *ptl; addr &= ~(sz-1); pg = pgd_offset(mm, addr); @@ -144,39 +147,46 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz if (pshift == PGDIR_SHIFT) /* 16GB huge page */ return (pte_t *) pg; - else if (pshift > PUD_SHIFT) + else if (pshift > PUD_SHIFT) { /* * We need to use hugepd table */ + ptl = &mm->page_table_lock; hpdp = (hugepd_t *)pg; - else { + } else { pdshift = PUD_SHIFT; pu = pud_alloc(mm, pg, addr); if (pshift == PUD_SHIFT) return (pte_t *)pu; - else if (pshift > PMD_SHIFT) + else if (pshift > PMD_SHIFT) { + ptl = pud_lockptr(mm, pu); hpdp = (hugepd_t *)pu; - else { + } else { pdshift = PMD_SHIFT; pm = pmd_alloc(mm, pu, addr); if (pshift == PMD_SHIFT) /* 16MB hugepage */ return (pte_t *)pm; - else + else { + ptl = pmd_lockptr(mm, pm); hpdp = (hugepd_t *)pm; + } } } #else if (pshift >= HUGEPD_PGD_SHIFT) { + ptl = &mm->page_table_lock; hpdp = (hugepd_t *)pg; } else { pdshift = PUD_SHIFT; pu = pud_alloc(mm, pg, addr); if (pshift >= HUGEPD_PUD_SHIFT) { + ptl = pud_lockptr(mm, pu); hpdp = (hugepd_t *)pu; } else { pdshift = PMD_SHIFT; pm = pmd_alloc(mm, pu, addr); + ptl = pmd_lockptr(mm, pm); hpdp = (hugepd_t *)pm; } } @@ -186,7 +196,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp)); - if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift)) + if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, + pdshift, pshift, ptl)) return NULL; return hugepte_offset(*hpdp, addr, pdshift); @@ -497,6 +508,10 @@ struct page *follow_huge_pd(struct vm_area_struct *vma, struct mm_struct *mm = vma->vm_mm; retry: + /* + * hugepage directory entries are protected by mm->page_table_lock + * Use this instead of huge_pte_lockptr + */ ptl = &mm->page_table_lock; spin_lock(ptl); @@ -651,6 +666,11 @@ static int __init hugetlbpage_init(void) { int psize; + if (hugetlb_disabled) { + pr_info("HugeTLB support is disabled!\n"); + return 0; + } + #if !defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_PPC_8xx) if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE)) return -ENODEV; diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index c3c39b02b2ba..8cecda4bd66a 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -509,8 +509,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, */ unsigned long access, trap; - if (radix_enabled()) + if (radix_enabled()) { + prefetch((void *)address); return; + } /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ if (!pte_young(*ptep) || address >= TASK_SIZE) diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c index 0ab297c4cfad..f84e14f23e50 100644 --- a/arch/powerpc/mm/mmu_context.c +++ b/arch/powerpc/mm/mmu_context.c @@ -57,8 +57,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, * in switch_slb(), and/or the store of paca->mm_ctx_id in * copy_mm_to_paca(). * - * On the read side the barrier is in pte_xchg(), which orders - * the store to the PTE vs the load of mm_cpumask. 
+ * On the other side, the barrier is in mm/tlb-radix.c for + * radix which orders earlier stores to clear the PTEs vs + * the load of mm_cpumask. pte_xchg() does the same + * thing for hash. * * This full barrier is needed by membarrier when switching * between processes after store to rq->curr, before user-space diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c index b75194dff64c..f3d4b4a0e561 100644 --- a/arch/powerpc/mm/mmu_context_book3s64.c +++ b/arch/powerpc/mm/mmu_context_book3s64.c @@ -159,9 +159,8 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) mm->context.id = index; -#ifdef CONFIG_PPC_64K_PAGES mm->context.pte_frag = NULL; -#endif + mm->context.pmd_frag = NULL; #ifdef CONFIG_SPAPR_TCE_IOMMU mm_iommu_init(mm); #endif @@ -192,17 +191,11 @@ static void destroy_contexts(mm_context_t *ctx) spin_unlock(&mmu_context_lock); } -#ifdef CONFIG_PPC_64K_PAGES -static void destroy_pagetable_page(struct mm_struct *mm) +static void pte_frag_destroy(void *pte_frag) { int count; - void *pte_frag; struct page *page; - pte_frag = mm->context.pte_frag; - if (!pte_frag) - return; - page = virt_to_page(pte_frag); /* drop all the pending references */ count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT; @@ -213,12 +206,34 @@ static void destroy_pagetable_page(struct mm_struct *mm) } } -#else -static inline void destroy_pagetable_page(struct mm_struct *mm) +static void pmd_frag_destroy(void *pmd_frag) { + int count; + struct page *page; + + page = virt_to_page(pmd_frag); + /* drop all the pending references */ + count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT; + /* We allow PMD_FRAG_NR fragments from a PMD page */ + if (page_ref_sub_and_test(page, PMD_FRAG_NR - count)) { + pgtable_pmd_page_dtor(page); + free_unref_page(page); + } +} + +static void destroy_pagetable_page(struct mm_struct *mm) +{ + void *frag; + + frag = mm->context.pte_frag; + if (frag) + pte_frag_destroy(frag); + + frag = mm->context.pmd_frag; + if (frag) + pmd_frag_destroy(frag); return; } -#endif void destroy_context(struct mm_struct *mm) { diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index be8f5c9d4d08..4d80239ef83c 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c @@ -54,16 +54,44 @@ #include "mmu_decl.h" -static unsigned int first_context, last_context; +/* + * The MPC8xx has only 16 contexts. We rotate through them on each task switch. + * A better way would be to keep track of tasks that own contexts, and implement + * an LRU usage. That way very active tasks don't always have to pay the TLB + * reload overhead. The kernel pages are mapped shared, so the kernel can run on + * behalf of any task that makes a kernel entry. Shared does not mean they are + * not protected, just that the ASID comparison is not performed. -- Dan + * + * The IBM4xx has 256 contexts, so we can just rotate through these as a way of + * "switching" contexts. If the TID of the TLB is zero, the PID/TID comparison + * is disabled, so we can use a TID of zero to represent all kernel pages as + * shared among all contexts. -- Dan + * + * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We should + * normally never have to steal though the facility is present if needed.
+ * -- BenH + */ +#define FIRST_CONTEXT 1 +#ifdef DEBUG_CLAMP_LAST_CONTEXT +#define LAST_CONTEXT DEBUG_CLAMP_LAST_CONTEXT +#elif defined(CONFIG_PPC_8xx) +#define LAST_CONTEXT 16 +#elif defined(CONFIG_PPC_47x) +#define LAST_CONTEXT 65535 +#else +#define LAST_CONTEXT 255 +#endif + static unsigned int next_context, nr_free_contexts; static unsigned long *context_map; +#ifdef CONFIG_SMP static unsigned long *stale_map[NR_CPUS]; +#endif static struct mm_struct **context_mm; static DEFINE_RAW_SPINLOCK(context_lock); -static bool no_selective_tlbil; #define CTX_MAP_SIZE \ - (sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1)) + (sizeof(unsigned long) * (LAST_CONTEXT / BITS_PER_LONG + 1)) /* Steal a context from a task that has one at the moment. @@ -87,7 +115,7 @@ static unsigned int steal_context_smp(unsigned int id) struct mm_struct *mm; unsigned int cpu, max, i; - max = last_context - first_context; + max = LAST_CONTEXT - FIRST_CONTEXT; /* Attempt to free next_context first and then loop until we manage */ while (max--) { @@ -99,8 +127,8 @@ static unsigned int steal_context_smp(unsigned int id) */ if (mm->context.active) { id++; - if (id > last_context) - id = first_context; + if (id > LAST_CONTEXT) + id = FIRST_CONTEXT; continue; } pr_hardcont(" | steal %d from 0x%p", id, mm); @@ -139,10 +167,12 @@ static unsigned int steal_context_smp(unsigned int id) static unsigned int steal_all_contexts(void) { struct mm_struct *mm; +#ifdef CONFIG_SMP int cpu = smp_processor_id(); +#endif unsigned int id; - for (id = first_context; id <= last_context; id++) { + for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) { /* Pick up the victim mm */ mm = context_mm[id]; @@ -150,22 +180,24 @@ static unsigned int steal_all_contexts(void) /* Mark this mm as having no context anymore */ mm->context.id = MMU_NO_CONTEXT; - if (id != first_context) { + if (id != FIRST_CONTEXT) { context_mm[id] = NULL; __clear_bit(id, context_map); #ifdef DEBUG_MAP_CONSISTENCY mm->context.active = 0; #endif } +#ifdef CONFIG_SMP __clear_bit(id, stale_map[cpu]); +#endif } /* Flush the TLB for all contexts (not to be used on SMP) */ _tlbil_all(); - nr_free_contexts = last_context - first_context; + nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT; - return first_context; + return FIRST_CONTEXT; } /* Note that this will also be called on SMP if all other CPUs are @@ -176,7 +208,9 @@ static unsigned int steal_all_contexts(void) static unsigned int steal_context_up(unsigned int id) { struct mm_struct *mm; +#ifdef CONFIG_SMP int cpu = smp_processor_id(); +#endif /* Pick up the victim mm */ mm = context_mm[id]; @@ -190,7 +224,9 @@ static unsigned int steal_context_up(unsigned int id) mm->context.id = MMU_NO_CONTEXT; /* XXX This clear should ultimately be part of local_flush_tlb_mm */ +#ifdef CONFIG_SMP __clear_bit(id, stale_map[cpu]); +#endif return id; } @@ -201,7 +237,7 @@ static void context_check_map(void) unsigned int id, nrf, nact; nrf = nact = 0; - for (id = first_context; id <= last_context; id++) { + for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) { int used = test_bit(id, context_map); if (!used) nrf++; @@ -219,7 +255,7 @@ static void context_check_map(void) if (nact > num_online_cpus()) pr_err("MMU: More active contexts than CPUs ! 
(%d vs %d)\n", nact, num_online_cpus()); - if (first_context > 0 && !test_bit(0, context_map)) + if (FIRST_CONTEXT > 0 && !test_bit(0, context_map)) pr_err("MMU: Context 0 has been freed !!!\n"); } #else @@ -229,7 +265,10 @@ static void context_check_map(void) { } void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { - unsigned int i, id, cpu = smp_processor_id(); + unsigned int id; +#ifdef CONFIG_SMP + unsigned int i, cpu = smp_processor_id(); +#endif unsigned long *map; /* No lockless fast path .. yet */ @@ -263,8 +302,8 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, /* We really don't have a context, let's try to acquire one */ id = next_context; - if (id > last_context) - id = first_context; + if (id > LAST_CONTEXT) + id = FIRST_CONTEXT; map = context_map; /* No more free contexts, let's try to steal one */ @@ -277,7 +316,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, goto stolen; } #endif /* CONFIG_SMP */ - if (no_selective_tlbil) + if (IS_ENABLED(CONFIG_PPC_8xx)) id = steal_all_contexts(); else id = steal_context_up(id); @@ -287,9 +326,9 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, /* We know there's at least one free context, try to find it */ while (__test_and_set_bit(id, map)) { - id = find_next_zero_bit(map, last_context+1, id); - if (id > last_context) - id = first_context; + id = find_next_zero_bit(map, LAST_CONTEXT+1, id); + if (id > LAST_CONTEXT) + id = FIRST_CONTEXT; } stolen: next_context = id + 1; @@ -303,6 +342,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, /* If that context got marked stale on this CPU, then flush the * local TLB for it and unmark it before we use it */ +#ifdef CONFIG_SMP if (test_bit(id, stale_map[cpu])) { pr_hardcont(" | stale flush %d [%d..%d]", id, cpu_first_thread_sibling(cpu), @@ -317,6 +357,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, __clear_bit(id, stale_map[i]); } } +#endif /* Flick the MMU and release lock */ pr_hardcont(" -> %d\n", id); @@ -418,51 +459,11 @@ void __init mmu_context_init(void) init_mm.context.active = NR_CPUS; /* - * The MPC8xx has only 16 contexts. We rotate through them on each - * task switch. A better way would be to keep track of tasks that - * own contexts, and implement an LRU usage. That way very active - * tasks don't always have to pay the TLB reload overhead. The - * kernel pages are mapped shared, so the kernel can run on behalf - * of any task that makes a kernel entry. Shared does not mean they - * are not protected, just that the ASID comparison is not performed. - * -- Dan - * - * The IBM4xx has 256 contexts, so we can just rotate through these - * as a way of "switching" contexts. If the TID of the TLB is zero, - * the PID/TID comparison is disabled, so we can use a TID of zero - * to represent all kernel pages as shared among all contexts. - * -- Dan - * - * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We - * should normally never have to steal though the facility is - * present if needed. 
- * -- BenH - */ - if (mmu_has_feature(MMU_FTR_TYPE_8xx)) { - first_context = 1; - last_context = 16; - no_selective_tlbil = true; - } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) { - first_context = 1; - last_context = 65535; - no_selective_tlbil = false; - } else { - first_context = 1; - last_context = 255; - no_selective_tlbil = false; - } - -#ifdef DEBUG_CLAMP_LAST_CONTEXT - last_context = DEBUG_CLAMP_LAST_CONTEXT; -#endif - /* * Allocate the maps used by context management */ context_map = memblock_virt_alloc(CTX_MAP_SIZE, 0); - context_mm = memblock_virt_alloc(sizeof(void *) * (last_context + 1), 0); -#ifndef CONFIG_SMP - stale_map[0] = memblock_virt_alloc(CTX_MAP_SIZE, 0); -#else + context_mm = memblock_virt_alloc(sizeof(void *) * (LAST_CONTEXT + 1), 0); +#ifdef CONFIG_SMP stale_map[boot_cpuid] = memblock_virt_alloc(CTX_MAP_SIZE, 0); cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE, @@ -472,17 +473,17 @@ void __init mmu_context_init(void) printk(KERN_INFO "MMU: Allocated %zu bytes of context maps for %d contexts\n", - 2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)), - last_context - first_context + 1); + 2 * CTX_MAP_SIZE + (sizeof(void *) * (LAST_CONTEXT + 1)), + LAST_CONTEXT - FIRST_CONTEXT + 1); /* * Some processors have too few contexts to reserve one for * init_mm, and require using context 0 for a normal task. * Other processors reserve the use of context zero for the kernel. - * This code assumes first_context < 32. + * This code assumes FIRST_CONTEXT < 32. */ - context_map[0] = (1 << first_context) - 1; - next_context = first_context; - nr_free_contexts = last_context - first_context + 1; + context_map[0] = (1 << FIRST_CONTEXT) - 1; + next_context = FIRST_CONTEXT; + nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1; } diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index 518518fb7c45..c1f4ca45c93a 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -9,14 +9,22 @@ #include <linux/sched.h> #include <linux/mm_types.h> +#include <linux/memblock.h> #include <misc/cxl-base.h> #include <asm/pgalloc.h> #include <asm/tlb.h> +#include <asm/trace.h> +#include <asm/powernv.h> #include "mmu_decl.h" #include <trace/events/thp.h> +unsigned long __pmd_frag_nr; +EXPORT_SYMBOL(__pmd_frag_nr); +unsigned long __pmd_frag_size_shift; +EXPORT_SYMBOL(__pmd_frag_size_shift); + int (*register_process_table)(unsigned long base, unsigned long page_size, unsigned long tbl_size); @@ -34,13 +42,16 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, int changed; #ifdef CONFIG_DEBUG_VM WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp)); - assert_spin_locked(&vma->vm_mm->page_table_lock); + assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp)); #endif changed = !pmd_same(*(pmdp), entry); if (changed) { - __ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp), - pmd_pte(entry), address); - flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + /* + * We can use MMU_PAGE_2M here, because only the radix + * path looks at the psize.
+ */ + __ptep_set_access_flags(vma, pmdp_ptep(pmdp), + pmd_pte(entry), address, MMU_PAGE_2M); } return changed; } @@ -59,7 +70,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, { #ifdef CONFIG_DEBUG_VM WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp))); - assert_spin_locked(&mm->page_table_lock); + assert_spin_locked(pmd_lockptr(mm, pmdp)); WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd))); #endif trace_hugepage_set_pmd(addr, pmd_val(pmd)); @@ -141,7 +152,8 @@ pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd) { - return; + if (radix_enabled()) + prefetch((void *)addr); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ @@ -171,3 +183,258 @@ int __meminit remove_section_mapping(unsigned long start, unsigned long end) return hash__remove_section_mapping(start, end); } #endif /* CONFIG_MEMORY_HOTPLUG */ + +void __init mmu_partition_table_init(void) +{ + unsigned long patb_size = 1UL << PATB_SIZE_SHIFT; + unsigned long ptcr; + + BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large."); + partition_tb = __va(memblock_alloc_base(patb_size, patb_size, + MEMBLOCK_ALLOC_ANYWHERE)); + + /* Initialize the Partition Table with no entries */ + memset((void *)partition_tb, 0, patb_size); + + /* + * Update the partition table control register with the + * table base and its 64K size. + */ + ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12); + mtspr(SPRN_PTCR, ptcr); + powernv_set_nmmu_ptcr(ptcr); +} + +void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0, + unsigned long dw1) +{ + unsigned long old = be64_to_cpu(partition_tb[lpid].patb0); + + partition_tb[lpid].patb0 = cpu_to_be64(dw0); + partition_tb[lpid].patb1 = cpu_to_be64(dw1); + + /* + * Global flush of TLBs and partition table caches for this lpid. + * The type of flush (hash or radix) depends on what the previous + * use of this partition ID was, not the new use. + */ + asm volatile("ptesync" : : : "memory"); + if (old & PATB_HR) { + asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : : + "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); + asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : : + "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); + trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1); + } else { + asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : : + "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); + trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0); + } + /* do we need a fixup here? */ + asm volatile("eieio; tlbsync; ptesync" : : : "memory"); +} +EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry); + +static pmd_t *get_pmd_from_cache(struct mm_struct *mm) +{ + void *pmd_frag, *ret; + + spin_lock(&mm->page_table_lock); + ret = mm->context.pmd_frag; + if (ret) { + pmd_frag = ret + PMD_FRAG_SIZE; + /* + * If we have taken up all the fragments, mark the PMD page NULL + */ + if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0) + pmd_frag = NULL; + mm->context.pmd_frag = pmd_frag; + } + spin_unlock(&mm->page_table_lock); + return (pmd_t *)ret; +} + +static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm) +{ + void *ret = NULL; + struct page *page; + gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO; + + if (mm == &init_mm) + gfp &= ~__GFP_ACCOUNT; + page = alloc_page(gfp); + if (!page) + return NULL; + if (!pgtable_pmd_page_ctor(page)) { + __free_pages(page, 0); + return NULL; + } + + ret = page_address(page); + /* + * if we support only one fragment, just return the + * allocated page.
+ */ + if (PMD_FRAG_NR == 1) + return ret; + + spin_lock(&mm->page_table_lock); + /* + * If we find pgtable_page set, we return + * the allocated page with single fragment + * count. + */ + if (likely(!mm->context.pmd_frag)) { + set_page_count(page, PMD_FRAG_NR); + mm->context.pmd_frag = ret + PMD_FRAG_SIZE; + } + spin_unlock(&mm->page_table_lock); + + return (pmd_t *)ret; +} + +pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr) +{ + pmd_t *pmd; + + pmd = get_pmd_from_cache(mm); + if (pmd) + return pmd; + + return __alloc_for_pmdcache(mm); +} + +void pmd_fragment_free(unsigned long *pmd) +{ + struct page *page = virt_to_page(pmd); + + if (put_page_testzero(page)) { + pgtable_pmd_page_dtor(page); + free_unref_page(page); + } +} + +static pte_t *get_pte_from_cache(struct mm_struct *mm) +{ + void *pte_frag, *ret; + + spin_lock(&mm->page_table_lock); + ret = mm->context.pte_frag; + if (ret) { + pte_frag = ret + PTE_FRAG_SIZE; + /* + * If we have taken up all the fragments, mark the PTE page NULL + */ + if (((unsigned long)pte_frag & ~PAGE_MASK) == 0) + pte_frag = NULL; + mm->context.pte_frag = pte_frag; + } + spin_unlock(&mm->page_table_lock); + return (pte_t *)ret; +} + +static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel) +{ + void *ret = NULL; + struct page *page; + + if (!kernel) { + page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT); + if (!page) + return NULL; + if (!pgtable_page_ctor(page)) { + __free_page(page); + return NULL; + } + } else { + page = alloc_page(PGALLOC_GFP); + if (!page) + return NULL; + } + + + ret = page_address(page); + /* + * if we support only one fragment, just return the + * allocated page. + */ + if (PTE_FRAG_NR == 1) + return ret; + spin_lock(&mm->page_table_lock); + /* + * If we find pgtable_page set, we return + * the allocated page with single fragment + * count.
+ */ + if (likely(!mm->context.pte_frag)) { + set_page_count(page, PTE_FRAG_NR); + mm->context.pte_frag = ret + PTE_FRAG_SIZE; + } + spin_unlock(&mm->page_table_lock); + + return (pte_t *)ret; +} + +pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel) +{ + pte_t *pte; + + pte = get_pte_from_cache(mm); + if (pte) + return pte; + + return __alloc_for_ptecache(mm, kernel); +} + +void pte_fragment_free(unsigned long *table, int kernel) +{ + struct page *page = virt_to_page(table); + + if (put_page_testzero(page)) { + if (!kernel) + pgtable_page_dtor(page); + free_unref_page(page); + } +} + +static inline void pgtable_free(void *table, int index) +{ + switch (index) { + case PTE_INDEX: + pte_fragment_free(table, 0); + break; + case PMD_INDEX: + pmd_fragment_free(table); + break; + case PUD_INDEX: + kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table); + break; + /* We don't free pgd table via RCU callback */ + default: + BUG(); + } +} + +#ifdef CONFIG_SMP +void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index) +{ + unsigned long pgf = (unsigned long)table; + + BUG_ON(index > MAX_PGTABLE_INDEX_SIZE); + pgf |= index; + tlb_remove_table(tlb, (void *)pgf); +} + +void __tlb_remove_table(void *_table) +{ + void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE); + unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE; + + return pgtable_free(table, index); +} +#else +void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index) +{ + return pgtable_free(table, index); +} +#endif diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c index 199bfda5f0d9..692bfc9e372c 100644 --- a/arch/powerpc/mm/pgtable-hash64.c +++ b/arch/powerpc/mm/pgtable-hash64.c @@ -193,7 +193,7 @@ unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr #ifdef CONFIG_DEBUG_VM WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp)); - assert_spin_locked(&mm->page_table_lock); + assert_spin_locked(pmd_lockptr(mm, pmdp)); #endif __asm__ __volatile__( @@ -265,7 +265,8 @@ void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, pgtable_t pgtable) { pgtable_t *pgtable_slot; - assert_spin_locked(&mm->page_table_lock); + + assert_spin_locked(pmd_lockptr(mm, pmdp)); /* * we store the pgtable in the second half of PMD */ @@ -285,7 +286,8 @@ pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) pgtable_t pgtable; pgtable_t *pgtable_slot; - assert_spin_locked(&mm->page_table_lock); + assert_spin_locked(pmd_lockptr(mm, pmdp)); + pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD; pgtable = *pgtable_slot; /* diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index f1891e215e39..96f68c5aa1f5 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -617,7 +617,6 @@ void __init radix__early_init_mmu(void) __pud_index_size = RADIX_PUD_INDEX_SIZE; __pgd_index_size = RADIX_PGD_INDEX_SIZE; __pud_cache_index = RADIX_PUD_INDEX_SIZE; - __pmd_cache_index = RADIX_PMD_INDEX_SIZE; __pte_table_size = RADIX_PTE_TABLE_SIZE; __pmd_table_size = RADIX_PMD_TABLE_SIZE; __pud_table_size = RADIX_PUD_TABLE_SIZE; @@ -640,6 +639,8 @@ void __init radix__early_init_mmu(void) #endif __pte_frag_nr = RADIX_PTE_FRAG_NR; __pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT; + __pmd_frag_nr = RADIX_PMD_FRAG_NR; + __pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT; if (!firmware_has_feature(FW_FEATURE_LPAR)) { radix_init_native(); @@ -975,7 +976,7 @@ unsigned 
long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long add #ifdef CONFIG_DEBUG_VM WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp)); - assert_spin_locked(&mm->page_table_lock); + assert_spin_locked(pmd_lockptr(mm, pmdp)); #endif old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1); @@ -1083,3 +1084,36 @@ int radix__has_transparent_hugepage(void) return 0; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, + pte_t entry, unsigned long address, int psize) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | + _PAGE_RW | _PAGE_EXEC); + /* + * To avoid NMMU hang while relaxing access, we need to mark + * the pte invalid in between. + */ + if (cpu_has_feature(CPU_FTR_POWER9_DD1) || + atomic_read(&mm->context.copros) > 0) { + unsigned long old_pte, new_pte; + + old_pte = __radix_pte_update(ptep, ~0, 0); + /* + * new value of pte + */ + new_pte = old_pte | set; + radix__flush_tlb_page_psize(mm, address, psize); + __radix_pte_update(ptep, 0, new_pte); + } else { + __radix_pte_update(ptep, 0, set); + /* + * Book3S does not require a TLB flush when relaxing access + * restrictions when the address space is not attached to a + * NMMU, because the core MMU will reload the pte after taking + * an access fault, which is defined by the architecture. + */ + } + /* See ptesync comment in radix__set_pte_at */ +} diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 9f361ae571e9..d71c7777669c 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -221,14 +221,55 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, entry = set_access_flags_filter(entry, vma, dirty); changed = !pte_same(*(ptep), entry); if (changed) { - if (!is_vm_hugetlb_page(vma)) - assert_pte_locked(vma->vm_mm, address); - __ptep_set_access_flags(vma->vm_mm, ptep, entry, address); - flush_tlb_page(vma, address); + assert_pte_locked(vma->vm_mm, address); + __ptep_set_access_flags(vma, ptep, entry, + address, mmu_virtual_psize); } return changed; } +#ifdef CONFIG_HUGETLB_PAGE +extern int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ +#ifdef HUGETLB_NEED_PRELOAD + /* + * The "return 1" forces a call of update_mmu_cache, which will write a + * TLB entry. Without this, platforms that don't do a write of the TLB + * entry in the TLB miss handler asm will fault ad infinitum. + */ + ptep_set_access_flags(vma, addr, ptep, pte, dirty); + return 1; +#else + int changed, psize; + + pte = set_access_flags_filter(pte, vma, dirty); + changed = !pte_same(*(ptep), pte); + if (changed) { + +#ifdef CONFIG_PPC_BOOK3S_64 + struct hstate *h = hstate_vma(vma); + + psize = hstate_get_psize(h); +#ifdef CONFIG_DEBUG_VM + assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep)); +#endif + +#else + /* + * Not used on non book3s64 platforms. But 8xx + * can possibly use tsize derived from hstate.
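 *
 * (psize here is an MMU_PAGE_* index: on book3s64 it is taken from the
 * hugepage hstate via hstate_get_psize() above, while platforms that
 * ignore it in __ptep_set_access_flags() just see the 0 set below.)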
+ */ + psize = 0; +#endif + __ptep_set_access_flags(vma, ptep, pte, addr, psize); + } + return changed; +#endif +} +#endif /* CONFIG_HUGETLB_PAGE */ + #ifdef CONFIG_DEBUG_VM void assert_pte_locked(struct mm_struct *mm, unsigned long addr) { diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 9bf659d5078c..53e9eeecd5d4 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -33,7 +33,6 @@ #include <linux/swap.h> #include <linux/stddef.h> #include <linux/vmalloc.h> -#include <linux/memblock.h> #include <linux/slab.h> #include <linux/hugetlb.h> @@ -47,13 +46,11 @@ #include <asm/smp.h> #include <asm/machdep.h> #include <asm/tlb.h> -#include <asm/trace.h> #include <asm/processor.h> #include <asm/cputable.h> #include <asm/sections.h> #include <asm/firmware.h> #include <asm/dma.h> -#include <asm/powernv.h> #include "mmu_decl.h" @@ -75,8 +72,6 @@ unsigned long __pud_index_size; EXPORT_SYMBOL(__pud_index_size); unsigned long __pgd_index_size; EXPORT_SYMBOL(__pgd_index_size); -unsigned long __pmd_cache_index; -EXPORT_SYMBOL(__pmd_cache_index); unsigned long __pud_cache_index; EXPORT_SYMBOL(__pud_cache_index); unsigned long __pte_table_size; @@ -316,172 +311,6 @@ struct page *pmd_page(pmd_t pmd) return virt_to_page(pmd_page_vaddr(pmd)); } -#ifdef CONFIG_PPC_64K_PAGES -static pte_t *get_from_cache(struct mm_struct *mm) -{ - void *pte_frag, *ret; - - spin_lock(&mm->page_table_lock); - ret = mm->context.pte_frag; - if (ret) { - pte_frag = ret + PTE_FRAG_SIZE; - /* - * If we have taken up all the fragments mark PTE page NULL - */ - if (((unsigned long)pte_frag & ~PAGE_MASK) == 0) - pte_frag = NULL; - mm->context.pte_frag = pte_frag; - } - spin_unlock(&mm->page_table_lock); - return (pte_t *)ret; -} - -static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel) -{ - void *ret = NULL; - struct page *page; - - if (!kernel) { - page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT); - if (!page) - return NULL; - if (!pgtable_page_ctor(page)) { - __free_page(page); - return NULL; - } - } else { - page = alloc_page(PGALLOC_GFP); - if (!page) - return NULL; - } - - ret = page_address(page); - spin_lock(&mm->page_table_lock); - /* - * If we find pgtable_page set, we return - * the allocated page with single fragement - * count. 
- */ - if (likely(!mm->context.pte_frag)) { - set_page_count(page, PTE_FRAG_NR); - mm->context.pte_frag = ret + PTE_FRAG_SIZE; - } - spin_unlock(&mm->page_table_lock); - - return (pte_t *)ret; -} - -pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel) -{ - pte_t *pte; - - pte = get_from_cache(mm); - if (pte) - return pte; - - return __alloc_for_cache(mm, kernel); -} -#endif /* CONFIG_PPC_64K_PAGES */ - -void pte_fragment_free(unsigned long *table, int kernel) -{ - struct page *page = virt_to_page(table); - if (put_page_testzero(page)) { - if (!kernel) - pgtable_page_dtor(page); - free_unref_page(page); - } -} - -#ifdef CONFIG_SMP -void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) -{ - unsigned long pgf = (unsigned long)table; - - BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE); - pgf |= shift; - tlb_remove_table(tlb, (void *)pgf); -} - -void __tlb_remove_table(void *_table) -{ - void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE); - unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE; - - if (!shift) - /* PTE page needs special handling */ - pte_fragment_free(table, 0); - else { - BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE); - kmem_cache_free(PGT_CACHE(shift), table); - } -} -#else -void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) -{ - if (!shift) { - /* PTE page needs special handling */ - pte_fragment_free(table, 0); - } else { - BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE); - kmem_cache_free(PGT_CACHE(shift), table); - } -} -#endif - -#ifdef CONFIG_PPC_BOOK3S_64 -void __init mmu_partition_table_init(void) -{ - unsigned long patb_size = 1UL << PATB_SIZE_SHIFT; - unsigned long ptcr; - - BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large."); - partition_tb = __va(memblock_alloc_base(patb_size, patb_size, - MEMBLOCK_ALLOC_ANYWHERE)); - - /* Initialize the Partition Table with no entries */ - memset((void *)partition_tb, 0, patb_size); - - /* - * update partition table control register, - * 64 K size. - */ - ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12); - mtspr(SPRN_PTCR, ptcr); - powernv_set_nmmu_ptcr(ptcr); -} - -void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0, - unsigned long dw1) -{ - unsigned long old = be64_to_cpu(partition_tb[lpid].patb0); - - partition_tb[lpid].patb0 = cpu_to_be64(dw0); - partition_tb[lpid].patb1 = cpu_to_be64(dw1); - - /* - * Global flush of TLBs and partition table caches for this lpid. - * The type of flush (hash or radix) depends on what the previous - * use of this partition ID was, not the new use. 
- */ - asm volatile("ptesync" : : : "memory"); - if (old & PATB_HR) { - asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : : - "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); - asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : : - "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); - trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1); - } else { - asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : : - "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); - trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0); - } - /* do we need fixup here ?*/ - asm volatile("eieio; tlbsync; ptesync" : : : "memory"); -} -EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry); -#endif /* CONFIG_PPC_BOOK3S_64 */ - #ifdef CONFIG_STRICT_KERNEL_RWX void mark_rodata_ro(void) { diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c index 0eafdf01edc7..e6f500fabf5e 100644 --- a/arch/powerpc/mm/pkeys.c +++ b/arch/powerpc/mm/pkeys.c @@ -383,9 +383,9 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, { /* * If the currently associated pkey is execute-only, but the requested - * protection requires read or write, move it back to the default pkey. + * protection is not execute-only, move it back to the default pkey. */ - if (vma_is_pkey_exec_only(vma) && (prot & (PROT_READ | PROT_WRITE))) + if (vma_is_pkey_exec_only(vma) && (prot != PROT_EXEC)) return 0; /* diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c index 2a049fb8523d..bea6c544e38f 100644 --- a/arch/powerpc/mm/ppc_mmu_32.c +++ b/arch/powerpc/mm/ppc_mmu_32.c @@ -167,7 +167,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, { pmd_t *pmd; - if (Hash == 0) + if (!Hash) return; pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea); if (!pmd_none(*pmd)) diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 66577cc66dc9..cb796724a6fc 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -63,14 +63,14 @@ static inline void slb_shadow_update(unsigned long ea, int ssize, * updating it. No write barriers are needed here, provided * we only update the current CPU's SLB shadow buffer. */ - p->save_area[index].esid = 0; - p->save_area[index].vsid = cpu_to_be64(mk_vsid_data(ea, ssize, flags)); - p->save_area[index].esid = cpu_to_be64(mk_esid_data(ea, ssize, index)); + WRITE_ONCE(p->save_area[index].esid, 0); + WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags))); + WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index))); } static inline void slb_shadow_clear(enum slb_index index) { - get_slb_shadow()->save_area[index].esid = 0; + WRITE_ONCE(get_slb_shadow()->save_area[index].esid, 0); } static inline void create_shadowed_slbe(unsigned long ea, int ssize, @@ -352,6 +352,14 @@ static void insert_slb_entry(unsigned long vsid, unsigned long ea, /* * We are irq disabled, hence should be safe to access PACA. */ + VM_WARN_ON(!irqs_disabled()); + + /* + * We can't take a PMU exception in the following code, so hard + * disable interrupts. + */ + hard_irq_disable(); + index = get_paca()->stab_rr; /* @@ -369,6 +377,11 @@ static void insert_slb_entry(unsigned long vsid, unsigned long ea, ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT); esid_data = mk_esid_data(ea, ssize, index); + /* + * No need for an isync before or after this slbmte. The exception + * we enter with and the rfid we exit with are context synchronizing. + * Also we only handle user segments here. 
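 *
 * (For reference: slbmte takes the VSID half of the entry in RS and the
 * ESID/index half in RB, which is why vsid_data and esid_data are built
 * separately above.)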
+ */ asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data) : "memory"); diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c index f14a07c2fb90..75cb646a79c3 100644 --- a/arch/powerpc/mm/subpage-prot.c +++ b/arch/powerpc/mm/subpage-prot.c @@ -13,6 +13,7 @@ #include <linux/types.h> #include <linux/mm.h> #include <linux/hugetlb.h> +#include <linux/syscalls.h> #include <asm/pgtable.h> #include <linux/uaccess.h> @@ -185,7 +186,11 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr, * in a 2-bit field won't allow writes to a page that is otherwise * write-protected. */ -long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Wattribute-alias" +SYSCALL_DEFINE3(subpage_prot, unsigned long, addr, + unsigned long, len, u32 __user *, map) { struct mm_struct *mm = current->mm; struct subpage_prot_table *spt = &mm->context.spt; @@ -267,3 +272,4 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map) up_write(&mm->mmap_sem); return err; } +#pragma GCC diagnostic pop diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index a5d7309c2d05..67a6e86d3e7e 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -12,6 +12,8 @@ #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/memblock.h> +#include <linux/mmu_context.h> +#include <linux/sched/mm.h> #include <asm/ppc-opcode.h> #include <asm/tlb.h> @@ -118,6 +120,53 @@ static inline void __tlbie_pid(unsigned long pid, unsigned long ric) trace_tlbie(0, 0, rb, rs, ric, prs, r); } +static inline void __tlbiel_lpid(unsigned long lpid, int set, + unsigned long ric) +{ + unsigned long rb,rs,prs,r; + + rb = PPC_BIT(52); /* IS = 2 */ + rb |= set << PPC_BITLSHIFT(51); + rs = 0; /* LPID comes from LPIDR */ + prs = 0; /* partition scoped */ + r = 1; /* radix format */ + + asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) + : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); + trace_tlbie(lpid, 1, rb, rs, ric, prs, r); +} + +static inline void __tlbie_lpid(unsigned long lpid, unsigned long ric) +{ + unsigned long rb,rs,prs,r; + + rb = PPC_BIT(52); /* IS = 2 */ + rs = lpid; + prs = 0; /* partition scoped */ + r = 1; /* radix format */ + + asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) + : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); + trace_tlbie(lpid, 0, rb, rs, ric, prs, r); +} + +static inline void __tlbiel_lpid_guest(unsigned long lpid, int set, + unsigned long ric) +{ + unsigned long rb,rs,prs,r; + + rb = PPC_BIT(52); /* IS = 2 */ + rb |= set << PPC_BITLSHIFT(51); + rs = 0; /* LPID comes from LPIDR */ + prs = 1; /* process scoped */ + r = 1; /* radix format */ + + asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) + : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); + trace_tlbie(lpid, 1, rb, rs, ric, prs, r); +} + + static inline void __tlbiel_va(unsigned long va, unsigned long pid, unsigned long ap, unsigned long ric) { @@ -150,6 +199,22 @@ static inline void __tlbie_va(unsigned long va, unsigned long pid, trace_tlbie(0, 0, rb, rs, ric, prs, r); } +static inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid, + unsigned long ap, unsigned long ric) +{ + unsigned long rb,rs,prs,r; + + rb = va & ~(PPC_BITMASK(52, 63)); + rb |= ap << PPC_BITLSHIFT(58); + rs = lpid; + prs = 0; /* partition scoped */ + r = 1; /* radix format */ + + asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, 
%1) + : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); + trace_tlbie(lpid, 0, rb, rs, ric, prs, r); +} + static inline void fixup_tlbie(void) { unsigned long pid = 0; @@ -161,6 +226,16 @@ static inline void fixup_tlbie(void) } } +static inline void fixup_tlbie_lpid(unsigned long lpid) +{ + unsigned long va = ((1UL << 52) - 1); + + if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) { + asm volatile("ptesync": : :"memory"); + __tlbie_lpid_va(va, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB); + } +} + /* * We use 128 set in radix mode and 256 set in hpt mode. */ @@ -214,6 +289,86 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric) asm volatile("eieio; tlbsync; ptesync": : :"memory"); } +static inline void _tlbiel_lpid(unsigned long lpid, unsigned long ric) +{ + int set; + + VM_BUG_ON(mfspr(SPRN_LPID) != lpid); + + asm volatile("ptesync": : :"memory"); + + /* + * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL, + * also flush the entire Page Walk Cache. + */ + __tlbiel_lpid(lpid, 0, ric); + + /* For PWC, only one flush is needed */ + if (ric == RIC_FLUSH_PWC) { + asm volatile("ptesync": : :"memory"); + return; + } + + /* For the remaining sets, just flush the TLB */ + for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++) + __tlbiel_lpid(lpid, set, RIC_FLUSH_TLB); + + asm volatile("ptesync": : :"memory"); + asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory"); +} + +static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric) +{ + asm volatile("ptesync": : :"memory"); + + /* + * Workaround the fact that the "ric" argument to __tlbie_lpid + * must be a compile-time constant to match the "i" constraint + * in the asm statement. + */ + switch (ric) { + case RIC_FLUSH_TLB: + __tlbie_lpid(lpid, RIC_FLUSH_TLB); + break; + case RIC_FLUSH_PWC: + __tlbie_lpid(lpid, RIC_FLUSH_PWC); + break; + case RIC_FLUSH_ALL: + default: + __tlbie_lpid(lpid, RIC_FLUSH_ALL); + } + fixup_tlbie_lpid(lpid); + asm volatile("eieio; tlbsync; ptesync": : :"memory"); +} + +static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric) +{ + int set; + + VM_BUG_ON(mfspr(SPRN_LPID) != lpid); + + asm volatile("ptesync": : :"memory"); + + /* + * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL, + * also flush the entire Page Walk Cache.
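 *
 * (RIC selects the scope of the invalidation: RIC_FLUSH_TLB flushes TLB
 * entries only, RIC_FLUSH_PWC only the page-walk cache, and RIC_FLUSH_ALL
 * both.)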
+ */ + __tlbiel_lpid_guest(lpid, 0, ric); + + /* For PWC, only one flush is needed */ + if (ric == RIC_FLUSH_PWC) { + asm volatile("ptesync": : :"memory"); + return; + } + + /* For the remaining sets, just flush the TLB */ + for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++) + __tlbiel_lpid_guest(lpid, set, RIC_FLUSH_TLB); + + asm volatile("ptesync": : :"memory"); +} + + static inline void __tlbiel_va_range(unsigned long start, unsigned long end, unsigned long pid, unsigned long page_size, unsigned long psize) @@ -268,6 +423,17 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid, asm volatile("eieio; tlbsync; ptesync": : :"memory"); } +static inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid, + unsigned long psize, unsigned long ric) +{ + unsigned long ap = mmu_get_ap(psize); + + asm volatile("ptesync": : :"memory"); + __tlbie_lpid_va(va, lpid, ap, ric); + fixup_tlbie_lpid(lpid); + asm volatile("eieio; tlbsync; ptesync": : :"memory"); +} + static inline void _tlbie_va_range(unsigned long start, unsigned long end, unsigned long pid, unsigned long page_size, unsigned long psize, bool also_pwc) @@ -340,6 +506,15 @@ void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmadd } EXPORT_SYMBOL(radix__local_flush_tlb_page); +static bool mm_is_singlethreaded(struct mm_struct *mm) +{ + if (atomic_read(&mm->context.copros) > 0) + return false; + if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm) + return true; + return false; +} + static bool mm_needs_flush_escalation(struct mm_struct *mm) { /* @@ -347,10 +522,47 @@ static bool mm_needs_flush_escalation(struct mm_struct *mm) * caching PTEs and not flushing them properly when * RIC = 0 for a PID/LPID invalidate */ - return atomic_read(&mm->context.copros) != 0; + if (atomic_read(&mm->context.copros) > 0) + return true; + return false; } #ifdef CONFIG_SMP +static void do_exit_flush_lazy_tlb(void *arg) +{ + struct mm_struct *mm = arg; + unsigned long pid = mm->context.id; + + if (current->mm == mm) + return; /* Local CPU */ + + if (current->active_mm == mm) { + /* + * Must be a kernel thread because sender is single-threaded. + */ + BUG_ON(current->mm); + mmgrab(&init_mm); + switch_mm(mm, &init_mm, current); + current->active_mm = &init_mm; + mmdrop(mm); + } + _tlbiel_pid(pid, RIC_FLUSH_ALL); +} + +static void exit_flush_lazy_tlbs(struct mm_struct *mm) +{ + /* + * Would be nice if this was async so it could be run in + * parallel with our local flush, but generic code does not + * give a good API for it. Could extend the generic code or + * make a special powerpc IPI for flushing TLBs. + * For now it's not too performance critical. + */ + smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb, + (void *)mm, 1); + mm_reset_thread_local(mm); +} + void radix__flush_tlb_mm(struct mm_struct *mm) { unsigned long pid; @@ -360,18 +572,30 @@ void radix__flush_tlb_mm(struct mm_struct *mm) return; preempt_disable(); + /* + * Order loads of mm_cpumask vs previous stores to clear ptes before + * the invalidate. 
See barrier in switch_mm_irqs_off + */ + smp_mb(); if (!mm_is_thread_local(mm)) { + if (unlikely(mm_is_singlethreaded(mm))) { + exit_flush_lazy_tlbs(mm); + goto local; + } + if (mm_needs_flush_escalation(mm)) _tlbie_pid(pid, RIC_FLUSH_ALL); else _tlbie_pid(pid, RIC_FLUSH_TLB); - } else + } else { +local: _tlbiel_pid(pid, RIC_FLUSH_TLB); + } preempt_enable(); } EXPORT_SYMBOL(radix__flush_tlb_mm); -void radix__flush_all_mm(struct mm_struct *mm) +static void __flush_all_mm(struct mm_struct *mm, bool fullmm) { unsigned long pid; @@ -380,12 +604,25 @@ void radix__flush_all_mm(struct mm_struct *mm) return; preempt_disable(); - if (!mm_is_thread_local(mm)) + smp_mb(); /* see radix__flush_tlb_mm */ + if (!mm_is_thread_local(mm)) { + if (unlikely(mm_is_singlethreaded(mm))) { + if (!fullmm) { + exit_flush_lazy_tlbs(mm); + goto local; + } + } _tlbie_pid(pid, RIC_FLUSH_ALL); - else + } else { +local: _tlbiel_pid(pid, RIC_FLUSH_ALL); + } preempt_enable(); } +void radix__flush_all_mm(struct mm_struct *mm) +{ + __flush_all_mm(mm, false); +} EXPORT_SYMBOL(radix__flush_all_mm); void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr) @@ -404,10 +641,17 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, return; preempt_disable(); - if (!mm_is_thread_local(mm)) + smp_mb(); /* see radix__flush_tlb_mm */ + if (!mm_is_thread_local(mm)) { + if (unlikely(mm_is_singlethreaded(mm))) { + exit_flush_lazy_tlbs(mm); + goto local; + } _tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB); - else + } else { +local: _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB); + } preempt_enable(); } @@ -466,14 +710,22 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start, return; preempt_disable(); - if (mm_is_thread_local(mm)) { - local = true; - full = (end == TLB_FLUSH_ALL || - nr_pages > tlb_local_single_page_flush_ceiling); - } else { + smp_mb(); /* see radix__flush_tlb_mm */ + if (!mm_is_thread_local(mm)) { + if (unlikely(mm_is_singlethreaded(mm))) { + if (end != TLB_FLUSH_ALL) { + exit_flush_lazy_tlbs(mm); + goto is_local; + } + } local = false; full = (end == TLB_FLUSH_ALL || nr_pages > tlb_single_page_flush_ceiling); + } else { +is_local: + local = true; + full = (end == TLB_FLUSH_ALL || + nr_pages > tlb_local_single_page_flush_ceiling); } if (full) { @@ -534,6 +786,49 @@ static int radix_get_mmu_psize(int page_size) return psize; } +/* + * Flush partition scoped LPID address translation for all CPUs. + */ +void radix__flush_tlb_lpid_page(unsigned int lpid, + unsigned long addr, + unsigned long page_size) +{ + int psize = radix_get_mmu_psize(page_size); + + _tlbie_lpid_va(addr, lpid, psize, RIC_FLUSH_TLB); +} +EXPORT_SYMBOL_GPL(radix__flush_tlb_lpid_page); + +/* + * Flush partition scoped PWC from LPID for all CPUs. + */ +void radix__flush_pwc_lpid(unsigned int lpid) +{ + _tlbie_lpid(lpid, RIC_FLUSH_PWC); +} +EXPORT_SYMBOL_GPL(radix__flush_pwc_lpid); + +/* + * Flush partition scoped translations from LPID (=LPIDR) + */ +void radix__local_flush_tlb_lpid(unsigned int lpid) +{ + _tlbiel_lpid(lpid, RIC_FLUSH_ALL); +} +EXPORT_SYMBOL_GPL(radix__local_flush_tlb_lpid); + +/* + * Flush process scoped translations from LPID (=LPIDR). + * Important difference, the guest normally manages its own translations, + * but some cases e.g., vCPU CPU migration require KVM to flush. 
+ */ +void radix__local_flush_tlb_lpid_guest(unsigned int lpid) +{ + _tlbiel_lpid_guest(lpid, RIC_FLUSH_ALL); +} +EXPORT_SYMBOL_GPL(radix__local_flush_tlb_lpid_guest); + + static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start, unsigned long end, int psize); @@ -551,7 +846,7 @@ void radix__tlb_flush(struct mmu_gather *tlb) * See the comment for radix in arch_exit_mmap(). */ if (tlb->fullmm) { - radix__flush_all_mm(mm); + __flush_all_mm(mm, true); } else if ( (psize = radix_get_mmu_psize(page_size)) == -1) { if (!tlb->need_flush_all) radix__flush_tlb_mm(mm); @@ -584,24 +879,33 @@ static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm, return; preempt_disable(); - if (mm_is_thread_local(mm)) { - local = true; - full = (end == TLB_FLUSH_ALL || - nr_pages > tlb_local_single_page_flush_ceiling); - } else { + smp_mb(); /* see radix__flush_tlb_mm */ + if (!mm_is_thread_local(mm)) { + if (unlikely(mm_is_singlethreaded(mm))) { + if (end != TLB_FLUSH_ALL) { + exit_flush_lazy_tlbs(mm); + goto is_local; + } + } local = false; full = (end == TLB_FLUSH_ALL || nr_pages > tlb_single_page_flush_ceiling); + } else { +is_local: + local = true; + full = (end == TLB_FLUSH_ALL || + nr_pages > tlb_local_single_page_flush_ceiling); } if (full) { - if (!local && mm_needs_flush_escalation(mm)) - also_pwc = true; - - if (local) + if (local) { _tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB); - else - _tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL: RIC_FLUSH_TLB); + } else { + if (mm_needs_flush_escalation(mm)) + also_pwc = true; + + _tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB); + } } else { if (local) _tlbiel_va_range(start, end, pid, page_size, psize, also_pwc); @@ -642,11 +946,17 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr) /* Otherwise first do the PWC, then iterate the pages. */ preempt_disable(); - - if (mm_is_thread_local(mm)) { - _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); - } else { + smp_mb(); /* see radix__flush_tlb_mm */ + if (!mm_is_thread_local(mm)) { + if (unlikely(mm_is_singlethreaded(mm))) { + exit_flush_lazy_tlbs(mm); + goto local; + } _tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); + goto local; + } else { +local: + _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); } preempt_enable(); diff --git a/arch/powerpc/mm/tlb_hash32.c b/arch/powerpc/mm/tlb_hash32.c index 702d7689d714..cf8472cf3d59 100644 --- a/arch/powerpc/mm/tlb_hash32.c +++ b/arch/powerpc/mm/tlb_hash32.c @@ -41,7 +41,7 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr) { unsigned long ptephys; - if (Hash != 0) { + if (Hash) { ptephys = __pa(ptep) & PAGE_MASK; flush_hash_pages(mm->context.id, addr, ptephys, 1); } @@ -54,7 +54,7 @@ EXPORT_SYMBOL(flush_hash_entry); */ void tlb_flush(struct mmu_gather *tlb) { - if (Hash == 0) { + if (!Hash) { /* * 603 needs to flush the whole TLB here since * it doesn't use a hash table. 
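The radix__flush_tlb_mm() rework above is the heart of this series: when every other CPU holds the mm only lazily, exit_flush_lazy_tlbs() IPIs those CPUs off the mm, and the expensive global tlbie broadcast is downgraded to a local tlbiel. The following is a simplified sketch, not the kernel code: it restates the hunk's control flow without the goto, using the helper names introduced in the hunks above, with the PID validity check, tracing, and the coprocessor flush-escalation case elided.

```c
/*
 * Sketch only.  mm_is_thread_local(), mm_is_singlethreaded(),
 * exit_flush_lazy_tlbs(), _tlbie_pid() and _tlbiel_pid() are the
 * helpers from the tlb-radix.c hunks above.
 */
static void sketch_radix_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid = mm->context.id;

	preempt_disable();
	smp_mb();	/* order the mm_cpumask load vs. earlier PTE clears */

	if (mm_is_thread_local(mm)) {
		_tlbiel_pid(pid, RIC_FLUSH_TLB);	/* cheap local flush */
	} else if (mm_is_singlethreaded(mm)) {
		/*
		 * Remote CPUs hold the mm only lazily: IPI them off it,
		 * after which a local flush suffices.
		 */
		exit_flush_lazy_tlbs(mm);
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	} else {
		_tlbie_pid(pid, RIC_FLUSH_TLB);		/* global broadcast */
	}

	preempt_enable();
}
```

The smp_mb() is the subtle part, as the in-tree comment notes: the mm_cpumask load must not be reordered before the stores that cleared the PTEs, otherwise a CPU that picks up the mm in that window could retain stale translations.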
@@ -84,7 +84,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start, int count; unsigned int ctx = mm->context.id; - if (Hash == 0) { + if (!Hash) { _tlbia(); return; } @@ -124,7 +124,7 @@ void flush_tlb_mm(struct mm_struct *mm) { struct vm_area_struct *mp; - if (Hash == 0) { + if (!Hash) { _tlbia(); return; } @@ -145,7 +145,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) struct mm_struct *mm; pmd_t *pmd; - if (Hash == 0) { + if (!Hash) { _tlbie(vmaddr); return; } diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile index 02d369ca6a53..809f019d3cba 100644 --- a/arch/powerpc/net/Makefile +++ b/arch/powerpc/net/Makefile @@ -3,7 +3,7 @@ # Arch-specific network modules # ifeq ($(CONFIG_PPC64),y) -obj-$(CONFIG_BPF_JIT) += bpf_jit_asm64.o bpf_jit_comp64.o +obj-$(CONFIG_BPF_JIT) += bpf_jit_comp64.o else obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o endif diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h index 8bdef7ed28a8..3609be4692b3 100644 --- a/arch/powerpc/net/bpf_jit64.h +++ b/arch/powerpc/net/bpf_jit64.h @@ -20,7 +20,7 @@ * with our redzone usage. * * [ prev sp ] <------------- - * [ nv gpr save area ] 8*8 | + * [ nv gpr save area ] 6*8 | * [ tail_call_cnt ] 8 | * [ local_tmp_var ] 8 | * fp (r31) --> [ ebpf stack space ] upto 512 | @@ -28,8 +28,8 @@ * sp (r1) ---> [ stack pointer ] -------------- */ -/* for gpr non volatile registers BPG_REG_6 to 10, plus skb cache registers */ -#define BPF_PPC_STACK_SAVE (8*8) +/* for gpr non volatile registers BPG_REG_6 to 10 */ +#define BPF_PPC_STACK_SAVE (6*8) /* for bpf JIT code internal usage */ #define BPF_PPC_STACK_LOCALS 16 /* stack frame excluding BPF stack, ensure this is quadword aligned */ @@ -39,10 +39,8 @@ #ifndef __ASSEMBLY__ /* BPF register usage */ -#define SKB_HLEN_REG (MAX_BPF_JIT_REG + 0) -#define SKB_DATA_REG (MAX_BPF_JIT_REG + 1) -#define TMP_REG_1 (MAX_BPF_JIT_REG + 2) -#define TMP_REG_2 (MAX_BPF_JIT_REG + 3) +#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) +#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* BPF to ppc register mappings */ static const int b2p[] = { @@ -63,40 +61,23 @@ static const int b2p[] = { [BPF_REG_FP] = 31, /* eBPF jit internal registers */ [BPF_REG_AX] = 2, - [SKB_HLEN_REG] = 25, - [SKB_DATA_REG] = 26, [TMP_REG_1] = 9, [TMP_REG_2] = 10 }; -/* PPC NVR range -- update this if we ever use NVRs below r24 */ -#define BPF_PPC_NVR_MIN 24 - -/* Assembly helpers */ -#define DECLARE_LOAD_FUNC(func) u64 func(u64 r3, u64 r4); \ - u64 func##_negative_offset(u64 r3, u64 r4); \ - u64 func##_positive_offset(u64 r3, u64 r4); - -DECLARE_LOAD_FUNC(sk_load_word); -DECLARE_LOAD_FUNC(sk_load_half); -DECLARE_LOAD_FUNC(sk_load_byte); - -#define CHOOSE_LOAD_FUNC(imm, func) \ - (imm < 0 ? \ - (imm >= SKF_LL_OFF ? func##_negative_offset : func) : \ - func##_positive_offset) +/* PPC NVR range -- update this if we ever use NVRs below r27 */ +#define BPF_PPC_NVR_MIN 27 #define SEEN_FUNC 0x1000 /* might call external helpers */ #define SEEN_STACK 0x2000 /* uses BPF stack */ -#define SEEN_SKB 0x4000 /* uses sk_buff */ -#define SEEN_TAILCALL 0x8000 /* uses tail calls */ +#define SEEN_TAILCALL 0x4000 /* uses tail calls */ struct codegen_context { /* * This is used to track register usage as well * as calls to external helpers. 
* - register usage is tracked with corresponding - * bits (r3-r10 and r25-r31) + * bits (r3-r10 and r27-r31) * - rest of the bits can be used to track other * things -- for now, we use bits 16 to 23 * encoded in SEEN_* macros above diff --git a/arch/powerpc/net/bpf_jit_asm64.S b/arch/powerpc/net/bpf_jit_asm64.S deleted file mode 100644 index 7e4c51430b84..000000000000 --- a/arch/powerpc/net/bpf_jit_asm64.S +++ /dev/null @@ -1,180 +0,0 @@ -/* - * bpf_jit_asm64.S: Packet/header access helper functions - * for PPC64 BPF compiler. - * - * Copyright 2016, Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> - * IBM Corporation - * - * Based on bpf_jit_asm.S by Matt Evans - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; version 2 - * of the License. - */ - -#include <asm/ppc_asm.h> -#include <asm/ptrace.h> -#include "bpf_jit64.h" - -/* - * All of these routines are called directly from generated code, - * with the below register usage: - * r27 skb pointer (ctx) - * r25 skb header length - * r26 skb->data pointer - * r4 offset - * - * Result is passed back in: - * r8 data read in host endian format (accumulator) - * - * r9 is used as a temporary register - */ - -#define r_skb r27 -#define r_hlen r25 -#define r_data r26 -#define r_off r4 -#define r_val r8 -#define r_tmp r9 - -_GLOBAL_TOC(sk_load_word) - cmpdi r_off, 0 - blt bpf_slow_path_word_neg - b sk_load_word_positive_offset - -_GLOBAL_TOC(sk_load_word_positive_offset) - /* Are we accessing past headlen? */ - subi r_tmp, r_hlen, 4 - cmpd r_tmp, r_off - blt bpf_slow_path_word - /* Nope, just hitting the header. cr0 here is eq or gt! */ - LWZX_BE r_val, r_data, r_off - blr /* Return success, cr0 != LT */ - -_GLOBAL_TOC(sk_load_half) - cmpdi r_off, 0 - blt bpf_slow_path_half_neg - b sk_load_half_positive_offset - -_GLOBAL_TOC(sk_load_half_positive_offset) - subi r_tmp, r_hlen, 2 - cmpd r_tmp, r_off - blt bpf_slow_path_half - LHZX_BE r_val, r_data, r_off - blr - -_GLOBAL_TOC(sk_load_byte) - cmpdi r_off, 0 - blt bpf_slow_path_byte_neg - b sk_load_byte_positive_offset - -_GLOBAL_TOC(sk_load_byte_positive_offset) - cmpd r_hlen, r_off - ble bpf_slow_path_byte - lbzx r_val, r_data, r_off - blr - -/* - * Call out to skb_copy_bits: - * Allocate a new stack frame here to remain ABI-compliant in - * stashing LR. 
- */ -#define bpf_slow_path_common(SIZE) \ - mflr r0; \ - std r0, PPC_LR_STKOFF(r1); \ - stdu r1, -(STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_LOCALS)(r1); \ - mr r3, r_skb; \ - /* r4 = r_off as passed */ \ - addi r5, r1, STACK_FRAME_MIN_SIZE; \ - li r6, SIZE; \ - bl skb_copy_bits; \ - nop; \ - /* save r5 */ \ - addi r5, r1, STACK_FRAME_MIN_SIZE; \ - /* r3 = 0 on success */ \ - addi r1, r1, STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_LOCALS; \ - ld r0, PPC_LR_STKOFF(r1); \ - mtlr r0; \ - cmpdi r3, 0; \ - blt bpf_error; /* cr0 = LT */ - -bpf_slow_path_word: - bpf_slow_path_common(4) - /* Data value is on stack, and cr0 != LT */ - LWZX_BE r_val, 0, r5 - blr - -bpf_slow_path_half: - bpf_slow_path_common(2) - LHZX_BE r_val, 0, r5 - blr - -bpf_slow_path_byte: - bpf_slow_path_common(1) - lbzx r_val, 0, r5 - blr - -/* - * Call out to bpf_internal_load_pointer_neg_helper - */ -#define sk_negative_common(SIZE) \ - mflr r0; \ - std r0, PPC_LR_STKOFF(r1); \ - stdu r1, -STACK_FRAME_MIN_SIZE(r1); \ - mr r3, r_skb; \ - /* r4 = r_off, as passed */ \ - li r5, SIZE; \ - bl bpf_internal_load_pointer_neg_helper; \ - nop; \ - addi r1, r1, STACK_FRAME_MIN_SIZE; \ - ld r0, PPC_LR_STKOFF(r1); \ - mtlr r0; \ - /* R3 != 0 on success */ \ - cmpldi r3, 0; \ - beq bpf_error_slow; /* cr0 = EQ */ - -bpf_slow_path_word_neg: - lis r_tmp, -32 /* SKF_LL_OFF */ - cmpd r_off, r_tmp /* addr < SKF_* */ - blt bpf_error /* cr0 = LT */ - b sk_load_word_negative_offset - -_GLOBAL_TOC(sk_load_word_negative_offset) - sk_negative_common(4) - LWZX_BE r_val, 0, r3 - blr - -bpf_slow_path_half_neg: - lis r_tmp, -32 /* SKF_LL_OFF */ - cmpd r_off, r_tmp /* addr < SKF_* */ - blt bpf_error /* cr0 = LT */ - b sk_load_half_negative_offset - -_GLOBAL_TOC(sk_load_half_negative_offset) - sk_negative_common(2) - LHZX_BE r_val, 0, r3 - blr - -bpf_slow_path_byte_neg: - lis r_tmp, -32 /* SKF_LL_OFF */ - cmpd r_off, r_tmp /* addr < SKF_* */ - blt bpf_error /* cr0 = LT */ - b sk_load_byte_negative_offset - -_GLOBAL_TOC(sk_load_byte_negative_offset) - sk_negative_common(1) - lbzx r_val, 0, r3 - blr - -bpf_error_slow: - /* fabricate a cr0 = lt */ - li r_tmp, -1 - cmpdi r_tmp, 0 -bpf_error: - /* - * Entered with cr0 = lt - * Generated code will 'blt epilogue', returning 0. - */ - li r_val, 0 - blr diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 0ef3d9580e98..f1c95779843b 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -59,7 +59,7 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx) * [ prev sp ] <------------- * [ ... 
] | * sp (r1) ---> [ stack pointer ] -------------- - * [ nv gpr save area ] 8*8 + * [ nv gpr save area ] 6*8 * [ tail_call_cnt ] 8 * [ local_tmp_var ] 8 * [ unused red zone ] 208 bytes protected @@ -88,21 +88,6 @@ static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg) BUG(); } -static void bpf_jit_emit_skb_loads(u32 *image, struct codegen_context *ctx) -{ - /* - * Load skb->len and skb->data_len - * r3 points to skb - */ - PPC_LWZ(b2p[SKB_HLEN_REG], 3, offsetof(struct sk_buff, len)); - PPC_LWZ(b2p[TMP_REG_1], 3, offsetof(struct sk_buff, data_len)); - /* header_len = len - data_len */ - PPC_SUB(b2p[SKB_HLEN_REG], b2p[SKB_HLEN_REG], b2p[TMP_REG_1]); - - /* skb->data pointer */ - PPC_BPF_LL(b2p[SKB_DATA_REG], 3, offsetof(struct sk_buff, data)); -} - static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx) { int i; @@ -145,18 +130,6 @@ static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx) if (bpf_is_seen_register(ctx, i)) PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i])); - /* - * Save additional non-volatile regs if we cache skb - * Also, setup skb data - */ - if (ctx->seen & SEEN_SKB) { - PPC_BPF_STL(b2p[SKB_HLEN_REG], 1, - bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG])); - PPC_BPF_STL(b2p[SKB_DATA_REG], 1, - bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG])); - bpf_jit_emit_skb_loads(image, ctx); - } - /* Setup frame pointer to point to the bpf stack area */ if (bpf_is_seen_register(ctx, BPF_REG_FP)) PPC_ADDI(b2p[BPF_REG_FP], 1, @@ -172,14 +145,6 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx if (bpf_is_seen_register(ctx, i)) PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i])); - /* Restore non-volatile registers used for skb cache */ - if (ctx->seen & SEEN_SKB) { - PPC_BPF_LL(b2p[SKB_HLEN_REG], 1, - bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG])); - PPC_BPF_LL(b2p[SKB_DATA_REG], 1, - bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG])); - } - /* Tear down our stack frame */ if (bpf_has_stack_frame(ctx)) { PPC_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size); @@ -202,25 +167,37 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func) { + unsigned int i, ctx_idx = ctx->idx; + + /* Load function address into r12 */ + PPC_LI64(12, func); + + /* For bpf-to-bpf function calls, the callee's address is unknown + * until the last extra pass. As seen above, we use PPC_LI64() to + * load the callee's address, but this may optimize the number of + * instructions required based on the nature of the address. + * + * Since we don't want the number of instructions emitted to change, + * we pad the optimized PPC_LI64() call with NOPs to guarantee that + * we always have a five-instruction sequence, which is the maximum + * that PPC_LI64() can emit. + */ + for (i = ctx->idx - ctx_idx; i < 5; i++) + PPC_NOP(); + #ifdef PPC64_ELF_ABI_v1 - /* func points to the function descriptor */ - PPC_LI64(b2p[TMP_REG_2], func); - /* Load actual entry point from function descriptor */ - PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0); - /* ... and move it to LR */ - PPC_MTLR(b2p[TMP_REG_1]); /* * Load TOC from function descriptor at offset 8. * We can clobber r2 since we get called through a * function pointer (so caller will save/restore r2) * and since we don't use a TOC ourself. 
*/ - PPC_BPF_LL(2, b2p[TMP_REG_2], 8); -#else - /* We can clobber r12 */ - PPC_FUNC_ADDR(12, func); - PPC_MTLR(12); + PPC_BPF_LL(2, 12, 8); + /* Load actual entry point from function descriptor */ + PPC_BPF_LL(12, 12, 0); #endif + + PPC_MTLR(12); PPC_BLRL(); } @@ -291,7 +268,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 /* Assemble the body code between the prologue & epilogue */ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx, - u32 *addrs) + u32 *addrs, bool extra_pass) { const struct bpf_insn *insn = fp->insnsi; int flen = fp->len; @@ -747,29 +724,30 @@ emit_clear: break; /* - * Call kernel helper + * Call kernel helper or bpf function */ case BPF_JMP | BPF_CALL: ctx->seen |= SEEN_FUNC; - func = (u8 *) __bpf_call_base + imm; - /* Save skb pointer if we need to re-cache skb data */ - if ((ctx->seen & SEEN_SKB) && - bpf_helper_changes_pkt_data(func)) - PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx)); + /* bpf function call */ + if (insn[i].src_reg == BPF_PSEUDO_CALL) + if (!extra_pass) + func = NULL; + else if (fp->aux->func && off < fp->aux->func_cnt) + /* use the subprog id from the off + * field to lookup the callee address + */ + func = (u8 *) fp->aux->func[off]->bpf_func; + else + return -EINVAL; + /* kernel helper call */ + else + func = (u8 *) __bpf_call_base + imm; bpf_jit_emit_func_call(image, ctx, (u64)func); /* move return value from r3 to BPF_REG_0 */ PPC_MR(b2p[BPF_REG_0], 3); - - /* refresh skb cache */ - if ((ctx->seen & SEEN_SKB) && - bpf_helper_changes_pkt_data(func)) { - /* reload skb pointer to r3 */ - PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx)); - bpf_jit_emit_skb_loads(image, ctx); - } break; /* @@ -887,65 +865,6 @@ cond_branch: break; /* - * Loads from packet header/data - * Assume 32-bit input value in imm and X (src_reg) - */ - - /* Absolute loads */ - case BPF_LD | BPF_W | BPF_ABS: - func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_word); - goto common_load_abs; - case BPF_LD | BPF_H | BPF_ABS: - func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_half); - goto common_load_abs; - case BPF_LD | BPF_B | BPF_ABS: - func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_byte); -common_load_abs: - /* - * Load from [imm] - * Load into r4, which can just be passed onto - * skb load helpers as the second parameter - */ - PPC_LI32(4, imm); - goto common_load; - - /* Indirect loads */ - case BPF_LD | BPF_W | BPF_IND: - func = (u8 *)sk_load_word; - goto common_load_ind; - case BPF_LD | BPF_H | BPF_IND: - func = (u8 *)sk_load_half; - goto common_load_ind; - case BPF_LD | BPF_B | BPF_IND: - func = (u8 *)sk_load_byte; -common_load_ind: - /* - * Load from [src_reg + imm] - * Treat src_reg as a 32-bit value - */ - PPC_EXTSW(4, src_reg); - if (imm) { - if (imm >= -32768 && imm < 32768) - PPC_ADDI(4, 4, IMM_L(imm)); - else { - PPC_LI32(b2p[TMP_REG_1], imm); - PPC_ADD(4, 4, b2p[TMP_REG_1]); - } - } - -common_load: - ctx->seen |= SEEN_SKB; - ctx->seen |= SEEN_FUNC; - bpf_jit_emit_func_call(image, ctx, (u64)func); - - /* - * Helper returns 'lt' condition on error, and an - * appropriate return value in BPF_REG_0 - */ - PPC_BCC(COND_LT, exit_addr); - break; - - /* * Tail call */ case BPF_JMP | BPF_TAIL_CALL: @@ -971,6 +890,14 @@ common_load: return 0; } +struct powerpc64_jit_data { + struct bpf_binary_header *header; + u32 *addrs; + u8 *image; + u32 proglen; + struct codegen_context ctx; +}; + struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) { u32 proglen; @@ -978,6 +905,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog 
*fp) u8 *image = NULL; u32 *code_base; u32 *addrs; + struct powerpc64_jit_data *jit_data; struct codegen_context cgctx; int pass; int flen; @@ -985,6 +913,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) struct bpf_prog *org_fp = fp; struct bpf_prog *tmp_fp; bool bpf_blinded = false; + bool extra_pass = false; if (!fp->jit_requested) return org_fp; @@ -998,11 +927,32 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) fp = tmp_fp; } + jit_data = fp->aux->jit_data; + if (!jit_data) { + jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); + if (!jit_data) { + fp = org_fp; + goto out; + } + fp->aux->jit_data = jit_data; + } + flen = fp->len; + addrs = jit_data->addrs; + if (addrs) { + cgctx = jit_data->ctx; + image = jit_data->image; + bpf_hdr = jit_data->header; + proglen = jit_data->proglen; + alloclen = proglen + FUNCTION_DESCR_SIZE; + extra_pass = true; + goto skip_init_ctx; + } + addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL); if (addrs == NULL) { fp = org_fp; - goto out; + goto out_addrs; } memset(&cgctx, 0, sizeof(struct codegen_context)); @@ -1011,10 +961,10 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) cgctx.stack_size = round_up(fp->aux->stack_depth, 16); /* Scouting faux-generate pass 0 */ - if (bpf_jit_build_body(fp, 0, &cgctx, addrs)) { + if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) { /* We hit something illegal or unsupported. */ fp = org_fp; - goto out; + goto out_addrs; } /* @@ -1032,9 +982,10 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) bpf_jit_fill_ill_insns); if (!bpf_hdr) { fp = org_fp; - goto out; + goto out_addrs; } +skip_init_ctx: code_base = (u32 *)(image + FUNCTION_DESCR_SIZE); /* Code generation passes 1-2 */ @@ -1042,7 +993,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) /* Now build the prologue, body code & epilogue for real. */ cgctx.idx = 0; bpf_jit_build_prologue(code_base, &cgctx); - bpf_jit_build_body(fp, code_base, &cgctx, addrs); + bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass); bpf_jit_build_epilogue(code_base, &cgctx); if (bpf_jit_enable > 1) @@ -1068,10 +1019,20 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) fp->jited_len = alloclen; bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE)); + if (!fp->is_func || extra_pass) { +out_addrs: + kfree(addrs); + kfree(jit_data); + fp->aux->jit_data = NULL; + } else { + jit_data->addrs = addrs; + jit_data->ctx = cgctx; + jit_data->proglen = proglen; + jit_data->image = image; + jit_data->header = bpf_hdr; + } out: - kfree(addrs); - if (bpf_blinded) bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp); diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c index ecc66d5f02c9..ad054dd0d666 100644 --- a/arch/powerpc/oprofile/backtrace.c +++ b/arch/powerpc/oprofile/backtrace.c @@ -7,6 +7,7 @@ * 2 of the License, or (at your option) any later version. 
**/ +#include <linux/compat_time.h> #include <linux/oprofile.h> #include <linux/sched.h> #include <asm/processor.h> diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c index 85f1d18e5fd3..ba485844d506 100644 --- a/arch/powerpc/perf/core-fsl-emb.c +++ b/arch/powerpc/perf/core-fsl-emb.c @@ -42,7 +42,7 @@ static DEFINE_MUTEX(pmc_reserve_mutex); static inline int perf_intr_is_nmi(struct pt_regs *regs) { #ifdef __powerpc64__ - return !regs->softe; + return (regs->softe & IRQS_DISABLED); #else return 0; #endif diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index d7532e7b9ab5..d1977b61f827 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -40,6 +40,7 @@ static struct imc_pmu *core_imc_pmu; /* Thread IMC data structures and variables */ static DEFINE_PER_CPU(u64 *, thread_imc_mem); +static struct imc_pmu *thread_imc_pmu; static int thread_imc_mem_size; struct imc_pmu *imc_event_to_pmu(struct perf_event *event) @@ -1146,14 +1147,14 @@ static int init_nest_pmu_ref(void) static void cleanup_all_core_imc_memory(void) { - int i, nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core); + int i, nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core); struct imc_mem_info *ptr = core_imc_pmu->mem_info; int size = core_imc_pmu->counter_mem_size; /* mem_info will never be NULL */ for (i = 0; i < nr_cores; i++) { if (ptr[i].vbase) - free_pages((u64)ptr->vbase, get_order(size)); + free_pages((u64)ptr[i].vbase, get_order(size)); } kfree(ptr); @@ -1191,7 +1192,6 @@ static void imc_common_mem_free(struct imc_pmu *pmu_ptr) if (pmu_ptr->attr_groups[IMC_EVENT_ATTR]) kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs); kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]); - kfree(pmu_ptr); } /* @@ -1208,6 +1208,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr) cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE); kfree(nest_imc_refc); kfree(per_nest_pmu_arr); + per_nest_pmu_arr = NULL; } if (nest_pmus > 0) @@ -1228,6 +1229,16 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr) } } +/* + * Function to unregister thread-imc if core-imc + * is not registered. + */ +void unregister_thread_imc(void) +{ + imc_common_cpuhp_mem_free(thread_imc_pmu); + imc_common_mem_free(thread_imc_pmu); + perf_pmu_unregister(&thread_imc_pmu->pmu); +} /* * imc_mem_init : Function to support memory allocation for core imc. 
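Buried in the imc-pmu.c hunk above is a classic loop-indexing fix: cleanup_all_core_imc_memory() iterated with i but freed ptr->vbase, which is just ptr[0].vbase, so the first core's buffer was freed once per iteration while every other buffer leaked. A self-contained userspace illustration of the pattern (generic, hypothetical names, not the kernel code):

```c
#include <stdlib.h>

struct mem_info {
	void *vbase;
};

/* Buggy shape: 'ptr->vbase' is always 'ptr[0].vbase', so slot 0 is
 * freed n times (a double free) and slots 1..n-1 are never freed. */
static void cleanup_buggy(struct mem_info *ptr, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (ptr->vbase)
			free(ptr->vbase);
}

/* Fixed shape, mirroring the hunk: free the slot the loop visits. */
static void cleanup_fixed(struct mem_info *ptr, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (ptr[i].vbase)
			free(ptr[i].vbase);
}

int main(void)
{
	struct mem_info info[4] = { { 0 } };
	int i;

	for (i = 0; i < 4; i++)
		info[i].vbase = malloc(64);

	/* cleanup_buggy(info, 4) would double-free info[0].vbase. */
	cleanup_fixed(info, 4);
	(void)cleanup_buggy;	/* referenced only to keep the sketch warning-free */
	return 0;
}
```

The same hunk also switches num_present_cpus() to num_possible_cpus() in both the allocation and cleanup paths, so the two sides agree on nr_cores even when CPUs appear after boot.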
@@ -1236,7 +1247,7 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent, int pmu_index) { const char *s; - int nr_cores, cpu, res; + int nr_cores, cpu, res = -ENOMEM; if (of_property_read_string(parent, "name", &s)) return -ENODEV; @@ -1246,7 +1257,7 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent, /* Update the pmu name */ pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s_imc", "nest_", s); if (!pmu_ptr->pmu.name) - return -ENOMEM; + goto err; /* Needed for hotplug/migration */ if (!per_nest_pmu_arr) { @@ -1254,7 +1265,7 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent, sizeof(struct imc_pmu *), GFP_KERNEL); if (!per_nest_pmu_arr) - return -ENOMEM; + goto err; } per_nest_pmu_arr[pmu_index] = pmu_ptr; break; @@ -1262,21 +1273,21 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent, /* Update the pmu name */ pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc"); if (!pmu_ptr->pmu.name) - return -ENOMEM; + goto err; - nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core); + nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core); pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info), GFP_KERNEL); if (!pmu_ptr->mem_info) - return -ENOMEM; + goto err; core_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref), GFP_KERNEL); if (!core_imc_refc) { kfree(pmu_ptr->mem_info); - return -ENOMEM; + goto err; } core_imc_pmu = pmu_ptr; @@ -1285,23 +1296,26 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent, /* Update the pmu name */ pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc"); if (!pmu_ptr->pmu.name) - return -ENOMEM; + goto err; thread_imc_mem_size = pmu_ptr->counter_mem_size; for_each_online_cpu(cpu) { res = thread_imc_mem_alloc(cpu, pmu_ptr->counter_mem_size); if (res) { cleanup_all_thread_imc_memory(); - return res; + goto err; } } + thread_imc_pmu = pmu_ptr; break; default: return -EINVAL; } return 0; +err: + return res; } /* @@ -1319,10 +1333,8 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id int ret; ret = imc_mem_init(pmu_ptr, parent, pmu_idx); - if (ret) { - imc_common_mem_free(pmu_ptr); - return ret; - } + if (ret) + goto err_free_mem; switch (pmu_ptr->domain) { case IMC_DOMAIN_NEST: @@ -1337,7 +1349,9 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id ret = init_nest_pmu_ref(); if (ret) { mutex_unlock(&nest_init_lock); - goto err_free; + kfree(per_nest_pmu_arr); + per_nest_pmu_arr = NULL; + goto err_free_mem; } /* Register for cpu hotplug notification. 
*/ ret = nest_pmu_cpumask_init(); @@ -1345,7 +1359,8 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id mutex_unlock(&nest_init_lock); kfree(nest_imc_refc); kfree(per_nest_pmu_arr); - goto err_free; + per_nest_pmu_arr = NULL; + goto err_free_mem; } } nest_pmus++; @@ -1355,7 +1370,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id ret = core_imc_pmu_cpumask_init(); if (ret) { cleanup_all_core_imc_memory(); - return ret; + goto err_free_mem; } break; @@ -1363,33 +1378,34 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id ret = thread_imc_cpu_init(); if (ret) { cleanup_all_thread_imc_memory(); - return ret; + goto err_free_mem; } break; default: - return -1; /* Unknown domain */ + return -EINVAL; /* Unknown domain */ } ret = update_events_in_group(parent, pmu_ptr); if (ret) - goto err_free; + goto err_free_cpuhp_mem; ret = update_pmu_ops(pmu_ptr); if (ret) - goto err_free; + goto err_free_cpuhp_mem; ret = perf_pmu_register(&pmu_ptr->pmu, pmu_ptr->pmu.name, -1); if (ret) - goto err_free; + goto err_free_cpuhp_mem; pr_info("%s performance monitor hardware support registered\n", pmu_ptr->pmu.name); return 0; -err_free: - imc_common_mem_free(pmu_ptr); +err_free_cpuhp_mem: imc_common_cpuhp_mem_free(pmu_ptr); +err_free_mem: + imc_common_mem_free(pmu_ptr); return ret; } diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h index 6c737d675792..6a0b586c935a 100644 --- a/arch/powerpc/perf/isa207-common.h +++ b/arch/powerpc/perf/isa207-common.h @@ -17,70 +17,6 @@ #include <asm/firmware.h> #include <asm/cputable.h> -/* - * Raw event encoding for PowerISA v2.07: - * - * 60 56 52 48 44 40 36 32 - * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - * | | [ ] [ thresh_cmp ] [ thresh_ctl ] - * | | | | - * | | *- IFM (Linux) thresh start/stop OR FAB match -* - * | *- BHRB (Linux) - * *- EBB (Linux) - * - * 28 24 20 16 12 8 4 0 - * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - * [ ] [ sample ] [cache] [ pmc ] [unit ] c m [ pmcxsel ] - * | | | | | - * | | | | *- mark - * | | *- L1/L2/L3 cache_sel | - * | | | - * | *- sampling mode for marked events *- combine - * | - * *- thresh_sel - * - * Below uses IBM bit numbering. 
- * - * MMCR1[x:y] = unit (PMCxUNIT) - * MMCR1[x] = combine (PMCxCOMB) - * - * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011 - * # PM_MRK_FAB_RSP_MATCH - * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH) - * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001 - * # PM_MRK_FAB_RSP_MATCH_CYC - * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH) - * else - * MMCRA[48:55] = thresh_ctl (THRESH START/END) - * - * if thresh_sel: - * MMCRA[45:47] = thresh_sel - * - * if thresh_cmp: - * MMCRA[22:24] = thresh_cmp[0:2] - * MMCRA[25:31] = thresh_cmp[3:9] - * - * if unit == 6 or unit == 7 - * MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL) - * else if unit == 8 or unit == 9: - * if cache_sel[0] == 0: # L3 bank - * MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0) - * else if cache_sel[0] == 1: - * MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1) - * else if cache_sel[1]: # L1 event - * MMCR1[16] = cache_sel[2] - * MMCR1[17] = cache_sel[3] - * - * if mark: - * MMCRA[63] = 1 (SAMPLE_ENABLE) - * MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG) - * MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE) - * - * if EBB and BHRB: - * MMCRA[32:33] = IFM - * - */ - #define EVENT_EBB_MASK 1ull #define EVENT_EBB_SHIFT PERF_EVENT_CONFIG_EBB_SHIFT #define EVENT_BHRB_MASK 1ull diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c index c9356955cab4..d12a2db26353 100644 --- a/arch/powerpc/perf/power8-pmu.c +++ b/arch/powerpc/perf/power8-pmu.c @@ -30,6 +30,70 @@ enum { #define POWER8_MMCRA_IFM2 0x0000000080000000UL #define POWER8_MMCRA_IFM3 0x00000000C0000000UL +/* + * Raw event encoding for PowerISA v2.07 (Power8): + * + * 60 56 52 48 44 40 36 32 + * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | + * | | [ ] [ thresh_cmp ] [ thresh_ctl ] + * | | | | + * | | *- IFM (Linux) thresh start/stop OR FAB match -* + * | *- BHRB (Linux) + * *- EBB (Linux) + * + * 28 24 20 16 12 8 4 0 + * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | + * [ ] [ sample ] [cache] [ pmc ] [unit ] c m [ pmcxsel ] + * | | | | | + * | | | | *- mark + * | | *- L1/L2/L3 cache_sel | + * | | | + * | *- sampling mode for marked events *- combine + * | + * *- thresh_sel + * + * Below uses IBM bit numbering.
+ * + * MMCR1[x:y] = unit (PMCxUNIT) + * MMCR1[x] = combine (PMCxCOMB) + * + * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011 + * # PM_MRK_FAB_RSP_MATCH + * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH) + * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001 + * # PM_MRK_FAB_RSP_MATCH_CYC + * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH) + * else + * MMCRA[48:55] = thresh_ctl (THRESH START/END) + * + * if thresh_sel: + * MMCRA[45:47] = thresh_sel + * + * if thresh_cmp: + * MMCRA[22:24] = thresh_cmp[0:2] + * MMCRA[25:31] = thresh_cmp[3:9] + * + * if unit == 6 or unit == 7 + * MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL) + * else if unit == 8 or unit == 9: + * if cache_sel[0] == 0: # L3 bank + * MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0) + * else if cache_sel[0] == 1: + * MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1) + * else if cache_sel[1]: # L1 event + * MMCR1[16] = cache_sel[2] + * MMCR1[17] = cache_sel[3] + * + * if mark: + * MMCRA[63] = 1 (SAMPLE_ENABLE) + * MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG) + * MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE) + * + * if EBB and BHRB: + * MMCRA[32:33] = IFM + * + */ + /* PowerISA v2.07 format attribute structure*/ extern struct attribute_group isa207_pmu_format_group; diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig index 7e38b7b71a5a..071f53b0c0a0 100644 --- a/arch/powerpc/platforms/83xx/Kconfig +++ b/arch/powerpc/platforms/83xx/Kconfig @@ -90,13 +90,6 @@ config MPC837x_RDB help This option enables support for the MPC837x RDB and WLAN Boards. -config SBC834x - bool "Wind River SBC834x" - select DEFAULT_UIMAGE - select PPC_MPC834x - help - This option enables support for the Wind River SBC834x board. - config ASP834x bool "Analogue & Micro ASP 834x" select PPC_MPC834x diff --git a/arch/powerpc/platforms/83xx/Makefile b/arch/powerpc/platforms/83xx/Makefile index bb4720897f6a..41cb5f842eff 100644 --- a/arch/powerpc/platforms/83xx/Makefile +++ b/arch/powerpc/platforms/83xx/Makefile @@ -14,7 +14,6 @@ obj-$(CONFIG_MPC836x_MDS) += mpc836x_mds.o obj-$(CONFIG_MPC836x_RDK) += mpc836x_rdk.o obj-$(CONFIG_MPC832x_MDS) += mpc832x_mds.o obj-$(CONFIG_MPC837x_MDS) += mpc837x_mds.o -obj-$(CONFIG_SBC834x) += sbc834x.o obj-$(CONFIG_MPC837x_RDB) += mpc837x_rdb.o obj-$(CONFIG_ASP834x) += asp834x.o obj-$(CONFIG_KMETER1) += km83xx.o diff --git a/arch/powerpc/platforms/83xx/sbc834x.c b/arch/powerpc/platforms/83xx/sbc834x.c deleted file mode 100644 index cb4bdabfdf1c..000000000000 --- a/arch/powerpc/platforms/83xx/sbc834x.c +++ /dev/null @@ -1,73 +0,0 @@ -/* - * arch/powerpc/platforms/83xx/sbc834x.c - * - * Wind River SBC834x board specific routines - * - * By Paul Gortmaker (see MAINTAINERS for contact information) - * - * Based largely on the mpc834x_mds.c support by Kumar Gala. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version.
- */ - -#include <linux/stddef.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/errno.h> -#include <linux/reboot.h> -#include <linux/pci.h> -#include <linux/kdev_t.h> -#include <linux/major.h> -#include <linux/console.h> -#include <linux/delay.h> -#include <linux/seq_file.h> -#include <linux/root_dev.h> -#include <linux/of_platform.h> - -#include <linux/atomic.h> -#include <asm/time.h> -#include <asm/io.h> -#include <asm/machdep.h> -#include <asm/ipic.h> -#include <asm/irq.h> -#include <asm/prom.h> -#include <asm/udbg.h> -#include <sysdev/fsl_soc.h> -#include <sysdev/fsl_pci.h> - -#include "mpc83xx.h" - -/* ************************************************************************ - * - * Setup the architecture - * - */ -static void __init sbc834x_setup_arch(void) -{ - mpc83xx_setup_arch(); -} - -machine_device_initcall(sbc834x, mpc83xx_declare_of_platform_devices); - -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init sbc834x_probe(void) -{ - return of_machine_is_compatible("SBC834xE"); -} - -define_machine(sbc834x) { - .name = "SBC834xE", - .probe = sbc834x_probe, - .setup_arch = sbc834x_setup_arch, - .init_IRQ = mpc83xx_ipic_init_IRQ, - .get_irq = ipic_get_irq, - .restart = mpc83xx_restart, - .time_init = mpc83xx_time_init, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, -}; diff --git a/arch/powerpc/platforms/8xx/adder875.c b/arch/powerpc/platforms/8xx/adder875.c index 333dece79394..bcef9f66191e 100644 --- a/arch/powerpc/platforms/8xx/adder875.c +++ b/arch/powerpc/platforms/8xx/adder875.c @@ -111,7 +111,5 @@ define_machine(adder875) { .get_irq = mpc8xx_get_irq, .restart = mpc8xx_restart, .calibrate_decr = generic_calibrate_decr, - .set_rtc_time = mpc8xx_set_rtc_time, - .get_rtc_time = mpc8xx_get_rtc_time, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/8xx/ep88xc.c b/arch/powerpc/platforms/8xx/ep88xc.c index cd0d90f1fb1c..ebcf34a14789 100644 --- a/arch/powerpc/platforms/8xx/ep88xc.c +++ b/arch/powerpc/platforms/8xx/ep88xc.c @@ -170,7 +170,5 @@ define_machine(ep88xc) { .get_irq = mpc8xx_get_irq, .restart = mpc8xx_restart, .calibrate_decr = mpc8xx_calibrate_decr, - .set_rtc_time = mpc8xx_set_rtc_time, - .get_rtc_time = mpc8xx_get_rtc_time, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c index 2188d691a40f..027c42d8966c 100644 --- a/arch/powerpc/platforms/8xx/m8xx_setup.c +++ b/arch/powerpc/platforms/8xx/m8xx_setup.c @@ -169,15 +169,14 @@ int mpc8xx_set_rtc_time(struct rtc_time *tm) { sitk8xx_t __iomem *sys_tmr1; sit8xx_t __iomem *sys_tmr2; - int time; + time64_t time; sys_tmr1 = immr_map(im_sitk); sys_tmr2 = immr_map(im_sit); - time = mktime(tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday, - tm->tm_hour, tm->tm_min, tm->tm_sec); + time = rtc_tm_to_time64(tm); out_be32(&sys_tmr1->sitk_rtck, KAPWR_KEY); - out_be32(&sys_tmr2->sit_rtc, time); + out_be32(&sys_tmr2->sit_rtc, (u32)time); out_be32(&sys_tmr1->sitk_rtck, ~KAPWR_KEY); immr_unmap(sys_tmr2); @@ -192,9 +191,7 @@ void mpc8xx_get_rtc_time(struct rtc_time *tm) /* Get time from the RTC. 
*/ data = in_be32(&sys_tmr->sit_rtc); - to_tm(data, tm); - tm->tm_year -= 1900; - tm->tm_mon -= 1; + rtc_time64_to_tm(data, tm); immr_unmap(sys_tmr); return; } diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c index e821a42d5816..a0c83c1905c6 100644 --- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c +++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c @@ -220,7 +220,5 @@ define_machine(mpc885_ads) { .get_irq = mpc8xx_get_irq, .restart = mpc8xx_restart, .calibrate_decr = mpc8xx_calibrate_decr, - .set_rtc_time = mpc8xx_set_rtc_time, - .get_rtc_time = mpc8xx_get_rtc_time, .progress = udbg_progress, }; diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 67d3125d0610..e6a1de521319 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -222,6 +222,7 @@ config PTE_64BIT config PHYS_64BIT bool 'Large physical address support' if E500 || PPC_86xx depends on (44x || E500 || PPC_86xx) && !PPC_83xx && !PPC_82xx + select PHYS_ADDR_T_64BIT ---help--- This option enables kernel support for larger than 32-bit physical addresses. This feature may not be available on all cores. @@ -292,6 +293,10 @@ config PPC_STD_MMU_32 def_bool y depends on PPC_STD_MMU && PPC32 +config ARCH_ENABLE_SPLIT_PMD_PTLOCK + def_bool y + depends on PPC_BOOK3S_64 + config PPC_RADIX_MMU bool "Radix MMU Support" depends on PPC_BOOK3S_64 diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c index a494028b2cdf..8ae86200ef6c 100644 --- a/arch/powerpc/platforms/cell/spu_callbacks.c +++ b/arch/powerpc/platforms/cell/spu_callbacks.c @@ -44,7 +44,7 @@ static void *spu_syscall_table[] = { #define SYSCALL_SPU(func) sys_##func, #define COMPAT_SYS_SPU(func) sys_##func, -#define PPC_SYS_SPU(func) ppc_##func, +#define COMPAT_SPU_NEW(func) sys_##func, #define SYSX_SPU(f, f3264, f32) f, #include <asm/systbl.h> diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c index 5e6e0bad6db6..263413a34823 100644 --- a/arch/powerpc/platforms/cell/spu_syscalls.c +++ b/arch/powerpc/platforms/cell/spu_syscalls.c @@ -26,6 +26,7 @@ #include <linux/syscalls.h> #include <linux/rcupdate.h> #include <linux/binfmts.h> +#include <linux/syscalls.h> #include <asm/spu.h> @@ -90,7 +91,7 @@ SYSCALL_DEFINE4(spu_create, const char __user *, name, unsigned int, flags, return ret; } -asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus) +SYSCALL_DEFINE3(spu_run,int, fd, __u32 __user *, unpc, __u32 __user *, ustatus) { long ret; struct fd arg; diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c index 870c0a82d560..1e002e94d0f6 100644 --- a/arch/powerpc/platforms/cell/spufs/fault.c +++ b/arch/powerpc/platforms/cell/spufs/fault.c @@ -44,7 +44,7 @@ static void spufs_handle_event(struct spu_context *ctx, return; } - memset(&info, 0, sizeof(info)); + clear_siginfo(&info); switch (type) { case SPE_EVENT_INVALID_DMA: diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 469bdd0b748f..43e7b93f27c7 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -232,12 +232,13 @@ spufs_mem_write(struct file *file, const char __user *buffer, return size; } -static int +static vm_fault_t spufs_mem_mmap_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct spu_context *ctx = 
vma->vm_file->private_data; unsigned long pfn, offset; + vm_fault_t ret; offset = vmf->pgoff << PAGE_SHIFT; if (offset >= LS_SIZE) @@ -256,11 +257,11 @@ spufs_mem_mmap_fault(struct vm_fault *vmf) vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot); pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT; } - vm_insert_pfn(vma, vmf->address, pfn); + ret = vmf_insert_pfn(vma, vmf->address, pfn); spu_release(ctx); - return VM_FAULT_NOPAGE; + return ret; } static int spufs_mem_mmap_access(struct vm_area_struct *vma, @@ -312,13 +313,14 @@ static const struct file_operations spufs_mem_fops = { .mmap = spufs_mem_mmap, }; -static int spufs_ps_fault(struct vm_fault *vmf, +static vm_fault_t spufs_ps_fault(struct vm_fault *vmf, unsigned long ps_offs, unsigned long ps_size) { struct spu_context *ctx = vmf->vma->vm_file->private_data; unsigned long area, offset = vmf->pgoff << PAGE_SHIFT; - int ret = 0; + int err = 0; + vm_fault_t ret = VM_FAULT_NOPAGE; spu_context_nospu_trace(spufs_ps_fault__enter, ctx); @@ -349,25 +351,26 @@ static int spufs_ps_fault(struct vm_fault *vmf, if (ctx->state == SPU_STATE_SAVED) { up_read(¤t->mm->mmap_sem); spu_context_nospu_trace(spufs_ps_fault__sleep, ctx); - ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE); + err = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE); spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu); down_read(¤t->mm->mmap_sem); } else { area = ctx->spu->problem_phys + ps_offs; - vm_insert_pfn(vmf->vma, vmf->address, (area + offset) >> PAGE_SHIFT); + ret = vmf_insert_pfn(vmf->vma, vmf->address, + (area + offset) >> PAGE_SHIFT); spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu); } - if (!ret) + if (!err) spu_release(ctx); refault: put_spu_context(ctx); - return VM_FAULT_NOPAGE; + return ret; } #if SPUFS_MMAP_4K -static int spufs_cntl_mmap_fault(struct vm_fault *vmf) +static vm_fault_t spufs_cntl_mmap_fault(struct vm_fault *vmf) { return spufs_ps_fault(vmf, 0x4000, SPUFS_CNTL_MAP_SIZE); } @@ -1040,7 +1043,7 @@ static ssize_t spufs_signal1_write(struct file *file, const char __user *buf, return 4; } -static int +static vm_fault_t spufs_signal1_mmap_fault(struct vm_fault *vmf) { #if SPUFS_SIGNAL_MAP_SIZE == 0x1000 @@ -1178,7 +1181,7 @@ static ssize_t spufs_signal2_write(struct file *file, const char __user *buf, } #if SPUFS_MMAP_4K -static int +static vm_fault_t spufs_signal2_mmap_fault(struct vm_fault *vmf) { #if SPUFS_SIGNAL_MAP_SIZE == 0x1000 @@ -1307,7 +1310,7 @@ DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get, spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE); #if SPUFS_MMAP_4K -static int +static vm_fault_t spufs_mss_mmap_fault(struct vm_fault *vmf) { return spufs_ps_fault(vmf, 0x0000, SPUFS_MSS_MAP_SIZE); @@ -1369,7 +1372,7 @@ static const struct file_operations spufs_mss_fops = { .llseek = no_llseek, }; -static int +static vm_fault_t spufs_psmap_mmap_fault(struct vm_fault *vmf) { return spufs_ps_fault(vmf, 0x0000, SPUFS_PS_MAP_SIZE); @@ -1429,7 +1432,7 @@ static const struct file_operations spufs_psmap_fops = { #if SPUFS_MMAP_4K -static int +static vm_fault_t spufs_mfc_mmap_fault(struct vm_fault *vmf) { return spufs_ps_fault(vmf, 0x3000, SPUFS_MFC_MAP_SIZE); diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index ccc421503363..c9ef3c532169 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -1095,18 +1095,6 @@ static int show_spu_loadavg(struct seq_file *s, void *private) 
atomic_read(&nr_spu_contexts), idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); return 0; -} - -static int spu_loadavg_open(struct inode *inode, struct file *file) -{ - return single_open(file, show_spu_loadavg, NULL); -} - -static const struct file_operations spu_loadavg_fops = { - .open = spu_loadavg_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, }; int __init spu_sched_init(void) @@ -1135,7 +1123,7 @@ int __init spu_sched_init(void) mod_timer(&spuloadavg_timer, 0); - entry = proc_create("spu_loadavg", 0, NULL, &spu_loadavg_fops); + entry = proc_create_single("spu_loadavg", 0, NULL, show_spu_loadavg); if (!entry) goto out_stop_kthread; diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c index 0f512d35f7c5..5ddb57b82921 100644 --- a/arch/powerpc/platforms/chrp/pci.c +++ b/arch/powerpc/platforms/chrp/pci.c @@ -31,7 +31,7 @@ void __iomem *gg2_pci_config_base; * limit the bus number to 3 bits */ -int gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off, +static int gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off, int len, u32 *val) { volatile void __iomem *cfg_data; @@ -58,7 +58,7 @@ int gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off, return PCIBIOS_SUCCESSFUL; } -int gg2_write_config(struct pci_bus *bus, unsigned int devfn, int off, +static int gg2_write_config(struct pci_bus *bus, unsigned int devfn, int off, int len, u32 val) { volatile void __iomem *cfg_data; @@ -94,8 +94,8 @@ static struct pci_ops gg2_pci_ops = /* * Access functions for PCI config space using RTAS calls. */ -int rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset, - int len, u32 *val) +static int rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset, + int len, u32 *val) { struct pci_controller *hose = pci_bus_to_host(bus); unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8) @@ -109,8 +109,8 @@ int rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset, return rval? 
PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL; } -int rtas_write_config(struct pci_bus *bus, unsigned int devfn, int offset, - int len, u32 val) +static int rtas_write_config(struct pci_bus *bus, unsigned int devfn, int offset, + int len, u32 val) { struct pci_controller *hose = pci_bus_to_host(bus); unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8) diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index 481ed133e04b..d6d8ffc0271e 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -94,7 +94,7 @@ static const char *chrp_names[] = { "Total Impact Briq" }; -void chrp_show_cpuinfo(struct seq_file *m) +static void chrp_show_cpuinfo(struct seq_file *m) { int i, sdramen; unsigned int t; @@ -299,7 +299,7 @@ out_put: of_node_put(node); } -void __init chrp_setup_arch(void) +static void __init chrp_setup_arch(void) { struct device_node *root = of_find_node_by_path("/"); const char *machine = NULL; @@ -382,7 +382,7 @@ static void __init chrp_find_openpic(void) { struct device_node *np, *root; int len, i, j; - int isu_size, idu_size; + int isu_size; const unsigned int *iranges, *opprop = NULL; int oplen = 0; unsigned long opaddr; @@ -427,11 +427,9 @@ static void __init chrp_find_openpic(void) } isu_size = 0; - idu_size = 0; if (len > 0 && iranges[1] != 0) { printk(KERN_INFO "OpenPIC irqs %d..%d in IDU\n", iranges[0], iranges[0] + iranges[1] - 1); - idu_size = iranges[1]; } if (len > 1) isu_size = iranges[3]; @@ -523,7 +521,7 @@ static void __init chrp_find_8259(void) } } -void __init chrp_init_IRQ(void) +static void __init chrp_init_IRQ(void) { #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_XMON) struct device_node *kbd; @@ -555,7 +553,7 @@ void __init chrp_init_IRQ(void) #endif } -void __init +static void __init chrp_init2(void) { #ifdef CONFIG_NVRAM diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c index 03d115aaa191..acde7bbe0716 100644 --- a/arch/powerpc/platforms/chrp/time.c +++ b/arch/powerpc/platforms/chrp/time.c @@ -28,6 +28,8 @@ #include <asm/sections.h> #include <asm/time.h> +#include <platforms/chrp/chrp.h> + extern spinlock_t rtc_lock; #define NVRAM_AS0 0x74 @@ -63,7 +65,7 @@ long __init chrp_time_init(void) return 0; } -int chrp_cmos_clock_read(int addr) +static int chrp_cmos_clock_read(int addr) { if (nvram_as1 != 0) outb(addr>>8, nvram_as1); @@ -71,7 +73,7 @@ int chrp_cmos_clock_read(int addr) return (inb(nvram_data)); } -void chrp_cmos_clock_write(unsigned long val, int addr) +static void chrp_cmos_clock_write(unsigned long val, int addr) { if (nvram_as1 != 0) outb(addr>>8, nvram_as1); diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig index 9fb2d5912c5a..8ea16db5ff48 100644 --- a/arch/powerpc/platforms/embedded6xx/Kconfig +++ b/arch/powerpc/platforms/embedded6xx/Kconfig @@ -48,16 +48,6 @@ config PPC_HOLLY Select PPC_HOLLY if configuring for an IBM 750GX/CL Eval Board with TSI108/9 bridge (Hickory/Holly) -config PPC_C2K - bool "SBS/GEFanuc C2K board" - depends on EMBEDDED6xx - select MV64X60 - select NOT_COHERENT_CACHE - select MTD_CFI_I4 - help - This option enables support for the GE Fanuc C2K board (formerly - an SBS board). 
- config MVME5100 bool "Motorola/Emerson MVME5100" depends on EMBEDDED6xx diff --git a/arch/powerpc/platforms/embedded6xx/Makefile b/arch/powerpc/platforms/embedded6xx/Makefile index 12154e3257ad..e656ae9f23c6 100644 --- a/arch/powerpc/platforms/embedded6xx/Makefile +++ b/arch/powerpc/platforms/embedded6xx/Makefile @@ -6,7 +6,6 @@ obj-$(CONFIG_MPC7448HPC2) += mpc7448_hpc2.o obj-$(CONFIG_LINKSTATION) += linkstation.o ls_uart.o obj-$(CONFIG_STORCENTER) += storcenter.o obj-$(CONFIG_PPC_HOLLY) += holly.o -obj-$(CONFIG_PPC_C2K) += c2k.o obj-$(CONFIG_USBGECKO_UDBG) += usbgecko_udbg.o obj-$(CONFIG_GAMECUBE_COMMON) += flipper-pic.o obj-$(CONFIG_GAMECUBE) += gamecube.o diff --git a/arch/powerpc/platforms/embedded6xx/c2k.c b/arch/powerpc/platforms/embedded6xx/c2k.c deleted file mode 100644 index d19e4e759597..000000000000 --- a/arch/powerpc/platforms/embedded6xx/c2k.c +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Board setup routines for the GEFanuc C2K board - * - * Author: Remi Machet <rmachet@slac.stanford.edu> - * - * Originated from prpmc2800.c - * - * 2008 (c) Stanford University - * 2007 (c) MontaVista, Software, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#include <linux/stddef.h> -#include <linux/kernel.h> -#include <linux/delay.h> -#include <linux/interrupt.h> -#include <linux/seq_file.h> -#include <linux/time.h> -#include <linux/of.h> - -#include <asm/machdep.h> -#include <asm/prom.h> -#include <asm/time.h> - -#include <mm/mmu_decl.h> - -#include <sysdev/mv64x60.h> - -#define MV64x60_MPP_CNTL_0 0x0000 -#define MV64x60_MPP_CNTL_2 0x0008 - -#define MV64x60_GPP_IO_CNTL 0x0000 -#define MV64x60_GPP_LEVEL_CNTL 0x0010 -#define MV64x60_GPP_VALUE_SET 0x0018 - -static void __iomem *mv64x60_mpp_reg_base; -static void __iomem *mv64x60_gpp_reg_base; - -static void __init c2k_setup_arch(void) -{ - struct device_node *np; - phys_addr_t paddr; - const unsigned int *reg; - - /* - * ioremap mpp and gpp registers in case they are later - * needed by c2k_reset_board(). 
- */ - np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-mpp"); - reg = of_get_property(np, "reg", NULL); - paddr = of_translate_address(np, reg); - of_node_put(np); - mv64x60_mpp_reg_base = ioremap(paddr, reg[1]); - - np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-gpp"); - reg = of_get_property(np, "reg", NULL); - paddr = of_translate_address(np, reg); - of_node_put(np); - mv64x60_gpp_reg_base = ioremap(paddr, reg[1]); - -#ifdef CONFIG_PCI - mv64x60_pci_init(); -#endif -} - -static void c2k_reset_board(void) -{ - u32 temp; - - local_irq_disable(); - - temp = in_le32(mv64x60_mpp_reg_base + MV64x60_MPP_CNTL_0); - temp &= 0xFFFF0FFF; - out_le32(mv64x60_mpp_reg_base + MV64x60_MPP_CNTL_0, temp); - - temp = in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_LEVEL_CNTL); - temp |= 0x00000004; - out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_LEVEL_CNTL, temp); - - temp = in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_IO_CNTL); - temp |= 0x00000004; - out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_IO_CNTL, temp); - - temp = in_le32(mv64x60_mpp_reg_base + MV64x60_MPP_CNTL_2); - temp &= 0xFFFF0FFF; - out_le32(mv64x60_mpp_reg_base + MV64x60_MPP_CNTL_2, temp); - - temp = in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_LEVEL_CNTL); - temp |= 0x00080000; - out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_LEVEL_CNTL, temp); - - temp = in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_IO_CNTL); - temp |= 0x00080000; - out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_IO_CNTL, temp); - - out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_VALUE_SET, 0x00080004); -} - -static void __noreturn c2k_restart(char *cmd) -{ - c2k_reset_board(); - msleep(100); - panic("restart failed\n"); -} - -#ifdef CONFIG_NOT_COHERENT_CACHE -#define COHERENCY_SETTING "off" -#else -#define COHERENCY_SETTING "on" -#endif - -void c2k_show_cpuinfo(struct seq_file *m) -{ - seq_printf(m, "Vendor\t\t: GEFanuc\n"); - seq_printf(m, "coherency\t: %s\n", COHERENCY_SETTING); -} - -/* - * Called very early, device-tree isn't unflattened - */ -static int __init c2k_probe(void) -{ - if (!of_machine_is_compatible("GEFanuc,C2K")) - return 0; - - printk(KERN_INFO "Detected a GEFanuc C2K board\n"); - - _set_L2CR(0); - _set_L2CR(L2CR_L2E | L2CR_L2PE | L2CR_L2I); - - mv64x60_init_early(); - - return 1; -} - -define_machine(c2k) { - .name = "C2K", - .probe = c2k_probe, - .setup_arch = c2k_setup_arch, - .show_cpuinfo = c2k_show_cpuinfo, - .init_IRQ = mv64x60_init_irq, - .get_irq = mv64x60_get_irq, - .restart = c2k_restart, - .calibrate_decr = generic_calibrate_decr, -}; diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c index 7206f3f573d4..db0be007fd06 100644 --- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c +++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c @@ -108,16 +108,8 @@ static int flipper_pic_map(struct irq_domain *h, unsigned int virq, return 0; } -static int flipper_pic_match(struct irq_domain *h, struct device_node *np, - enum irq_domain_bus_token bus_token) -{ - return 1; -} - - static const struct irq_domain_ops flipper_irq_domain_ops = { .map = flipper_pic_map, - .match = flipper_pic_match, }; /* diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index 89c54de88b7a..8112b39879d6 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c @@ -35,6 +35,8 @@ */ #define HW_BROADWAY_ICR 0x00 #define HW_BROADWAY_IMR 0x04 +#define HW_STARLET_ICR 0x08 +#define HW_STARLET_IMR 0x0c /* @@ -74,6 +76,9 
@@ static void hlwd_pic_unmask(struct irq_data *d) void __iomem *io_base = irq_data_get_irq_chip_data(d); setbits32(io_base + HW_BROADWAY_IMR, 1 << irq); + + /* Make sure the ARM (aka. Starlet) doesn't handle this interrupt. */ + clrbits32(io_base + HW_STARLET_IMR, 1 << irq); } @@ -155,7 +160,7 @@ static void __hlwd_quiesce(void __iomem *io_base) out_be32(io_base + HW_BROADWAY_ICR, 0xffffffff); } -struct irq_domain *hlwd_pic_init(struct device_node *np) +static struct irq_domain *hlwd_pic_init(struct device_node *np) { struct irq_domain *irq_domain; struct resource res; diff --git a/arch/powerpc/platforms/maple/maple.h b/arch/powerpc/platforms/maple/maple.h index d10f4af3a42e..4f358b55c341 100644 --- a/arch/powerpc/platforms/maple/maple.h +++ b/arch/powerpc/platforms/maple/maple.h @@ -6,7 +6,7 @@ */ extern int maple_set_rtc_time(struct rtc_time *tm); extern void maple_get_rtc_time(struct rtc_time *tm); -extern unsigned long maple_get_boot_time(void); +extern time64_t maple_get_boot_time(void); extern void maple_calibrate_decr(void); extern void maple_pci_init(void); extern void maple_pci_irq_fixup(struct pci_dev *dev); diff --git a/arch/powerpc/platforms/maple/time.c b/arch/powerpc/platforms/maple/time.c index cfddc87f81bf..becf2ebf7df5 100644 --- a/arch/powerpc/platforms/maple/time.c +++ b/arch/powerpc/platforms/maple/time.c @@ -137,7 +137,7 @@ static struct resource rtc_iores = { .flags = IORESOURCE_IO | IORESOURCE_BUSY, }; -unsigned long __init maple_get_boot_time(void) +time64_t __init maple_get_boot_time(void) { struct rtc_time tm; struct device_node *rtcs; @@ -170,7 +170,6 @@ unsigned long __init maple_get_boot_time(void) request_resource(&ioport_resource, &rtc_iores); maple_get_rtc_time(&tm); - return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, - tm.tm_hour, tm.tm_min, tm.tm_sec); + return rtc_tm_to_time64(&tm); } diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h index 329d2a619254..70b56048ed1b 100644 --- a/arch/powerpc/platforms/pasemi/pasemi.h +++ b/arch/powerpc/platforms/pasemi/pasemi.h @@ -2,7 +2,7 @@ #ifndef _PASEMI_PASEMI_H #define _PASEMI_PASEMI_H -extern unsigned long pas_get_boot_time(void); +extern time64_t pas_get_boot_time(void); extern void pas_pci_init(void); extern void pas_pci_irq_fixup(struct pci_dev *dev); extern void pas_pci_dma_dev_setup(struct pci_dev *dev); diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c index 5ff6108f19e9..aea9ff2c8e6d 100644 --- a/arch/powerpc/platforms/pasemi/pci.c +++ b/arch/powerpc/platforms/pasemi/pci.c @@ -224,6 +224,8 @@ void __init pas_pci_init(void) return; } + pci_set_flags(PCI_SCAN_ALL_PCIE_DEVS); + for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) if (np->name && !strcmp(np->name, "pxp") && !pas_add_bridge(np)) of_node_get(np); diff --git a/arch/powerpc/platforms/pasemi/time.c b/arch/powerpc/platforms/pasemi/time.c index fa54351ac268..ea815254ee7b 100644 --- a/arch/powerpc/platforms/pasemi/time.c +++ b/arch/powerpc/platforms/pasemi/time.c @@ -21,8 +21,8 @@ #include <asm/time.h> -unsigned long __init pas_get_boot_time(void) +time64_t __init pas_get_boot_time(void) { /* Let's just return a fake date right now */ - return mktime(2006, 1, 1, 12, 0, 0); + return mktime64(2006, 1, 1, 12, 0, 0); } diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c index c3c9bbb3573a..3b3b0b9b3577 100644 --- a/arch/powerpc/platforms/powermac/bootx_init.c +++ b/arch/powerpc/platforms/powermac/bootx_init.c 
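
The maple and pasemi boot-time changes above follow one pattern: return time64_t and let the RTC helpers do the epoch math instead of open-coding mktime(). rtc_tm_to_time64() and mktime64() operate on 64-bit seconds, so the result does not wrap in 2038 on 32-bit builds. A minimal sketch of the idiom, assuming a hypothetical read_hw_rtc() readout:

    #include <linux/rtc.h>

    /* read_hw_rtc() is a stand-in for a platform RTC readout. */
    static void read_hw_rtc(struct rtc_time *tm);

    static time64_t example_get_boot_time(void)
    {
            struct rtc_time tm;

            read_hw_rtc(&tm);
            return rtc_tm_to_time64(&tm);   /* 64-bit seconds since epoch */
    }
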
@@ -468,7 +468,7 @@ void __init bootx_init(unsigned long r3, unsigned long r4) boot_infos_t *bi = (boot_infos_t *) r4; unsigned long hdr; unsigned long space; - unsigned long ptr, x; + unsigned long ptr; char *model; unsigned long offset = reloc_offset(); @@ -519,7 +519,7 @@ void __init bootx_init(unsigned long r3, unsigned long r4) ; } if (bi->architecture != BOOT_ARCH_PCI) { - bootx_printf(" !!! WARNING - Usupported machine" + bootx_printf(" !!! WARNING - Unsupported machine" " architecture !\n"); for (;;) ; @@ -562,6 +562,8 @@ void __init bootx_init(unsigned long r3, unsigned long r4) * MMU switched OFF, so this should not be useful anymore. */ if (bi->version < 4) { + unsigned long x __maybe_unused; + bootx_printf("Touching pages...\n"); /* diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index 0b8174a79993..df762bb3c735 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c @@ -62,7 +62,7 @@ struct device_node *k2_skiplist[2]; static int __init fixup_one_level_bus_range(struct device_node *node, int higher) { - for (; node != 0;node = node->sibling) { + for (; node; node = node->sibling) { const int * bus_range; const unsigned int *class_code; int len; @@ -1219,7 +1219,7 @@ static void fixup_u4_pcie(struct pci_dev* dev) region = r; } /* Nothing found, bail */ - if (region == 0) + if (!region) return; /* Print things out */ diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h index 6f15b8804e9b..16a52afdb76e 100644 --- a/arch/powerpc/platforms/powermac/pmac.h +++ b/arch/powerpc/platforms/powermac/pmac.h @@ -15,7 +15,7 @@ struct rtc_time; extern int pmac_newworld; extern long pmac_time_init(void); -extern unsigned long pmac_get_boot_time(void); +extern time64_t pmac_get_boot_time(void); extern void pmac_get_rtc_time(struct rtc_time *); extern int pmac_set_rtc_time(struct rtc_time *); extern void pmac_read_rtc_time(void); diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index ab668cb72263..3a529fcdae97 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -152,7 +152,7 @@ static void pmac_show_cpuinfo(struct seq_file *m) of_get_property(np, "d-cache-size", NULL); seq_printf(m, "L2 cache\t:"); has_l2cache = 1; - if (of_get_property(np, "cache-unified", NULL) != 0 && dc) { + if (of_get_property(np, "cache-unified", NULL) && dc) { seq_printf(m, " %dK unified", *dc / 1024); } else { if (ic) @@ -244,12 +244,12 @@ static void __init l2cr_init(void) /* Checks "l2cr-value" property in the registry */ if (cpu_has_feature(CPU_FTR_L2CR)) { struct device_node *np = of_find_node_by_name(NULL, "cpus"); - if (np == 0) + if (!np) np = of_find_node_by_type(NULL, "cpu"); - if (np != 0) { + if (np) { const unsigned int *l2cr = of_get_property(np, "l2cr-value", NULL); - if (l2cr != 0) { + if (l2cr) { ppc_override_l2cr = 1; ppc_override_l2cr_value = *l2cr; _set_L2CR(0); @@ -352,6 +352,7 @@ static int pmac_late_init(void) } machine_late_initcall(powermac, pmac_late_init); +void note_bootable_part(dev_t dev, int part, int goodness); /* * This is __ref because we check for "initializing" before * touching any of the __init sensitive things and "initializing" diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index 95275e0e2efa..447da6db450a 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -65,7 +65,6 @@ #endif 
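
Several powermac hunks above replace explicit comparisons of pointers against 0 with plain boolean tests, the form kernel coding style prefers: the two spellings are equivalent, but the short form avoids treating a pointer as an integer. A sketch using the same device-tree call the patch touches:

    struct device_node *np = of_find_node_by_name(NULL, "cpus");

    if (!np)                        /* rather than: if (np == 0) */
            return -ENODEV;
    /* ... use np ... */
    of_node_put(np);
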
extern void __secondary_start_pmac_0(void); -extern int pmac_pfunc_base_install(void); static void (*pmac_tb_freeze)(int freeze); static u64 timebase; diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c index 274af6fa388e..7c968e46736f 100644 --- a/arch/powerpc/platforms/powermac/time.c +++ b/arch/powerpc/platforms/powermac/time.c @@ -84,29 +84,11 @@ long __init pmac_time_init(void) return delta; } -#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU) -static void to_rtc_time(unsigned long now, struct rtc_time *tm) -{ - to_tm(now, tm); - tm->tm_year -= 1900; - tm->tm_mon -= 1; -} -#endif - -#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU) || \ - defined(CONFIG_PMAC_SMU) -static unsigned long from_rtc_time(struct rtc_time *tm) -{ - return mktime(tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday, - tm->tm_hour, tm->tm_min, tm->tm_sec); -} -#endif - #ifdef CONFIG_ADB_CUDA -static unsigned long cuda_get_time(void) +static time64_t cuda_get_time(void) { struct adb_request req; - unsigned int now; + time64_t now; if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0) return 0; @@ -117,17 +99,17 @@ static unsigned long cuda_get_time(void) req.reply_len); now = (req.reply[3] << 24) + (req.reply[4] << 16) + (req.reply[5] << 8) + req.reply[6]; - return ((unsigned long)now) - RTC_OFFSET; + return now - RTC_OFFSET; } -#define cuda_get_rtc_time(tm) to_rtc_time(cuda_get_time(), (tm)) +#define cuda_get_rtc_time(tm) rtc_time64_to_tm(cuda_get_time(), (tm)) static int cuda_set_rtc_time(struct rtc_time *tm) { - unsigned int nowtime; + time64_t nowtime; struct adb_request req; - nowtime = from_rtc_time(tm) + RTC_OFFSET; + nowtime = rtc_tm_to_time64(tm) + RTC_OFFSET; if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME, nowtime >> 24, nowtime >> 16, nowtime >> 8, nowtime) < 0) @@ -147,10 +129,10 @@ static int cuda_set_rtc_time(struct rtc_time *tm) #endif #ifdef CONFIG_ADB_PMU -static unsigned long pmu_get_time(void) +static time64_t pmu_get_time(void) { struct adb_request req; - unsigned int now; + time64_t now; if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0) return 0; @@ -160,17 +142,17 @@ static unsigned long pmu_get_time(void) req.reply_len); now = (req.reply[0] << 24) + (req.reply[1] << 16) + (req.reply[2] << 8) + req.reply[3]; - return ((unsigned long)now) - RTC_OFFSET; + return now - RTC_OFFSET; } -#define pmu_get_rtc_time(tm) to_rtc_time(pmu_get_time(), (tm)) +#define pmu_get_rtc_time(tm) rtc_time64_to_tm(pmu_get_time(), (tm)) static int pmu_set_rtc_time(struct rtc_time *tm) { - unsigned int nowtime; + time64_t nowtime; struct adb_request req; - nowtime = from_rtc_time(tm) + RTC_OFFSET; + nowtime = rtc_tm_to_time64(tm) + RTC_OFFSET; if (pmu_request(&req, NULL, 5, PMU_SET_RTC, nowtime >> 24, nowtime >> 16, nowtime >> 8, nowtime) < 0) return -ENXIO; @@ -188,13 +170,13 @@ static int pmu_set_rtc_time(struct rtc_time *tm) #endif #ifdef CONFIG_PMAC_SMU -static unsigned long smu_get_time(void) +static time64_t smu_get_time(void) { struct rtc_time tm; if (smu_get_rtc_time(&tm, 1)) return 0; - return from_rtc_time(&tm); + return rtc_tm_to_time64(&tm); } #else @@ -204,7 +186,7 @@ static unsigned long smu_get_time(void) #endif /* Can't be __init, it's called when suspending and resuming */ -unsigned long pmac_get_boot_time(void) +time64_t pmac_get_boot_time(void) { /* Get the time from the RTC, used only at boot time */ switch (sys_ctrler) { diff --git a/arch/powerpc/platforms/powernv/copy-paste.h b/arch/powerpc/platforms/powernv/copy-paste.h index 
c9a503623431..cb36f9fbcef3 100644 --- a/arch/powerpc/platforms/powernv/copy-paste.h +++ b/arch/powerpc/platforms/powernv/copy-paste.h @@ -7,9 +7,8 @@ * 2 of the License, or (at your option) any later version. */ #include <asm/ppc-opcode.h> +#include <asm/reg.h> -#define CR0_SHIFT 28 -#define CR0_MASK 0xF /* * Copy/paste instructions: * @@ -42,5 +41,6 @@ static inline int vas_paste(void *paste_address, int offset) : "b" (offset), "b" (paste_address) : "memory", "cr0"); - return (cr >> CR0_SHIFT) & CR0_MASK; + /* We mask with 0xE to ignore SO */ + return (cr >> CR0_SHIFT) & 0xE; } diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 1f12ab1e6030..1c5d0675b43c 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -79,7 +79,7 @@ static int pnv_save_sprs_for_deep_states(void) uint64_t msr_val = MSR_IDLE; uint64_t psscr_val = pnv_deepest_stop_psscr_val; - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { uint64_t pir = get_hard_smp_processor_id(cpu); uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu]; @@ -814,7 +814,7 @@ static int __init pnv_init_idle_states(void) int cpu; pr_info("powernv: idle: Saving PACA pointers of all CPUs in their thread sibling PACA\n"); - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { int base_cpu = cpu_first_thread_sibling(cpu); int idx = cpu_thread_in_core(cpu); int i; diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c index fc222a0c2ac4..b99283df8584 100644 --- a/arch/powerpc/platforms/powernv/memtrace.c +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -131,7 +131,7 @@ static u64 memtrace_alloc_node(u32 nid, u64 size) u64 start_pfn, end_pfn, nr_pages; u64 base_pfn; - if (!NODE_DATA(nid) || !node_spanned_pages(nid)) + if (!node_spanned_pages(nid)) return 0; start_pfn = node_start_pfn(nid); diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 525e966dce34..8cdf91f5d3a4 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -459,10 +459,9 @@ static void mmio_launch_invalidate(struct mmio_atsd_reg *mmio_atsd_reg, struct npu *npu = mmio_atsd_reg->npu; int reg = mmio_atsd_reg->reg; - __raw_writeq(cpu_to_be64(va), - npu->mmio_atsd_regs[reg] + XTS_ATSD_AVA); + __raw_writeq_be(va, npu->mmio_atsd_regs[reg] + XTS_ATSD_AVA); eieio(); - __raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[reg]); + __raw_writeq_be(launch, npu->mmio_atsd_regs[reg]); } static void mmio_invalidate_pid(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], diff --git a/arch/powerpc/platforms/powernv/ocxl.c b/arch/powerpc/platforms/powernv/ocxl.c index fa9b53af3c7b..8c65aacda9c8 100644 --- a/arch/powerpc/platforms/powernv/ocxl.c +++ b/arch/powerpc/platforms/powernv/ocxl.c @@ -475,7 +475,7 @@ void pnv_ocxl_spa_release(void *platform_data) } EXPORT_SYMBOL_GPL(pnv_ocxl_spa_release); -int pnv_ocxl_spa_remove_pe(void *platform_data, int pe_handle) +int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle) { struct spa_data *data = (struct spa_data *) platform_data; int rc; @@ -483,7 +483,7 @@ int pnv_ocxl_spa_remove_pe(void *platform_data, int pe_handle) rc = opal_npu_spa_clear_cache(data->phb_opal_id, data->bdfn, pe_handle); return rc; } -EXPORT_SYMBOL_GPL(pnv_ocxl_spa_remove_pe); +EXPORT_SYMBOL_GPL(pnv_ocxl_spa_remove_pe_from_cache); int pnv_ocxl_alloc_xive_irq(u32 *irq, u64 *trigger_addr) { diff --git 
a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c index 4efc95b4c7d4..586ec71a4e17 100644 --- a/arch/powerpc/platforms/powernv/opal-hmi.c +++ b/arch/powerpc/platforms/powernv/opal-hmi.c @@ -177,7 +177,7 @@ static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt) "Processor recovery occurred for masked error", "Timer facility experienced an error", "TFMR SPR is corrupted", - "UPS (Uniterrupted Power System) Overflow indication", + "UPS (Uninterrupted Power System) Overflow indication", "An XSCOM operation failure", "An XSCOM operation completed", "SCOM has set a reserved FIR bit to cause recovery", diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c index 2a14fda5ea26..58a07948c76e 100644 --- a/arch/powerpc/platforms/powernv/opal-imc.c +++ b/arch/powerpc/platforms/powernv/opal-imc.c @@ -115,8 +115,10 @@ static int imc_get_mem_addr_nest(struct device_node *node, return -ENOMEM; chipid_arr = kcalloc(nr_chips, sizeof(*chipid_arr), GFP_KERNEL); - if (!chipid_arr) + if (!chipid_arr) { + kfree(base_addr_arr); return -ENOMEM; + } if (of_property_read_u32_array(node, "chip-id", chipid_arr, nr_chips)) goto error; @@ -143,7 +145,6 @@ static int imc_get_mem_addr_nest(struct device_node *node, return 0; error: - kfree(pmu_ptr->mem_info); kfree(base_addr_arr); kfree(chipid_arr); return -1; @@ -183,8 +184,14 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain) /* Function to register IMC pmu */ ret = init_imc_pmu(parent, pmu_ptr, pmu_index); - if (ret) + if (ret) { pr_err("IMC PMU %s Register failed\n", pmu_ptr->pmu.name); + kfree(pmu_ptr->pmu.name); + if (pmu_ptr->domain == IMC_DOMAIN_NEST) + kfree(pmu_ptr->mem_info); + kfree(pmu_ptr); + return ret; + } return 0; @@ -248,6 +255,7 @@ static int opal_imc_counters_probe(struct platform_device *pdev) { struct device_node *imc_dev = pdev->dev.of_node; int pmu_count = 0, domain; + bool core_imc_reg = false, thread_imc_reg = false; u32 type; /* @@ -285,6 +293,10 @@ static int opal_imc_counters_probe(struct platform_device *pdev) if (!imc_pmu_create(imc_dev, pmu_count, domain)) { if (domain == IMC_DOMAIN_NEST) pmu_count++; + if (domain == IMC_DOMAIN_CORE) + core_imc_reg = true; + if (domain == IMC_DOMAIN_THREAD) + thread_imc_reg = true; } } @@ -292,6 +304,10 @@ static int opal_imc_counters_probe(struct platform_device *pdev) if (pmu_count == 0) debugfs_remove_recursive(imc_debugfs_parent); + /* If core imc is not registered, unregister thread-imc */ + if (!core_imc_reg && thread_imc_reg) + unregister_thread_imc(); + return 0; } diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c index 9d1b8c0aaf93..605c7e5d52c2 100644 --- a/arch/powerpc/platforms/powernv/opal-irqchip.c +++ b/arch/powerpc/platforms/powernv/opal-irqchip.c @@ -22,7 +22,6 @@ #include <linux/kthread.h> #include <linux/delay.h> #include <linux/slab.h> -#include <linux/irq_work.h> #include <asm/machdep.h> #include <asm/opal.h> @@ -38,37 +37,47 @@ struct opal_event_irqchip { unsigned long mask; }; static struct opal_event_irqchip opal_event_irqchip; - +static u64 last_outstanding_events; static unsigned int opal_irq_count; static unsigned int *opal_irqs; -static void opal_handle_irq_work(struct irq_work *work); -static u64 last_outstanding_events; -static struct irq_work opal_event_irq_work = { - .func = opal_handle_irq_work, -}; - -void opal_handle_events(uint64_t events) +void opal_handle_events(void) { - int virq, hwirq = 0; 
- u64 mask = opal_event_irqchip.mask; + __be64 events = 0; + u64 e; + + e = READ_ONCE(last_outstanding_events) & opal_event_irqchip.mask; +again: + while (e) { + int virq, hwirq; + + hwirq = fls64(e) - 1; + e &= ~BIT_ULL(hwirq); + + local_irq_disable(); + virq = irq_find_mapping(opal_event_irqchip.domain, hwirq); + if (virq) { + irq_enter(); + generic_handle_irq(virq); + irq_exit(); + } + local_irq_enable(); - if (!in_irq() && (events & mask)) { - last_outstanding_events = events; - irq_work_queue(&opal_event_irq_work); - return; + cond_resched(); } + last_outstanding_events = 0; + if (opal_poll_events(&events) != OPAL_SUCCESS) + return; + e = be64_to_cpu(events) & opal_event_irqchip.mask; + if (e) + goto again; +} - while (events & mask) { - hwirq = fls64(events) - 1; - if (BIT_ULL(hwirq) & mask) { - virq = irq_find_mapping(opal_event_irqchip.domain, - hwirq); - if (virq) - generic_handle_irq(virq); - } - events &= ~BIT_ULL(hwirq); - } +bool opal_have_pending_events(void) +{ + if (last_outstanding_events & opal_event_irqchip.mask) + return true; + return false; } static void opal_event_mask(struct irq_data *d) @@ -78,24 +87,9 @@ static void opal_event_mask(struct irq_data *d) static void opal_event_unmask(struct irq_data *d) { - __be64 events; - set_bit(d->hwirq, &opal_event_irqchip.mask); - - opal_poll_events(&events); - last_outstanding_events = be64_to_cpu(events); - - /* - * We can't just handle the events now with opal_handle_events(). - * If we did we would deadlock when opal_event_unmask() is called from - * handle_level_irq() with the irq descriptor lock held, because - * calling opal_handle_events() would call generic_handle_irq() and - * then handle_level_irq() which would try to take the descriptor lock - * again. Instead queue the events for later. 
- */ - if (last_outstanding_events & opal_event_irqchip.mask) - /* Need to retrigger the interrupt */ - irq_work_queue(&opal_event_irq_work); + if (opal_have_pending_events()) + opal_wake_poller(); } static int opal_event_set_type(struct irq_data *d, unsigned int flow_type) @@ -136,16 +130,13 @@ static irqreturn_t opal_interrupt(int irq, void *data) __be64 events; opal_handle_interrupt(virq_to_hw(irq), &events); - opal_handle_events(be64_to_cpu(events)); + last_outstanding_events = be64_to_cpu(events); + if (opal_have_pending_events()) + opal_wake_poller(); return IRQ_HANDLED; } -static void opal_handle_irq_work(struct irq_work *work) -{ - opal_handle_events(last_outstanding_events); -} - static int opal_event_match(struct irq_domain *h, struct device_node *node, enum irq_domain_bus_token bus_token) { @@ -177,7 +168,7 @@ void opal_event_shutdown(void) if (!opal_irqs[i]) continue; - if (in_interrupt()) + if (in_interrupt() || irqs_disabled()) disable_irq_nosync(opal_irqs[i]); else free_irq(opal_irqs[i], NULL); diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c index aa2a5139462e..42ec642a3eba 100644 --- a/arch/powerpc/platforms/powernv/opal-rtc.c +++ b/arch/powerpc/platforms/powernv/opal-rtc.c @@ -34,7 +34,7 @@ static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm) tm->tm_wday = -1; } -unsigned long __init opal_get_boot_time(void) +time64_t __init opal_get_boot_time(void) { struct rtc_time tm; u32 y_m_d; @@ -61,8 +61,7 @@ unsigned long __init opal_get_boot_time(void) y_m_d = be32_to_cpu(__y_m_d); h_m_s_ms = be64_to_cpu(__h_m_s_ms); opal_to_tm(y_m_d, h_m_s_ms, &tm); - return mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, - tm.tm_hour, tm.tm_min, tm.tm_sec); + return rtc_tm_to_time64(&tm); } static __init int opal_time_init(void) diff --git a/arch/powerpc/platforms/powernv/opal-sensor.c b/arch/powerpc/platforms/powernv/opal-sensor.c index 0a7074bb91dc..35a5f4b9aeb5 100644 --- a/arch/powerpc/platforms/powernv/opal-sensor.c +++ b/arch/powerpc/platforms/powernv/opal-sensor.c @@ -72,6 +72,59 @@ out: } EXPORT_SYMBOL_GPL(opal_get_sensor_data); +int opal_get_sensor_data_u64(u32 sensor_hndl, u64 *sensor_data) +{ + int ret, token; + struct opal_msg msg; + __be64 data; + + if (!opal_check_token(OPAL_SENSOR_READ_U64)) { + u32 sdata; + + ret = opal_get_sensor_data(sensor_hndl, &sdata); + if (!ret) + *sensor_data = sdata; + return ret; + } + + token = opal_async_get_token_interruptible(); + if (token < 0) + return token; + + ret = opal_sensor_read_u64(sensor_hndl, token, &data); + switch (ret) { + case OPAL_ASYNC_COMPLETION: + ret = opal_async_wait_response(token, &msg); + if (ret) { + pr_err("%s: Failed to wait for the async response, %d\n", + __func__, ret); + goto out_token; + } + + ret = opal_error_code(opal_get_async_rc(msg)); + *sensor_data = be64_to_cpu(data); + break; + + case OPAL_SUCCESS: + ret = 0; + *sensor_data = be64_to_cpu(data); + break; + + case OPAL_WRONG_STATE: + ret = -EIO; + break; + + default: + ret = opal_error_code(ret); + break; + } + +out_token: + opal_async_release_token(token); + return ret; +} +EXPORT_SYMBOL_GPL(opal_get_sensor_data_u64); + int __init opal_sensor_init(void) { struct platform_device *pdev; diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index 3da30c2f26b4..a8d9b4089c31 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -320,8 +320,10 @@ OPAL_CALL(opal_set_powercap, 
OPAL_SET_POWERCAP); OPAL_CALL(opal_get_power_shift_ratio, OPAL_GET_POWER_SHIFT_RATIO); OPAL_CALL(opal_set_power_shift_ratio, OPAL_SET_POWER_SHIFT_RATIO); OPAL_CALL(opal_sensor_group_clear, OPAL_SENSOR_GROUP_CLEAR); +OPAL_CALL(opal_quiesce, OPAL_QUIESCE); OPAL_CALL(opal_npu_spa_setup, OPAL_NPU_SPA_SETUP); OPAL_CALL(opal_npu_spa_clear_cache, OPAL_NPU_SPA_CLEAR_CACHE); OPAL_CALL(opal_npu_tl_set, OPAL_NPU_TL_SET); OPAL_CALL(opal_pci_get_pbcq_tunnel_bar, OPAL_PCI_GET_PBCQ_TUNNEL_BAR); OPAL_CALL(opal_pci_set_pbcq_tunnel_bar, OPAL_PCI_SET_PBCQ_TUNNEL_BAR); +OPAL_CALL(opal_sensor_read_u64, OPAL_SENSOR_READ_U64); diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 48fbb41af5d1..0d539c661748 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -540,21 +540,15 @@ int opal_hmi_exception_early(struct pt_regs *regs) /* HMI exception handler called in virtual mode during check_irq_replay. */ int opal_handle_hmi_exception(struct pt_regs *regs) { - s64 rc; - __be64 evt = 0; - /* * Check if HMI event is available. - * if Yes, then call opal_poll_events to pull opal messages and - * process them. + * if Yes, then wake kopald to process them. */ if (!local_paca->hmi_event_available) return 0; local_paca->hmi_event_available = 0; - rc = opal_poll_events(&evt); - if (rc == OPAL_SUCCESS && evt) - opal_handle_events(be64_to_cpu(evt)); + opal_wake_poller(); return 1; } @@ -757,14 +751,19 @@ static void __init opal_imc_init_dev(void) static int kopald(void *unused) { unsigned long timeout = msecs_to_jiffies(opal_heartbeat) + 1; - __be64 events; set_freezable(); do { try_to_freeze(); - opal_poll_events(&events); - opal_handle_events(be64_to_cpu(events)); - schedule_timeout_interruptible(timeout); + + opal_handle_events(); + + set_current_state(TASK_INTERRUPTIBLE); + if (opal_have_pending_events()) + __set_current_state(TASK_RUNNING); + else + schedule_timeout(timeout); + } while (!kthread_should_stop()); return 0; diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 3f9c69d7623a..5bd0eb6681bc 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1976,9 +1976,10 @@ static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl, mb(); /* Ensure above stores are visible */ while (start <= end) { if (rm) - __raw_rm_writeq(cpu_to_be64(start), invalidate); + __raw_rm_writeq_be(start, invalidate); else - __raw_writeq(cpu_to_be64(start), invalidate); + __raw_writeq_be(start, invalidate); + start += inc; } @@ -2055,9 +2056,9 @@ static void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm) mb(); /* Ensure previous TCE table stores are visible */ if (rm) - __raw_rm_writeq(cpu_to_be64(val), invalidate); + __raw_rm_writeq_be(val, invalidate); else - __raw_writeq(cpu_to_be64(val), invalidate); + __raw_writeq_be(val, invalidate); } static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe) @@ -2067,7 +2068,7 @@ static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe) unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF); mb(); /* Ensure above stores are visible */ - __raw_writeq(cpu_to_be64(val), invalidate); + __raw_writeq_be(val, invalidate); } static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm, @@ -2090,9 +2091,9 @@ static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm, while (start <= end) { if (rm) - 
__raw_rm_writeq(cpu_to_be64(start), invalidate); + __raw_rm_writeq_be(start, invalidate); else - __raw_writeq(cpu_to_be64(start), invalidate); + __raw_writeq_be(start, invalidate); start += inc; } } @@ -2910,6 +2911,34 @@ static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl) tbl->it_indirect_levels); } +static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb) +{ + struct pci_controller *hose = phb->hose; + struct device_node *dn = hose->dn; + unsigned long mask = 0; + int i, rc, count; + u32 val; + + count = of_property_count_u32_elems(dn, "ibm,supported-tce-sizes"); + if (count <= 0) { + mask = SZ_4K | SZ_64K; + /* Add 16M for POWER8 by default */ + if (cpu_has_feature(CPU_FTR_ARCH_207S) && + !cpu_has_feature(CPU_FTR_ARCH_300)) + mask |= SZ_16M; + return mask; + } + + for (i = 0; i < count; i++) { + rc = of_property_read_u32_index(dn, "ibm,supported-tce-sizes", + i, &val); + if (rc == 0) + mask |= 1ULL << val; + } + + return mask; +} + static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) { @@ -2934,7 +2963,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, pe->table_group.max_dynamic_windows_supported = IOMMU_TABLE_GROUP_MAX_TABLES; pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS; - pe->table_group.pgsizes = SZ_4K | SZ_64K | SZ_16M; + pe->table_group.pgsizes = pnv_ioda_parse_tce_sizes(phb); #ifdef CONFIG_IOMMU_API pe->table_group.ops = &pnv_pci_ioda2_ops; #endif @@ -3642,7 +3671,6 @@ static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe) WARN_ON(pe->table_group.group); } - pnv_pci_ioda2_table_free_pages(tbl); iommu_tce_table_put(tbl); } diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h index 94f17ab1374b..fd4a1c5a6369 100644 --- a/arch/powerpc/platforms/powernv/powernv.h +++ b/arch/powerpc/platforms/powernv/powernv.h @@ -24,7 +24,8 @@ extern u32 pnv_get_supported_cpuidle_states(void); extern void pnv_lpc_init(void); -extern void opal_handle_events(uint64_t events); +extern void opal_handle_events(void); +extern bool opal_have_pending_events(void); extern void opal_event_shutdown(void); bool cpu_core_split_required(void); diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index a6648ec99ca7..f96df0a25d05 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -124,6 +124,7 @@ static void pnv_setup_rfi_flush(void) security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV)); setup_rfi_flush(type, enable); + setup_barrier_nospec(); } static void __init pnv_setup_arch(void) @@ -357,15 +358,7 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE static unsigned long pnv_memory_block_size(void) { - /* - * We map the kernel linear region with 1GB large pages on radix. For - * memory hot unplug to work our memory block size must be at least - * this size. 
- */ - if (radix_enabled()) - return 1UL * 1024 * 1024 * 1024; - else - return 256UL * 1024 * 1024; + return 256UL * 1024 * 1024; } #endif diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 19af6de6b6f0..b80909957792 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -334,7 +334,16 @@ static int pnv_cause_nmi_ipi(int cpu) int64_t rc; if (cpu >= 0) { - rc = opal_signal_system_reset(get_hard_smp_processor_id(cpu)); + int h = get_hard_smp_processor_id(cpu); + + if (opal_check_token(OPAL_QUIESCE)) + opal_quiesce(QUIESCE_HOLD, h); + + rc = opal_signal_system_reset(h); + + if (opal_check_token(OPAL_QUIESCE)) + opal_quiesce(QUIESCE_RESUME, h); + if (rc != OPAL_SUCCESS) return 0; return 1; @@ -343,6 +352,8 @@ static int pnv_cause_nmi_ipi(int cpu) bool success = true; int c; + if (opal_check_token(OPAL_QUIESCE)) + opal_quiesce(QUIESCE_HOLD, -1); /* * We do not use broadcasts (yet), because it's not clear @@ -358,6 +369,10 @@ static int pnv_cause_nmi_ipi(int cpu) if (rc != OPAL_SUCCESS) success = false; } + + if (opal_check_token(OPAL_QUIESCE)) + opal_quiesce(QUIESCE_RESUME, -1); + if (success) return 1; diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h index 1809cfc562ee..9bc68f913466 100644 --- a/arch/powerpc/platforms/ps3/platform.h +++ b/arch/powerpc/platforms/ps3/platform.h @@ -57,7 +57,7 @@ static inline void ps3_smp_cleanup_cpu(int cpu) { } /* time */ void __init ps3_calibrate_decr(void); -unsigned long __init ps3_get_boot_time(void); +time64_t __init ps3_get_boot_time(void); void ps3_get_rtc_time(struct rtc_time *time); int ps3_set_rtc_time(struct rtc_time *time); diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c index 50dbaf24b1ee..e49c887787c4 100644 --- a/arch/powerpc/platforms/ps3/repository.c +++ b/arch/powerpc/platforms/ps3/repository.c @@ -101,9 +101,9 @@ static u64 make_first_field(const char *text, u64 index) static u64 make_field(const char *text, u64 index) { - u64 n; + u64 n = 0; - strncpy((char *)&n, text, 8); + memcpy((char *)&n, text, strnlen(text, sizeof(n))); return n + index; } diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c index 11b45b58c81b..08ca76e23d09 100644 --- a/arch/powerpc/platforms/ps3/time.c +++ b/arch/powerpc/platforms/ps3/time.c @@ -28,30 +28,6 @@ #include "platform.h" -#define dump_tm(_a) _dump_tm(_a, __func__, __LINE__) -static void _dump_tm(const struct rtc_time *tm, const char* func, int line) -{ - pr_debug("%s:%d tm_sec %d\n", func, line, tm->tm_sec); - pr_debug("%s:%d tm_min %d\n", func, line, tm->tm_min); - pr_debug("%s:%d tm_hour %d\n", func, line, tm->tm_hour); - pr_debug("%s:%d tm_mday %d\n", func, line, tm->tm_mday); - pr_debug("%s:%d tm_mon %d\n", func, line, tm->tm_mon); - pr_debug("%s:%d tm_year %d\n", func, line, tm->tm_year); - pr_debug("%s:%d tm_wday %d\n", func, line, tm->tm_wday); -} - -#define dump_time(_a) _dump_time(_a, __func__, __LINE__) -static void __maybe_unused _dump_time(int time, const char *func, - int line) -{ - struct rtc_time tm; - - to_tm(time, &tm); - - pr_debug("%s:%d time %d\n", func, line, time); - _dump_tm(&tm, func, line); -} - void __init ps3_calibrate_decr(void) { int result; @@ -76,7 +52,7 @@ static u64 read_rtc(void) return rtc_val; } -unsigned long __init ps3_get_boot_time(void) +time64_t __init ps3_get_boot_time(void) { return read_rtc() + ps3_os_area_get_rtc_diff(); } diff --git 
a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c index 89b7ce807e70..6da320c786cd 100644 --- a/arch/powerpc/platforms/pseries/hvCall_inst.c +++ b/arch/powerpc/platforms/pseries/hvCall_inst.c @@ -125,7 +125,7 @@ static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long h->purr_start = mfspr(SPRN_PURR); } -static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long retval, +static void probe_hcall_exit(void *ignored, unsigned long opcode, long retval, unsigned long *retbuf) { struct hcall_stats *h; diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c index 3fe126796975..46fbaef69a59 100644 --- a/arch/powerpc/platforms/pseries/kexec.c +++ b/arch/powerpc/platforms/pseries/kexec.c @@ -57,8 +57,11 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary) } } - if (xive_enabled()) + if (xive_enabled()) { xive_kexec_teardown_cpu(secondary); - else + + if (!secondary) + xive_shutdown(); + } else xics_kexec_teardown_cpu(secondary); } diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index adb996ed51e1..5a392e40f3d2 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -902,8 +902,7 @@ out: local_irq_restore(flags); } -void __trace_hcall_exit(long opcode, unsigned long retval, - unsigned long *retbuf) +void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf) { unsigned long flags; unsigned int *depth; diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index c508c938dc71..7c872dc01bdb 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c @@ -52,18 +52,20 @@ * Track sum of all purrs across all processors. 
This is used to further * calculate usage values by different applications */ +static void cpu_get_purr(void *arg) +{ + atomic64_t *sum = arg; + + atomic64_add(mfspr(SPRN_PURR), sum); +} + static unsigned long get_purr(void) { - unsigned long sum_purr = 0; - int cpu; + atomic64_t purr = ATOMIC64_INIT(0); - for_each_possible_cpu(cpu) { - struct cpu_usage *cu; + on_each_cpu(cpu_get_purr, &purr, 1); - cu = &per_cpu(cpu_usage_array, cpu); - sum_purr += cu->current_tb; - } - return sum_purr; + return atomic64_read(&purr); } /* diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index fdb32e056ef4..139f0af6c3d9 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -534,6 +534,7 @@ void pseries_setup_rfi_flush(void) security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR); setup_rfi_flush(types, enable); + setup_barrier_nospec(); } #ifdef CONFIG_PCI_IOV diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index 9861407d644a..ea2f595b5133 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile @@ -28,9 +28,6 @@ obj-$(CONFIG_FSL_85XX_CACHE_SRAM) += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o obj-$(CONFIG_FSL_RIO) += fsl_rio.o fsl_rmu.o obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o -mv64x60-$(CONFIG_PCI) += mv64x60_pci.o -obj-$(CONFIG_MV64X60) += $(mv64x60-y) mv64x60_pic.o mv64x60_dev.o \ - mv64x60_udbg.o obj-$(CONFIG_RTC_DRV_CMOS) += rtc_cmos_setup.o obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o diff --git a/arch/powerpc/sysdev/cpm_gpio.c b/arch/powerpc/sysdev/cpm_gpio.c index 0badc90be666..0695d26bd301 100644 --- a/arch/powerpc/sysdev/cpm_gpio.c +++ b/arch/powerpc/sysdev/cpm_gpio.c @@ -63,7 +63,6 @@ static struct platform_driver cpm_gpio_driver = { .probe = cpm_gpio_probe, .driver = { .name = "cpm-gpio", - .owner = THIS_MODULE, .of_match_table = cpm_gpio_match, }, }; diff --git a/arch/powerpc/sysdev/mv64x60.h b/arch/powerpc/sysdev/mv64x60.h deleted file mode 100644 index 60cfcb90d1fa..000000000000 --- a/arch/powerpc/sysdev/mv64x60.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __MV64X60_H__ -#define __MV64X60_H__ - -#include <linux/init.h> - -extern void __init mv64x60_init_irq(void); -extern unsigned int mv64x60_get_irq(void); - -extern void __init mv64x60_pci_init(void); -extern void __init mv64x60_init_early(void); - -#endif /* __MV64X60_H__ */ diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c deleted file mode 100644 index 185a67e742a6..000000000000 --- a/arch/powerpc/sysdev/mv64x60_dev.c +++ /dev/null @@ -1,535 +0,0 @@ -/* - * Platform device setup for Marvell mv64360/mv64460 host bridges (Discovery) - * - * Author: Dale Farnsworth <dale@farnsworth.org> - * - * 2007 (c) MontaVista, Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. - */ - -#include <linux/stddef.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/console.h> -#include <linux/mv643xx.h> -#include <linux/platform_device.h> -#include <linux/of_platform.h> -#include <linux/of_net.h> -#include <linux/dma-mapping.h> - -#include <asm/prom.h> - -/* These functions provide the necessary setup for the mv64x60 drivers. 
*/ - -static const struct of_device_id of_mv64x60_devices[] __initconst = { - { .compatible = "marvell,mv64306-devctrl", }, - {} -}; - -/* - * Create MPSC platform devices - */ -static int __init mv64x60_mpsc_register_shared_pdev(struct device_node *np) -{ - struct platform_device *pdev; - struct resource r[2]; - struct mpsc_shared_pdata pdata; - const phandle *ph; - struct device_node *mpscrouting, *mpscintr; - int err; - - ph = of_get_property(np, "mpscrouting", NULL); - mpscrouting = of_find_node_by_phandle(*ph); - if (!mpscrouting) - return -ENODEV; - - err = of_address_to_resource(mpscrouting, 0, &r[0]); - of_node_put(mpscrouting); - if (err) - return err; - - ph = of_get_property(np, "mpscintr", NULL); - mpscintr = of_find_node_by_phandle(*ph); - if (!mpscintr) - return -ENODEV; - - err = of_address_to_resource(mpscintr, 0, &r[1]); - of_node_put(mpscintr); - if (err) - return err; - - memset(&pdata, 0, sizeof(pdata)); - - pdev = platform_device_alloc(MPSC_SHARED_NAME, 0); - if (!pdev) - return -ENOMEM; - - err = platform_device_add_resources(pdev, r, 2); - if (err) - goto error; - - err = platform_device_add_data(pdev, &pdata, sizeof(pdata)); - if (err) - goto error; - - err = platform_device_add(pdev); - if (err) - goto error; - - return 0; - -error: - platform_device_put(pdev); - return err; -} - - -static int __init mv64x60_mpsc_device_setup(struct device_node *np, int id) -{ - struct resource r[5]; - struct mpsc_pdata pdata; - struct platform_device *pdev; - const unsigned int *prop; - const phandle *ph; - struct device_node *sdma, *brg; - int err; - int port_number; - - /* only register the shared platform device the first time through */ - if (id == 0 && (err = mv64x60_mpsc_register_shared_pdev(np))) - return err; - - memset(r, 0, sizeof(r)); - - err = of_address_to_resource(np, 0, &r[0]); - if (err) - return err; - - of_irq_to_resource(np, 0, &r[4]); - - ph = of_get_property(np, "sdma", NULL); - sdma = of_find_node_by_phandle(*ph); - if (!sdma) - return -ENODEV; - - of_irq_to_resource(sdma, 0, &r[3]); - err = of_address_to_resource(sdma, 0, &r[1]); - of_node_put(sdma); - if (err) - return err; - - ph = of_get_property(np, "brg", NULL); - brg = of_find_node_by_phandle(*ph); - if (!brg) - return -ENODEV; - - err = of_address_to_resource(brg, 0, &r[2]); - of_node_put(brg); - if (err) - return err; - - prop = of_get_property(np, "cell-index", NULL); - if (!prop) - return -ENODEV; - port_number = *(int *)prop; - - memset(&pdata, 0, sizeof(pdata)); - - pdata.cache_mgmt = 1; /* All current revs need this set */ - - pdata.max_idle = 40; /* default */ - prop = of_get_property(np, "max_idle", NULL); - if (prop) - pdata.max_idle = *prop; - - prop = of_get_property(brg, "current-speed", NULL); - if (prop) - pdata.default_baud = *prop; - - /* Default is 8 bits, no parity, no flow control */ - pdata.default_bits = 8; - pdata.default_parity = 'n'; - pdata.default_flow = 'n'; - - prop = of_get_property(np, "chr_1", NULL); - if (prop) - pdata.chr_1_val = *prop; - - prop = of_get_property(np, "chr_2", NULL); - if (prop) - pdata.chr_2_val = *prop; - - prop = of_get_property(np, "chr_10", NULL); - if (prop) - pdata.chr_10_val = *prop; - - prop = of_get_property(np, "mpcr", NULL); - if (prop) - pdata.mpcr_val = *prop; - - prop = of_get_property(brg, "bcr", NULL); - if (prop) - pdata.bcr_val = *prop; - - pdata.brg_can_tune = 1; /* All current revs need this set */ - - prop = of_get_property(brg, "clock-src", NULL); - if (prop) - pdata.brg_clk_src = *prop; - - prop = of_get_property(brg, 
"clock-frequency", NULL); - if (prop) - pdata.brg_clk_freq = *prop; - - pdev = platform_device_alloc(MPSC_CTLR_NAME, port_number); - if (!pdev) - return -ENOMEM; - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - - err = platform_device_add_resources(pdev, r, 5); - if (err) - goto error; - - err = platform_device_add_data(pdev, &pdata, sizeof(pdata)); - if (err) - goto error; - - err = platform_device_add(pdev); - if (err) - goto error; - - return 0; - -error: - platform_device_put(pdev); - return err; -} - -/* - * Create mv64x60_eth platform devices - */ -static struct platform_device * __init mv64x60_eth_register_shared_pdev( - struct device_node *np, int id) -{ - struct platform_device *pdev; - struct resource r[2]; - int err; - - err = of_address_to_resource(np, 0, &r[0]); - if (err) - return ERR_PTR(err); - - /* register an orion mdio bus driver */ - r[1].start = r[0].start + 0x4; - r[1].end = r[0].start + 0x84 - 1; - r[1].flags = IORESOURCE_MEM; - - if (id == 0) { - pdev = platform_device_register_simple("orion-mdio", -1, &r[1], 1); - if (IS_ERR(pdev)) - return pdev; - } - - pdev = platform_device_register_simple(MV643XX_ETH_SHARED_NAME, id, - &r[0], 1); - - return pdev; -} - -static int __init mv64x60_eth_device_setup(struct device_node *np, int id, - struct platform_device *shared_pdev) -{ - struct resource r[1]; - struct mv643xx_eth_platform_data pdata; - struct platform_device *pdev; - struct device_node *phy; - const u8 *mac_addr; - const int *prop; - const phandle *ph; - int err; - - memset(r, 0, sizeof(r)); - of_irq_to_resource(np, 0, &r[0]); - - memset(&pdata, 0, sizeof(pdata)); - - pdata.shared = shared_pdev; - - prop = of_get_property(np, "reg", NULL); - if (!prop) - return -ENODEV; - pdata.port_number = *prop; - - mac_addr = of_get_mac_address(np); - if (mac_addr) - memcpy(pdata.mac_addr, mac_addr, 6); - - prop = of_get_property(np, "speed", NULL); - if (prop) - pdata.speed = *prop; - - prop = of_get_property(np, "tx_queue_size", NULL); - if (prop) - pdata.tx_queue_size = *prop; - - prop = of_get_property(np, "rx_queue_size", NULL); - if (prop) - pdata.rx_queue_size = *prop; - - prop = of_get_property(np, "tx_sram_addr", NULL); - if (prop) - pdata.tx_sram_addr = *prop; - - prop = of_get_property(np, "tx_sram_size", NULL); - if (prop) - pdata.tx_sram_size = *prop; - - prop = of_get_property(np, "rx_sram_addr", NULL); - if (prop) - pdata.rx_sram_addr = *prop; - - prop = of_get_property(np, "rx_sram_size", NULL); - if (prop) - pdata.rx_sram_size = *prop; - - ph = of_get_property(np, "phy", NULL); - if (!ph) - return -ENODEV; - - phy = of_find_node_by_phandle(*ph); - if (phy == NULL) - return -ENODEV; - - prop = of_get_property(phy, "reg", NULL); - if (prop) - pdata.phy_addr = MV643XX_ETH_PHY_ADDR(*prop); - - of_node_put(phy); - - pdev = platform_device_alloc(MV643XX_ETH_NAME, id); - if (!pdev) - return -ENOMEM; - - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - err = platform_device_add_resources(pdev, r, 1); - if (err) - goto error; - - err = platform_device_add_data(pdev, &pdata, sizeof(pdata)); - if (err) - goto error; - - err = platform_device_add(pdev); - if (err) - goto error; - - return 0; - -error: - platform_device_put(pdev); - return err; -} - -/* - * Create mv64x60_i2c platform devices - */ -static int __init mv64x60_i2c_device_setup(struct device_node *np, int id) -{ - struct resource r[2]; - struct platform_device *pdev; - struct mv64xxx_i2c_pdata pdata; - const unsigned int *prop; - int err; - - memset(r, 0, sizeof(r)); - - err = of_address_to_resource(np, 
0, &r[0]); - if (err) - return err; - - of_irq_to_resource(np, 0, &r[1]); - - memset(&pdata, 0, sizeof(pdata)); - - pdata.freq_m = 8; /* default */ - prop = of_get_property(np, "freq_m", NULL); - if (prop) - pdata.freq_m = *prop; - - pdata.freq_n = 3; /* default */ - prop = of_get_property(np, "freq_n", NULL); - if (prop) - pdata.freq_n = *prop; - - pdata.timeout = 1000; /* default: 1 second */ - - pdev = platform_device_alloc(MV64XXX_I2C_CTLR_NAME, id); - if (!pdev) - return -ENOMEM; - - err = platform_device_add_resources(pdev, r, 2); - if (err) - goto error; - - err = platform_device_add_data(pdev, &pdata, sizeof(pdata)); - if (err) - goto error; - - err = platform_device_add(pdev); - if (err) - goto error; - - return 0; - -error: - platform_device_put(pdev); - return err; -} - -/* - * Create mv64x60_wdt platform devices - */ -static int __init mv64x60_wdt_device_setup(struct device_node *np, int id) -{ - struct resource r; - struct platform_device *pdev; - struct mv64x60_wdt_pdata pdata; - const unsigned int *prop; - int err; - - err = of_address_to_resource(np, 0, &r); - if (err) - return err; - - memset(&pdata, 0, sizeof(pdata)); - - pdata.timeout = 10; /* Default: 10 seconds */ - - np = of_get_parent(np); - if (!np) - return -ENODEV; - - prop = of_get_property(np, "clock-frequency", NULL); - of_node_put(np); - if (!prop) - return -ENODEV; - pdata.bus_clk = *prop / 1000000; /* wdt driver wants freq in MHz */ - - pdev = platform_device_alloc(MV64x60_WDT_NAME, id); - if (!pdev) - return -ENOMEM; - - err = platform_device_add_resources(pdev, &r, 1); - if (err) - goto error; - - err = platform_device_add_data(pdev, &pdata, sizeof(pdata)); - if (err) - goto error; - - err = platform_device_add(pdev); - if (err) - goto error; - - return 0; - -error: - platform_device_put(pdev); - return err; -} - -static int __init mv64x60_device_setup(void) -{ - struct device_node *np, *np2; - struct platform_device *pdev; - int id, id2; - int err; - - id = 0; - for_each_compatible_node(np, NULL, "marvell,mv64360-mpsc") { - err = mv64x60_mpsc_device_setup(np, id++); - if (err) - printk(KERN_ERR "Failed to initialize MV64x60 " - "serial device %pOF: error %d.\n", - np, err); - } - - id = 0; - id2 = 0; - for_each_compatible_node(np, NULL, "marvell,mv64360-eth-group") { - pdev = mv64x60_eth_register_shared_pdev(np, id++); - if (IS_ERR(pdev)) { - err = PTR_ERR(pdev); - printk(KERN_ERR "Failed to initialize MV64x60 " - "network block %pOF: error %d.\n", - np, err); - continue; - } - for_each_child_of_node(np, np2) { - if (!of_device_is_compatible(np2, - "marvell,mv64360-eth")) - continue; - err = mv64x60_eth_device_setup(np2, id2++, pdev); - if (err) - printk(KERN_ERR "Failed to initialize " - "MV64x60 network device %pOF: " - "error %d.\n", - np2, err); - } - } - - id = 0; - for_each_compatible_node(np, "i2c", "marvell,mv64360-i2c") { - err = mv64x60_i2c_device_setup(np, id++); - if (err) - printk(KERN_ERR "Failed to initialize MV64x60 I2C " - "bus %pOF: error %d.\n", - np, err); - } - - /* support up to one watchdog timer */ - np = of_find_compatible_node(np, NULL, "marvell,mv64360-wdt"); - if (np) { - if ((err = mv64x60_wdt_device_setup(np, id))) - printk(KERN_ERR "Failed to initialize MV64x60 " - "Watchdog %pOF: error %d.\n", - np, err); - of_node_put(np); - } - - /* Now add every node that is on the device bus */ - for_each_compatible_node(np, NULL, "marvell,mv64360") - of_platform_bus_probe(np, of_mv64x60_devices, NULL); - - return 0; -} -arch_initcall(mv64x60_device_setup); - -static int __init 
mv64x60_add_mpsc_console(void) -{ - struct device_node *np = NULL; - const char *prop; - - prop = of_get_property(of_chosen, "linux,stdout-path", NULL); - if (prop == NULL) - goto not_mpsc; - - np = of_find_node_by_path(prop); - if (!np) - goto not_mpsc; - - if (!of_device_is_compatible(np, "marvell,mv64360-mpsc")) - goto not_mpsc; - - prop = of_get_property(np, "cell-index", NULL); - if (!prop) - goto not_mpsc; - - add_preferred_console("ttyMM", *(int *)prop, NULL); - -not_mpsc: - return 0; -} -console_initcall(mv64x60_add_mpsc_console); diff --git a/arch/powerpc/sysdev/mv64x60_pci.c b/arch/powerpc/sysdev/mv64x60_pci.c deleted file mode 100644 index 1afcdb428e51..000000000000 --- a/arch/powerpc/sysdev/mv64x60_pci.c +++ /dev/null @@ -1,171 +0,0 @@ -/* - * PCI bus setup for Marvell mv64360/mv64460 host bridges (Discovery) - * - * Author: Dale Farnsworth <dale@farnsworth.org> - * - * 2007 (c) MontaVista, Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. - */ - -#include <linux/stddef.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/stat.h> -#include <linux/pci.h> - -#include <asm/prom.h> -#include <asm/pci-bridge.h> - -#define PCI_HEADER_TYPE_INVALID 0x7f /* Invalid PCI header type */ - -#ifdef CONFIG_SYSFS -/* 32-bit hex or dec stringified number + '\n' */ -#define MV64X60_VAL_LEN_MAX 11 -#define MV64X60_PCICFG_CPCI_HOTSWAP 0x68 - -static ssize_t mv64x60_hs_reg_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, - loff_t off, size_t count) -{ - struct pci_dev *phb; - u32 v; - - if (off > 0) - return 0; - if (count < MV64X60_VAL_LEN_MAX) - return -EINVAL; - - phb = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0)); - if (!phb) - return -ENODEV; - pci_read_config_dword(phb, MV64X60_PCICFG_CPCI_HOTSWAP, &v); - pci_dev_put(phb); - - return sprintf(buf, "0x%08x\n", v); -} - -static ssize_t mv64x60_hs_reg_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, - loff_t off, size_t count) -{ - struct pci_dev *phb; - u32 v; - - if (off > 0) - return 0; - if (count <= 0) - return -EINVAL; - - if (sscanf(buf, "%i", &v) != 1) - return -EINVAL; - - phb = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0)); - if (!phb) - return -ENODEV; - pci_write_config_dword(phb, MV64X60_PCICFG_CPCI_HOTSWAP, v); - pci_dev_put(phb); - - return count; -} - -static const struct bin_attribute mv64x60_hs_reg_attr = { /* Hotswap register */ - .attr = { - .name = "hs_reg", - .mode = 0644, - }, - .size = MV64X60_VAL_LEN_MAX, - .read = mv64x60_hs_reg_read, - .write = mv64x60_hs_reg_write, -}; - -static int __init mv64x60_sysfs_init(void) -{ - struct device_node *np; - struct platform_device *pdev; - const unsigned int *prop; - - np = of_find_compatible_node(NULL, NULL, "marvell,mv64360"); - if (!np) - return 0; - - prop = of_get_property(np, "hs_reg_valid", NULL); - of_node_put(np); - - pdev = platform_device_register_simple("marvell,mv64360", 0, NULL, 0); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); - - return sysfs_create_bin_file(&pdev->dev.kobj, &mv64x60_hs_reg_attr); -} - -subsys_initcall(mv64x60_sysfs_init); - -#endif /* CONFIG_SYSFS */ - -static void mv64x60_pci_fixup_early(struct pci_dev *dev) -{ - /* - * Set the host bridge hdr_type to an invalid value so that - * pci_setup_device() will ignore the host bridge. 
- */ - dev->hdr_type = PCI_HEADER_TYPE_INVALID; -} -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_MV64360, - mv64x60_pci_fixup_early); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_MV64460, - mv64x60_pci_fixup_early); - -static int __init mv64x60_add_bridge(struct device_node *dev) -{ - int len; - struct pci_controller *hose; - struct resource rsrc; - const int *bus_range; - int primary; - - memset(&rsrc, 0, sizeof(rsrc)); - - /* Fetch host bridge registers address */ - if (of_address_to_resource(dev, 0, &rsrc)) { - printk(KERN_ERR "No PCI reg property in device tree\n"); - return -ENODEV; - } - - /* Get bus range if any */ - bus_range = of_get_property(dev, "bus-range", &len); - if (bus_range == NULL || len < 2 * sizeof(int)) - printk(KERN_WARNING "Can't get bus-range for %pOF, assume" - " bus 0\n", dev); - - hose = pcibios_alloc_controller(dev); - if (!hose) - return -ENOMEM; - - hose->first_busno = bus_range ? bus_range[0] : 0; - hose->last_busno = bus_range ? bus_range[1] : 0xff; - - setup_indirect_pci(hose, rsrc.start, rsrc.start + 4, 0); - hose->self_busno = hose->first_busno; - - printk(KERN_INFO "Found MV64x60 PCI host bridge at 0x%016llx. " - "Firmware bus number: %d->%d\n", - (unsigned long long)rsrc.start, hose->first_busno, - hose->last_busno); - - /* Interpret the "ranges" property */ - /* This also maps the I/O region and sets isa_io/mem_base */ - primary = (hose->first_busno == 0); - pci_process_bridge_OF_ranges(hose, dev, primary); - - return 0; -} - -void __init mv64x60_pci_init(void) -{ - struct device_node *np; - - for_each_compatible_node(np, "pci", "marvell,mv64360-pci") - mv64x60_add_bridge(np); -} diff --git a/arch/powerpc/sysdev/mv64x60_pic.c b/arch/powerpc/sysdev/mv64x60_pic.c deleted file mode 100644 index a79953deb489..000000000000 --- a/arch/powerpc/sysdev/mv64x60_pic.c +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Interrupt handling for Marvell mv64360/mv64460 host bridges (Discovery) - * - * Author: Dale Farnsworth <dale@farnsworth.org> - * - * 2007 (c) MontaVista, Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. 
- */ - -#include <linux/stddef.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/irq.h> -#include <linux/interrupt.h> -#include <linux/spinlock.h> - -#include <asm/byteorder.h> -#include <asm/io.h> -#include <asm/prom.h> -#include <asm/irq.h> - -#include "mv64x60.h" - -/* Interrupt Controller Interface Registers */ -#define MV64X60_IC_MAIN_CAUSE_LO 0x0004 -#define MV64X60_IC_MAIN_CAUSE_HI 0x000c -#define MV64X60_IC_CPU0_INTR_MASK_LO 0x0014 -#define MV64X60_IC_CPU0_INTR_MASK_HI 0x001c -#define MV64X60_IC_CPU0_SELECT_CAUSE 0x0024 - -#define MV64X60_HIGH_GPP_GROUPS 0x0f000000 -#define MV64X60_SELECT_CAUSE_HIGH 0x40000000 - -/* General Purpose Pins Controller Interface Registers */ -#define MV64x60_GPP_INTR_CAUSE 0x0008 -#define MV64x60_GPP_INTR_MASK 0x000c - -#define MV64x60_LEVEL1_LOW 0 -#define MV64x60_LEVEL1_HIGH 1 -#define MV64x60_LEVEL1_GPP 2 - -#define MV64x60_LEVEL1_MASK 0x00000060 -#define MV64x60_LEVEL1_OFFSET 5 - -#define MV64x60_LEVEL2_MASK 0x0000001f - -#define MV64x60_NUM_IRQS 96 - -static DEFINE_SPINLOCK(mv64x60_lock); - -static void __iomem *mv64x60_irq_reg_base; -static void __iomem *mv64x60_gpp_reg_base; - -/* - * Interrupt Controller Handling - * - * The interrupt controller handles three groups of interrupts: - * main low: IRQ0-IRQ31 - * main high: IRQ32-IRQ63 - * gpp: IRQ64-IRQ95 - * - * This code handles interrupts in two levels. Level 1 selects the - * interrupt group, and level 2 selects an IRQ within that group. - * Each group has its own irq_chip structure. - */ - -static u32 mv64x60_cached_low_mask; -static u32 mv64x60_cached_high_mask = MV64X60_HIGH_GPP_GROUPS; -static u32 mv64x60_cached_gpp_mask; - -static struct irq_domain *mv64x60_irq_host; - -/* - * mv64x60_chip_low functions - */ - -static void mv64x60_mask_low(struct irq_data *d) -{ - int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; - unsigned long flags; - - spin_lock_irqsave(&mv64x60_lock, flags); - mv64x60_cached_low_mask &= ~(1 << level2); - out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO, - mv64x60_cached_low_mask); - spin_unlock_irqrestore(&mv64x60_lock, flags); - (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO); -} - -static void mv64x60_unmask_low(struct irq_data *d) -{ - int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; - unsigned long flags; - - spin_lock_irqsave(&mv64x60_lock, flags); - mv64x60_cached_low_mask |= 1 << level2; - out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO, - mv64x60_cached_low_mask); - spin_unlock_irqrestore(&mv64x60_lock, flags); - (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO); -} - -static struct irq_chip mv64x60_chip_low = { - .name = "mv64x60_low", - .irq_mask = mv64x60_mask_low, - .irq_mask_ack = mv64x60_mask_low, - .irq_unmask = mv64x60_unmask_low, -}; - -/* - * mv64x60_chip_high functions - */ - -static void mv64x60_mask_high(struct irq_data *d) -{ - int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; - unsigned long flags; - - spin_lock_irqsave(&mv64x60_lock, flags); - mv64x60_cached_high_mask &= ~(1 << level2); - out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI, - mv64x60_cached_high_mask); - spin_unlock_irqrestore(&mv64x60_lock, flags); - (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI); -} - -static void mv64x60_unmask_high(struct irq_data *d) -{ - int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; - unsigned long flags; - - spin_lock_irqsave(&mv64x60_lock, flags); - mv64x60_cached_high_mask |= 1 << level2; - out_le32(mv64x60_irq_reg_base + 
MV64X60_IC_CPU0_INTR_MASK_HI, - mv64x60_cached_high_mask); - spin_unlock_irqrestore(&mv64x60_lock, flags); - (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI); -} - -static struct irq_chip mv64x60_chip_high = { - .name = "mv64x60_high", - .irq_mask = mv64x60_mask_high, - .irq_mask_ack = mv64x60_mask_high, - .irq_unmask = mv64x60_unmask_high, -}; - -/* - * mv64x60_chip_gpp functions - */ - -static void mv64x60_mask_gpp(struct irq_data *d) -{ - int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; - unsigned long flags; - - spin_lock_irqsave(&mv64x60_lock, flags); - mv64x60_cached_gpp_mask &= ~(1 << level2); - out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK, - mv64x60_cached_gpp_mask); - spin_unlock_irqrestore(&mv64x60_lock, flags); - (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK); -} - -static void mv64x60_mask_ack_gpp(struct irq_data *d) -{ - int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; - unsigned long flags; - - spin_lock_irqsave(&mv64x60_lock, flags); - mv64x60_cached_gpp_mask &= ~(1 << level2); - out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK, - mv64x60_cached_gpp_mask); - out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE, - ~(1 << level2)); - spin_unlock_irqrestore(&mv64x60_lock, flags); - (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE); -} - -static void mv64x60_unmask_gpp(struct irq_data *d) -{ - int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; - unsigned long flags; - - spin_lock_irqsave(&mv64x60_lock, flags); - mv64x60_cached_gpp_mask |= 1 << level2; - out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK, - mv64x60_cached_gpp_mask); - spin_unlock_irqrestore(&mv64x60_lock, flags); - (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK); -} - -static struct irq_chip mv64x60_chip_gpp = { - .name = "mv64x60_gpp", - .irq_mask = mv64x60_mask_gpp, - .irq_mask_ack = mv64x60_mask_ack_gpp, - .irq_unmask = mv64x60_unmask_gpp, -}; - -/* - * mv64x60_host_ops functions - */ - -static struct irq_chip *mv64x60_chips[] = { - [MV64x60_LEVEL1_LOW] = &mv64x60_chip_low, - [MV64x60_LEVEL1_HIGH] = &mv64x60_chip_high, - [MV64x60_LEVEL1_GPP] = &mv64x60_chip_gpp, -}; - -static int mv64x60_host_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hwirq) -{ - int level1; - - irq_set_status_flags(virq, IRQ_LEVEL); - - level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET; - BUG_ON(level1 > MV64x60_LEVEL1_GPP); - irq_set_chip_and_handler(virq, mv64x60_chips[level1], - handle_level_irq); - - return 0; -} - -static const struct irq_domain_ops mv64x60_host_ops = { - .map = mv64x60_host_map, -}; - -/* - * Global functions - */ - -void __init mv64x60_init_irq(void) -{ - struct device_node *np; - phys_addr_t paddr; - unsigned int size; - const unsigned int *reg; - unsigned long flags; - - np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-gpp"); - reg = of_get_property(np, "reg", &size); - paddr = of_translate_address(np, reg); - mv64x60_gpp_reg_base = ioremap(paddr, reg[1]); - of_node_put(np); - - np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-pic"); - reg = of_get_property(np, "reg", &size); - paddr = of_translate_address(np, reg); - mv64x60_irq_reg_base = ioremap(paddr, reg[1]); - - mv64x60_irq_host = irq_domain_add_linear(np, MV64x60_NUM_IRQS, - &mv64x60_host_ops, NULL); - - spin_lock_irqsave(&mv64x60_lock, flags); - out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK, - mv64x60_cached_gpp_mask); - out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO, - mv64x60_cached_low_mask); - 
out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI, - mv64x60_cached_high_mask); - - out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE, 0); - out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_LO, 0); - out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_HI, 0); - spin_unlock_irqrestore(&mv64x60_lock, flags); -} - -unsigned int mv64x60_get_irq(void) -{ - u32 cause; - int level1; - irq_hw_number_t hwirq; - int virq = 0; - - cause = in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_SELECT_CAUSE); - if (cause & MV64X60_SELECT_CAUSE_HIGH) { - cause &= mv64x60_cached_high_mask; - level1 = MV64x60_LEVEL1_HIGH; - if (cause & MV64X60_HIGH_GPP_GROUPS) { - cause = in_le32(mv64x60_gpp_reg_base + - MV64x60_GPP_INTR_CAUSE); - cause &= mv64x60_cached_gpp_mask; - level1 = MV64x60_LEVEL1_GPP; - } - } else { - cause &= mv64x60_cached_low_mask; - level1 = MV64x60_LEVEL1_LOW; - } - if (cause) { - hwirq = (level1 << MV64x60_LEVEL1_OFFSET) | __ilog2(cause); - virq = irq_linear_revmap(mv64x60_irq_host, hwirq); - } - - return virq; -} diff --git a/arch/powerpc/sysdev/mv64x60_udbg.c b/arch/powerpc/sysdev/mv64x60_udbg.c deleted file mode 100644 index 3b8734b870e9..000000000000 --- a/arch/powerpc/sysdev/mv64x60_udbg.c +++ /dev/null @@ -1,152 +0,0 @@ -/* - * udbg serial input/output routines for the Marvell MV64x60 (Discovery). - * - * Author: Dale Farnsworth <dale@farnsworth.org> - * - * 2007 (c) MontaVista Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. - */ - -#include <asm/io.h> -#include <asm/prom.h> -#include <asm/udbg.h> - -#include <sysdev/mv64x60.h> - -#define MPSC_0_CR1_OFFSET 0x000c - -#define MPSC_0_CR2_OFFSET 0x0010 -#define MPSC_CHR_2_TCS (1 << 9) - -#define MPSC_0_CHR_10_OFFSET 0x0030 - -#define MPSC_INTR_CAUSE_OFF_0 0x0004 -#define MPSC_INTR_CAUSE_OFF_1 0x000c -#define MPSC_INTR_CAUSE_RCC (1<<6) - -static void __iomem *mpsc_base; -static void __iomem *mpsc_intr_cause; - -static void mv64x60_udbg_putc(char c) -{ - if (c == '\n') - mv64x60_udbg_putc('\r'); - - while(in_le32(mpsc_base + MPSC_0_CR2_OFFSET) & MPSC_CHR_2_TCS) - ; - out_le32(mpsc_base + MPSC_0_CR1_OFFSET, c); - out_le32(mpsc_base + MPSC_0_CR2_OFFSET, MPSC_CHR_2_TCS); -} - -static int mv64x60_udbg_testc(void) -{ - return (in_le32(mpsc_intr_cause) & MPSC_INTR_CAUSE_RCC) != 0; -} - -static int mv64x60_udbg_getc(void) -{ - int cause = 0; - int c; - - while (!mv64x60_udbg_testc()) - ; - - c = in_8(mpsc_base + MPSC_0_CHR_10_OFFSET + 2); - out_8(mpsc_base + MPSC_0_CHR_10_OFFSET + 2, c); - out_le32(mpsc_intr_cause, cause & ~MPSC_INTR_CAUSE_RCC); - return c; -} - -static int mv64x60_udbg_getc_poll(void) -{ - if (!mv64x60_udbg_testc()) - return -1; - - return mv64x60_udbg_getc(); -} - -static void mv64x60_udbg_init(void) -{ - struct device_node *np, *mpscintr, *stdout = NULL; - const char *path; - const phandle *ph; - struct resource r[2]; - const int *block_index; - int intr_cause_offset; - int err; - - path = of_get_property(of_chosen, "linux,stdout-path", NULL); - if (!path) - return; - - stdout = of_find_node_by_path(path); - if (!stdout) - return; - - for_each_compatible_node(np, NULL, "marvell,mv64360-mpsc") { - if (np == stdout) - break; - } - - of_node_put(stdout); - if (!np) - return; - - block_index = of_get_property(np, "cell-index", NULL); - if (!block_index) - goto error; - - switch (*block_index) { - case 0: - intr_cause_offset = MPSC_INTR_CAUSE_OFF_0; - break; - 
case 1: - intr_cause_offset = MPSC_INTR_CAUSE_OFF_1; - break; - default: - goto error; - } - - err = of_address_to_resource(np, 0, &r[0]); - if (err) - goto error; - - ph = of_get_property(np, "mpscintr", NULL); - mpscintr = of_find_node_by_phandle(*ph); - if (!mpscintr) - goto error; - - err = of_address_to_resource(mpscintr, 0, &r[1]); - of_node_put(mpscintr); - if (err) - goto error; - - of_node_put(np); - - mpsc_base = ioremap(r[0].start, resource_size(&r[0])); - if (!mpsc_base) - return; - - mpsc_intr_cause = ioremap(r[1].start, resource_size(&r[1])); - if (!mpsc_intr_cause) { - iounmap(mpsc_base); - return; - } - mpsc_intr_cause += intr_cause_offset; - - udbg_putc = mv64x60_udbg_putc; - udbg_getc = mv64x60_udbg_getc; - udbg_getc_poll = mv64x60_udbg_getc_poll; - - return; - -error: - of_node_put(np); -} - -void mv64x60_init_early(void) -{ - mv64x60_udbg_init(); -} diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c index 77e864d5506d..f87470319d71 100644 --- a/arch/powerpc/sysdev/xics/xics-common.c +++ b/arch/powerpc/sysdev/xics/xics-common.c @@ -446,10 +446,11 @@ static void __init xics_get_server_size(void) np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xics"); if (!np) return; + isize = of_get_property(np, "ibm,interrupt-server#-size", NULL); - if (!isize) - return; - xics_interrupt_server_size = be32_to_cpu(*isize); + if (isize) + xics_interrupt_server_size = be32_to_cpu(*isize); + of_node_put(np); } diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index b48454be5b98..83bcd72b21cf 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -341,7 +341,7 @@ static void xive_native_update_pending(struct xive_cpu *xc) * of the hypervisor interrupt (if any) */ cppr = ack & 0xff; - he = GETFIELD(TM_QW3_NSR_HE, (ack >> 8)); + he = (ack >> 8) >> 6; switch(he) { case TM_QW3_NSR_HE_NONE: /* Nothing to see here */ break; diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index 091f1d0d0af1..575db3b06a6b 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -19,6 +19,7 @@ #include <linux/spinlock.h> #include <linux/cpumask.h> #include <linux/mm.h> +#include <linux/delay.h> #include <asm/prom.h> #include <asm/io.h> @@ -108,6 +109,51 @@ static void xive_irq_bitmap_free(int irq) } } + +/* Based on the similar routines in RTAS */ +static unsigned int plpar_busy_delay_time(long rc) +{ + unsigned int ms = 0; + + if (H_IS_LONG_BUSY(rc)) { + ms = get_longbusy_msecs(rc); + } else if (rc == H_BUSY) { + ms = 10; /* seems appropriate for XIVE hcalls */ + } + + return ms; +} + +static unsigned int plpar_busy_delay(int rc) +{ + unsigned int ms; + + ms = plpar_busy_delay_time(rc); + if (ms) + mdelay(ms); + + return ms; +} + +/* + * Note: this call has a partition wide scope and can take a while to + * complete. If it returns H_LONG_BUSY_* it should be retried + * periodically. 
+ */ +static long plpar_int_reset(unsigned long flags) +{ + long rc; + + do { + rc = plpar_hcall_norets(H_INT_RESET, flags); + } while (plpar_busy_delay(rc)); + + if (rc) + pr_err("H_INT_RESET failed %ld\n", rc); + + return rc; +} + static long plpar_int_get_source_info(unsigned long flags, unsigned long lisn, unsigned long *src_flags, @@ -118,7 +164,10 @@ static long plpar_int_get_source_info(unsigned long flags, unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; long rc; - rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn); + do { + rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn); + } while (plpar_busy_delay(rc)); + if (rc) { pr_err("H_INT_GET_SOURCE_INFO lisn=%ld failed %ld\n", lisn, rc); return rc; @@ -151,8 +200,11 @@ static long plpar_int_set_source_config(unsigned long flags, flags, lisn, target, prio, sw_irq); - rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn, - target, prio, sw_irq); + do { + rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn, + target, prio, sw_irq); + } while (plpar_busy_delay(rc)); + if (rc) { pr_err("H_INT_SET_SOURCE_CONFIG lisn=%ld target=%lx prio=%lx failed %ld\n", lisn, target, prio, rc); @@ -171,7 +223,11 @@ static long plpar_int_get_queue_info(unsigned long flags, unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; long rc; - rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target, priority); + do { + rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target, + priority); + } while (plpar_busy_delay(rc)); + if (rc) { pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n", target, priority, rc); @@ -200,8 +256,11 @@ static long plpar_int_set_queue_config(unsigned long flags, pr_devel("H_INT_SET_QUEUE_CONFIG flags=%lx target=%lx priority=%lx qpage=%lx qsize=%lx\n", flags, target, priority, qpage, qsize); - rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target, - priority, qpage, qsize); + do { + rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target, + priority, qpage, qsize); + } while (plpar_busy_delay(rc)); + if (rc) { pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=%lx returned %ld\n", target, priority, qpage, rc); @@ -215,7 +274,10 @@ static long plpar_int_sync(unsigned long flags, unsigned long lisn) { long rc; - rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn); + do { + rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn); + } while (plpar_busy_delay(rc)); + if (rc) { pr_err("H_INT_SYNC lisn=%ld returned %ld\n", lisn, rc); return rc; @@ -238,7 +300,11 @@ static long plpar_int_esb(unsigned long flags, pr_devel("H_INT_ESB flags=%lx lisn=%lx offset=%lx in=%lx\n", flags, lisn, offset, in_data); - rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset, in_data); + do { + rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset, + in_data); + } while (plpar_busy_delay(rc)); + if (rc) { pr_err("H_INT_ESB lisn=%ld offset=%ld returned %ld\n", lisn, offset, rc); @@ -445,11 +511,7 @@ static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc) static void xive_spapr_shutdown(void) { - long rc; - - rc = plpar_hcall_norets(H_INT_RESET, 0); - if (rc) - pr_err("H_INT_RESET failed %ld\n", rc); + plpar_int_reset(0); } /* diff --git a/arch/powerpc/tools/gcc-check-mprofile-kernel.sh b/arch/powerpc/tools/gcc-check-mprofile-kernel.sh index 061f8035bdbe..a7dd0e5d9f98 100755 --- a/arch/powerpc/tools/gcc-check-mprofile-kernel.sh +++ b/arch/powerpc/tools/gcc-check-mprofile-kernel.sh @@ -7,17 +7,21 @@ set -o pipefail # To debug, uncomment the following line # set -x +# -mprofile-kernel is only supported on 
64le, so this should not be invoked +# for other targets. Therefore we can pass in -m64 and -mlittle-endian +# explicitly, to take care of toolchains defaulting to other targets. + # Test whether the compile option -mprofile-kernel exists and generates # profiling code (ie. a call to _mcount()). echo "int func() { return 0; }" | \ - $* -S -x c -O2 -p -mprofile-kernel - -o - 2> /dev/null | \ - grep -q "_mcount" + $* -m64 -mlittle-endian -S -x c -O2 -p -mprofile-kernel - -o - \ + 2> /dev/null | grep -q "_mcount" # Test whether the notrace attribute correctly suppresses calls to _mcount(). echo -e "#include <linux/compiler.h>\nnotrace int func() { return 0; }" | \ - $* -S -x c -O2 -p -mprofile-kernel - -o - 2> /dev/null | \ - grep -q "_mcount" && \ + $* -m64 -mlittle-endian -S -x c -O2 -p -mprofile-kernel - -o - \ + 2> /dev/null | grep -q "_mcount" && \ exit 1 echo "OK" diff --git a/arch/powerpc/xmon/nonstdio.h b/arch/powerpc/xmon/nonstdio.h index 2202ec61972c..e8deac6c84e2 100644 --- a/arch/powerpc/xmon/nonstdio.h +++ b/arch/powerpc/xmon/nonstdio.h @@ -1,13 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 */ #define EOF (-1) -#define printf xmon_printf -#define putchar xmon_putchar - extern void xmon_set_pagination_lpp(unsigned long lpp); extern void xmon_start_pagination(void); extern void xmon_end_pagination(void); extern int xmon_putchar(int c); extern void xmon_puts(const char *); extern char *xmon_gets(char *, int); -extern void xmon_printf(const char *, ...); +extern __printf(1, 2) void xmon_printf(const char *fmt, ...); + +#define printf xmon_printf +#define putchar xmon_putchar diff --git a/arch/powerpc/xmon/spu-dis.c b/arch/powerpc/xmon/spu-dis.c index e5f89837c82e..4cbc7da88524 100644 --- a/arch/powerpc/xmon/spu-dis.c +++ b/arch/powerpc/xmon/spu-dis.c @@ -102,7 +102,7 @@ print_insn_spu (unsigned long insn, unsigned long memaddr) if (index == 0) { - printf(".long 0x%x", insn); + printf(".long 0x%lx", insn); } else { @@ -134,27 +134,27 @@ print_insn_spu (unsigned long insn, unsigned long memaddr) switch (arg) { case A_T: - printf("$%d", + printf("$%lu", DECODE_INSN_RT (insn)); break; case A_A: - printf("$%d", + printf("$%lu", DECODE_INSN_RA (insn)); break; case A_B: - printf("$%d", + printf("$%lu", DECODE_INSN_RB (insn)); break; case A_C: - printf("$%d", + printf("$%lu", DECODE_INSN_RC (insn)); break; case A_S: - printf("$sp%d", + printf("$sp%lu", DECODE_INSN_RA (insn)); break; case A_H: - printf("$ch%d", + printf("$ch%lu", DECODE_INSN_RA (insn)); break; case A_P: @@ -162,11 +162,11 @@ print_insn_spu (unsigned long insn, unsigned long memaddr) printf("("); break; case A_U7A: - printf("%d", + printf("%lu", 173 - DECODE_INSN_U8 (insn)); break; case A_U7B: - printf("%d", + printf("%lu", 155 - DECODE_INSN_U8 (insn)); break; case A_S3: diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index a0842f1ff72c..47166ad2a669 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -515,7 +515,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi) get_output_lock(); excprint(regs); if (bp) { - printf("cpu 0x%x stopped at breakpoint 0x%lx (", + printf("cpu 0x%x stopped at breakpoint 0x%tx (", cpu, BP_NUM(bp)); xmon_print_symbol(regs->nip, " ", ")\n"); } @@ -622,7 +622,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi) excprint(regs); bp = at_breakpoint(regs->nip); if (bp) { - printf("Stopped at breakpoint %lx (", BP_NUM(bp)); + printf("Stopped at breakpoint %tx (", BP_NUM(bp)); xmon_print_symbol(regs->nip, " ", ")\n"); } if (unrecoverable_excp(regs)) @@ 
-778,6 +778,16 @@ static int xmon_fault_handler(struct pt_regs *regs) return 0; } +/* Force enable xmon if not already enabled */ +static inline void force_enable_xmon(void) +{ + /* Enable xmon hooks if needed */ + if (!xmon_on) { + printf("xmon: Enabling debugger hooks\n"); + xmon_on = 1; + } +} + static struct bpt *at_breakpoint(unsigned long pc) { int i; @@ -1094,6 +1104,7 @@ static int do_step(struct pt_regs *regs) unsigned int instr; int stepped; + force_enable_xmon(); /* check we are in 64-bit kernel mode, translation enabled */ if ((regs->msr & (MSR_64BIT|MSR_PR|MSR_IR)) == (MSR_64BIT|MSR_IR)) { if (mread(regs->nip, &instr, 4) == 4) { @@ -1160,7 +1171,11 @@ static int cpu_cmd(void) } /* try to switch to cpu specified */ if (!cpumask_test_cpu(cpu, &cpus_in_xmon)) { - printf("cpu 0x%x isn't in xmon\n", cpu); + printf("cpu 0x%lx isn't in xmon\n", cpu); +#ifdef CONFIG_PPC64 + printf("backtrace of paca[0x%lx].saved_r1 (possibly stale):\n", cpu); + xmon_show_stack(paca_ptrs[cpu]->saved_r1, 0, 0); +#endif return 0; } xmon_taken = 0; @@ -1174,7 +1189,7 @@ static int cpu_cmd(void) /* take control back */ mb(); xmon_owner = smp_processor_id(); - printf("cpu 0x%x didn't take control\n", cpu); + printf("cpu 0x%lx didn't take control\n", cpu); return 0; } barrier(); @@ -1268,16 +1283,6 @@ static long check_bp_loc(unsigned long addr) return 1; } -/* Force enable xmon if not already enabled */ -static inline void force_enable_xmon(void) -{ - /* Enable xmon hooks if needed */ - if (!xmon_on) { - printf("xmon: Enabling debugger hooks\n"); - xmon_on = 1; - } -} - static char *breakpoint_help_string = "Breakpoint command usage:\n" "b show breakpoints\n" @@ -1374,7 +1379,7 @@ bpt_cmds(void) } } - printf("Cleared breakpoint %lx (", BP_NUM(bp)); + printf("Cleared breakpoint %tx (", BP_NUM(bp)); xmon_print_symbol(bp->address, " ", ")\n"); bp->enabled = 0; break; @@ -1401,7 +1406,7 @@ bpt_cmds(void) for (bp = bpts; bp < &bpts[NBPTS]; ++bp) { if (!bp->enabled) continue; - printf("%2x %s ", BP_NUM(bp), + printf("%tx %s ", BP_NUM(bp), (bp->enabled & BP_CIABR) ? 
"inst": "trap"); xmon_print_symbol(bp->address, " ", "\n"); } @@ -1618,11 +1623,11 @@ static void excprint(struct pt_regs *fp) #endif /* CONFIG_SMP */ trap = TRAP(fp); - printf("Vector: %lx %s at [%lx]\n", fp->trap, getvecname(trap), fp); + printf("Vector: %lx %s at [%px]\n", fp->trap, getvecname(trap), fp); printf(" pc: "); xmon_print_symbol(fp->nip, ": ", "\n"); - printf(" lr: ", fp->link); + printf(" lr: "); xmon_print_symbol(fp->link, ": ", "\n"); printf(" sp: %lx\n", fp->gpr[1]); @@ -1634,13 +1639,13 @@ static void excprint(struct pt_regs *fp) printf(" dsisr: %lx\n", fp->dsisr); } - printf(" current = 0x%lx\n", current); + printf(" current = 0x%px\n", current); #ifdef CONFIG_PPC64 - printf(" paca = 0x%lx\t softe: %d\t irq_happened: 0x%02x\n", + printf(" paca = 0x%px\t irqmask: 0x%02x\t irq_happened: 0x%02x\n", local_paca, local_paca->irq_soft_mask, local_paca->irq_happened); #endif if (current) { - printf(" pid = %ld, comm = %s\n", + printf(" pid = %d, comm = %s\n", current->pid, current->comm); } @@ -1676,16 +1681,16 @@ static void prregs(struct pt_regs *fp) #ifdef CONFIG_PPC64 if (FULL_REGS(fp)) { for (n = 0; n < 16; ++n) - printf("R%.2ld = "REG" R%.2ld = "REG"\n", + printf("R%.2d = "REG" R%.2d = "REG"\n", n, fp->gpr[n], n+16, fp->gpr[n+16]); } else { for (n = 0; n < 7; ++n) - printf("R%.2ld = "REG" R%.2ld = "REG"\n", + printf("R%.2d = "REG" R%.2d = "REG"\n", n, fp->gpr[n], n+7, fp->gpr[n+7]); } #else for (n = 0; n < 32; ++n) { - printf("R%.2d = %.8x%s", n, fp->gpr[n], + printf("R%.2d = %.8lx%s", n, fp->gpr[n], (n & 3) == 3? "\n": " "); if (n == 12 && !FULL_REGS(fp)) { printf("\n"); @@ -1789,9 +1794,9 @@ static void dump_206_sprs(void) /* Actually some of these pre-date 2.06, but whatevs */ - printf("srr0 = %.16lx srr1 = %.16lx dsisr = %.8x\n", + printf("srr0 = %.16lx srr1 = %.16lx dsisr = %.8lx\n", mfspr(SPRN_SRR0), mfspr(SPRN_SRR1), mfspr(SPRN_DSISR)); - printf("dscr = %.16lx ppr = %.16lx pir = %.8x\n", + printf("dscr = %.16lx ppr = %.16lx pir = %.8lx\n", mfspr(SPRN_DSCR), mfspr(SPRN_PPR), mfspr(SPRN_PIR)); printf("amr = %.16lx uamor = %.16lx\n", mfspr(SPRN_AMR), mfspr(SPRN_UAMOR)); @@ -1799,11 +1804,11 @@ static void dump_206_sprs(void) if (!(mfmsr() & MSR_HV)) return; - printf("sdr1 = %.16lx hdar = %.16lx hdsisr = %.8x\n", + printf("sdr1 = %.16lx hdar = %.16lx hdsisr = %.8lx\n", mfspr(SPRN_SDR1), mfspr(SPRN_HDAR), mfspr(SPRN_HDSISR)); printf("hsrr0 = %.16lx hsrr1 = %.16lx hdec = %.16lx\n", mfspr(SPRN_HSRR0), mfspr(SPRN_HSRR1), mfspr(SPRN_HDEC)); - printf("lpcr = %.16lx pcr = %.16lx lpidr = %.8x\n", + printf("lpcr = %.16lx pcr = %.16lx lpidr = %.8lx\n", mfspr(SPRN_LPCR), mfspr(SPRN_PCR), mfspr(SPRN_LPID)); printf("hsprg0 = %.16lx hsprg1 = %.16lx amor = %.16lx\n", mfspr(SPRN_HSPRG0), mfspr(SPRN_HSPRG1), mfspr(SPRN_AMOR)); @@ -1820,10 +1825,10 @@ static void dump_207_sprs(void) if (!cpu_has_feature(CPU_FTR_ARCH_207S)) return; - printf("dpdes = %.16lx tir = %.16lx cir = %.8x\n", + printf("dpdes = %.16lx tir = %.16lx cir = %.8lx\n", mfspr(SPRN_DPDES), mfspr(SPRN_TIR), mfspr(SPRN_CIR)); - printf("fscr = %.16lx tar = %.16lx pspb = %.8x\n", + printf("fscr = %.16lx tar = %.16lx pspb = %.8lx\n", mfspr(SPRN_FSCR), mfspr(SPRN_TAR), mfspr(SPRN_PSPB)); msr = mfmsr(); @@ -1836,12 +1841,12 @@ static void dump_207_sprs(void) printf("mmcr0 = %.16lx mmcr1 = %.16lx mmcr2 = %.16lx\n", mfspr(SPRN_MMCR0), mfspr(SPRN_MMCR1), mfspr(SPRN_MMCR2)); - printf("pmc1 = %.8x pmc2 = %.8x pmc3 = %.8x pmc4 = %.8x\n", + printf("pmc1 = %.8lx pmc2 = %.8lx pmc3 = %.8lx pmc4 = %.8lx\n", mfspr(SPRN_PMC1), 
mfspr(SPRN_PMC2), mfspr(SPRN_PMC3), mfspr(SPRN_PMC4)); - printf("mmcra = %.16lx siar = %.16lx pmc5 = %.8x\n", + printf("mmcra = %.16lx siar = %.16lx pmc5 = %.8lx\n", mfspr(SPRN_MMCRA), mfspr(SPRN_SIAR), mfspr(SPRN_PMC5)); - printf("sdar = %.16lx sier = %.16lx pmc6 = %.8x\n", + printf("sdar = %.16lx sier = %.16lx pmc6 = %.8lx\n", mfspr(SPRN_SDAR), mfspr(SPRN_SIER), mfspr(SPRN_PMC6)); printf("ebbhr = %.16lx ebbrr = %.16lx bescr = %.16lx\n", mfspr(SPRN_EBBHR), mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR)); @@ -2345,31 +2350,31 @@ static void dump_one_paca(int cpu) printf("paca for cpu 0x%x @ %px:\n", cpu, p); - printf(" %-*s = %s\n", 20, "possible", cpu_possible(cpu) ? "yes" : "no"); - printf(" %-*s = %s\n", 20, "present", cpu_present(cpu) ? "yes" : "no"); - printf(" %-*s = %s\n", 20, "online", cpu_online(cpu) ? "yes" : "no"); + printf(" %-*s = %s\n", 25, "possible", cpu_possible(cpu) ? "yes" : "no"); + printf(" %-*s = %s\n", 25, "present", cpu_present(cpu) ? "yes" : "no"); + printf(" %-*s = %s\n", 25, "online", cpu_online(cpu) ? "yes" : "no"); -#define DUMP(paca, name, format) \ - printf(" %-*s = %#-*"format"\t(0x%lx)\n", 20, #name, 18, paca->name, \ +#define DUMP(paca, name, format) \ + printf(" %-*s = "format"\t(0x%lx)\n", 25, #name, 18, paca->name, \ offsetof(struct paca_struct, name)); - DUMP(p, lock_token, "x"); - DUMP(p, paca_index, "x"); - DUMP(p, kernel_toc, "lx"); - DUMP(p, kernelbase, "lx"); - DUMP(p, kernel_msr, "lx"); - DUMP(p, emergency_sp, "px"); + DUMP(p, lock_token, "%#-*x"); + DUMP(p, paca_index, "%#-*x"); + DUMP(p, kernel_toc, "%#-*llx"); + DUMP(p, kernelbase, "%#-*llx"); + DUMP(p, kernel_msr, "%#-*llx"); + DUMP(p, emergency_sp, "%-*px"); #ifdef CONFIG_PPC_BOOK3S_64 - DUMP(p, nmi_emergency_sp, "px"); - DUMP(p, mc_emergency_sp, "px"); - DUMP(p, in_nmi, "x"); - DUMP(p, in_mce, "x"); - DUMP(p, hmi_event_available, "x"); + DUMP(p, nmi_emergency_sp, "%-*px"); + DUMP(p, mc_emergency_sp, "%-*px"); + DUMP(p, in_nmi, "%#-*x"); + DUMP(p, in_mce, "%#-*x"); + DUMP(p, hmi_event_available, "%#-*x"); #endif - DUMP(p, data_offset, "lx"); - DUMP(p, hw_cpu_id, "x"); - DUMP(p, cpu_start, "x"); - DUMP(p, kexec_state, "x"); + DUMP(p, data_offset, "%#-*llx"); + DUMP(p, hw_cpu_id, "%#-*x"); + DUMP(p, cpu_start, "%#-*x"); + DUMP(p, kexec_state, "%#-*x"); #ifdef CONFIG_PPC_BOOK3S_64 for (i = 0; i < SLB_NUM_BOLTED; i++) { u64 esid, vsid; @@ -2381,58 +2386,69 @@ static void dump_one_paca(int cpu) vsid = be64_to_cpu(p->slb_shadow_ptr->save_area[i].vsid); if (esid || vsid) { - printf(" slb_shadow[%d]: = 0x%016lx 0x%016lx\n", - i, esid, vsid); + printf(" %-*s[%d] = 0x%016llx 0x%016llx\n", + 22, "slb_shadow", i, esid, vsid); } } - DUMP(p, vmalloc_sllp, "x"); - DUMP(p, slb_cache_ptr, "x"); + DUMP(p, vmalloc_sllp, "%#-*x"); + DUMP(p, slb_cache_ptr, "%#-*x"); for (i = 0; i < SLB_CACHE_ENTRIES; i++) - printf(" slb_cache[%d]: = 0x%016lx\n", i, p->slb_cache[i]); + printf(" %-*s[%d] = 0x%016x\n", + 22, "slb_cache", i, p->slb_cache[i]); - DUMP(p, rfi_flush_fallback_area, "px"); + DUMP(p, rfi_flush_fallback_area, "%-*px"); #endif - DUMP(p, dscr_default, "llx"); + DUMP(p, dscr_default, "%#-*llx"); #ifdef CONFIG_PPC_BOOK3E - DUMP(p, pgd, "px"); - DUMP(p, kernel_pgd, "px"); - DUMP(p, tcd_ptr, "px"); - DUMP(p, mc_kstack, "px"); - DUMP(p, crit_kstack, "px"); - DUMP(p, dbg_kstack, "px"); + DUMP(p, pgd, "%-*px"); + DUMP(p, kernel_pgd, "%-*px"); + DUMP(p, tcd_ptr, "%-*px"); + DUMP(p, mc_kstack, "%-*px"); + DUMP(p, crit_kstack, "%-*px"); + DUMP(p, dbg_kstack, "%-*px"); #endif - DUMP(p, __current, "px"); - DUMP(p, kstack, "lx"); - 
printf(" kstack_base = 0x%016lx\n", p->kstack & ~(THREAD_SIZE - 1)); - DUMP(p, stab_rr, "lx"); - DUMP(p, saved_r1, "lx"); - DUMP(p, trap_save, "x"); - DUMP(p, irq_soft_mask, "x"); - DUMP(p, irq_happened, "x"); - DUMP(p, io_sync, "x"); - DUMP(p, irq_work_pending, "x"); - DUMP(p, nap_state_lost, "x"); - DUMP(p, sprg_vdso, "llx"); + DUMP(p, __current, "%-*px"); + DUMP(p, kstack, "%#-*llx"); + printf(" %-*s = 0x%016llx\n", 25, "kstack_base", p->kstack & ~(THREAD_SIZE - 1)); + DUMP(p, stab_rr, "%#-*llx"); + DUMP(p, saved_r1, "%#-*llx"); + DUMP(p, trap_save, "%#-*x"); + DUMP(p, irq_soft_mask, "%#-*x"); + DUMP(p, irq_happened, "%#-*x"); + DUMP(p, io_sync, "%#-*x"); + DUMP(p, irq_work_pending, "%#-*x"); + DUMP(p, nap_state_lost, "%#-*x"); + DUMP(p, sprg_vdso, "%#-*llx"); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM - DUMP(p, tm_scratch, "llx"); + DUMP(p, tm_scratch, "%#-*llx"); #endif #ifdef CONFIG_PPC_POWERNV - DUMP(p, core_idle_state_ptr, "px"); - DUMP(p, thread_idle_state, "x"); - DUMP(p, thread_mask, "x"); - DUMP(p, subcore_sibling_mask, "x"); + DUMP(p, core_idle_state_ptr, "%-*px"); + DUMP(p, thread_idle_state, "%#-*x"); + DUMP(p, thread_mask, "%#-*x"); + DUMP(p, subcore_sibling_mask, "%#-*x"); + DUMP(p, thread_sibling_pacas, "%-*px"); + DUMP(p, requested_psscr, "%#-*llx"); + DUMP(p, stop_sprs.pid, "%#-*llx"); + DUMP(p, stop_sprs.ldbar, "%#-*llx"); + DUMP(p, stop_sprs.fscr, "%#-*llx"); + DUMP(p, stop_sprs.hfscr, "%#-*llx"); + DUMP(p, stop_sprs.mmcr1, "%#-*llx"); + DUMP(p, stop_sprs.mmcr2, "%#-*llx"); + DUMP(p, stop_sprs.mmcra, "%#-*llx"); + DUMP(p, dont_stop.counter, "%#-*x"); #endif - DUMP(p, accounting.utime, "llx"); - DUMP(p, accounting.stime, "llx"); - DUMP(p, accounting.utime_scaled, "llx"); - DUMP(p, accounting.starttime, "llx"); - DUMP(p, accounting.starttime_user, "llx"); - DUMP(p, accounting.startspurr, "llx"); - DUMP(p, accounting.utime_sspurr, "llx"); - DUMP(p, accounting.steal_time, "llx"); + DUMP(p, accounting.utime, "%#-*lx"); + DUMP(p, accounting.stime, "%#-*lx"); + DUMP(p, accounting.utime_scaled, "%#-*lx"); + DUMP(p, accounting.starttime, "%#-*lx"); + DUMP(p, accounting.starttime_user, "%#-*lx"); + DUMP(p, accounting.startspurr, "%#-*lx"); + DUMP(p, accounting.utime_sspurr, "%#-*lx"); + DUMP(p, accounting.steal_time, "%#-*lx"); #undef DUMP catch_memory_errors = 0; @@ -2578,7 +2594,7 @@ static void dump_by_size(unsigned long addr, long count, int size) default: val = 0; } - printf("%0*lx", size * 2, val); + printf("%0*llx", size * 2, val); } printf("\n"); } @@ -2742,7 +2758,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr, dotted = 0; last_inst = inst; if (praddr) - printf(REG" %.8x", adr, inst); + printf(REG" %.8lx", adr, inst); printf("\t"); dump_func(inst, adr); printf("\n"); @@ -2874,7 +2890,7 @@ memdiffs(unsigned char *p1, unsigned char *p2, unsigned nb, unsigned maxpr) for( n = nb; n > 0; --n ) if( *p1++ != *p2++ ) if( ++prt <= maxpr ) - printf("%.16x %.2x # %.16x %.2x\n", p1 - 1, + printf("%px %.2x # %px %.2x\n", p1 - 1, p1[-1], p2 - 1, p2[-1]); if( prt > maxpr ) printf("Total of %d differences\n", prt); @@ -2934,13 +2950,13 @@ memzcan(void) if (ok && !ook) { printf("%.8x .. 
", a); } else if (!ok && ook) - printf("%.8x\n", a - mskip); + printf("%.8lx\n", a - mskip); ook = ok; if (a + mskip < a) break; } if (ook) - printf("%.8x\n", a - mskip); + printf("%.8lx\n", a - mskip); } static void show_task(struct task_struct *tsk) @@ -3024,13 +3040,13 @@ static void show_pte(unsigned long addr) return; } - printf("pgd @ 0x%016lx\n", pgdir); + printf("pgd @ 0x%px\n", pgdir); if (pgd_huge(*pgdp)) { format_pte(pgdp, pgd_val(*pgdp)); return; } - printf("pgdp @ 0x%016lx = 0x%016lx\n", pgdp, pgd_val(*pgdp)); + printf("pgdp @ 0x%px = 0x%016lx\n", pgdp, pgd_val(*pgdp)); pudp = pud_offset(pgdp, addr); @@ -3044,7 +3060,7 @@ static void show_pte(unsigned long addr) return; } - printf("pudp @ 0x%016lx = 0x%016lx\n", pudp, pud_val(*pudp)); + printf("pudp @ 0x%px = 0x%016lx\n", pudp, pud_val(*pudp)); pmdp = pmd_offset(pudp, addr); @@ -3057,7 +3073,7 @@ static void show_pte(unsigned long addr) format_pte(pmdp, pmd_val(*pmdp)); return; } - printf("pmdp @ 0x%016lx = 0x%016lx\n", pmdp, pmd_val(*pmdp)); + printf("pmdp @ 0x%px = 0x%016lx\n", pmdp, pmd_val(*pmdp)); ptep = pte_offset_map(pmdp, addr); if (pte_none(*ptep)) { @@ -3161,7 +3177,7 @@ skipbl(void) } #define N_PTREGS 44 -static char *regnames[N_PTREGS] = { +static const char *regnames[N_PTREGS] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", @@ -3196,18 +3212,17 @@ scanhex(unsigned long *vp) regname[i] = c; } regname[i] = 0; - for (i = 0; i < N_PTREGS; ++i) { - if (strcmp(regnames[i], regname) == 0) { - if (xmon_regs == NULL) { - printf("regs not available\n"); - return 0; - } - *vp = ((unsigned long *)xmon_regs)[i]; - return 1; - } + i = match_string(regnames, N_PTREGS, regname); + if (i < 0) { + printf("invalid register name '%%%s'\n", regname); + return 0; } - printf("invalid register name '%%%s'\n", regname); - return 0; + if (xmon_regs == NULL) { + printf("regs not available\n"); + return 0; + } + *vp = ((unsigned long *)xmon_regs)[i]; + return 1; } /* skip leading "0x" if any */ @@ -3456,9 +3471,9 @@ static void dump_tlb_44x(void) asm volatile("tlbre %0,%1,0" : "=r" (w0) : "r" (i)); asm volatile("tlbre %0,%1,1" : "=r" (w1) : "r" (i)); asm volatile("tlbre %0,%1,2" : "=r" (w2) : "r" (i)); - printf("[%02x] %08x %08x %08x ", i, w0, w1, w2); + printf("[%02x] %08lx %08lx %08lx ", i, w0, w1, w2); if (w0 & PPC44x_TLB_VALID) { - printf("V %08x -> %01x%08x %c%c%c%c%c", + printf("V %08lx -> %01lx%08lx %c%c%c%c%c", w0 & PPC44x_TLB_EPN_MASK, w1 & PPC44x_TLB_ERPN_MASK, w1 & PPC44x_TLB_RPN_MASK, @@ -3882,19 +3897,19 @@ static void dump_spu_fields(struct spu *spu) DUMP_FIELD(spu, "0x%lx", ls_size); DUMP_FIELD(spu, "0x%x", node); DUMP_FIELD(spu, "0x%lx", flags); - DUMP_FIELD(spu, "%d", class_0_pending); - DUMP_FIELD(spu, "0x%lx", class_0_dar); - DUMP_FIELD(spu, "0x%lx", class_1_dar); - DUMP_FIELD(spu, "0x%lx", class_1_dsisr); - DUMP_FIELD(spu, "0x%lx", irqs[0]); - DUMP_FIELD(spu, "0x%lx", irqs[1]); - DUMP_FIELD(spu, "0x%lx", irqs[2]); + DUMP_FIELD(spu, "%llu", class_0_pending); + DUMP_FIELD(spu, "0x%llx", class_0_dar); + DUMP_FIELD(spu, "0x%llx", class_1_dar); + DUMP_FIELD(spu, "0x%llx", class_1_dsisr); + DUMP_FIELD(spu, "0x%x", irqs[0]); + DUMP_FIELD(spu, "0x%x", irqs[1]); + DUMP_FIELD(spu, "0x%x", irqs[2]); DUMP_FIELD(spu, "0x%x", slb_replace); DUMP_FIELD(spu, "%d", pid); DUMP_FIELD(spu, "0x%p", mm); DUMP_FIELD(spu, "0x%p", ctx); DUMP_FIELD(spu, "0x%p", rq); - DUMP_FIELD(spu, "0x%p", timestamp); + DUMP_FIELD(spu, "0x%llx", 
timestamp); DUMP_FIELD(spu, "0x%lx", problem_phys); DUMP_FIELD(spu, "0x%p", problem); DUMP_VALUE("0x%x", problem->spu_runcntl_RW, @@ -3925,7 +3940,7 @@ static void dump_spu_ls(unsigned long num, int subcmd) __delay(200); } else { catch_memory_errors = 0; - printf("*** Error: accessing spu info for spu %d\n", num); + printf("*** Error: accessing spu info for spu %ld\n", num); return; } catch_memory_errors = 0; diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index cd4fd85fde84..17f19e67993b 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -3,8 +3,16 @@ # see Documentation/kbuild/kconfig-language.txt. # +config 64BIT + bool + +config 32BIT + bool + config RISCV def_bool y + # even on 32-bit, physical (and DMA) addresses are > 32-bits + select PHYS_ADDR_T_64BIT select OF select OF_EARLY_FLATTREE select OF_IRQ @@ -22,7 +30,6 @@ config RISCV select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP - select HAVE_DMA_API_DEBUG select HAVE_DMA_CONTIGUOUS select HAVE_GENERIC_DMA_COHERENT select IRQ_DOMAIN @@ -35,20 +42,14 @@ config RISCV select THREAD_INFO_IN_TASK select RISCV_TIMER select GENERIC_IRQ_MULTI_HANDLER + select ARCH_HAS_PTE_SPECIAL config MMU def_bool y -# even on 32-bit, physical (and DMA) addresses are > 32-bits -config ARCH_PHYS_ADDR_T_64BIT - def_bool y - config ZONE_DMA32 bool - default y - -config ARCH_DMA_ADDR_T_64BIT - def_bool y + default y if 64BIT config PAGE_OFFSET hex @@ -101,7 +102,6 @@ choice config ARCH_RV32I bool "RV32I" - select CPU_SUPPORTS_32BIT_KERNEL select 32BIT select GENERIC_ASHLDI3 select GENERIC_ASHRDI3 @@ -109,13 +109,13 @@ config ARCH_RV32I config ARCH_RV64I bool "RV64I" - select CPU_SUPPORTS_64BIT_KERNEL select 64BIT select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FTRACE_MCOUNT_RECORD select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS + select SWIOTLB endchoice @@ -171,11 +171,6 @@ config NR_CPUS depends on SMP default "8" -config CPU_SUPPORTS_32BIT_KERNEL - bool -config CPU_SUPPORTS_64BIT_KERNEL - bool - choice prompt "CPU Tuning" default TUNE_GENERIC @@ -202,24 +197,6 @@ endmenu menu "Kernel type" -choice - prompt "Kernel code model" - default 64BIT - -config 32BIT - bool "32-bit kernel" - depends on CPU_SUPPORTS_32BIT_KERNEL - help - Select this option to build a 32-bit kernel. - -config 64BIT - bool "64-bit kernel" - depends on CPU_SUPPORTS_64BIT_KERNEL - help - Select this option to build a 64-bit kernel. 
- -endchoice - source "mm/Kconfig" source "kernel/Kconfig.preempt" diff --git a/arch/riscv/include/asm/dma-mapping.h b/arch/riscv/include/asm/dma-mapping.h new file mode 100644 index 000000000000..8facc1c8fa05 --- /dev/null +++ b/arch/riscv/include/asm/dma-mapping.h @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef _RISCV_ASM_DMA_MAPPING_H +#define _RISCV_ASM_DMA_MAPPING_H 1 + +#ifdef CONFIG_SWIOTLB +#include <linux/swiotlb.h> +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) +{ + return &swiotlb_dma_ops; +} +#else +#include <asm-generic/dma-mapping.h> +#endif /* CONFIG_SWIOTLB */ + +#endif /* _RISCV_ASM_DMA_MAPPING_H */ diff --git a/arch/riscv/include/asm/pci.h b/arch/riscv/include/asm/pci.h index 0f2fc9ef20fc..b3638c505728 100644 --- a/arch/riscv/include/asm/pci.h +++ b/arch/riscv/include/asm/pci.h @@ -26,9 +26,6 @@ /* RISC-V shim does not initialize PCI bus */ #define pcibios_assign_all_busses() 1 -/* We do not have an IOMMU */ -#define PCI_DMA_BUS_IS_PHYS 1 - extern int isa_dma_bridge_buggy; #ifdef CONFIG_PCI diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h index 997ddbb1d370..2fa2942be221 100644 --- a/arch/riscv/include/asm/pgtable-bits.h +++ b/arch/riscv/include/asm/pgtable-bits.h @@ -42,7 +42,4 @@ _PAGE_WRITE | _PAGE_EXEC | \ _PAGE_USER | _PAGE_GLOBAL)) -/* Advertise support for _PAGE_SPECIAL */ -#define __HAVE_ARCH_PTE_SPECIAL - #endif /* _ASM_RISCV_PGTABLE_BITS_H */ diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c index 10ed2749e246..0bc86e5f8f3f 100644 --- a/arch/riscv/kernel/cacheinfo.c +++ b/arch/riscv/kernel/cacheinfo.c @@ -20,7 +20,6 @@ static void ci_leaf_init(struct cacheinfo *this_leaf, struct device_node *node, enum cache_type type, unsigned int level) { - this_leaf->of_node = node; this_leaf->level = level; this_leaf->type = type; /* not a sector cache */ diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index c11f40c1b2a8..ee44a48faf79 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -29,6 +29,7 @@ #include <linux/of_fdt.h> #include <linux/of_platform.h> #include <linux/sched/task.h> +#include <linux/swiotlb.h> #include <asm/setup.h> #include <asm/sections.h> @@ -206,6 +207,7 @@ void __init setup_arch(char **cmdline_p) setup_bootmem(); paging_init(); unflatten_device_tree(); + swiotlb_init(1); #ifdef CONFIG_SMP setup_smp(); diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 93132cb59184..b99d9dd21fd0 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -63,18 +63,6 @@ void die(struct pt_regs *regs, const char *str) do_exit(SIGSEGV); } -static inline void do_trap_siginfo(int signo, int code, - unsigned long addr, struct task_struct *tsk) -{ - siginfo_t info; - - info.si_signo = signo; - info.si_errno = 0; - info.si_code = code; - info.si_addr = (void __user *)addr; - force_sig_info(signo, &info, tsk); -} - void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr, struct task_struct *tsk) { @@ -87,7 +75,7 @@ void do_trap(struct pt_regs *regs, int signo, int code, show_regs(regs); } - do_trap_siginfo(signo, code, addr, tsk); + force_sig_fault(signo, code, (void __user *)addr, tsk); } static void do_trap_error(struct pt_regs *regs, int signo, int code, @@ -149,7 +137,7 @@ asmlinkage void do_trap_break(struct pt_regs *regs) } #endif /* CONFIG_GENERIC_BUG */ - do_trap_siginfo(SIGTRAP, TRAP_BRKPT, regs->sepc, current); + force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)(regs->sepc), current); regs->sepc += 0x4; }
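The RISC-V hunks above route all DMA mapping through swiotlb: ARCH_RV64I now selects SWIOTLB, the new asm/dma-mapping.h returns &swiotlb_dma_ops from get_arch_dma_ops(), and setup_arch() initializes the bounce-buffer pool with swiotlb_init(1). A hedged sketch of what this means for a driver mapping a streaming buffer, assuming the dma-mapping API of this kernel generation (the function name and its use outside the patch are ours, purely illustrative):

	#include <linux/dma-mapping.h>
	#include <linux/mm.h>

	static dma_addr_t riscv_dma_map_sketch(struct device *dev, void *ptr,
					       size_t size)
	{
		/* with no per-device dma_ops set, get_dma_ops() falls back
		 * to get_arch_dma_ops() above, i.e. &swiotlb_dma_ops */
		const struct dma_map_ops *ops = get_dma_ops(dev);

		/* swiotlb bounces the buffer if it is not DMA-addressable */
		return ops->map_page(dev, virt_to_page(ptr),
				     offset_in_page(ptr), size,
				     DMA_TO_DEVICE, 0);
	}

This mirrors what dma_map_single() itself reduces to here; drivers keep calling the generic API and only the backing ops change.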
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 199ac3e4da1d..baed39772c84 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -35,9 +35,6 @@ config GENERIC_BUG config GENERIC_BUG_RELATIVE_POINTERS def_bool y -config ARCH_DMA_ADDR_T_64BIT - def_bool y - config GENERIC_LOCKBREAK def_bool y if SMP && PREEMPT @@ -68,6 +65,7 @@ config S390 select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA select ARCH_HAS_KCOV + select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_MEMORY select ARCH_HAS_SG_CHAIN select ARCH_HAS_STRICT_KERNEL_RWX @@ -133,7 +131,6 @@ config S390 select HAVE_CMPXCHG_LOCAL select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_KMEMLEAK - select HAVE_DMA_API_DEBUG select HAVE_DMA_CONTIGUOUS select DMA_DIRECT_OPS select HAVE_DYNAMIC_FTRACE @@ -709,7 +706,11 @@ config QDIO menuconfig PCI bool "PCI support" select PCI_MSI + select IOMMU_HELPER select IOMMU_SUPPORT + select NEED_DMA_MAP_STATE + select NEED_SG_DMA_LENGTH + help Enable PCI support. @@ -733,15 +734,6 @@ config PCI_DOMAINS config HAS_IOMEM def_bool PCI -config IOMMU_HELPER - def_bool PCI - -config NEED_SG_DMA_LENGTH - def_bool PCI - -config NEED_DMA_MAP_STATE - def_bool PCI - config CHSC_SCH def_tristate m prompt "Support for CHSC subchannels" @@ -847,6 +839,10 @@ config CCW source "drivers/Kconfig" +config HAVE_PNETID + tristate + default (SMC || CCWGROUP) + source "fs/Kconfig" source "arch/s390/Kconfig.debug" diff --git a/arch/s390/Makefile b/arch/s390/Makefile index c79936d02f7b..68a690442be0 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -18,7 +18,7 @@ KBUILD_CFLAGS += -m64 KBUILD_AFLAGS += -m64 UTS_MACHINE := s390x STACK_SIZE := 16384 -CHECKFLAGS += -D__s390__ -D__s390x__ -mbig-endian +CHECKFLAGS += -D__s390__ -D__s390x__ export LD_BFD diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c index 8720e9203ecf..dd95cdbd22ce 100644 --- a/arch/s390/crypto/arch_random.c +++ b/arch/s390/crypto/arch_random.c @@ -2,14 +2,37 @@ /* * s390 arch random implementation. * - * Copyright IBM Corp. 2017 - * Author(s): Harald Freudenberger <freude@de.ibm.com> + * Copyright IBM Corp. 2017, 2018 + * Author(s): Harald Freudenberger + * + * The s390_arch_random_generate() function may be called from random.c + * in interrupt context. So this implementation does its best to be very + * fast. There is a buffer of random data which is asynchronously checked + * and filled by a workqueue thread. + * If there are enough bytes in the buffer, s390_arch_random_generate() + * just delivers these bytes. Otherwise false is returned until the + * worker thread refills the buffer. + * The worker fills the rng buffer by pulling fresh entropy from the + * high quality (but slow) true hardware random generator. This entropy + * is then spread over the buffer with a pseudo random generator (PRNG). + * As arch_get_random_seed_long() fetches 8 bytes and the calling + * function add_interrupt_randomness() counts this as 1 bit of entropy, + * the distribution needs to make sure there is in fact 1 bit of entropy + * contained in 8 bytes of the buffer. The current values pull 32 bytes + * of entropy and scatter this over a 2048 byte buffer. So 8 bytes in + * the buffer will contain 1 bit of entropy. + * The worker thread is rescheduled based on the charge level of the + * buffer but at least with 500 ms delay to avoid too much CPU consumption. + * So the max. amount of rng data delivered via arch_get_random_seed is + * limited to 4k bytes per second. */
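To make the entropy accounting in the comment above concrete, a quick check of the arithmetic against the constants added below:

	ARCH_PRNG_SEED_SIZE = 32 bytes = 256 bits of fresh entropy per refill
	ARCH_RNG_BUF_SIZE = 2048 bytes of PRNG output per refill
	2048 bytes / 256 bits = 8 bytes of buffer per bit of entropy,
	so each 8-byte arch_get_random_seed_long() read is backed by 1 bit.
	Refills are spaced at least ARCH_REFILL_TICKS = HZ/2 (500 ms) apart,
	so at most 2 * 2048 = 4096 bytes of seed data per second.

Note also that s390_arch_random_generate() below depends on arch_rng_buf_idx being unsigned: subtracting nbytes from an index that is too small wraps far above ARCH_RNG_BUF_SIZE, so the bounds check fails until the worker has refilled the buffer.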
#include <linux/kernel.h> #include <linux/atomic.h> #include <linux/random.h> +#include <linux/slab.h> #include <linux/static_key.h> +#include <linux/workqueue.h> #include <asm/cpacf.h> DEFINE_STATIC_KEY_FALSE(s390_arch_random_available); @@ -17,11 +40,83 @@ DEFINE_STATIC_KEY_FALSE(s390_arch_random_available); atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0); EXPORT_SYMBOL(s390_arch_random_counter); +#define ARCH_REFILL_TICKS (HZ/2) +#define ARCH_PRNG_SEED_SIZE 32 +#define ARCH_RNG_BUF_SIZE 2048 + +static DEFINE_SPINLOCK(arch_rng_lock); +static u8 *arch_rng_buf; +static unsigned int arch_rng_buf_idx; + +static void arch_rng_refill_buffer(struct work_struct *); +static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer); + +bool s390_arch_random_generate(u8 *buf, unsigned int nbytes) +{ + /* lock rng buffer */ + if (!spin_trylock(&arch_rng_lock)) + return false; + + /* try to resolve the requested number of bytes from the buffer */ + arch_rng_buf_idx -= nbytes; + if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) { + memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes); + atomic64_add(nbytes, &s390_arch_random_counter); + spin_unlock(&arch_rng_lock); + return true; + } + + /* not enough bytes in rng buffer, refill is done asynchronously */ + spin_unlock(&arch_rng_lock); + + return false; +} +EXPORT_SYMBOL(s390_arch_random_generate); + +static void arch_rng_refill_buffer(struct work_struct *unused) +{ + unsigned int delay = ARCH_REFILL_TICKS; + + spin_lock(&arch_rng_lock); + if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) { + /* buffer is exhausted and needs refill */ + u8 seed[ARCH_PRNG_SEED_SIZE]; + u8 prng_wa[240]; + /* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */ + cpacf_trng(NULL, 0, seed, sizeof(seed)); + /* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */ + memset(prng_wa, 0, sizeof(prng_wa)); + cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, + &prng_wa, NULL, 0, seed, sizeof(seed)); + cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, + &prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE, NULL, 0); + arch_rng_buf_idx = ARCH_RNG_BUF_SIZE; + } + delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) / ARCH_RNG_BUF_SIZE; + spin_unlock(&arch_rng_lock); + + /* kick next check */ + queue_delayed_work(system_long_wq, &arch_rng_work, delay); +} + static int __init s390_arch_random_init(void) { - /* check if subfunction CPACF_PRNO_TRNG is available */ - if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG)) + /* are all the needed PRNO subfunctions available? 
*/ + if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) && + cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) { + + /* alloc arch random working buffer */ + arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL); + if (!arch_rng_buf) + return -ENOMEM; + + /* kick worker queue job to fill the random buffer */ + queue_delayed_work(system_long_wq, + &arch_rng_work, ARCH_REFILL_TICKS); + + /* enable arch random to the outside world */ static_branch_enable(&s390_arch_random_available); + } return 0; } diff --git a/arch/s390/hypfs/hypfs_sprp.c b/arch/s390/hypfs/hypfs_sprp.c index ae0ed8dd5f1b..5d85a039391c 100644 --- a/arch/s390/hypfs/hypfs_sprp.c +++ b/arch/s390/hypfs/hypfs_sprp.c @@ -13,7 +13,6 @@ #include <linux/string.h> #include <linux/types.h> #include <linux/uaccess.h> -#include <asm/compat.h> #include <asm/diag.h> #include <asm/sclp.h> #include "hypfs.h" diff --git a/arch/s390/include/asm/archrandom.h b/arch/s390/include/asm/archrandom.h index 09aed1095336..c67b82dfa558 100644 --- a/arch/s390/include/asm/archrandom.h +++ b/arch/s390/include/asm/archrandom.h @@ -15,16 +15,11 @@ #include <linux/static_key.h> #include <linux/atomic.h> -#include <asm/cpacf.h> DECLARE_STATIC_KEY_FALSE(s390_arch_random_available); extern atomic64_t s390_arch_random_counter; -static void s390_arch_random_generate(u8 *buf, unsigned int nbytes) -{ - cpacf_trng(NULL, 0, buf, nbytes); - atomic64_add(nbytes, &s390_arch_random_counter); -} +bool s390_arch_random_generate(u8 *buf, unsigned int nbytes); static inline bool arch_has_random(void) { @@ -51,8 +46,7 @@ static inline bool arch_get_random_int(unsigned int *v) static inline bool arch_get_random_seed_long(unsigned long *v) { if (static_branch_likely(&s390_arch_random_available)) { - s390_arch_random_generate((u8 *)v, sizeof(*v)); - return true; + return s390_arch_random_generate((u8 *)v, sizeof(*v)); } return false; } @@ -60,8 +54,7 @@ static inline bool arch_get_random_seed_long(unsigned long *v) static inline bool arch_get_random_seed_int(unsigned int *v) { if (static_branch_likely(&s390_arch_random_available)) { - s390_arch_random_generate((u8 *)v, sizeof(*v)); - return true; + return s390_arch_random_generate((u8 *)v, sizeof(*v)); } return false; } diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h index 20bce136b2e5..a29dd430fb40 100644 --- a/arch/s390/include/asm/ccwdev.h +++ b/arch/s390/include/asm/ccwdev.h @@ -231,4 +231,5 @@ int ccw_device_siosl(struct ccw_device *); extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *); struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *, int); +u8 *ccw_device_get_util_str(struct ccw_device *cdev, int chp_idx); #endif /* _S390_CCWDEV_H_ */ diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h index 99aa817dad32..860cab7479c3 100644 --- a/arch/s390/include/asm/ccwgroup.h +++ b/arch/s390/include/asm/ccwgroup.h @@ -73,4 +73,14 @@ extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev); #define to_ccwgroupdev(x) container_of((x), struct ccwgroup_device, dev) #define to_ccwgroupdrv(x) container_of((x), struct ccwgroup_driver, driver) + +#if IS_ENABLED(CONFIG_CCWGROUP) +bool dev_is_ccwgroup(struct device *dev); +#else /* CONFIG_CCWGROUP */ +static inline bool dev_is_ccwgroup(struct device *dev) +{ + return false; +} +#endif /* CONFIG_CCWGROUP */ + #endif diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 9830fb6b076e..97db2fba546a 100644 --- a/arch/s390/include/asm/compat.h +++ 
b/arch/s390/include/asm/compat.h @@ -53,7 +53,6 @@ typedef u32 compat_size_t; typedef s32 compat_ssize_t; -typedef s32 compat_time_t; typedef s32 compat_clock_t; typedef s32 compat_pid_t; typedef u16 __compat_uid_t; @@ -97,16 +96,6 @@ typedef struct { u32 gprs_high[NUM_GPRS]; } s390_compat_regs_high; -struct compat_timespec { - compat_time_t tv_sec; - s32 tv_nsec; -}; - -struct compat_timeval { - compat_time_t tv_sec; - s32 tv_usec; -}; - struct compat_stat { compat_dev_t st_dev; u16 __pad1; @@ -243,10 +232,10 @@ struct compat_ipc64_perm { struct compat_semid64_ds { struct compat_ipc64_perm sem_perm; - compat_time_t sem_otime; - compat_ulong_t __pad1; - compat_time_t sem_ctime; - compat_ulong_t __pad2; + compat_ulong_t sem_otime; + compat_ulong_t sem_otime_high; + compat_ulong_t sem_ctime; + compat_ulong_t sem_ctime_high; compat_ulong_t sem_nsems; compat_ulong_t __unused1; compat_ulong_t __unused2; @@ -254,12 +243,12 @@ struct compat_semid64_ds { struct compat_msqid64_ds { struct compat_ipc64_perm msg_perm; - compat_time_t msg_stime; - compat_ulong_t __pad1; - compat_time_t msg_rtime; - compat_ulong_t __pad2; - compat_time_t msg_ctime; - compat_ulong_t __pad3; + compat_ulong_t msg_stime; + compat_ulong_t msg_stime_high; + compat_ulong_t msg_rtime; + compat_ulong_t msg_rtime_high; + compat_ulong_t msg_ctime; + compat_ulong_t msg_ctime_high; compat_ulong_t msg_cbytes; compat_ulong_t msg_qnum; compat_ulong_t msg_qbytes; @@ -272,12 +261,12 @@ struct compat_msqid64_ds { struct compat_shmid64_ds { struct compat_ipc64_perm shm_perm; compat_size_t shm_segsz; - compat_time_t shm_atime; - compat_ulong_t __pad1; - compat_time_t shm_dtime; - compat_ulong_t __pad2; - compat_time_t shm_ctime; - compat_ulong_t __pad3; + compat_ulong_t shm_atime; + compat_ulong_t shm_atime_high; + compat_ulong_t shm_dtime; + compat_ulong_t shm_dtime_high; + compat_ulong_t shm_ctime; + compat_ulong_t shm_ctime_high; compat_pid_t shm_cpid; compat_pid_t shm_lpid; compat_ulong_t shm_nattch; diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index f58d17e9dd65..de023a9a88ca 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -113,7 +113,7 @@ struct hws_basic_entry { struct hws_diag_entry { unsigned int def:16; /* 0-15 Data Entry Format */ - unsigned int R:14; /* 16-19 and 20-30 reserved */ + unsigned int R:15; /* 16-19 and 20-30 reserved */ unsigned int I:1; /* 31 entry valid or invalid */ u8 data[]; /* Machine-dependent sample data */ } __packed; @@ -129,7 +129,9 @@ struct hws_trailer_entry { unsigned int f:1; /* 0 - Block Full Indicator */ unsigned int a:1; /* 1 - Alert request control */ unsigned int t:1; /* 2 - Timestamp format */ - unsigned long long:61; /* 3 - 63: Reserved */ + unsigned int :29; /* 3 - 31: Reserved */ + unsigned int bsdes:16; /* 32-47: size of basic SDE */ + unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */ }; unsigned long long flags; /* 0 - 63: All indicators */ }; diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 1a61b1b997f2..7d22a474a040 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -125,8 +125,9 @@ * ELF register definitions.. 
*/ +#include <linux/compat.h> + #include <asm/ptrace.h> -#include <asm/compat.h> #include <asm/syscall.h> #include <asm/user.h> @@ -136,7 +137,6 @@ typedef s390_regs elf_gregset_t; typedef s390_fp_regs compat_elf_fpregset_t; typedef s390_compat_regs compat_elf_gregset_t; -#include <linux/compat.h> #include <linux/sched/mm.h> /* for task_struct */ #include <asm/mmu_context.h> diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h index a296c6acfd07..dfbc3c6c0674 100644 --- a/arch/s390/include/asm/hardirq.h +++ b/arch/s390/include/asm/hardirq.h @@ -14,6 +14,8 @@ #include <asm/lowcore.h> #define local_softirq_pending() (S390_lowcore.softirq_pending) +#define set_softirq_pending(x) (S390_lowcore.softirq_pending = (x)) +#define or_softirq_pending(x) (S390_lowcore.softirq_pending |= (x)) #define __ARCH_IRQ_STAT #define __ARCH_HAS_DO_SOFTIRQ diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 12fe3591034f..94f8db468c9b 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -2,8 +2,6 @@ #ifndef __ASM_S390_PCI_H #define __ASM_S390_PCI_H -/* must be set before including asm-generic/pci.h */ -#define PCI_DMA_BUS_IS_PHYS (0) /* must be set before including pci_clp.h */ #define PCI_BAR_COUNT 6 diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 2d24d33bf188..9809694e1389 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -171,7 +171,6 @@ static inline int is_module_addr(void *addr) #define _PAGE_WRITE 0x020 /* SW pte write bit */ #define _PAGE_SPECIAL 0x040 /* SW associated with special page */ #define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */ -#define __HAVE_ARCH_PTE_SPECIAL #ifdef CONFIG_MEM_SOFT_DIRTY #define _PAGE_SOFT_DIRTY 0x002 /* SW pte soft dirty bit */ diff --git a/arch/s390/include/asm/pnet.h b/arch/s390/include/asm/pnet.h new file mode 100644 index 000000000000..6e278584f8f1 --- /dev/null +++ b/arch/s390/include/asm/pnet.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * IBM System z PNET ID Support + * + * Copyright IBM Corp. 2018 + */ + +#ifndef _ASM_S390_PNET_H +#define _ASM_S390_PNET_H + +#include <linux/device.h> +#include <linux/types.h> + +#define PNETIDS_LEN 64 /* Total utility string length in bytes + * to cover up to 4 PNETIDs of 16 bytes + * for up to 4 device ports + */ +#define MAX_PNETID_LEN 16 /* Max.length of a single port PNETID */ +#define MAX_PNETID_PORTS (PNETIDS_LEN / MAX_PNETID_LEN) + /* Max. 
# of ports with a PNETID */ + +int pnet_id_by_dev_port(struct device *dev, unsigned short port, u8 *pnetid); +#endif /* _ASM_S390_PNET_H */ diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild index faef3f7e8353..e364873e0d10 100644 --- a/arch/s390/include/uapi/asm/Kbuild +++ b/arch/s390/include/uapi/asm/Kbuild @@ -9,9 +9,12 @@ generic-y += errno.h generic-y += fcntl.h generic-y += ioctl.h generic-y += mman.h +generic-y += msgbuf.h generic-y += param.h generic-y += poll.h generic-y += resource.h +generic-y += sembuf.h +generic-y += shmbuf.h generic-y += sockios.h generic-y += swab.h generic-y += termbits.h diff --git a/arch/s390/include/uapi/asm/msgbuf.h b/arch/s390/include/uapi/asm/msgbuf.h deleted file mode 100644 index 604f847cd68c..000000000000 --- a/arch/s390/include/uapi/asm/msgbuf.h +++ /dev/null @@ -1,38 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _S390_MSGBUF_H -#define _S390_MSGBUF_H - -/* - * The msqid64_ds structure for S/390 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct msqid64_ds { - struct ipc64_perm msg_perm; - __kernel_time_t msg_stime; /* last msgsnd time */ -#ifndef __s390x__ - unsigned long __unused1; -#endif /* ! __s390x__ */ - __kernel_time_t msg_rtime; /* last msgrcv time */ -#ifndef __s390x__ - unsigned long __unused2; -#endif /* ! __s390x__ */ - __kernel_time_t msg_ctime; /* last change time */ -#ifndef __s390x__ - unsigned long __unused3; -#endif /* ! __s390x__ */ - unsigned long msg_cbytes; /* current number of bytes on queue */ - unsigned long msg_qnum; /* number of messages in queue */ - unsigned long msg_qbytes; /* max number of bytes on queue */ - __kernel_pid_t msg_lspid; /* pid of last msgsnd */ - __kernel_pid_t msg_lrpid; /* last receive pid */ - unsigned long __unused4; - unsigned long __unused5; -}; - -#endif /* _S390_MSGBUF_H */ diff --git a/arch/s390/include/uapi/asm/sembuf.h b/arch/s390/include/uapi/asm/sembuf.h deleted file mode 100644 index 3e917697b668..000000000000 --- a/arch/s390/include/uapi/asm/sembuf.h +++ /dev/null @@ -1,30 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _S390_SEMBUF_H -#define _S390_SEMBUF_H - -/* - * The semid64_ds structure for S/390 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem (for !__s390x__) - * - 2 miscellaneous 32-bit values - */ - -struct semid64_ds { - struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ - __kernel_time_t sem_otime; /* last semop time */ -#ifndef __s390x__ - unsigned long __unused1; -#endif /* ! __s390x__ */ - __kernel_time_t sem_ctime; /* last change time */ -#ifndef __s390x__ - unsigned long __unused2; -#endif /* ! __s390x__ */ - unsigned long sem_nsems; /* no. 
of semaphores in array */ - unsigned long __unused3; - unsigned long __unused4; -}; - -#endif /* _S390_SEMBUF_H */ diff --git a/arch/s390/include/uapi/asm/shmbuf.h b/arch/s390/include/uapi/asm/shmbuf.h deleted file mode 100644 index 9cdce8d7ce60..000000000000 --- a/arch/s390/include/uapi/asm/shmbuf.h +++ /dev/null @@ -1,49 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _S390_SHMBUF_H -#define _S390_SHMBUF_H - -/* - * The shmid64_ds structure for S/390 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem (for !__s390x__) - * - 2 miscellaneous 32-bit values - */ - -struct shmid64_ds { - struct ipc64_perm shm_perm; /* operation perms */ - size_t shm_segsz; /* size of segment (bytes) */ - __kernel_time_t shm_atime; /* last attach time */ -#ifndef __s390x__ - unsigned long __unused1; -#endif /* ! __s390x__ */ - __kernel_time_t shm_dtime; /* last detach time */ -#ifndef __s390x__ - unsigned long __unused2; -#endif /* ! __s390x__ */ - __kernel_time_t shm_ctime; /* last change time */ -#ifndef __s390x__ - unsigned long __unused3; -#endif /* ! __s390x__ */ - __kernel_pid_t shm_cpid; /* pid of creator */ - __kernel_pid_t shm_lpid; /* pid of last operator */ - unsigned long shm_nattch; /* no. of current attaches */ - unsigned long __unused4; - unsigned long __unused5; -}; - -struct shminfo64 { - unsigned long shmmax; - unsigned long shmmin; - unsigned long shmmni; - unsigned long shmseg; - unsigned long shmall; - unsigned long __unused1; - unsigned long __unused2; - unsigned long __unused3; - unsigned long __unused4; -}; - -#endif /* _S390_SHMBUF_H */ diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index f92dd8ed3884..2fed39b26b42 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -6,22 +6,26 @@ ifdef CONFIG_FUNCTION_TRACER # Do not trace tracer code -CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) # Do not trace early setup code -CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_early_nobss.o = $(CC_FLAGS_FTRACE) endif -GCOV_PROFILE_als.o := n -GCOV_PROFILE_early.o := n +GCOV_PROFILE_als.o := n +GCOV_PROFILE_early.o := n +GCOV_PROFILE_early_nobss.o := n -KCOV_INSTRUMENT_als.o := n -KCOV_INSTRUMENT_early.o := n +KCOV_INSTRUMENT_als.o := n +KCOV_INSTRUMENT_early.o := n +KCOV_INSTRUMENT_early_nobss.o := n -UBSAN_SANITIZE_als.o := n -UBSAN_SANITIZE_early.o := n +UBSAN_SANITIZE_als.o := n +UBSAN_SANITIZE_early.o := n +UBSAN_SANITIZE_early_nobss.o := n # # Use -march=z900 for als.c to be able to print an error @@ -57,7 +61,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o -obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o +obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o early_nobss.o obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o @@ -94,3 +98,6 @@ obj-$(CONFIG_TRACEPOINTS) += trace.o # vdso obj-y += vdso64/ obj-$(CONFIG_COMPAT) += vdso32/ + +chkbss := head.o head64.o als.o early_nobss.o 
+include $(srctree)/arch/s390/scripts/Makefile.chkbss diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 32daa0f84325..827699eb48fa 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -34,32 +34,6 @@ static void __init setup_boot_command_line(void); /* - * Get the TOD clock running. - */ -static void __init reset_tod_clock(void) -{ - u64 time; - - if (store_tod_clock(&time) == 0) - return; - /* TOD clock not running. Set the clock to Unix Epoch. */ - if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0) - disabled_wait(0); - - memset(tod_clock_base, 0, 16); - *(__u64 *) &tod_clock_base[1] = TOD_UNIX_EPOCH; - S390_lowcore.last_update_clock = TOD_UNIX_EPOCH; -} - -/* - * Clear bss memory - */ -static noinline __init void clear_bss_section(void) -{ - memset(__bss_start, 0, __bss_stop - __bss_start); -} - -/* * Initialize storage key for kernel pages */ static noinline __init void init_kernel_storage_key(void) @@ -310,57 +284,6 @@ static int __init cad_setup(char *str) } early_param("cad", cad_setup); -static __init void memmove_early(void *dst, const void *src, size_t n) -{ - unsigned long addr; - long incr; - psw_t old; - - if (!n) - return; - incr = 1; - if (dst > src) { - incr = -incr; - dst += n - 1; - src += n - 1; - } - old = S390_lowcore.program_new_psw; - S390_lowcore.program_new_psw.mask = __extract_psw(); - asm volatile( - " larl %[addr],1f\n" - " stg %[addr],%[psw_pgm_addr]\n" - "0: mvc 0(1,%[dst]),0(%[src])\n" - " agr %[dst],%[incr]\n" - " agr %[src],%[incr]\n" - " brctg %[n],0b\n" - "1:\n" - : [addr] "=&d" (addr), - [psw_pgm_addr] "=Q" (S390_lowcore.program_new_psw.addr), - [dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n) - : [incr] "d" (incr) - : "cc", "memory"); - S390_lowcore.program_new_psw = old; -} - -static __init noinline void rescue_initrd(void) -{ -#ifdef CONFIG_BLK_DEV_INITRD - unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20); - /* - * Just like in case of IPL from VM reader we make sure there is a - * gap of 4MB between end of kernel and start of initrd. - * That way we can also be sure that saving an NSS will succeed, - * which however only requires different segments. - */ - if (!INITRD_START || !INITRD_SIZE) - return; - if (INITRD_START >= min_initrd_addr) - return; - memmove_early((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE); - INITRD_START = min_initrd_addr; -#endif -} - /* Set up boot command line */ static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t)) { @@ -410,9 +333,6 @@ static void __init setup_boot_command_line(void) void __init startup_init(void) { - reset_tod_clock(); - rescue_initrd(); - clear_bss_section(); time_early_init(); init_kernel_storage_key(); lockdep_off(); diff --git a/arch/s390/kernel/early_nobss.c b/arch/s390/kernel/early_nobss.c new file mode 100644 index 000000000000..2d84fc48df3a --- /dev/null +++ b/arch/s390/kernel/early_nobss.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2007, 2018 + */ + +/* + * Early setup functions which may not rely on an initialized bss + * section. The last thing that is supposed to happen here is + * initialization of the bss section. + */ + +#include <linux/processor.h> +#include <linux/string.h> +#include <asm/sections.h> +#include <asm/lowcore.h> +#include <asm/setup.h> +#include <asm/timex.h> +#include "entry.h" + +static void __init reset_tod_clock(void) +{ + u64 time; + + if (store_tod_clock(&time) == 0) + return; + /* TOD clock not running. 
Set the clock to Unix Epoch. */ + if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0) + disabled_wait(0); + + memset(tod_clock_base, 0, 16); + *(__u64 *) &tod_clock_base[1] = TOD_UNIX_EPOCH; + S390_lowcore.last_update_clock = TOD_UNIX_EPOCH; +} + +static void __init rescue_initrd(void) +{ + unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20); + + /* + * Just like in case of IPL from VM reader we make sure there is a + * gap of 4MB between end of kernel and start of initrd. + * That way we can also be sure that saving an NSS will succeed, + * which however only requires different segments. + */ + if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD)) + return; + if (!INITRD_START || !INITRD_SIZE) + return; + if (INITRD_START >= min_initrd_addr) + return; + memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE); + INITRD_START = min_initrd_addr; +} + +static void __init clear_bss_section(void) +{ + memset(__bss_start, 0, __bss_stop - __bss_start); +} + +void __init startup_init_nobss(void) +{ + reset_tod_clock(); + rescue_initrd(); + clear_bss_section(); +} diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index e87758f8fbdc..961abfac2c5f 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -58,6 +58,7 @@ void do_notify_resume(struct pt_regs *regs); void __init init_IRQ(void); void do_IRQ(struct pt_regs *regs, int irq); void do_restart(void); +void __init startup_init_nobss(void); void __init startup_init(void); void die(struct pt_regs *regs, const char *str); int setup_profiling_timer(unsigned int multiplier); diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 38a973ccf501..791cb9000e86 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -40,8 +40,12 @@ ENTRY(startup_continue) stg %r15,__LC_KERNEL_STACK # set end of kernel stack aghi %r15,-160 # -# Save ipl parameters, clear bss memory, initialize storage key for kernel pages, -# and create a kernel NSS if the SAVESYS= parm is defined +# Early setup functions that may not rely on an initialized bss section, +# like moving the initrd. Returns with an initialized bss section. +# + brasl %r14,startup_init_nobss +# +# Early machine initialization and detection functions. 
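The point of the early.c/early_nobss.c split above is ordering: everything in early_nobss.c runs before clear_bss_section(), so it must not rely on static storage, and the chkbss rule added to the Makefile in this series verifies that the affected objects (head.o, head64.o, als.o, early_nobss.o) keep an empty .bss section. A hypothetical example of a pattern that would be broken in early_nobss.c:

        /* Hypothetical: unsafe before clear_bss_section() has run. */
        static int tod_was_reset;               /* placed in .bss, still uninitialized */

        static void __init broken_early_helper(void)
        {
                if (tod_was_reset)              /* may read garbage at this point */
                        return;
                tod_was_reset = 1;
        }
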
# brasl %r14,startup_init lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space, diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c index 8ad6a7128b3a..18ae7b9c71d6 100644 --- a/arch/s390/kernel/nospec-branch.c +++ b/arch/s390/kernel/nospec-branch.c @@ -36,9 +36,9 @@ early_param("nospec", nospec_setup_early); static int __init nospec_report(void) { if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) - pr_info("Spectre V2 mitigation: execute trampolines.\n"); + pr_info("Spectre V2 mitigation: execute trampolines\n"); if (__test_facility(82, S390_lowcore.alt_stfle_fac_list)) - pr_info("Spectre V2 mitigation: limited branch prediction.\n"); + pr_info("Spectre V2 mitigation: limited branch prediction\n"); return 0; } arch_initcall(nospec_report); diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c index fc7e04c2195b..54f5496913fa 100644 --- a/arch/s390/kernel/sysinfo.c +++ b/arch/s390/kernel/sysinfo.c @@ -294,21 +294,9 @@ static int sysinfo_show(struct seq_file *m, void *v) return 0; } -static int sysinfo_open(struct inode *inode, struct file *file) -{ - return single_open(file, sysinfo_show, NULL); -} - -static const struct file_operations sysinfo_fops = { - .open = sysinfo_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static int __init sysinfo_create_proc(void) { - proc_create("sysinfo", 0444, NULL, &sysinfo_fops); + proc_create_single("sysinfo", 0444, NULL, sysinfo_show); return 0; } device_initcall(sysinfo_create_proc); @@ -386,18 +374,6 @@ static const struct seq_operations service_level_seq_ops = { .show = service_level_show }; -static int service_level_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &service_level_seq_ops); -} - -static const struct file_operations service_level_ops = { - .open = service_level_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release -}; - static void service_level_vm_print(struct seq_file *m, struct service_level *slr) { @@ -420,7 +396,7 @@ static struct service_level service_level_vm = { static __init int create_proc_service_level(void) { - proc_create("service_levels", 0, NULL, &service_level_ops); + proc_create_seq("service_levels", 0, NULL, &service_level_seq_ops); if (MACHINE_IS_VM) register_service_level(&service_level_vm); return 0; diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index a5297a22bc1e..8003b38c1688 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -44,14 +44,8 @@ int is_valid_bugaddr(unsigned long addr) void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str) { - siginfo_t info; - if (user_mode(regs)) { - info.si_signo = si_signo; - info.si_errno = 0; - info.si_code = si_code; - info.si_addr = get_trap_ip(regs); - force_sig_info(si_signo, &info, current); + force_sig_fault(si_signo, si_code, get_trap_ip(regs), current); report_user_fault(regs, si_signo, 0); } else { const struct exception_table_entry *fixup; @@ -80,18 +74,12 @@ NOKPROBE_SYMBOL(do_trap); void do_per_trap(struct pt_regs *regs) { - siginfo_t info; - if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) return; if (!current->ptrace) return; - info.si_signo = SIGTRAP; - info.si_errno = 0; - info.si_code = TRAP_HWBKPT; - info.si_addr = - (void __force __user *) current->thread.per_event.address; - force_sig_info(SIGTRAP, &info, current); + force_sig_fault(SIGTRAP, TRAP_HWBKPT, + (void __force __user *) current->thread.per_event.address, current); } 
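The siginfo_t conversions in this file and in the fault handlers further below rely on the force_sig_fault() helper introduced during the 4.17 cycle; its interface is roughly the following (a few architectures pass extra trap-number arguments), and it zeroes si_errno and fills si_signo/si_code/si_addr before delivering the signal:

        int force_sig_fault(int sig, int code, void __user *addr,
                            struct task_struct *t);
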
NOKPROBE_SYMBOL(do_per_trap); @@ -165,7 +153,6 @@ void translation_exception(struct pt_regs *regs) void illegal_op(struct pt_regs *regs) { - siginfo_t info; __u8 opcode[6]; __u16 __user *location; int is_uprobe_insn = 0; @@ -177,13 +164,9 @@ void illegal_op(struct pt_regs *regs) if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) return; if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { - if (current->ptrace) { - info.si_signo = SIGTRAP; - info.si_errno = 0; - info.si_code = TRAP_BRKPT; - info.si_addr = location; - force_sig_info(SIGTRAP, &info, current); - } else + if (current->ptrace) + force_sig_fault(SIGTRAP, TRAP_BRKPT, location, current); + else signal = SIGILL; #ifdef CONFIG_UPROBES } else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) { diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 08d12cfaf091..f0414f52817b 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -82,10 +82,10 @@ SECTIONS . = ALIGN(PAGE_SIZE); .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { - VMLINUX_SYMBOL(_sinittext) = . ; + _sinittext = .; INIT_TEXT . = ALIGN(PAGE_SIZE); - VMLINUX_SYMBOL(_einittext) = . ; + _einittext = .; } /* diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index ebfa0442e569..a3bce0e84346 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -26,7 +26,6 @@ #include <asm/gmap.h> #include <asm/io.h> #include <asm/ptrace.h> -#include <asm/compat.h> #include <asm/sclp.h> #include "gaccess.h" #include "kvm-s390.h" diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index 9bfe0802684b..57ab40188d4b 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile @@ -8,3 +8,6 @@ obj-y += mem.o xor.o lib-$(CONFIG_SMP) += spinlock.o lib-$(CONFIG_KPROBES) += probes.o lib-$(CONFIG_UPROBES) += probes.o + +chkbss := mem.o +include $(srctree)/arch/s390/scripts/Makefile.chkbss diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 93faeca52284..e074480d3598 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -265,14 +265,10 @@ void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault) */ static noinline void do_sigsegv(struct pt_regs *regs, int si_code) { - struct siginfo si; - report_user_fault(regs, SIGSEGV, 1); - si.si_signo = SIGSEGV; - si.si_errno = 0; - si.si_code = si_code; - si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK); - force_sig_info(SIGSEGV, &si, current); + force_sig_fault(SIGSEGV, si_code, + (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK), + current); } static noinline void do_no_context(struct pt_regs *regs) @@ -316,18 +312,13 @@ static noinline void do_low_address(struct pt_regs *regs) static noinline void do_sigbus(struct pt_regs *regs) { - struct task_struct *tsk = current; - struct siginfo si; - /* * Send a sigbus, regardless of whether we were in kernel * or user mode. 
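Similarly, the sysinfo.c hunks above use the proc_create_single() and proc_create_seq() helpers, also new in the 4.17 cycle, which replace the repetitive file_operations plus single_open()/seq_open() boilerplate. Their interfaces are roughly as follows (in the real <linux/proc_fs.h> they are thin wrappers around *_data variants):

        struct proc_dir_entry *proc_create_single(const char *name, umode_t mode,
                        struct proc_dir_entry *parent,
                        int (*show)(struct seq_file *, void *));
        struct proc_dir_entry *proc_create_seq(const char *name, umode_t mode,
                        struct proc_dir_entry *parent,
                        const struct seq_operations *ops);
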
*/ - si.si_signo = SIGBUS; - si.si_errno = 0; - si.si_code = BUS_ADRERR; - si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK); - force_sig_info(SIGBUS, &si, tsk); + force_sig_fault(SIGBUS, BUS_ADRERR, + (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK), + current); } static noinline int signal_return(struct pt_regs *regs) diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 562f72955956..84bd6329a88d 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -190,14 +190,15 @@ unsigned long *page_table_alloc(struct mm_struct *mm) if (!list_empty(&mm->context.pgtable_list)) { page = list_first_entry(&mm->context.pgtable_list, struct page, lru); - mask = atomic_read(&page->_mapcount); + mask = atomic_read(&page->_refcount) >> 24; mask = (mask | (mask >> 4)) & 3; if (mask != 3) { table = (unsigned long *) page_to_phys(page); bit = mask & 1; /* =1 -> second 2K */ if (bit) table += PTRS_PER_PTE; - atomic_xor_bits(&page->_mapcount, 1U << bit); + atomic_xor_bits(&page->_refcount, + 1U << (bit + 24)); list_del(&page->lru); } } @@ -218,12 +219,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm) table = (unsigned long *) page_to_phys(page); if (mm_alloc_pgste(mm)) { /* Return 4K page table with PGSTEs */ - atomic_set(&page->_mapcount, 3); + atomic_xor_bits(&page->_refcount, 3 << 24); memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE); memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE); } else { /* Return the first 2K fragment of the page */ - atomic_set(&page->_mapcount, 1); + atomic_xor_bits(&page->_refcount, 1 << 24); memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE); spin_lock_bh(&mm->context.lock); list_add(&page->lru, &mm->context.pgtable_list); @@ -242,7 +243,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) /* Free 2K page table fragment of a 4K page */ bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)); spin_lock_bh(&mm->context.lock); - mask = atomic_xor_bits(&page->_mapcount, 1U << bit); + mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24)); + mask >>= 24; if (mask & 3) list_add(&page->lru, &mm->context.pgtable_list); else @@ -253,7 +255,6 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) } pgtable_page_dtor(page); - atomic_set(&page->_mapcount, -1); __free_page(page); } @@ -274,7 +275,8 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table, } bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)); spin_lock_bh(&mm->context.lock); - mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit); + mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24)); + mask >>= 24; if (mask & 3) list_add_tail(&page->lru, &mm->context.pgtable_list); else @@ -296,12 +298,13 @@ static void __tlb_remove_table(void *_table) break; case 1: /* lower 2K of a 4K page table */ case 2: /* higher 2K of a 4K page table */ - if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0) + mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24)); + mask >>= 24; + if (mask != 0) break; /* fallthrough */ case 3: /* 4K page table with pgstes */ pgtable_page_dtor(page); - atomic_set(&page->_mapcount, -1); __free_page(page); break; } diff --git a/arch/s390/net/Makefile b/arch/s390/net/Makefile index e0d5f245e42b..8cab6deb0403 100644 --- a/arch/s390/net/Makefile +++ b/arch/s390/net/Makefile @@ -2,4 +2,5 @@ # # Arch-specific network modules # -obj-$(CONFIG_BPF_JIT) += bpf_jit.o bpf_jit_comp.o +obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o +obj-$(CONFIG_HAVE_PNETID) += pnet.o diff --git 
a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S deleted file mode 100644 index 9f794869c1b0..000000000000 --- a/arch/s390/net/bpf_jit.S +++ /dev/null @@ -1,120 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * BPF Jit compiler for s390, help functions. - * - * Copyright IBM Corp. 2012,2015 - * - * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> - * Michael Holzheu <holzheu@linux.vnet.ibm.com> - */ - -#include <linux/linkage.h> -#include <asm/nospec-insn.h> -#include "bpf_jit.h" - -/* - * Calling convention: - * registers %r7-%r10, %r11,%r13, and %r15 are call saved - * - * Input (64 bit): - * %r3 (%b2) = offset into skb data - * %r6 (%b5) = return address - * %r7 (%b6) = skb pointer - * %r12 = skb data pointer - * - * Output: - * %r14= %b0 = return value (read skb value) - * - * Work registers: %r2,%r4,%r5,%r14 - * - * skb_copy_bits takes 4 parameters: - * %r2 = skb pointer - * %r3 = offset into skb data - * %r4 = pointer to temp buffer - * %r5 = length to copy - * Return value in %r2: 0 = ok - * - * bpf_internal_load_pointer_neg_helper takes 3 parameters: - * %r2 = skb pointer - * %r3 = offset into data - * %r4 = length to copy - * Return value in %r2: Pointer to data - */ - -#define SKF_MAX_NEG_OFF -0x200000 /* SKF_LL_OFF from filter.h */ - -/* - * Load SIZE bytes from SKB - */ -#define sk_load_common(NAME, SIZE, LOAD) \ -ENTRY(sk_load_##NAME); \ - ltgr %r3,%r3; /* Is offset negative? */ \ - jl sk_load_##NAME##_slow_neg; \ -ENTRY(sk_load_##NAME##_pos); \ - aghi %r3,SIZE; /* Offset + SIZE */ \ - clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \ - jh sk_load_##NAME##_slow; \ - LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \ - B_EX OFF_OK,%r6; /* Return */ \ - \ -sk_load_##NAME##_slow:; \ - lgr %r2,%r7; /* Arg1 = skb pointer */ \ - aghi %r3,-SIZE; /* Arg2 = offset */ \ - la %r4,STK_OFF_TMP(%r15); /* Arg3 = temp bufffer */ \ - lghi %r5,SIZE; /* Arg4 = size */ \ - brasl %r14,skb_copy_bits; /* Get data from skb */ \ - LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp bufffer */ \ - ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \ - BR_EX %r6; /* Return */ - -sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */ -sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */ - - GEN_BR_THUNK %r6 - GEN_B_THUNK OFF_OK,%r6 - -/* - * Load 1 byte from SKB (optimized version) - */ - /* r14 = *(u8 *) (skb->data+offset) */ -ENTRY(sk_load_byte) - ltgr %r3,%r3 # Is offset negative? - jl sk_load_byte_slow_neg -ENTRY(sk_load_byte_pos) - clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen? 
- jnl sk_load_byte_slow - llgc %r14,0(%r3,%r12) # Get byte from skb - B_EX OFF_OK,%r6 # Return OK - -sk_load_byte_slow: - lgr %r2,%r7 # Arg1 = skb pointer - # Arg2 = offset - la %r4,STK_OFF_TMP(%r15) # Arg3 = pointer to temp buffer - lghi %r5,1 # Arg4 = size (1 byte) - brasl %r14,skb_copy_bits # Get data from skb - llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer - ltgr %r2,%r2 # Set cc to (%r2 != 0) - BR_EX %r6 # Return cc - -#define sk_negative_common(NAME, SIZE, LOAD) \ -sk_load_##NAME##_slow_neg:; \ - cgfi %r3,SKF_MAX_NEG_OFF; \ - jl bpf_error; \ - lgr %r2,%r7; /* Arg1 = skb pointer */ \ - /* Arg2 = offset */ \ - lghi %r4,SIZE; /* Arg3 = size */ \ - brasl %r14,bpf_internal_load_pointer_neg_helper; \ - ltgr %r2,%r2; \ - jz bpf_error; \ - LOAD %r14,0(%r2); /* Get data from pointer */ \ - xr %r3,%r3; /* Set cc to zero */ \ - BR_EX %r6; /* Return cc */ - -sk_negative_common(word, 4, llgf) -sk_negative_common(half, 2, llgh) -sk_negative_common(byte, 1, llgc) - -bpf_error: -# force a return 0 from jit handler - ltgr %r15,%r15 # Set condition code - BR_EX %r6 diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h index 5e1e5133132d..7822ea92e54a 100644 --- a/arch/s390/net/bpf_jit.h +++ b/arch/s390/net/bpf_jit.h @@ -16,9 +16,6 @@ #include <linux/filter.h> #include <linux/types.h> -extern u8 sk_load_word_pos[], sk_load_half_pos[], sk_load_byte_pos[]; -extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; - #endif /* __ASSEMBLY__ */ /* @@ -36,15 +33,6 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; * | | | * | BPF stack | | * | | | - * +---------------+ | - * | 8 byte skbp | | - * R15+176 -> +---------------+ | - * | 8 byte hlen | | - * R15+168 -> +---------------+ | - * | 4 byte align | | - * +---------------+ | - * | 4 byte temp | | - * | for bpf_jit.S | | * R15+160 -> +---------------+ | * | new backchain | | * R15+152 -> +---------------+ | @@ -57,17 +45,11 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; * The stack size used by the BPF program ("BPF stack" above) is passed * via "aux->stack_depth". 
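The four scratch slots removed from the stack diagram above add up to 8 (skb pointer) + 8 (header length) + 4 (alignment) + 4 (temp buffer) = 24 bytes, so in the defines just below STK_SPACE_ADD shrinks from 184 to the bare 160-byte register save area, and STK_OFF correspondingly drops from 120 to 96 bytes per JIT frame.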
*/ -#define STK_SPACE_ADD (8 + 8 + 4 + 4 + 160) +#define STK_SPACE_ADD (160) #define STK_160_UNUSED (160 - 12 * 8) #define STK_OFF (STK_SPACE_ADD - STK_160_UNUSED) -#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */ -#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */ -#define STK_OFF_SKBP 176 /* Offset of SKB pointer on stack */ #define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */ #define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */ -/* Offset to skip condition code check */ -#define OFF_OK 4 - #endif /* __ARCH_S390_NET_BPF_JIT_H */ diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index dd2bcf0e7d00..d2db8acb1a55 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -51,23 +51,21 @@ struct bpf_jit { #define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */ -#define SEEN_SKB 1 /* skb access */ -#define SEEN_MEM 2 /* use mem[] for temporary storage */ -#define SEEN_RET0 4 /* ret0_ip points to a valid return 0 */ -#define SEEN_LITERAL 8 /* code uses literals */ -#define SEEN_FUNC 16 /* calls C functions */ -#define SEEN_TAIL_CALL 32 /* code uses tail calls */ -#define SEEN_REG_AX 64 /* code uses constant blinding */ -#define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB) +#define SEEN_MEM (1 << 0) /* use mem[] for temporary storage */ +#define SEEN_RET0 (1 << 1) /* ret0_ip points to a valid return 0 */ +#define SEEN_LITERAL (1 << 2) /* code uses literals */ +#define SEEN_FUNC (1 << 3) /* calls C functions */ +#define SEEN_TAIL_CALL (1 << 4) /* code uses tail calls */ +#define SEEN_REG_AX (1 << 5) /* code uses constant blinding */ +#define SEEN_STACK (SEEN_FUNC | SEEN_MEM) /* * s390 registers */ #define REG_W0 (MAX_BPF_JIT_REG + 0) /* Work register 1 (even) */ #define REG_W1 (MAX_BPF_JIT_REG + 1) /* Work register 2 (odd) */ -#define REG_SKB_DATA (MAX_BPF_JIT_REG + 2) /* SKB data register */ -#define REG_L (MAX_BPF_JIT_REG + 3) /* Literal pool register */ -#define REG_15 (MAX_BPF_JIT_REG + 4) /* Register 15 */ +#define REG_L (MAX_BPF_JIT_REG + 2) /* Literal pool register */ +#define REG_15 (MAX_BPF_JIT_REG + 3) /* Register 15 */ #define REG_0 REG_W0 /* Register 0 */ #define REG_1 REG_W1 /* Register 1 */ #define REG_2 BPF_REG_1 /* Register 2 */ @@ -92,10 +90,8 @@ static const int reg2hex[] = { [BPF_REG_9] = 10, /* BPF stack pointer */ [BPF_REG_FP] = 13, - /* Register for blinding (shared with REG_SKB_DATA) */ + /* Register for blinding */ [BPF_REG_AX] = 12, - /* SKB data pointer */ - [REG_SKB_DATA] = 12, /* Work registers for s390x backend */ [REG_W0] = 0, [REG_W1] = 1, @@ -402,27 +398,6 @@ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth) } /* - * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S" - * we store the SKB header length on the stack and the SKB data - * pointer in REG_SKB_DATA if BPF_REG_AX is not used. 
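Background for these removals and the matching ones in bpf_jit_comp.c: as of the 4.18 cycle the BPF core translates classic LD_ABS/LD_IND packet accesses into native eBPF instructions during verification, so per-architecture helpers such as bpf_jit.S and the SEEN_SKB/REG_SKB_DATA bookkeeping have no remaining users.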
- */ -static void emit_load_skb_data_hlen(struct bpf_jit *jit) -{ - /* Header length: llgf %w1,<len>(%b1) */ - EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1, - offsetof(struct sk_buff, len)); - /* s %w1,<data_len>(%b1) */ - EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1, - offsetof(struct sk_buff, data_len)); - /* stg %w1,ST_OFF_HLEN(%r0,%r15) */ - EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, STK_OFF_HLEN); - if (!(jit->seen & SEEN_REG_AX)) - /* lg %skb_data,data_off(%b1) */ - EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0, - BPF_REG_1, offsetof(struct sk_buff, data)); -} - -/* * Emit function prologue * * Save registers and create stack frame if necessary. @@ -462,12 +437,6 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth) EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, 152); } - if (jit->seen & SEEN_SKB) { - emit_load_skb_data_hlen(jit); - /* stg %b1,ST_OFF_SKBP(%r0,%r15) */ - EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15, - STK_OFF_SKBP); - } } /* @@ -537,12 +506,12 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i { struct bpf_insn *insn = &fp->insnsi[i]; int jmp_off, last, insn_count = 1; - unsigned int func_addr, mask; u32 dst_reg = insn->dst_reg; u32 src_reg = insn->src_reg; u32 *addrs = jit->addrs; s32 imm = insn->imm; s16 off = insn->off; + unsigned int mask; if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX) jit->seen |= SEEN_REG_AX; @@ -1029,13 +998,6 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i } /* lgr %b0,%r2: load return value into %b0 */ EMIT4(0xb9040000, BPF_REG_0, REG_2); - if ((jit->seen & SEEN_SKB) && - bpf_helper_changes_pkt_data((void *)func)) { - /* lg %b1,ST_OFF_SKBP(%r15) */ - EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0, - REG_15, STK_OFF_SKBP); - emit_load_skb_data_hlen(jit); - } break; } case BPF_JMP | BPF_TAIL_CALL: @@ -1235,73 +1197,6 @@ branch_oc: jmp_off = addrs[i + off + 1] - (addrs[i + 1] - 4); EMIT4_PCREL(0xa7040000 | mask << 8, jmp_off); break; - /* - * BPF_LD - */ - case BPF_LD | BPF_ABS | BPF_B: /* b0 = *(u8 *) (skb->data+imm) */ - case BPF_LD | BPF_IND | BPF_B: /* b0 = *(u8 *) (skb->data+imm+src) */ - if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0)) - func_addr = __pa(sk_load_byte_pos); - else - func_addr = __pa(sk_load_byte); - goto call_fn; - case BPF_LD | BPF_ABS | BPF_H: /* b0 = *(u16 *) (skb->data+imm) */ - case BPF_LD | BPF_IND | BPF_H: /* b0 = *(u16 *) (skb->data+imm+src) */ - if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0)) - func_addr = __pa(sk_load_half_pos); - else - func_addr = __pa(sk_load_half); - goto call_fn; - case BPF_LD | BPF_ABS | BPF_W: /* b0 = *(u32 *) (skb->data+imm) */ - case BPF_LD | BPF_IND | BPF_W: /* b0 = *(u32 *) (skb->data+imm+src) */ - if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0)) - func_addr = __pa(sk_load_word_pos); - else - func_addr = __pa(sk_load_word); - goto call_fn; -call_fn: - jit->seen |= SEEN_SKB | SEEN_RET0 | SEEN_FUNC; - REG_SET_SEEN(REG_14); /* Return address of possible func call */ - - /* - * Implicit input: - * BPF_REG_6 (R7) : skb pointer - * REG_SKB_DATA (R12): skb data pointer (if no BPF_REG_AX) - * - * Calculated input: - * BPF_REG_2 (R3) : offset of byte(s) to fetch in skb - * BPF_REG_5 (R6) : return address - * - * Output: - * BPF_REG_0 (R14): data read from skb - * - * Scratch registers (BPF_REG_1-5) - */ - - /* Call function: llilf %w1,func_addr */ - EMIT6_IMM(0xc00f0000, REG_W1, func_addr); - - /* Offset: lgfi %b2,imm */ - 
EMIT6_IMM(0xc0010000, BPF_REG_2, imm); - if (BPF_MODE(insn->code) == BPF_IND) - /* agfr %b2,%src (%src is s32 here) */ - EMIT4(0xb9180000, BPF_REG_2, src_reg); - - /* Reload REG_SKB_DATA if BPF_REG_AX is used */ - if (jit->seen & SEEN_REG_AX) - /* lg %skb_data,data_off(%b6) */ - EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0, - BPF_REG_6, offsetof(struct sk_buff, data)); - /* basr %b5,%w1 (%b5 is call saved) */ - EMIT2(0x0d00, BPF_REG_5, REG_W1); - - /* - * Note: For fast access we jump directly after the - * jnz instruction from bpf_jit.S - */ - /* jnz <ret0> */ - EMIT4_PCREL(0xa7740000, jit->ret0_ip - jit->prg); - break; default: /* too complex, give up */ pr_err("Unknown opcode %02x\n", insn->code); return -1; diff --git a/arch/s390/net/pnet.c b/arch/s390/net/pnet.c new file mode 100644 index 000000000000..e22f1b10a6c7 --- /dev/null +++ b/arch/s390/net/pnet.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * IBM System z PNET ID Support + * + * Copyright IBM Corp. 2018 + */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/types.h> +#include <asm/ccwgroup.h> +#include <asm/ccwdev.h> +#include <asm/pnet.h> + +/* + * Get the PNETIDs from a device. + * s390 hardware supports the definition of a so-called Physical Network + * Identifier (short PNETID) per network device port. These PNETIDs can be + * used to identify network devices that are attached to the same physical + * network (broadcast domain). + * + * The device can be + * - a ccwgroup device with all bundled subchannels having the same PNETID + * - a PCI attached network device + * + * Returns: + * 0: PNETIDs extracted from device. + * -ENOMEM: No memory to extract utility string. + * -EOPNOTSUPP: Device type without utility string support + */ +static int pnet_ids_by_device(struct device *dev, u8 *pnetids) +{ + memset(pnetids, 0, PNETIDS_LEN); + if (dev_is_ccwgroup(dev)) { + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + u8 *util_str; + + util_str = ccw_device_get_util_str(gdev->cdev[0], 0); + if (!util_str) + return -ENOMEM; + memcpy(pnetids, util_str, PNETIDS_LEN); + kfree(util_str); + return 0; + } + if (dev_is_pci(dev)) { + struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); + + memcpy(pnetids, zdev->util_str, sizeof(zdev->util_str)); + return 0; + } + return -EOPNOTSUPP; +} + +/* + * Extract the pnetid for a device port. + * + * Return 0 if a pnetid is found and -ENOENT otherwise. 
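A hypothetical caller of the new interface, for instance a driver that wants to know whether a device's port is attached to a given physical network (assumes <asm/pnet.h> and <linux/string.h>):

        /* Hypothetical helper: is @dev's port 0 attached to physical net @id? */
        static bool same_pnet(struct device *dev, const u8 *id)
        {
                u8 pnetid[MAX_PNETID_LEN];

                if (pnet_id_by_dev_port(dev, 0, pnetid))
                        return false;           /* no PNETID defined for this port */
                return !memcmp(pnetid, id, MAX_PNETID_LEN);
        }
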
+ */ +int pnet_id_by_dev_port(struct device *dev, unsigned short port, u8 *pnetid) +{ + u8 pnetids[MAX_PNETID_PORTS][MAX_PNETID_LEN]; + static const u8 zero[MAX_PNETID_LEN] = { 0 }; + int rc = 0; + + if (!dev || port >= MAX_PNETID_PORTS) + return -ENOENT; + + if (!pnet_ids_by_device(dev, (u8 *)pnetids) && + memcmp(pnetids[port], zero, MAX_PNETID_LEN)) + memcpy(pnetid, pnetids[port], MAX_PNETID_LEN); + else + rc = -ENOENT; + + return rc; +} +EXPORT_SYMBOL_GPL(pnet_id_by_dev_port); + +MODULE_DESCRIPTION("pnetid determination from utility strings"); +MODULE_LICENSE("GPL"); diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index 93cd0f1ca12b..19b2d2a9b43d 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c @@ -19,7 +19,6 @@ #include <linux/uaccess.h> #include <asm/pci_debug.h> #include <asm/pci_clp.h> -#include <asm/compat.h> #include <asm/clp.h> #include <uapi/asm/clp.h> diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 2d15d84c20ed..d387a0fbdd7e 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -668,15 +668,6 @@ void zpci_dma_exit(void) kmem_cache_destroy(dma_region_table_cache); } -#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) - -static int __init dma_debug_do_init(void) -{ - dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); - return 0; -} -fs_initcall(dma_debug_do_init); - const struct dma_map_ops s390_pci_dma_ops = { .alloc = s390_dma_alloc, .free = s390_dma_free, @@ -685,8 +676,6 @@ const struct dma_map_ops s390_pci_dma_ops = { .map_page = s390_dma_map_pages, .unmap_page = s390_dma_unmap_pages, .mapping_error = s390_mapping_error, - /* if we support direct DMA this must be conditional */ - .is_phys = 0, /* dma_supported is unconditionally true without a callback */ }; EXPORT_SYMBOL_GPL(s390_pci_dma_ops); diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile index e9525bc1b4a6..1ace023cbdce 100644 --- a/arch/s390/purgatory/Makefile +++ b/arch/s390/purgatory/Makefile @@ -21,7 +21,7 @@ LDFLAGS_purgatory.ro += -z nodefaultlib KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -KBUILD_CFLAGS += -c -MD -Os -m64 +KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float KBUILD_CFLAGS += $(call cc-option,-fno-PIE) $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE diff --git a/arch/s390/scripts/Makefile.chkbss b/arch/s390/scripts/Makefile.chkbss new file mode 100644 index 000000000000..d92f2d94a5d9 --- /dev/null +++ b/arch/s390/scripts/Makefile.chkbss @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0 + +quiet_cmd_chkbss = CHKBSS $< +define cmd_chkbss + if ! 
$(OBJDUMP) -j .bss -w -h $< | awk 'END { if ($$3) exit 1 }'; then \ + echo "error: $< .bss section is not empty" >&2; exit 1; \ + fi; \ + touch $@; +endef + +$(obj)/built-in.a: $(patsubst %, $(obj)/%.chkbss, $(chkbss)) + +%.o.chkbss: %.o + $(call cmd,chkbss) diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 1851eaeee131..4d61a085982b 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config SUPERH def_bool y + select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_NO_COHERENT_DMA_MMAP if !MMU @@ -14,7 +15,6 @@ config SUPERH select HAVE_OPROFILE select HAVE_GENERIC_DMA_COHERENT select HAVE_ARCH_TRACEHOOK - select HAVE_DMA_API_DEBUG select HAVE_PERF_EVENTS select HAVE_DEBUG_BUGVERBOSE select ARCH_HAVE_CUSTOM_GPIO_H @@ -51,6 +51,9 @@ config SUPERH select HAVE_ARCH_AUDITSYSCALL select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_NMI + select NEED_DMA_MAP_STATE + select NEED_SG_DMA_LENGTH + help The SuperH is a RISC processor targeted for use in embedded systems and consumer electronics; it was also used in the Sega Dreamcast @@ -58,7 +61,7 @@ config SUPERH <http://www.linux-sh.org/>. config SUPERH32 - def_bool ARCH = "sh" + def_bool "$(ARCH)" = "sh" select HAVE_KPROBES select HAVE_KRETPROBES select HAVE_IOREMAP_PROT if MMU && !X2TLB @@ -77,7 +80,7 @@ config SUPERH32 select HAVE_CC_STACKPROTECTOR config SUPERH64 - def_bool ARCH = "sh64" + def_bool "$(ARCH)" = "sh64" select HAVE_EXIT_THREAD select KALLSYMS @@ -161,12 +164,6 @@ config DMA_COHERENT config DMA_NONCOHERENT def_bool !DMA_COHERENT -config NEED_DMA_MAP_STATE - def_bool DMA_NONCOHERENT - -config NEED_SG_DMA_LENGTH - def_bool y - config PGTABLE_LEVELS default 3 if X2TLB default 2 diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c index 271dfc260e82..3d7d0046cf49 100644 --- a/arch/sh/boards/mach-migor/setup.c +++ b/arch/sh/boards/mach-migor/setup.c @@ -359,7 +359,7 @@ static struct gpiod_lookup_table ov7725_gpios = { static struct gpiod_lookup_table tw9910_gpios = { .dev_id = "0-0045", .table = { - GPIO_LOOKUP("sh7722_pfc", GPIO_PTT2, "pdn", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("sh7722_pfc", GPIO_PTT2, "pdn", GPIO_ACTIVE_LOW), GPIO_LOOKUP("sh7722_pfc", GPIO_PTT3, "rstb", GPIO_ACTIVE_LOW), }, }; diff --git a/arch/sh/drivers/dma/dma-api.c b/arch/sh/drivers/dma/dma-api.c index c0eec08d8f95..b05be597b19f 100644 --- a/arch/sh/drivers/dma/dma-api.c +++ b/arch/sh/drivers/dma/dma-api.c @@ -339,18 +339,6 @@ static int dma_proc_show(struct seq_file *m, void *v) return 0; } -static int dma_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, dma_proc_show, NULL); -} - -static const struct file_operations dma_proc_fops = { - .open = dma_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - int register_dmac(struct dma_info *info) { unsigned int total_channels, i; @@ -423,7 +411,7 @@ EXPORT_SYMBOL(unregister_dmac); static int __init dma_api_init(void) { printk(KERN_NOTICE "DMA: Registering DMA API.\n"); - return proc_create("dma", 0, NULL, &dma_proc_fops) ? 0 : -ENOMEM; + return proc_create_single("dma", 0, NULL, dma_proc_show) ? 
0 : -ENOMEM; } subsys_initcall(dma_api_init); diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index 1efcce74997b..46dd82ab2c29 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild @@ -1,3 +1,4 @@ +generic-y += compat.h generic-y += current.h generic-y += delay.h generic-y += div64.h diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h index 0033f0df2b3b..10a36b1cf2ea 100644 --- a/arch/sh/include/asm/pci.h +++ b/arch/sh/include/asm/pci.h @@ -71,12 +71,6 @@ extern unsigned long PCIBIOS_MIN_IO, PCIBIOS_MIN_MEM; * SuperH has everything mapped statically like x86. */ -/* The PCI address space does equal the physical memory - * address space. The networking and block device layers use - * this boolean for bounce buffer decisions. - */ -#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) - #ifdef CONFIG_PCI /* * None of the SH PCI controllers support MWI, it is always treated as a diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index 89c513a982fc..f6abfe2bca93 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -156,8 +156,6 @@ extern void page_table_range_init(unsigned long start, unsigned long end, #define HAVE_ARCH_UNMAPPED_AREA #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN -#define __HAVE_ARCH_PTE_SPECIAL - #include <asm-generic/pgtable.h> #endif /* __ASM_SH_PGTABLE_H */ diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c index 178457d7620c..3e3a32fc676e 100644 --- a/arch/sh/kernel/dma-nommu.c +++ b/arch/sh/kernel/dma-nommu.c @@ -78,7 +78,6 @@ const struct dma_map_ops nommu_dma_ops = { .sync_single_for_device = nommu_sync_single_for_device, .sync_sg_for_device = nommu_sync_sg_for_device, #endif - .is_phys = 1, }; void __init no_iommu_init(void) diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c index afe965712a69..8648ed05ccf0 100644 --- a/arch/sh/kernel/hw_breakpoint.c +++ b/arch/sh/kernel/hw_breakpoint.c @@ -347,13 +347,8 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args) /* Deliver the signal to userspace */ if (!arch_check_bp_in_kernelspace(bp)) { - siginfo_t info; - - info.si_signo = args->signr; - info.si_errno = notifier_to_errno(rc); - info.si_code = TRAP_HWBKPT; - - force_sig_info(args->signr, &info, current); + force_sig_fault(SIGTRAP, TRAP_HWBKPT, + (void __user *)NULL, current); } rcu_read_unlock(); diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 245dbeb20afe..5717c7cbdd97 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -44,7 +44,7 @@ int arch_show_interrupts(struct seq_file *p, int prec) seq_printf(p, "%*s: ", prec, "NMI"); for_each_online_cpu(j) - seq_printf(p, "%10u ", irq_stat[j].__nmi_count); + seq_printf(p, "%10u ", nmi_count(j)); seq_printf(p, " Non-maskable interrupts\n"); seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index b3770bb26211..60709ad17fc7 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -477,7 +477,6 @@ asmlinkage void do_address_error(struct pt_regs *regs, { unsigned long error_code = 0; mm_segment_t oldfs; - siginfo_t info; insn_size_t instruction; int tmp; @@ -537,11 +536,7 @@ uspace_segv: "access (PC %lx PR %lx)\n", current->comm, regs->pc, regs->pr); - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = si_code; - info.si_addr = (void __user *)address; - force_sig_info(SIGBUS, &info, current); + force_sig_fault(SIGBUS, si_code, (void __user 
*)address, current); } else { inc_unaligned_kernel_access(); @@ -598,19 +593,20 @@ int is_dsp_inst(struct pt_regs *regs) #ifdef CONFIG_CPU_SH2A asmlinkage void do_divide_error(unsigned long r4) { - siginfo_t info; + int code; switch (r4) { case TRAP_DIVZERO_ERROR: - info.si_code = FPE_INTDIV; + code = FPE_INTDIV; break; case TRAP_DIVOVF_ERROR: - info.si_code = FPE_INTOVF; + code = FPE_INTOVF; break; + default: + /* Let gcc know unhandled cases don't make it past here */ + return; } - - info.si_signo = SIGFPE; - force_sig_info(info.si_signo, &info, current); + force_sig_fault(SIGFPE, code, NULL, current); } #endif diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c index c86f4360c6ce..a0fa8fc88739 100644 --- a/arch/sh/math-emu/math.c +++ b/arch/sh/math-emu/math.c @@ -507,7 +507,6 @@ static int ieee_fpe_handler(struct pt_regs *regs) unsigned short insn = *(unsigned short *)regs->pc; unsigned short finsn; unsigned long nextpc; - siginfo_t info; int nib[4] = { (insn >> 12) & 0xf, (insn >> 8) & 0xf, @@ -560,11 +559,8 @@ static int ieee_fpe_handler(struct pt_regs *regs) ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); task_thread_info(tsk)->status |= TS_USEDFPU; } else { - info.si_signo = SIGFPE; - info.si_errno = 0; - info.si_code = FPE_FLTINV; - info.si_addr = (void __user *)regs->pc; - force_sig_info(SIGFPE, &info, tsk); + force_sig_fault(SIGFPE, FPE_FLTINV, + (void __user *)regs->pc, tsk); } regs->pc = nextpc; diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index f1b44697ad68..fceb2adfcac7 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -20,18 +20,9 @@ #include <asm/cacheflush.h> #include <asm/addrspace.h> -#define PREALLOC_DMA_DEBUG_ENTRIES 4096 - const struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); -static int __init dma_init(void) -{ - dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); - return 0; -} -fs_initcall(dma_init); - void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index 6fd1bf7481c7..b8e7bb84b6b1 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -42,14 +42,7 @@ static void force_sig_info_fault(int si_signo, int si_code, unsigned long address, struct task_struct *tsk) { - siginfo_t info; - - info.si_signo = si_signo; - info.si_errno = 0; - info.si_code = si_code; - info.si_addr = (void __user *)address; - - force_sig_info(si_signo, &info, tsk); + force_sig_fault(si_signo, si_code, (void __user *)address, tsk); } /* diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 8767e45f1b2b..9a2b8877f174 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -1,6 +1,6 @@ config 64BIT - bool "64-bit kernel" if ARCH = "sparc" - default ARCH = "sparc64" + bool "64-bit kernel" if "$(ARCH)" = "sparc" + default "$(ARCH)" = "sparc64" help SPARC is a family of RISC microprocessors designed and marketed by Sun Microsystems, incorporated. 
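The quoting change in the two ARCH comparisons above follows from the new Kconfig macro language merged in the 4.18 cycle: environment variables are now expanded with $(ARCH)-style references and compared as quoted strings, replacing the old mechanism that imported them as magic symbols via 'option env'.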
They are very widely found in Sun @@ -25,7 +25,6 @@ config SPARC select RTC_CLASS select RTC_DRV_M48T59 select RTC_SYSTOHC - select HAVE_DMA_API_DEBUG select HAVE_ARCH_JUMP_LABEL if SPARC64 select GENERIC_IRQ_SHOW select ARCH_WANT_IPC_PARSE_VERSION @@ -44,6 +43,8 @@ config SPARC select ARCH_HAS_SG_CHAIN select CPU_NO_EFFICIENT_FFS select LOCKDEP_SMALL if LOCKDEP + select NEED_DMA_MAP_STATE + select NEED_SG_DMA_LENGTH config SPARC32 def_bool !64BIT @@ -67,6 +68,7 @@ config SPARC64 select HAVE_SYSCALL_TRACEPOINTS select HAVE_CONTEXT_TRACKING select HAVE_DEBUG_KMEMLEAK + select IOMMU_HELPER select SPARSE_IRQ select RTC_DRV_CMOS select RTC_DRV_BQ4802 @@ -86,6 +88,7 @@ config SPARC64 select ARCH_USE_QUEUED_SPINLOCKS select GENERIC_TIME_VSYSCALL select ARCH_CLOCKSOURCE_DATA + select ARCH_HAS_PTE_SPECIAL config ARCH_DEFCONFIG string @@ -102,14 +105,6 @@ config ARCH_ATU bool default y if SPARC64 -config ARCH_DMA_ADDR_T_64BIT - bool - default y if ARCH_ATU - -config IOMMU_HELPER - bool - default y if SPARC64 - config STACKTRACE_SUPPORT bool default y if SPARC64 @@ -146,12 +141,6 @@ config ZONE_DMA bool default y if SPARC32 -config NEED_DMA_MAP_STATE - def_bool y - -config NEED_SG_DMA_LENGTH - def_bool y - config GENERIC_ISA_DMA bool default y if SPARC32 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile index edac927e4952..966a13d2b127 100644 --- a/arch/sparc/Makefile +++ b/arch/sparc/Makefile @@ -39,7 +39,7 @@ else # sparc64 # -CHECKFLAGS += -D__sparc__ -D__sparc_v9__ -D__arch64__ -m64 +CHECKFLAGS += -D__sparc__ -D__sparc_v9__ -D__arch64__ LDFLAGS := -m elf64_sparc export BITS := 64 UTS_MACHINE := sparc64 diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h index 615283e16f22..4eb51d2dae98 100644 --- a/arch/sparc/include/asm/compat.h +++ b/arch/sparc/include/asm/compat.h @@ -11,7 +11,6 @@ typedef u32 compat_size_t; typedef s32 compat_ssize_t; -typedef s32 compat_time_t; typedef s32 compat_clock_t; typedef s32 compat_pid_t; typedef u16 __compat_uid_t; @@ -39,16 +38,6 @@ typedef u32 compat_ulong_t; typedef u64 compat_u64; typedef u32 compat_uptr_t; -struct compat_timespec { - compat_time_t tv_sec; - s32 tv_nsec; -}; - -struct compat_timeval { - compat_time_t tv_sec; - s32 tv_usec; -}; - struct compat_stat { compat_dev_t st_dev; compat_ino_t st_ino; @@ -168,6 +157,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } +#ifdef CONFIG_COMPAT static inline void __user *arch_compat_alloc_user_space(long len) { struct pt_regs *regs = current_thread_info()->kregs; @@ -184,6 +174,7 @@ static inline void __user *arch_compat_alloc_user_space(long len) return (void __user *) usp; } +#endif struct compat_ipc64_perm { compat_key_t key; @@ -201,10 +192,10 @@ struct compat_ipc64_perm { struct compat_semid64_ds { struct compat_ipc64_perm sem_perm; - unsigned int __pad1; - compat_time_t sem_otime; - unsigned int __pad2; - compat_time_t sem_ctime; + unsigned int sem_otime_high; + unsigned int sem_otime; + unsigned int sem_ctime_high; + unsigned int sem_ctime; u32 sem_nsems; u32 __unused1; u32 __unused2; @@ -212,12 +203,12 @@ struct compat_semid64_ds { struct compat_msqid64_ds { struct compat_ipc64_perm msg_perm; - unsigned int __pad1; - compat_time_t msg_stime; - unsigned int __pad2; - compat_time_t msg_rtime; - unsigned int __pad3; - compat_time_t msg_ctime; + unsigned int msg_stime_high; + unsigned int msg_stime; + unsigned int msg_rtime_high; + unsigned int msg_rtime; + unsigned int msg_ctime_high; + unsigned int msg_ctime; unsigned int 
msg_cbytes; unsigned int msg_qnum; unsigned int msg_qbytes; @@ -229,12 +220,12 @@ struct compat_msqid64_ds { struct compat_shmid64_ds { struct compat_ipc64_perm shm_perm; - unsigned int __pad1; - compat_time_t shm_atime; - unsigned int __pad2; - compat_time_t shm_dtime; - unsigned int __pad3; - compat_time_t shm_ctime; + unsigned int shm_atime_high; + unsigned int shm_atime; + unsigned int shm_dtime_high; + unsigned int shm_dtime; + unsigned int shm_ctime_high; + unsigned int shm_ctime; compat_size_t shm_segsz; compat_pid_t shm_cpid; compat_pid_t shm_lpid; @@ -243,6 +234,7 @@ struct compat_shmid64_ds { unsigned int __unused2; }; +#ifdef CONFIG_COMPAT static inline int is_compat_task(void) { return test_thread_flag(TIF_32BIT); @@ -254,5 +246,6 @@ static inline bool in_compat_syscall(void) return pt_regs_trap_type(current_pt_regs()) == 0x110; } #define in_compat_syscall in_compat_syscall +#endif #endif /* _ASM_SPARC64_COMPAT_H */ diff --git a/arch/sparc/include/asm/hardirq_64.h b/arch/sparc/include/asm/hardirq_64.h index f56540271993..75b92bfe04b5 100644 --- a/arch/sparc/include/asm/hardirq_64.h +++ b/arch/sparc/include/asm/hardirq_64.h @@ -10,8 +10,9 @@ #include <asm/cpudata.h> #define __ARCH_IRQ_STAT -#define local_softirq_pending() \ - (local_cpu_data().__softirq_pending) + +#define local_softirq_pending_ref \ + __cpu_data.__softirq_pending void ack_bad_irq(unsigned int irq); diff --git a/arch/sparc/include/asm/iommu-common.h b/arch/sparc/include/asm/iommu-common.h new file mode 100644 index 000000000000..802c90c79d1f --- /dev/null +++ b/arch/sparc/include/asm/iommu-common.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IOMMU_COMMON_H +#define _LINUX_IOMMU_COMMON_H + +#include <linux/spinlock_types.h> +#include <linux/device.h> +#include <asm/page.h> + +#define IOMMU_POOL_HASHBITS 4 +#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS) +#define IOMMU_ERROR_CODE (~(unsigned long) 0) + +struct iommu_pool { + unsigned long start; + unsigned long end; + unsigned long hint; + spinlock_t lock; +}; + +struct iommu_map_table { + unsigned long table_map_base; + unsigned long table_shift; + unsigned long nr_pools; + void (*lazy_flush)(struct iommu_map_table *); + unsigned long poolsize; + struct iommu_pool pools[IOMMU_NR_POOLS]; + u32 flags; +#define IOMMU_HAS_LARGE_POOL 0x00000001 +#define IOMMU_NO_SPAN_BOUND 0x00000002 +#define IOMMU_NEED_FLUSH 0x00000004 + struct iommu_pool large_pool; + unsigned long *map; +}; + +extern void iommu_tbl_pool_init(struct iommu_map_table *iommu, + unsigned long num_entries, + u32 table_shift, + void (*lazy_flush)(struct iommu_map_table *), + bool large_pool, u32 npools, + bool skip_span_boundary_check); + +extern unsigned long iommu_tbl_range_alloc(struct device *dev, + struct iommu_map_table *iommu, + unsigned long npages, + unsigned long *handle, + unsigned long mask, + unsigned int align_order); + +extern void iommu_tbl_range_free(struct iommu_map_table *iommu, + u64 dma_addr, unsigned long npages, + unsigned long entry); + +#endif diff --git a/arch/sparc/include/asm/iommu_64.h b/arch/sparc/include/asm/iommu_64.h index 9ed6b54caa4b..0ef6dedf747e 100644 --- a/arch/sparc/include/asm/iommu_64.h +++ b/arch/sparc/include/asm/iommu_64.h @@ -17,7 +17,7 @@ #define IOPTE_WRITE 0x0000000000000002UL #define IOMMU_NUM_CTXS 4096 -#include <linux/iommu-common.h> +#include <asm/iommu-common.h> struct iommu_arena { unsigned long *map; diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h index 98917e48727d..cfc0ee9476c6 100644 
--- a/arch/sparc/include/asm/pci_32.h +++ b/arch/sparc/include/asm/pci_32.h @@ -17,10 +17,6 @@ #define PCI_IRQ_NONE 0xffffffff -/* Dynamic DMA mapping stuff. - */ -#define PCI_DMA_BUS_IS_PHYS (0) - #endif /* __KERNEL__ */ #ifndef CONFIG_LEON_PCI diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h index 671274e36cfa..fac77813402c 100644 --- a/arch/sparc/include/asm/pci_64.h +++ b/arch/sparc/include/asm/pci_64.h @@ -17,12 +17,6 @@ #define PCI_IRQ_NONE 0xffffffff -/* The PCI address space does not equal the physical memory - * address space. The networking and block device layers use - * this boolean for bounce buffer decisions. - */ -#define PCI_DMA_BUS_IS_PHYS (0) - /* PCI IOMMU mapping bypass support. */ /* PCI 64-bit addressing works for all slots on all controller diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 44d6ac47e035..1393a8ac596b 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -117,9 +117,6 @@ bool kern_addr_valid(unsigned long addr); #define _PAGE_PMD_HUGE _AC(0x0100000000000000,UL) /* Huge page */ #define _PAGE_PUD_HUGE _PAGE_PMD_HUGE -/* Advertise support for _PAGE_SPECIAL */ -#define __HAVE_ARCH_PTE_SPECIAL - /* SUN4U pte bits... */ #define _PAGE_SZ4MB_4U _AC(0x6000000000000000,UL) /* 4MB Page */ #define _PAGE_SZ512K_4U _AC(0x4000000000000000,UL) /* 512K Page */ diff --git a/arch/sparc/include/uapi/asm/jsflash.h b/arch/sparc/include/uapi/asm/jsflash.h deleted file mode 100644 index 68c98a54281a..000000000000 --- a/arch/sparc/include/uapi/asm/jsflash.h +++ /dev/null @@ -1,40 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * jsflash.h: OS Flash SIMM support for JavaStations. - * - * Copyright (C) 1999 Pete Zaitcev - */ - -#ifndef _SPARC_JSFLASH_H -#define _SPARC_JSFLASH_H - -#ifndef _SPARC_TYPES_H -#include <linux/types.h> -#endif - -/* - * Semantics of the offset is a full address. - * Hardcode it or get it from probe ioctl. - * - * We use full bus address, so that we would be - * automatically compatible with possible future systems. - */ - -#define JSFLASH_IDENT (('F'<<8)|54) -struct jsflash_ident_arg { - __u64 off; /* 0x20000000 is included */ - __u32 size; - char name[32]; /* With trailing zero */ -}; - -#define JSFLASH_ERASE (('F'<<8)|55) -/* Put 0 as argument, may be flags or sector number... */ - -#define JSFLASH_PROGRAM (('F'<<8)|56) -struct jsflash_program_arg { - __u64 data; /* char* for sparc and sparc64 */ - __u64 off; - __u32 size; -}; - -#endif /* _SPARC_JSFLASH_H */ diff --git a/arch/sparc/include/uapi/asm/msgbuf.h b/arch/sparc/include/uapi/asm/msgbuf.h index b601c4f4d956..ffc46c211d6d 100644 --- a/arch/sparc/include/uapi/asm/msgbuf.h +++ b/arch/sparc/include/uapi/asm/msgbuf.h @@ -8,25 +8,22 @@ * between kernel and user space. 
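In the 32-bit sparc variants below, the new *_high member deliberately comes before the low word: it reuses the slot of the removed padding, so on this big-endian ABI the pair occupies memory exactly like one 64-bit field. A sketch of the equivalence:

        /* Sketch: the split pair overlays a big-endian 64-bit value on sparc32. */
        union {
                struct {
                        unsigned long msg_stime_high;   /* upper 32 bits */
                        unsigned long msg_stime;        /* lower 32 bits */
                };
                unsigned long long stime64;             /* same bytes, read as one */
        } view;
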
* * Pad space is left for: - * - 64-bit time_t to solve y2038 problem * - 2 miscellaneous 32-bit values */ - -#if defined(__sparc__) && defined(__arch64__) -# define PADDING(x) -#else -# define PADDING(x) unsigned int x; -#endif - - struct msqid64_ds { struct ipc64_perm msg_perm; - PADDING(__pad1) +#if defined(__sparc__) && defined(__arch64__) __kernel_time_t msg_stime; /* last msgsnd time */ - PADDING(__pad2) __kernel_time_t msg_rtime; /* last msgrcv time */ - PADDING(__pad3) __kernel_time_t msg_ctime; /* last change time */ +#else + unsigned long msg_stime_high; + unsigned long msg_stime; /* last msgsnd time */ + unsigned long msg_rtime_high; + unsigned long msg_rtime; /* last msgrcv time */ + unsigned long msg_ctime_high; + unsigned long msg_ctime; /* last change time */ +#endif unsigned long msg_cbytes; /* current number of bytes on queue */ unsigned long msg_qnum; /* number of messages in queue */ unsigned long msg_qbytes; /* max number of bytes on queue */ @@ -35,5 +32,4 @@ struct msqid64_ds { unsigned long __unused1; unsigned long __unused2; }; -#undef PADDING #endif /* _SPARC_MSGBUF_H */ diff --git a/arch/sparc/include/uapi/asm/sembuf.h b/arch/sparc/include/uapi/asm/sembuf.h index f49b0ffa0ab8..f3d309c2e1cd 100644 --- a/arch/sparc/include/uapi/asm/sembuf.h +++ b/arch/sparc/include/uapi/asm/sembuf.h @@ -8,25 +8,23 @@ * between kernel and user space. * * Pad space is left for: - * - 64-bit time_t to solve y2038 problem * - 2 miscellaneous 32-bit values */ -#if defined(__sparc__) && defined(__arch64__) -# define PADDING(x) -#else -# define PADDING(x) unsigned int x; -#endif struct semid64_ds { struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ - PADDING(__pad1) +#if defined(__sparc__) && defined(__arch64__) __kernel_time_t sem_otime; /* last semop time */ - PADDING(__pad2) __kernel_time_t sem_ctime; /* last change time */ +#else + unsigned long sem_otime_high; + unsigned long sem_otime; /* last semop time */ + unsigned long sem_ctime_high; + unsigned long sem_ctime; /* last change time */ +#endif unsigned long sem_nsems; /* no. of semaphores in array */ unsigned long __unused1; unsigned long __unused2; }; -#undef PADDING #endif /* _SPARC64_SEMBUF_H */ diff --git a/arch/sparc/include/uapi/asm/shmbuf.h b/arch/sparc/include/uapi/asm/shmbuf.h index 286631db705c..06618b84822d 100644 --- a/arch/sparc/include/uapi/asm/shmbuf.h +++ b/arch/sparc/include/uapi/asm/shmbuf.h @@ -8,24 +8,23 @@ * between kernel and user space. 
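
The sembuf.h change mirrors the msgbuf.h one: on 32-bit sparc the anonymous padding around sem_otime/sem_ctime becomes explicit *_high words. A hypothetical 32-bit user-space reader wanting the full 64-bit value would combine them as below; glibc normally hides this, so it is shown only to illustrate the layout:

	struct semid64_ds ds;
	/* ... after a raw semctl(semid, 0, IPC_STAT, &ds) ... */
	unsigned long long otime =
		((unsigned long long)ds.sem_otime_high << 32) | ds.sem_otime;
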
* * Pad space is left for: - * - 64-bit time_t to solve y2038 problem * - 2 miscellaneous 32-bit values */ -#if defined(__sparc__) && defined(__arch64__) -# define PADDING(x) -#else -# define PADDING(x) unsigned int x; -#endif - struct shmid64_ds { struct ipc64_perm shm_perm; /* operation perms */ - PADDING(__pad1) +#if defined(__sparc__) && defined(__arch64__) __kernel_time_t shm_atime; /* last attach time */ - PADDING(__pad2) __kernel_time_t shm_dtime; /* last detach time */ - PADDING(__pad3) __kernel_time_t shm_ctime; /* last change time */ +#else + unsigned long shm_atime_high; + unsigned long shm_atime; /* last attach time */ + unsigned long shm_dtime_high; + unsigned long shm_dtime; /* last detach time */ + unsigned long shm_ctime_high; + unsigned long shm_ctime; /* last change time */ +#endif size_t shm_segsz; /* size of segment (bytes) */ __kernel_pid_t shm_cpid; /* pid of creator */ __kernel_pid_t shm_lpid; /* pid of last operator */ @@ -46,6 +45,4 @@ struct shminfo64 { unsigned long __unused4; }; -#undef PADDING - #endif /* _SPARC_SHMBUF_H */ diff --git a/arch/sparc/include/uapi/asm/siginfo.h b/arch/sparc/include/uapi/asm/siginfo.h index 896ce447d16a..e7049550ac82 100644 --- a/arch/sparc/include/uapi/asm/siginfo.h +++ b/arch/sparc/include/uapi/asm/siginfo.h @@ -18,13 +18,6 @@ #define SI_NOINFO 32767 /* no information in siginfo_t */ /* - * SIGFPE si_codes - */ -#ifdef __KERNEL__ -#define FPE_FIXME 0 /* Broken dup of SI_USER */ -#endif /* __KERNEL__ */ - -/* * SIGEMT si_codes */ #define EMT_TAGOVF 1 /* tag overflow */ diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 76cb57750dda..cf8640841b7a 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -59,7 +59,7 @@ obj-$(CONFIG_SPARC32) += leon_pmc.o obj-$(CONFIG_SPARC64) += reboot.o obj-$(CONFIG_SPARC64) += sysfs.o -obj-$(CONFIG_SPARC64) += iommu.o +obj-$(CONFIG_SPARC64) += iommu.o iommu-common.o obj-$(CONFIG_SPARC64) += central.o obj-$(CONFIG_SPARC64) += starfire.o obj-$(CONFIG_SPARC64) += power.o @@ -74,8 +74,6 @@ obj-$(CONFIG_SPARC64) += pcr.o obj-$(CONFIG_SPARC64) += nmi.o obj-$(CONFIG_SPARC64_SMP) += cpumap.o -obj-y += dma.o - obj-$(CONFIG_PCIC_PCI) += pcic.o obj-$(CONFIG_LEON_PCI) += leon_pci.o obj-$(CONFIG_SPARC_GRPCI2)+= leon_pci_grpci2.o diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c deleted file mode 100644 index f73e7597c971..000000000000 --- a/arch/sparc/kernel/dma.c +++ /dev/null @@ -1,13 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <linux/kernel.h> -#include <linux/dma-mapping.h> -#include <linux/dma-debug.h> - -#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 15) - -static int __init dma_init(void) -{ - dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); - return 0; -} -fs_initcall(dma_init); diff --git a/arch/sparc/kernel/iommu-common.c b/arch/sparc/kernel/iommu-common.c new file mode 100644 index 000000000000..59cb16691322 --- /dev/null +++ b/arch/sparc/kernel/iommu-common.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * IOMMU mmap management and range allocation functions. + * Based almost entirely upon the powerpc iommu allocator. 
+ */ + +#include <linux/export.h> +#include <linux/bitmap.h> +#include <linux/bug.h> +#include <linux/iommu-helper.h> +#include <linux/dma-mapping.h> +#include <linux/hash.h> +#include <asm/iommu-common.h> + +static unsigned long iommu_large_alloc = 15; + +static DEFINE_PER_CPU(unsigned int, iommu_hash_common); + +static inline bool need_flush(struct iommu_map_table *iommu) +{ + return ((iommu->flags & IOMMU_NEED_FLUSH) != 0); +} + +static inline void set_flush(struct iommu_map_table *iommu) +{ + iommu->flags |= IOMMU_NEED_FLUSH; +} + +static inline void clear_flush(struct iommu_map_table *iommu) +{ + iommu->flags &= ~IOMMU_NEED_FLUSH; +} + +static void setup_iommu_pool_hash(void) +{ + unsigned int i; + static bool do_once; + + if (do_once) + return; + do_once = true; + for_each_possible_cpu(i) + per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS); +} + +/* + * Initialize iommu_pool entries for the iommu_map_table. `num_entries' + * is the number of table entries. If `large_pool' is set to true, + * the top 1/4 of the table will be set aside for pool allocations + * of more than iommu_large_alloc pages. + */ +void iommu_tbl_pool_init(struct iommu_map_table *iommu, + unsigned long num_entries, + u32 table_shift, + void (*lazy_flush)(struct iommu_map_table *), + bool large_pool, u32 npools, + bool skip_span_boundary_check) +{ + unsigned int start, i; + struct iommu_pool *p = &(iommu->large_pool); + + setup_iommu_pool_hash(); + if (npools == 0) + iommu->nr_pools = IOMMU_NR_POOLS; + else + iommu->nr_pools = npools; + BUG_ON(npools > IOMMU_NR_POOLS); + + iommu->table_shift = table_shift; + iommu->lazy_flush = lazy_flush; + start = 0; + if (skip_span_boundary_check) + iommu->flags |= IOMMU_NO_SPAN_BOUND; + if (large_pool) + iommu->flags |= IOMMU_HAS_LARGE_POOL; + + if (!large_pool) + iommu->poolsize = num_entries/iommu->nr_pools; + else + iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools; + for (i = 0; i < iommu->nr_pools; i++) { + spin_lock_init(&(iommu->pools[i].lock)); + iommu->pools[i].start = start; + iommu->pools[i].hint = start; + start += iommu->poolsize; /* start for next pool */ + iommu->pools[i].end = start - 1; + } + if (!large_pool) + return; + /* initialize large_pool */ + spin_lock_init(&(p->lock)); + p->start = start; + p->hint = p->start; + p->end = num_entries; +} + +unsigned long iommu_tbl_range_alloc(struct device *dev, + struct iommu_map_table *iommu, + unsigned long npages, + unsigned long *handle, + unsigned long mask, + unsigned int align_order) +{ + unsigned int pool_hash = __this_cpu_read(iommu_hash_common); + unsigned long n, end, start, limit, boundary_size; + struct iommu_pool *pool; + int pass = 0; + unsigned int pool_nr; + unsigned int npools = iommu->nr_pools; + unsigned long flags; + bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0); + bool largealloc = (large_pool && npages > iommu_large_alloc); + unsigned long shift; + unsigned long align_mask = 0; + + if (align_order > 0) + align_mask = ~0ul >> (BITS_PER_LONG - align_order); + + /* Sanity check */ + if (unlikely(npages == 0)) { + WARN_ON_ONCE(1); + return IOMMU_ERROR_CODE; + } + + if (largealloc) { + pool = &(iommu->large_pool); + pool_nr = 0; /* to keep compiler happy */ + } else { + /* pick out pool_nr */ + pool_nr = pool_hash & (npools - 1); + pool = &(iommu->pools[pool_nr]); + } + spin_lock_irqsave(&pool->lock, flags); + + again: + if (pass == 0 && handle && *handle && + (*handle >= pool->start) && (*handle < pool->end)) + start = *handle; + else + start = pool->hint; + + 
limit = pool->end; + + /* The case below can happen if we have a small segment appended + * to a large, or when the previous alloc was at the very end of + * the available space. If so, go back to the beginning. If a + * flush is needed, it will get done based on the return value + * from iommu_area_alloc() below. + */ + if (start >= limit) + start = pool->start; + shift = iommu->table_map_base >> iommu->table_shift; + if (limit + shift > mask) { + limit = mask - shift + 1; + /* If we're constrained on address range, first try + * at the masked hint to avoid O(n) search complexity, + * but on second pass, start at 0 in pool 0. + */ + if ((start & mask) >= limit || pass > 0) { + spin_unlock(&(pool->lock)); + pool = &(iommu->pools[0]); + spin_lock(&(pool->lock)); + start = pool->start; + } else { + start &= mask; + } + } + + if (dev) + boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, + 1 << iommu->table_shift); + else + boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift); + + boundary_size = boundary_size >> iommu->table_shift; + /* + * if the skip_span_boundary_check had been set during init, we set + * things up so that iommu_is_span_boundary() merely checks if the + * (index + npages) < num_tsb_entries + */ + if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) { + shift = 0; + boundary_size = iommu->poolsize * iommu->nr_pools; + } + n = iommu_area_alloc(iommu->map, limit, start, npages, shift, + boundary_size, align_mask); + if (n == -1) { + if (likely(pass == 0)) { + /* First failure, rescan from the beginning. */ + pool->hint = pool->start; + set_flush(iommu); + pass++; + goto again; + } else if (!largealloc && pass <= iommu->nr_pools) { + spin_unlock(&(pool->lock)); + pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1); + pool = &(iommu->pools[pool_nr]); + spin_lock(&(pool->lock)); + pool->hint = pool->start; + set_flush(iommu); + pass++; + goto again; + } else { + /* give up */ + n = IOMMU_ERROR_CODE; + goto bail; + } + } + if (iommu->lazy_flush && + (n < pool->hint || need_flush(iommu))) { + clear_flush(iommu); + iommu->lazy_flush(iommu); + } + + end = n + npages; + pool->hint = end; + + /* Update handle for SG allocations */ + if (handle) + *handle = end; +bail: + spin_unlock_irqrestore(&(pool->lock), flags); + + return n; +} + +static struct iommu_pool *get_pool(struct iommu_map_table *tbl, + unsigned long entry) +{ + struct iommu_pool *p; + unsigned long largepool_start = tbl->large_pool.start; + bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0); + + /* The large pool is the last pool at the top of the table */ + if (large_pool && entry >= largepool_start) { + p = &tbl->large_pool; + } else { + unsigned int pool_nr = entry / tbl->poolsize; + + BUG_ON(pool_nr >= tbl->nr_pools); + p = &tbl->pools[pool_nr]; + } + return p; +} + +/* Caller supplies the index of the entry into the iommu map table + * itself when the mapping from dma_addr to the entry is not the + * default addr->entry mapping below. 
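
Taken together, iommu_tbl_pool_init(), iommu_tbl_range_alloc() and iommu_tbl_range_free() form the whole allocator API moved here from lib/. A rough usage sketch, where dma_base, num_tsb_entries, npages and dma_addr are assumed to come from the caller and IO_PAGE_SHIFT is the usual sparc I/O page shift (this is not code from the patch):

	struct iommu_map_table tbl = { .table_map_base = dma_base };
	unsigned long entry;

	tbl.map = kzalloc(BITS_TO_LONGS(num_tsb_entries) * sizeof(long),
			  GFP_KERNEL);
	iommu_tbl_pool_init(&tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    NULL /* no lazy flush */, false, 0, false);

	entry = iommu_tbl_range_alloc(dev, &tbl, npages, NULL, ~0UL, 0);
	if (entry != IOMMU_ERROR_CODE) {
		/* ... program the TSB entries, use the mapping ... */
		iommu_tbl_range_free(&tbl, dma_addr, npages, entry);
	}
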
+ */ +void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr, + unsigned long npages, unsigned long entry) +{ + struct iommu_pool *pool; + unsigned long flags; + unsigned long shift = iommu->table_shift; + + if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */ + entry = (dma_addr - iommu->table_map_base) >> shift; + pool = get_pool(iommu, entry); + + spin_lock_irqsave(&(pool->lock), flags); + bitmap_clear(iommu->map, entry, npages); + spin_unlock_irqrestore(&(pool->lock), flags); +} diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index b08dc3416f06..40d008b0bd3e 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -14,7 +14,7 @@ #include <linux/errno.h> #include <linux/iommu-helper.h> #include <linux/bitmap.h> -#include <linux/iommu-common.h> +#include <asm/iommu-common.h> #ifdef CONFIG_PCI #include <linux/pci.h> diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 3bcef9ce74df..cca9134cfa7d 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -678,25 +678,14 @@ static int sparc_io_proc_show(struct seq_file *m, void *v) return 0; } - -static int sparc_io_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, sparc_io_proc_show, PDE_DATA(inode)); -} - -static const struct file_operations sparc_io_proc_fops = { - .owner = THIS_MODULE, - .open = sparc_io_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; #endif /* CONFIG_PROC_FS */ static void register_proc_sparc_ioport(void) { #ifdef CONFIG_PROC_FS - proc_create_data("io_map", 0, NULL, &sparc_io_proc_fops, &sparc_iomap); - proc_create_data("dvma_map", 0, NULL, &sparc_io_proc_fops, &_sparc_dvma); + proc_create_single_data("io_map", 0, NULL, sparc_io_proc_show, + &sparc_iomap); + proc_create_single_data("dvma_map", 0, NULL, sparc_io_proc_show, + &_sparc_dvma); #endif } diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c index 86b625f9d8dc..c0fa3ef6cf01 100644 --- a/arch/sparc/kernel/ldc.c +++ b/arch/sparc/kernel/ldc.c @@ -16,7 +16,7 @@ #include <linux/list.h> #include <linux/init.h> #include <linux/bitmap.h> -#include <linux/iommu-common.h> +#include <asm/iommu-common.h> #include <asm/hypervisor.h> #include <asm/iommu.h> diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c index 15b59169c535..e5e5ff6b9a5c 100644 --- a/arch/sparc/kernel/leon_pci.c +++ b/arch/sparc/kernel/leon_pci.c @@ -60,50 +60,30 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info) pci_bus_add_devices(root_bus); } -void pcibios_fixup_bus(struct pci_bus *pbus) +int pcibios_enable_device(struct pci_dev *dev, int mask) { - struct pci_dev *dev; - int i, has_io, has_mem; - u16 cmd; + u16 cmd, oldcmd; + int i; - list_for_each_entry(dev, &pbus->devices, bus_list) { - /* - * We can not rely on that the bootloader has enabled I/O - * or memory access to PCI devices. Instead we enable it here - * if the device has BARs of respective type. 
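
The ioport.c hunk is part of a tree-wide cleanup: proc_create_single_data() generates the open/read/llseek/release boilerplate from a bare show() callback, so the hand-rolled file_operations can go. The general pattern, with illustrative names:

	static int foo_value = 42;

	static int foo_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "value: %d\n", *(int *)m->private);
		return 0;
	}

	/* in init code: m->private is wired up to the last argument */
	proc_create_single_data("foo", 0, NULL, foo_show, &foo_value);
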
- */ - has_io = has_mem = 0; - for (i = 0; i < PCI_ROM_RESOURCE; i++) { - unsigned long f = dev->resource[i].flags; - if (f & IORESOURCE_IO) - has_io = 1; - else if (f & IORESOURCE_MEM) - has_mem = 1; - } - /* ROM BARs are mapped into 32-bit memory space */ - if (dev->resource[PCI_ROM_RESOURCE].end != 0) { - dev->resource[PCI_ROM_RESOURCE].flags |= - IORESOURCE_ROM_ENABLE; - has_mem = 1; - } - pci_bus_read_config_word(pbus, dev->devfn, PCI_COMMAND, &cmd); - if (has_io && !(cmd & PCI_COMMAND_IO)) { -#ifdef CONFIG_PCI_DEBUG - printk(KERN_INFO "LEONPCI: Enabling I/O for dev %s\n", - pci_name(dev)); -#endif + pci_read_config_word(dev, PCI_COMMAND, &cmd); + oldcmd = cmd; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + struct resource *res = &dev->resource[i]; + + /* Only set up the requested stuff */ + if (!(mask & (1<<i))) + continue; + + if (res->flags & IORESOURCE_IO) cmd |= PCI_COMMAND_IO; - pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND, - cmd); - } - if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) { -#ifdef CONFIG_PCI_DEBUG - printk(KERN_INFO "LEONPCI: Enabling MEMORY for dev" - "%s\n", pci_name(dev)); -#endif + if (res->flags & IORESOURCE_MEM) cmd |= PCI_COMMAND_MEMORY; - pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND, - cmd); - } } + + if (cmd != oldcmd) { + pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd); + pci_write_config_word(dev, PCI_COMMAND, cmd); + } + return 0; } diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 41b20edb427d..17ea16a1337c 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -214,8 +214,8 @@ static void pci_parse_of_addrs(struct platform_device *op, if (!addrs) return; if (ofpci_verbose) - printk(" parse addresses (%d bytes) @ %p\n", - proplen, addrs); + pci_info(dev, " parse addresses (%d bytes) @ %p\n", + proplen, addrs); op_res = &op->resource[0]; for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) { struct resource *res; @@ -227,8 +227,8 @@ static void pci_parse_of_addrs(struct platform_device *op, continue; i = addrs[0] & 0xff; if (ofpci_verbose) - printk(" start: %llx, end: %llx, i: %x\n", - op_res->start, op_res->end, i); + pci_info(dev, " start: %llx, end: %llx, i: %x\n", + op_res->start, op_res->end, i); if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) { res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; @@ -236,13 +236,15 @@ static void pci_parse_of_addrs(struct platform_device *op, res = &dev->resource[PCI_ROM_RESOURCE]; flags |= IORESOURCE_READONLY | IORESOURCE_SIZEALIGN; } else { - printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i); + pci_err(dev, "bad cfg reg num 0x%x\n", i); continue; } res->start = op_res->start; res->end = op_res->end; res->flags = flags; res->name = pci_name(dev); + + pci_info(dev, "reg 0x%x: %pR\n", i, res); } } @@ -289,8 +291,8 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, type = ""; if (ofpci_verbose) - printk(" create device, devfn: %x, type: %s\n", - devfn, type); + pci_info(bus," create device, devfn: %x, type: %s\n", + devfn, type); dev->sysdata = node; dev->dev.parent = bus->bridge; @@ -323,10 +325,6 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus), dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); - if (ofpci_verbose) - printk(" class: 0x%x device name: %s\n", - dev->class, pci_name(dev)); - /* I have seen IDE devices which will not respond to * the bmdma simplex check reads if bus mastering is * disabled. 
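
With this change leon_pci.c follows the same convention as the rest of sparc: I/O and memory decoding are no longer forced on for every device at bus-fixup time, but enabled from pcibios_enable_device() only for the BARs a driver actually requested. Drivers see no difference; the usual probe sequence still applies:

	/* in a driver's probe(): this ends up in the arch's
	 * pcibios_enable_device() with a mask describing which BARs to enable */
	err = pci_enable_device(pdev);
	if (err)
		return err;
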
@@ -353,10 +351,13 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, dev->irq = PCI_IRQ_NONE; } + pci_info(dev, "[%04x:%04x] type %02x class %#08x\n", + dev->vendor, dev->device, dev->hdr_type, dev->class); + pci_parse_of_addrs(sd->op, node, dev); if (ofpci_verbose) - printk(" adding to system ...\n"); + pci_info(dev, " adding to system ...\n"); pci_device_add(dev, bus); @@ -430,19 +431,19 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm, u64 size; if (ofpci_verbose) - printk("of_scan_pci_bridge(%s)\n", node->full_name); + pci_info(dev, "of_scan_pci_bridge(%s)\n", node->full_name); /* parse bus-range property */ busrange = of_get_property(node, "bus-range", &len); if (busrange == NULL || len != 8) { - printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n", + pci_info(dev, "Can't get bus-range for PCI-PCI bridge %s\n", node->full_name); return; } if (ofpci_verbose) - printk(" Bridge bus range [%u --> %u]\n", - busrange[0], busrange[1]); + pci_info(dev, " Bridge bus range [%u --> %u]\n", + busrange[0], busrange[1]); ranges = of_get_property(node, "ranges", &len); simba = 0; @@ -454,8 +455,8 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm, bus = pci_add_new_bus(dev->bus, dev, busrange[0]); if (!bus) { - printk(KERN_ERR "Failed to create pci bus for %s\n", - node->full_name); + pci_err(dev, "Failed to create pci bus for %s\n", + node->full_name); return; } @@ -464,8 +465,8 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm, bus->bridge_ctl = 0; if (ofpci_verbose) - printk(" Bridge ranges[%p] simba[%d]\n", - ranges, simba); + pci_info(dev, " Bridge ranges[%p] simba[%d]\n", + ranges, simba); /* parse ranges property, or cook one up by hand for Simba */ /* PCI #address-cells == 3 and #size-cells == 2 always */ @@ -487,10 +488,10 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm, u64 start; if (ofpci_verbose) - printk(" RAW Range[%08x:%08x:%08x:%08x:%08x:%08x:" - "%08x:%08x]\n", - ranges[0], ranges[1], ranges[2], ranges[3], - ranges[4], ranges[5], ranges[6], ranges[7]); + pci_info(dev, " RAW Range[%08x:%08x:%08x:%08x:%08x:%08x:" + "%08x:%08x]\n", + ranges[0], ranges[1], ranges[2], ranges[3], + ranges[4], ranges[5], ranges[6], ranges[7]); flags = pci_parse_of_flags(ranges[0]); size = GET_64BIT(ranges, 6); @@ -510,14 +511,14 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm, if (flags & IORESOURCE_IO) { res = bus->resource[0]; if (res->flags) { - printk(KERN_ERR "PCI: ignoring extra I/O range" - " for bridge %s\n", node->full_name); + pci_err(dev, "ignoring extra I/O range" + " for bridge %s\n", node->full_name); continue; } } else { if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) { - printk(KERN_ERR "PCI: too many memory ranges" - " for bridge %s\n", node->full_name); + pci_err(dev, "too many memory ranges" + " for bridge %s\n", node->full_name); continue; } res = bus->resource[i]; @@ -529,8 +530,8 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm, region.end = region.start + size - 1; if (ofpci_verbose) - printk(" Using flags[%08x] start[%016llx] size[%016llx]\n", - flags, start, size); + pci_info(dev, " Using flags[%08x] start[%016llx] size[%016llx]\n", + flags, start, size); pcibios_bus_to_resource(dev->bus, res, ®ion); } @@ -538,7 +539,7 @@ after_ranges: sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), bus->number); if (ofpci_verbose) - printk(" bus name: %s\n", bus->name); + pci_info(dev, " bus name: %s\n", bus->name); pci_of_scan_bus(pbm, node, bus); } @@ -553,14 +554,14 @@ static void 
pci_of_scan_bus(struct pci_pbm_info *pbm, struct pci_dev *dev; if (ofpci_verbose) - printk("PCI: scan_bus[%s] bus no %d\n", - node->full_name, bus->number); + pci_info(bus, "scan_bus[%s] bus no %d\n", + node->full_name, bus->number); child = NULL; prev_devfn = -1; while ((child = of_get_next_child(node, child)) != NULL) { if (ofpci_verbose) - printk(" * %s\n", child->full_name); + pci_info(bus, " * %s\n", child->full_name); reg = of_get_property(child, "reg", ®len); if (reg == NULL || reglen < 20) continue; @@ -581,8 +582,7 @@ static void pci_of_scan_bus(struct pci_pbm_info *pbm, if (!dev) continue; if (ofpci_verbose) - printk("PCI: dev header type: %x\n", - dev->hdr_type); + pci_info(dev, "dev header type: %x\n", dev->hdr_type); if (pci_is_bridge(dev)) of_scan_pci_bridge(pbm, child, dev); @@ -624,6 +624,45 @@ static void pci_bus_register_of_sysfs(struct pci_bus *bus) pci_bus_register_of_sysfs(child_bus); } +static void pci_claim_legacy_resources(struct pci_dev *dev) +{ + struct pci_bus_region region; + struct resource *p, *root, *conflict; + + if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) + return; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return; + + p->name = "Video RAM area"; + p->flags = IORESOURCE_MEM | IORESOURCE_BUSY; + + region.start = 0xa0000UL; + region.end = region.start + 0x1ffffUL; + pcibios_bus_to_resource(dev->bus, p, ®ion); + + root = pci_find_parent_resource(dev, p); + if (!root) { + pci_info(dev, "can't claim VGA legacy %pR: no compatible bridge window\n", p); + goto err; + } + + conflict = request_resource_conflict(root, p); + if (conflict) { + pci_info(dev, "can't claim VGA legacy %pR: address conflict with %s %pR\n", + p, conflict->name, conflict); + goto err; + } + + pci_info(dev, "VGA legacy framebuffer %pR\n", p); + return; + +err: + kfree(p); +} + static void pci_claim_bus_resources(struct pci_bus *bus) { struct pci_bus *child_bus; @@ -639,15 +678,13 @@ static void pci_claim_bus_resources(struct pci_bus *bus) continue; if (ofpci_verbose) - printk("PCI: Claiming %s: " - "Resource %d: %016llx..%016llx [%x]\n", - pci_name(dev), i, - (unsigned long long)r->start, - (unsigned long long)r->end, - (unsigned int)r->flags); + pci_info(dev, "Claiming Resource %d: %pR\n", + i, r); pci_claim_resource(dev, i); } + + pci_claim_legacy_resources(dev); } list_for_each_entry(child_bus, &bus->children, node) @@ -687,6 +724,7 @@ struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm, pci_bus_register_of_sysfs(bus); pci_claim_bus_resources(bus); + pci_bus_add_devices(bus); return bus; } @@ -713,9 +751,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) } if (cmd != oldcmd) { - printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n", - pci_name(dev), cmd); - /* Enable the appropriate bits in the PCI command register. 
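
pci_claim_legacy_resources() replaces the old pci_register_legacy_regions(): the VGA window is now claimed per VGA-class device, and its address is expressed as a bus address and run through pcibios_bus_to_resource(), so it lands correctly even when the host bridge translates addresses. The translation step in isolation, assuming res points at a zeroed struct resource:

	struct pci_bus_region region = {
		.start = 0xa0000UL,
		.end   = 0xa0000UL + 0x1ffffUL,
	};

	/* converts the fixed legacy bus address into CPU-side res->start/end */
	pcibios_bus_to_resource(dev->bus, res, &region);
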
*/ + pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd); } return 0; @@ -1075,8 +1111,8 @@ static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus) sp = prop->names; if (ofpci_verbose) - printk("PCI: Making slots for [%s] mask[0x%02x]\n", - node->full_name, mask); + pci_info(bus, "Making slots for [%s] mask[0x%02x]\n", + node->full_name, mask); i = 0; while (mask) { @@ -1089,12 +1125,12 @@ static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus) } if (ofpci_verbose) - printk("PCI: Making slot [%s]\n", sp); + pci_info(bus, "Making slot [%s]\n", sp); pci_slot = pci_create_slot(bus, i, sp, NULL); if (IS_ERR(pci_slot)) - printk(KERN_ERR "PCI: pci_create_slot returned %ld\n", - PTR_ERR(pci_slot)); + pci_err(bus, "pci_create_slot returned %ld\n", + PTR_ERR(pci_slot)); sp += strlen(sp) + 1; mask &= ~this_bit; diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c index 38d46bcc8634..4759ccd542fe 100644 --- a/arch/sparc/kernel/pci_common.c +++ b/arch/sparc/kernel/pci_common.c @@ -329,23 +329,6 @@ void pci_get_pbm_props(struct pci_pbm_info *pbm) } } -static void pci_register_legacy_regions(struct resource *io_res, - struct resource *mem_res) -{ - struct resource *p; - - /* VGA Video RAM. */ - p = kzalloc(sizeof(*p), GFP_KERNEL); - if (!p) - return; - - p->name = "Video RAM area"; - p->start = mem_res->start + 0xa0000UL; - p->end = p->start + 0x1ffffUL; - p->flags = IORESOURCE_BUSY; - request_resource(mem_res, p); -} - static void pci_register_iommu_region(struct pci_pbm_info *pbm) { const u32 *vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", @@ -487,8 +470,6 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm) if (pbm->mem64_space.flags) request_resource(&iomem_resource, &pbm->mem64_space); - pci_register_legacy_regions(&pbm->io_space, - &pbm->mem_space); pci_register_iommu_region(pbm); } @@ -508,8 +489,8 @@ void pci_scan_for_target_abort(struct pci_pbm_info *pbm, PCI_STATUS_REC_TARGET_ABORT)); if (error_bits) { pci_write_config_word(pdev, PCI_STATUS, error_bits); - printk("%s: Device %s saw Target Abort [%016x]\n", - pbm->name, pci_name(pdev), status); + pci_info(pdev, "%s: Device saw Target Abort [%016x]\n", + pbm->name, status); } } @@ -531,8 +512,8 @@ void pci_scan_for_master_abort(struct pci_pbm_info *pbm, (status & (PCI_STATUS_REC_MASTER_ABORT)); if (error_bits) { pci_write_config_word(pdev, PCI_STATUS, error_bits); - printk("%s: Device %s received Master Abort [%016x]\n", - pbm->name, pci_name(pdev), status); + pci_info(pdev, "%s: Device received Master Abort " + "[%016x]\n", pbm->name, status); } } @@ -555,8 +536,8 @@ void pci_scan_for_parity_error(struct pci_pbm_info *pbm, PCI_STATUS_DETECTED_PARITY)); if (error_bits) { pci_write_config_word(pdev, PCI_STATUS, error_bits); - printk("%s: Device %s saw Parity Error [%016x]\n", - pbm->name, pci_name(pdev), status); + pci_info(pdev, "%s: Device saw Parity Error [%016x]\n", + pbm->name, status); } } diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c index 1994d7638406..fb5899cbfa51 100644 --- a/arch/sparc/kernel/pci_msi.c +++ b/arch/sparc/kernel/pci_msi.c @@ -191,8 +191,8 @@ static void sparc64_teardown_msi_irq(unsigned int irq, break; } if (i >= pbm->msi_num) { - printk(KERN_ERR "%s: teardown: No MSI for irq %u\n", - pbm->name, irq); + pci_err(pdev, "%s: teardown: No MSI for irq %u\n", pbm->name, + irq); return; } @@ -201,9 +201,9 @@ static void sparc64_teardown_msi_irq(unsigned int 
irq, err = ops->msi_teardown(pbm, msi_num); if (err) { - printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, " - "irq %u, gives error %d\n", - pbm->name, msi_num, irq, err); + pci_err(pdev, "%s: teardown: ops->teardown() on MSI %u, " + "irq %u, gives error %d\n", pbm->name, msi_num, irq, + err); return; } diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 249367228c33..565d9ac883d0 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -16,7 +16,7 @@ #include <linux/export.h> #include <linux/log2.h> #include <linux/of_device.h> -#include <linux/iommu-common.h> +#include <asm/iommu-common.h> #include <asm/iommu.h> #include <asm/irq.h> diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c index 22f8774977d5..ee4c9a9a171c 100644 --- a/arch/sparc/kernel/pcic.c +++ b/arch/sparc/kernel/pcic.c @@ -518,10 +518,10 @@ static void pcic_map_pci_device(struct linux_pcic *pcic, * board in a PCI slot. We must remap it * under 64K but it is not done yet. XXX */ - printk("PCIC: Skipping I/O space at 0x%lx, " - "this will Oops if a driver attaches " - "device '%s' at %02x:%02x)\n", address, - namebuf, dev->bus->number, dev->devfn); + pci_info(dev, "PCIC: Skipping I/O space at " + "0x%lx, this will Oops if a driver " + "attaches device '%s'\n", address, + namebuf); } } } @@ -551,8 +551,8 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node) p++; } if (i >= pcic->pcic_imdim) { - printk("PCIC: device %s devfn %02x:%02x not found in %d\n", - namebuf, dev->bus->number, dev->devfn, pcic->pcic_imdim); + pci_info(dev, "PCIC: device %s not found in %d\n", namebuf, + pcic->pcic_imdim); dev->irq = 0; return; } @@ -565,7 +565,7 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node) ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI); real_irq = ivec >> ((i-4) << 2) & 0xF; } else { /* Corrupted map */ - printk("PCIC: BAD PIN %d\n", i); for (;;) {} + pci_info(dev, "PCIC: BAD PIN %d\n", i); for (;;) {} } /* P3 */ /* printk("PCIC: device %s pin %d ivec 0x%x irq %x\n", namebuf, i, ivec, dev->irq); */ @@ -574,10 +574,10 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node) */ if (real_irq == 0 || p->force) { if (p->irq == 0 || p->irq >= 15) { /* Corrupted map */ - printk("PCIC: BAD IRQ %d\n", p->irq); for (;;) {} + pci_info(dev, "PCIC: BAD IRQ %d\n", p->irq); for (;;) {} } - printk("PCIC: setting irq %d at pin %d for device %02x:%02x\n", - p->irq, p->pin, dev->bus->number, dev->devfn); + pci_info(dev, "PCIC: setting irq %d at pin %d\n", p->irq, + p->pin); real_irq = p->irq; i = p->pin; @@ -602,15 +602,13 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node) void pcibios_fixup_bus(struct pci_bus *bus) { struct pci_dev *dev; - int i, has_io, has_mem; - unsigned int cmd = 0; struct linux_pcic *pcic; /* struct linux_pbm_info* pbm = &pcic->pbm; */ int node; struct pcidev_cookie *pcp; if (!pcic0_up) { - printk("pcibios_fixup_bus: no PCIC\n"); + pci_info(bus, "pcibios_fixup_bus: no PCIC\n"); return; } pcic = &pcic0; @@ -619,44 +617,12 @@ void pcibios_fixup_bus(struct pci_bus *bus) * Next crud is an equivalent of pbm = pcic_bus_to_pbm(bus); */ if (bus->number != 0) { - printk("pcibios_fixup_bus: nonzero bus 0x%x\n", bus->number); + pci_info(bus, "pcibios_fixup_bus: nonzero bus 0x%x\n", + bus->number); return; } list_for_each_entry(dev, &bus->devices, bus_list) { - - /* - * Comment from i386 branch: - * There are buggy BIOSes that forget to enable I/O and memory - * access to PCI devices. 
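
The printk() to pci_info()/pci_err() conversions running through pci.c, pci_common.c, pci_msi.c and pcic.c all have the same effect: the message is routed through dev_printk() on &pdev->dev, so each line is automatically prefixed with the device address and the hand-rolled "%02x:%02x" bus/devfn strings can be dropped. Schematically (the printed prefix is an example, not output from the patch):

	printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);	/* old */
	pci_err(dev, "bad cfg reg num 0x%x\n", i);		/* new */
	/* -> "pci 0000:00:04.0: bad cfg reg num 0x1c" */
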
We try to fix this, but we need to - * be sure that the BIOS didn't forget to assign an address - * to the device. [mj] - * OBP is a case of such BIOS :-) - */ - has_io = has_mem = 0; - for(i=0; i<6; i++) { - unsigned long f = dev->resource[i].flags; - if (f & IORESOURCE_IO) { - has_io = 1; - } else if (f & IORESOURCE_MEM) - has_mem = 1; - } - pcic_read_config(dev->bus, dev->devfn, PCI_COMMAND, 2, &cmd); - if (has_io && !(cmd & PCI_COMMAND_IO)) { - printk("PCIC: Enabling I/O for device %02x:%02x\n", - dev->bus->number, dev->devfn); - cmd |= PCI_COMMAND_IO; - pcic_write_config(dev->bus, dev->devfn, - PCI_COMMAND, 2, cmd); - } - if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) { - printk("PCIC: Enabling memory for device %02x:%02x\n", - dev->bus->number, dev->devfn); - cmd |= PCI_COMMAND_MEMORY; - pcic_write_config(dev->bus, dev->devfn, - PCI_COMMAND, 2, cmd); - } - node = pdev_to_pnode(&pcic->pbm, dev); if(node == 0) node = -1; @@ -675,6 +641,34 @@ void pcibios_fixup_bus(struct pci_bus *bus) } } +int pcibios_enable_device(struct pci_dev *dev, int mask) +{ + u16 cmd, oldcmd; + int i; + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + oldcmd = cmd; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + struct resource *res = &dev->resource[i]; + + /* Only set up the requested stuff */ + if (!(mask & (1<<i))) + continue; + + if (res->flags & IORESOURCE_IO) + cmd |= PCI_COMMAND_IO; + if (res->flags & IORESOURCE_MEM) + cmd |= PCI_COMMAND_MEMORY; + } + + if (cmd != oldcmd) { + pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd); + pci_write_config_word(dev, PCI_COMMAND, cmd); + } + return 0; +} + /* Makes compiler happy */ static volatile int pcic_timer_dummy; @@ -747,17 +741,11 @@ static void watchdog_reset() { } #endif -int pcibios_enable_device(struct pci_dev *pdev, int mask) -{ - return 0; -} - /* * NMI */ void pcic_nmi(unsigned int pend, struct pt_regs *regs) { - pend = swab32(pend); if (!pcic_speculative || (pend & PCI_SYS_INT_PENDING_PIO) == 0) { diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 454a8af28f13..6c086086ca8f 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -518,14 +518,7 @@ void synchronize_user_stack(void) static void stack_unaligned(unsigned long sp) { - siginfo_t info; - - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRALN; - info.si_addr = (void __user *) sp; - info.si_trapno = 0; - force_sig_info(SIGBUS, &info, current); + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) sp, 0, current); } void fault_in_user_windows(void) diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c index e8c3cb6b6d08..7f3d9c59719a 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c @@ -147,17 +147,11 @@ SYSCALL_DEFINE0(nis_syscall) asmlinkage void sparc_breakpoint (struct pt_regs *regs) { - siginfo_t info; #ifdef DEBUG_SPARC_BREAKPOINT printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc); #endif - info.si_signo = SIGTRAP; - info.si_errno = 0; - info.si_code = TRAP_BRKPT; - info.si_addr = (void __user *)regs->pc; - info.si_trapno = 0; - force_sig_info(SIGTRAP, &info, current); + force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc, 0, current); #ifdef DEBUG_SPARC_BREAKPOINT printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc); diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 9ef8de63f28b..7e49bbc925a5 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ 
b/arch/sparc/kernel/sys_sparc_64.c @@ -502,7 +502,6 @@ SYSCALL_DEFINE0(nis_syscall) asmlinkage void sparc_breakpoint(struct pt_regs *regs) { enum ctx_state prev_state = exception_enter(); - siginfo_t info; if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; @@ -511,12 +510,7 @@ asmlinkage void sparc_breakpoint(struct pt_regs *regs) #ifdef DEBUG_SPARC_BREAKPOINT printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc); #endif - info.si_signo = SIGTRAP; - info.si_errno = 0; - info.si_code = TRAP_BRKPT; - info.si_addr = (void __user *)regs->tpc; - info.si_trapno = 0; - force_sig_info(SIGTRAP, &info, current); + force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->tpc, 0, current); #ifdef DEBUG_SPARC_BREAKPOINT printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc); #endif diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c index b1ed763e4787..bcdfc6168dd5 100644 --- a/arch/sparc/kernel/traps_32.c +++ b/arch/sparc/kernel/traps_32.c @@ -93,8 +93,6 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs) void do_hw_interrupt(struct pt_regs *regs, unsigned long type) { - siginfo_t info; - if(type < 0x80) { /* Sun OS's puke from bad traps, Linux survives! */ printk("Unimplemented Sparc TRAP, type = %02lx\n", type); @@ -104,19 +102,13 @@ void do_hw_interrupt(struct pt_regs *regs, unsigned long type) if(regs->psr & PSR_PS) die_if_kernel("Kernel bad trap", regs); - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_ILLTRP; - info.si_addr = (void __user *)regs->pc; - info.si_trapno = type - 0x80; - force_sig_info(SIGILL, &info, current); + force_sig_fault(SIGILL, ILL_ILLTRP, + (void __user *)regs->pc, type - 0x80, current); } void do_illegal_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { - siginfo_t info; - if(psr & PSR_PS) die_if_kernel("Kernel illegal instruction", regs); #ifdef TRAP_DEBUG @@ -124,27 +116,15 @@ void do_illegal_instruction(struct pt_regs *regs, unsigned long pc, unsigned lon regs->pc, *(unsigned long *)regs->pc); #endif - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_ILLOPC; - info.si_addr = (void __user *)pc; - info.si_trapno = 0; - send_sig_info(SIGILL, &info, current); + send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)pc, 0, current); } void do_priv_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { - siginfo_t info; - if(psr & PSR_PS) die_if_kernel("Penguin instruction from Penguin mode??!?!", regs); - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_PRVOPC; - info.si_addr = (void __user *)pc; - info.si_trapno = 0; - send_sig_info(SIGILL, &info, current); + send_sig_fault(SIGILL, ILL_PRVOPC, (void __user *)pc, 0, current); } /* XXX User may want to be allowed to do this. 
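
Every siginfo conversion in this series follows one template: the open-coded siginfo_t setup collapses into a single force_sig_fault()/send_sig_fault() call, and the helper zero-fills the fields that used to be cleared by hand (on sparc the extra argument carries si_trapno). Side by side:

	/* before */
	siginfo_t info;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code  = TRAP_BRKPT;
	info.si_addr  = (void __user *)regs->pc;
	info.si_trapno = 0;
	force_sig_info(SIGTRAP, &info, current);

	/* after, same semantics */
	force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc, 0, current);
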
XXX */ @@ -152,8 +132,6 @@ void do_priv_instruction(struct pt_regs *regs, unsigned long pc, unsigned long n void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { - siginfo_t info; - if(regs->psr & PSR_PS) { printk("KERNEL MNA at pc %08lx npc %08lx called by %08lx\n", pc, npc, regs->u_regs[UREG_RETPC]); @@ -165,12 +143,9 @@ void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc, unsigned lon instruction_dump ((unsigned long *) regs->pc); printk ("do_MNA!\n"); #endif - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRALN; - info.si_addr = /* FIXME: Should dig out mna address */ (void *)0; - info.si_trapno = 0; - send_sig_info(SIGBUS, &info, current); + send_sig_fault(SIGBUS, BUS_ADRALN, + /* FIXME: Should dig out mna address */ (void *)0, + 0, current); } static unsigned long init_fsr = 0x0UL; @@ -226,9 +201,9 @@ void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { static int calls; - siginfo_t info; unsigned long fsr; int ret = 0; + int code; #ifndef CONFIG_SMP struct task_struct *fpt = last_task_used_math; #else @@ -303,24 +278,20 @@ void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc, } fsr = fpt->thread.fsr; - info.si_signo = SIGFPE; - info.si_errno = 0; - info.si_addr = (void __user *)pc; - info.si_trapno = 0; - info.si_code = FPE_FIXME; + code = FPE_FLTUNK; if ((fsr & 0x1c000) == (1 << 14)) { if (fsr & 0x10) - info.si_code = FPE_FLTINV; + code = FPE_FLTINV; else if (fsr & 0x08) - info.si_code = FPE_FLTOVF; + code = FPE_FLTOVF; else if (fsr & 0x04) - info.si_code = FPE_FLTUND; + code = FPE_FLTUND; else if (fsr & 0x02) - info.si_code = FPE_FLTDIV; + code = FPE_FLTDIV; else if (fsr & 0x01) - info.si_code = FPE_FLTRES; + code = FPE_FLTRES; } - send_sig_info(SIGFPE, &info, fpt); + send_sig_fault(SIGFPE, code, (void __user *)pc, 0, fpt); #ifndef CONFIG_SMP last_task_used_math = NULL; #endif @@ -332,16 +303,9 @@ void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc, void handle_tag_overflow(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { - siginfo_t info; - if(psr & PSR_PS) die_if_kernel("Penguin overflow trap from kernel mode", regs); - info.si_signo = SIGEMT; - info.si_errno = 0; - info.si_code = EMT_TAGOVF; - info.si_addr = (void __user *)pc; - info.si_trapno = 0; - send_sig_info(SIGEMT, &info, current); + send_sig_fault(SIGEMT, EMT_TAGOVF, (void __user *)pc, 0, current); } void handle_watchpoint(struct pt_regs *regs, unsigned long pc, unsigned long npc, @@ -359,61 +323,33 @@ void handle_watchpoint(struct pt_regs *regs, unsigned long pc, unsigned long npc void handle_reg_access(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { - siginfo_t info; - #ifdef TRAP_DEBUG printk("Register Access Exception at PC %08lx NPC %08lx PSR %08lx\n", pc, npc, psr); #endif - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_OBJERR; - info.si_addr = (void __user *)pc; - info.si_trapno = 0; - force_sig_info(SIGBUS, &info, current); + force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)pc, 0, current); } void handle_cp_disabled(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { - siginfo_t info; - - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_COPROC; - info.si_addr = (void __user *)pc; - info.si_trapno = 0; - send_sig_info(SIGILL, &info, current); + send_sig_fault(SIGILL, ILL_COPROC, (void __user *)pc, 0, current); 
} void handle_cp_exception(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { - siginfo_t info; - #ifdef TRAP_DEBUG printk("Co-Processor Exception at PC %08lx NPC %08lx PSR %08lx\n", pc, npc, psr); #endif - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_COPROC; - info.si_addr = (void __user *)pc; - info.si_trapno = 0; - send_sig_info(SIGILL, &info, current); + send_sig_fault(SIGILL, ILL_COPROC, (void __user *)pc, 0, current); } void handle_hw_divzero(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { - siginfo_t info; - - info.si_signo = SIGFPE; - info.si_errno = 0; - info.si_code = FPE_INTDIV; - info.si_addr = (void __user *)pc; - info.si_trapno = 0; - send_sig_info(SIGFPE, &info, current); + send_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)pc, 0, current); } #ifdef CONFIG_DEBUG_BUGVERBOSE diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 462a21abd105..aa624ed79db1 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -87,7 +87,6 @@ static void dump_tl1_traplog(struct tl1_traplog *p) void bad_trap(struct pt_regs *regs, long lvl) { char buffer[36]; - siginfo_t info; if (notify_die(DIE_TRAP, "bad trap", regs, 0, lvl, SIGTRAP) == NOTIFY_STOP) @@ -107,12 +106,8 @@ void bad_trap(struct pt_regs *regs, long lvl) regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_ILLTRP; - info.si_addr = (void __user *)regs->tpc; - info.si_trapno = lvl; - force_sig_info(SIGILL, &info, current); + force_sig_fault(SIGILL, ILL_ILLTRP, + (void __user *)regs->tpc, lvl, current); } void bad_trap_tl1(struct pt_regs *regs, long lvl) @@ -191,7 +186,6 @@ EXPORT_SYMBOL_GPL(unregister_dimm_printer); void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) { enum ctx_state prev_state = exception_enter(); - siginfo_t info; if (notify_die(DIE_TRAP, "instruction access exception", regs, 0, 0x8, SIGTRAP) == NOTIFY_STOP) @@ -206,12 +200,8 @@ void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, un regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } - info.si_signo = SIGSEGV; - info.si_errno = 0; - info.si_code = SEGV_MAPERR; - info.si_addr = (void __user *)regs->tpc; - info.si_trapno = 0; - force_sig_info(SIGSEGV, &info, current); + force_sig_fault(SIGSEGV, SEGV_MAPERR, + (void __user *)regs->tpc, 0, current); out: exception_exit(prev_state); } @@ -230,7 +220,6 @@ void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsig { unsigned short type = (type_ctx >> 16); unsigned short ctx = (type_ctx & 0xffff); - siginfo_t info; if (notify_die(DIE_TRAP, "instruction access exception", regs, 0, 0x8, SIGTRAP) == NOTIFY_STOP) @@ -247,12 +236,7 @@ void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsig regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } - info.si_signo = SIGSEGV; - info.si_errno = 0; - info.si_code = SEGV_MAPERR; - info.si_addr = (void __user *) addr; - info.si_trapno = 0; - force_sig_info(SIGSEGV, &info, current); + force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *) addr, 0, current); } void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) @@ -307,7 +291,6 @@ bool is_no_fault_exception(struct pt_regs *regs) void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) { enum ctx_state prev_state = exception_enter(); - 
siginfo_t info; if (notify_die(DIE_TRAP, "data access exception", regs, 0, 0x30, SIGTRAP) == NOTIFY_STOP) @@ -338,12 +321,7 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un if (is_no_fault_exception(regs)) return; - info.si_signo = SIGSEGV; - info.si_errno = 0; - info.si_code = SEGV_MAPERR; - info.si_addr = (void __user *)sfar; - info.si_trapno = 0; - force_sig_info(SIGSEGV, &info, current); + force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)sfar, 0, current); out: exception_exit(prev_state); } @@ -559,8 +537,6 @@ static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned lo static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs) { - siginfo_t info; - printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] " "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n", smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1); @@ -595,12 +571,7 @@ static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned lon regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_OBJERR; - info.si_addr = (void *)0; - info.si_trapno = 0; - force_sig_info(SIGBUS, &info, current); + force_sig_fault(SIGBUS, BUS_OBJERR, (void *)0, 0, current); } void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar) @@ -2190,7 +2161,6 @@ bool sun4v_nonresum_error_user_handled(struct pt_regs *regs, if (attrs & SUN4V_ERR_ATTRS_MEMORY) { unsigned long addr = ent->err_raddr; - siginfo_t info; if (addr == ~(u64)0) { /* This seems highly unlikely to ever occur */ @@ -2211,21 +2181,13 @@ bool sun4v_nonresum_error_user_handled(struct pt_regs *regs, addr += PAGE_SIZE; } } - info.si_signo = SIGKILL; - info.si_errno = 0; - info.si_trapno = 0; - force_sig_info(info.si_signo, &info, current); + force_sig(SIGKILL, current); return true; } if (attrs & SUN4V_ERR_ATTRS_PIO) { - siginfo_t info; - - info.si_signo = SIGBUS; - info.si_code = BUS_ADRERR; - info.si_addr = (void __user *)sun4v_get_vaddr(regs); - force_sig_info(info.si_signo, &info, current); - + force_sig_fault(SIGBUS, BUS_ADRERR, + (void __user *)sun4v_get_vaddr(regs), 0, current); return true; } @@ -2362,30 +2324,27 @@ static void do_fpe_common(struct pt_regs *regs) regs->tnpc += 4; } else { unsigned long fsr = current_thread_info()->xfsr[0]; - siginfo_t info; + int code; if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } - info.si_signo = SIGFPE; - info.si_errno = 0; - info.si_addr = (void __user *)regs->tpc; - info.si_trapno = 0; - info.si_code = FPE_FIXME; + code = FPE_FLTUNK; if ((fsr & 0x1c000) == (1 << 14)) { if (fsr & 0x10) - info.si_code = FPE_FLTINV; + code = FPE_FLTINV; else if (fsr & 0x08) - info.si_code = FPE_FLTOVF; + code = FPE_FLTOVF; else if (fsr & 0x04) - info.si_code = FPE_FLTUND; + code = FPE_FLTUND; else if (fsr & 0x02) - info.si_code = FPE_FLTDIV; + code = FPE_FLTDIV; else if (fsr & 0x01) - info.si_code = FPE_FLTRES; + code = FPE_FLTRES; } - force_sig_info(SIGFPE, &info, current); + force_sig_fault(SIGFPE, code, + (void __user *)regs->tpc, 0, current); } } @@ -2428,7 +2387,6 @@ out: void do_tof(struct pt_regs *regs) { enum ctx_state prev_state = exception_enter(); - siginfo_t info; if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs, 0, 0x26, SIGEMT) == NOTIFY_STOP) @@ -2440,12 +2398,8 @@ void do_tof(struct pt_regs *regs) regs->tpc &= 0xffffffff; 
regs->tnpc &= 0xffffffff; } - info.si_signo = SIGEMT; - info.si_errno = 0; - info.si_code = EMT_TAGOVF; - info.si_addr = (void __user *)regs->tpc; - info.si_trapno = 0; - force_sig_info(SIGEMT, &info, current); + force_sig_fault(SIGEMT, EMT_TAGOVF, + (void __user *)regs->tpc, 0, current); out: exception_exit(prev_state); } @@ -2453,7 +2407,6 @@ out: void do_div0(struct pt_regs *regs) { enum ctx_state prev_state = exception_enter(); - siginfo_t info; if (notify_die(DIE_TRAP, "integer division by zero", regs, 0, 0x28, SIGFPE) == NOTIFY_STOP) @@ -2465,12 +2418,8 @@ void do_div0(struct pt_regs *regs) regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } - info.si_signo = SIGFPE; - info.si_errno = 0; - info.si_code = FPE_INTDIV; - info.si_addr = (void __user *)regs->tpc; - info.si_trapno = 0; - force_sig_info(SIGFPE, &info, current); + force_sig_fault(SIGFPE, FPE_INTDIV, + (void __user *)regs->tpc, 0, current); out: exception_exit(prev_state); } @@ -2632,7 +2581,6 @@ void do_illegal_instruction(struct pt_regs *regs) unsigned long pc = regs->tpc; unsigned long tstate = regs->tstate; u32 insn; - siginfo_t info; if (notify_die(DIE_TRAP, "illegal instruction", regs, 0, 0x10, SIGILL) == NOTIFY_STOP) @@ -2666,12 +2614,7 @@ void do_illegal_instruction(struct pt_regs *regs) } } } - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_ILLOPC; - info.si_addr = (void __user *)pc; - info.si_trapno = 0; - force_sig_info(SIGILL, &info, current); + force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)pc, 0, current); out: exception_exit(prev_state); } @@ -2679,7 +2622,6 @@ out: void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) { enum ctx_state prev_state = exception_enter(); - siginfo_t info; if (notify_die(DIE_TRAP, "memory address unaligned", regs, 0, 0x34, SIGSEGV) == NOTIFY_STOP) @@ -2692,20 +2634,13 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo if (is_no_fault_exception(regs)) return; - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRALN; - info.si_addr = (void __user *)sfar; - info.si_trapno = 0; - force_sig_info(SIGBUS, &info, current); + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)sfar, 0, current); out: exception_exit(prev_state); } void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) { - siginfo_t info; - if (notify_die(DIE_TRAP, "memory address unaligned", regs, 0, 0x34, SIGSEGV) == NOTIFY_STOP) return; @@ -2717,12 +2652,7 @@ void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_c if (is_no_fault_exception(regs)) return; - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRALN; - info.si_addr = (void __user *) addr; - info.si_trapno = 0; - force_sig_info(SIGBUS, &info, current); + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) addr, 0, current); } /* sun4v_mem_corrupt_detect_precise() - Handle precise exception on an ADI @@ -2775,7 +2705,6 @@ void sun4v_mem_corrupt_detect_precise(struct pt_regs *regs, unsigned long addr, void do_privop(struct pt_regs *regs) { enum ctx_state prev_state = exception_enter(); - siginfo_t info; if (notify_die(DIE_TRAP, "privileged operation", regs, 0, 0x11, SIGILL) == NOTIFY_STOP) @@ -2785,12 +2714,8 @@ void do_privop(struct pt_regs *regs) regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_PRVOPC; - info.si_addr = (void __user *)regs->tpc; - info.si_trapno = 0; - force_sig_info(SIGILL, &info, current); + 
force_sig_fault(SIGILL, ILL_PRVOPC, + (void __user *)regs->tpc, 0, current); out: exception_exit(prev_state); } diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c index 7642d7e4f0d9..64ac8c0c1429 100644 --- a/arch/sparc/kernel/unaligned_32.c +++ b/arch/sparc/kernel/unaligned_32.c @@ -311,14 +311,9 @@ static inline int ok_for_user(struct pt_regs *regs, unsigned int insn, static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn) { - siginfo_t info; - - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRALN; - info.si_addr = (void __user *)safe_compute_effective_address(regs, insn); - info.si_trapno = 0; - send_sig_info(SIGBUS, &info, current); + send_sig_fault(SIGBUS, BUS_ADRALN, + (void __user *)safe_compute_effective_address(regs, insn), + 0, current); } asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index a8103a84b4ac..9f75b6444bf1 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -127,19 +127,11 @@ show_signal_msg(struct pt_regs *regs, int sig, int code, static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs, unsigned long addr) { - siginfo_t info; - - info.si_signo = sig; - info.si_code = code; - info.si_errno = 0; - info.si_addr = (void __user *) addr; - info.si_trapno = 0; - if (unlikely(show_unhandled_signals)) - show_signal_msg(regs, sig, info.si_code, + show_signal_msg(regs, sig, code, addr, current); - force_sig_info (sig, &info, current); + force_sig_fault(sig, code, (void __user *) addr, 0, current); } static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault) diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 41363f46797b..63166fcf9e25 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -170,11 +170,7 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs, int fault_code) { unsigned long addr; - siginfo_t info; - info.si_code = code; - info.si_signo = sig; - info.si_errno = 0; if (fault_code & FAULT_CODE_ITLB) { addr = regs->tpc; } else { @@ -187,13 +183,11 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs, else addr = fault_addr; } - info.si_addr = (void __user *) addr; - info.si_trapno = 0; if (unlikely(show_unhandled_signals)) show_signal_msg(regs, sig, code, addr, current); - force_sig_info(sig, &info, current); + force_sig_fault(sig, code, (void __user *) addr, 0, current); } static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn) diff --git a/arch/sparc/net/Makefile b/arch/sparc/net/Makefile index 76fa8e95b721..d32aac3a25b8 100644 --- a/arch/sparc/net/Makefile +++ b/arch/sparc/net/Makefile @@ -1,4 +1,7 @@ # # Arch-specific network modules # -obj-$(CONFIG_BPF_JIT) += bpf_jit_asm_$(BITS).o bpf_jit_comp_$(BITS).o +obj-$(CONFIG_BPF_JIT) += bpf_jit_comp_$(BITS).o +ifeq ($(BITS),32) +obj-$(CONFIG_BPF_JIT) += bpf_jit_asm_32.o +endif diff --git a/arch/sparc/net/bpf_jit_64.h b/arch/sparc/net/bpf_jit_64.h index 428f7fd19175..fbc836f1c51c 100644 --- a/arch/sparc/net/bpf_jit_64.h +++ b/arch/sparc/net/bpf_jit_64.h @@ -33,35 +33,6 @@ #define I5 0x1d #define FP 0x1e #define I7 0x1f - -#define r_SKB L0 -#define r_HEADLEN L4 -#define r_SKB_DATA L5 -#define r_TMP G1 -#define r_TMP2 G3 - -/* assembly code in arch/sparc/net/bpf_jit_asm_64.S */ -extern u32 bpf_jit_load_word[]; -extern u32 bpf_jit_load_half[]; -extern u32 bpf_jit_load_byte[]; -extern u32 bpf_jit_load_byte_msh[]; -extern 
u32 bpf_jit_load_word_positive_offset[]; -extern u32 bpf_jit_load_half_positive_offset[]; -extern u32 bpf_jit_load_byte_positive_offset[]; -extern u32 bpf_jit_load_byte_msh_positive_offset[]; -extern u32 bpf_jit_load_word_negative_offset[]; -extern u32 bpf_jit_load_half_negative_offset[]; -extern u32 bpf_jit_load_byte_negative_offset[]; -extern u32 bpf_jit_load_byte_msh_negative_offset[]; - -#else -#define r_RESULT %o0 -#define r_SKB %o0 -#define r_OFF %o1 -#define r_HEADLEN %l4 -#define r_SKB_DATA %l5 -#define r_TMP %g1 -#define r_TMP2 %g3 #endif #endif /* _BPF_JIT_H */ diff --git a/arch/sparc/net/bpf_jit_asm_64.S b/arch/sparc/net/bpf_jit_asm_64.S deleted file mode 100644 index 7177867052a1..000000000000 --- a/arch/sparc/net/bpf_jit_asm_64.S +++ /dev/null @@ -1,162 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#include <asm/ptrace.h> - -#include "bpf_jit_64.h" - -#define SAVE_SZ 176 -#define SCRATCH_OFF STACK_BIAS + 128 -#define BE_PTR(label) be,pn %xcc, label -#define SIGN_EXTEND(reg) sra reg, 0, reg - -#define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */ - - .text - .globl bpf_jit_load_word -bpf_jit_load_word: - cmp r_OFF, 0 - bl bpf_slow_path_word_neg - nop - .globl bpf_jit_load_word_positive_offset -bpf_jit_load_word_positive_offset: - sub r_HEADLEN, r_OFF, r_TMP - cmp r_TMP, 3 - ble bpf_slow_path_word - add r_SKB_DATA, r_OFF, r_TMP - andcc r_TMP, 3, %g0 - bne load_word_unaligned - nop - retl - ld [r_TMP], r_RESULT -load_word_unaligned: - ldub [r_TMP + 0x0], r_OFF - ldub [r_TMP + 0x1], r_TMP2 - sll r_OFF, 8, r_OFF - or r_OFF, r_TMP2, r_OFF - ldub [r_TMP + 0x2], r_TMP2 - sll r_OFF, 8, r_OFF - or r_OFF, r_TMP2, r_OFF - ldub [r_TMP + 0x3], r_TMP2 - sll r_OFF, 8, r_OFF - retl - or r_OFF, r_TMP2, r_RESULT - - .globl bpf_jit_load_half -bpf_jit_load_half: - cmp r_OFF, 0 - bl bpf_slow_path_half_neg - nop - .globl bpf_jit_load_half_positive_offset -bpf_jit_load_half_positive_offset: - sub r_HEADLEN, r_OFF, r_TMP - cmp r_TMP, 1 - ble bpf_slow_path_half - add r_SKB_DATA, r_OFF, r_TMP - andcc r_TMP, 1, %g0 - bne load_half_unaligned - nop - retl - lduh [r_TMP], r_RESULT -load_half_unaligned: - ldub [r_TMP + 0x0], r_OFF - ldub [r_TMP + 0x1], r_TMP2 - sll r_OFF, 8, r_OFF - retl - or r_OFF, r_TMP2, r_RESULT - - .globl bpf_jit_load_byte -bpf_jit_load_byte: - cmp r_OFF, 0 - bl bpf_slow_path_byte_neg - nop - .globl bpf_jit_load_byte_positive_offset -bpf_jit_load_byte_positive_offset: - cmp r_OFF, r_HEADLEN - bge bpf_slow_path_byte - nop - retl - ldub [r_SKB_DATA + r_OFF], r_RESULT - -#define bpf_slow_path_common(LEN) \ - save %sp, -SAVE_SZ, %sp; \ - mov %i0, %o0; \ - mov %i1, %o1; \ - add %fp, SCRATCH_OFF, %o2; \ - call skb_copy_bits; \ - mov (LEN), %o3; \ - cmp %o0, 0; \ - restore; - -bpf_slow_path_word: - bpf_slow_path_common(4) - bl bpf_error - ld [%sp + SCRATCH_OFF], r_RESULT - retl - nop -bpf_slow_path_half: - bpf_slow_path_common(2) - bl bpf_error - lduh [%sp + SCRATCH_OFF], r_RESULT - retl - nop -bpf_slow_path_byte: - bpf_slow_path_common(1) - bl bpf_error - ldub [%sp + SCRATCH_OFF], r_RESULT - retl - nop - -#define bpf_negative_common(LEN) \ - save %sp, -SAVE_SZ, %sp; \ - mov %i0, %o0; \ - mov %i1, %o1; \ - SIGN_EXTEND(%o1); \ - call bpf_internal_load_pointer_neg_helper; \ - mov (LEN), %o2; \ - mov %o0, r_TMP; \ - cmp %o0, 0; \ - BE_PTR(bpf_error); \ - restore; - -bpf_slow_path_word_neg: - sethi %hi(SKF_MAX_NEG_OFF), r_TMP - cmp r_OFF, r_TMP - bl bpf_error - nop - .globl bpf_jit_load_word_negative_offset -bpf_jit_load_word_negative_offset: - bpf_negative_common(4) - andcc r_TMP, 
3, %g0 - bne load_word_unaligned - nop - retl - ld [r_TMP], r_RESULT - -bpf_slow_path_half_neg: - sethi %hi(SKF_MAX_NEG_OFF), r_TMP - cmp r_OFF, r_TMP - bl bpf_error - nop - .globl bpf_jit_load_half_negative_offset -bpf_jit_load_half_negative_offset: - bpf_negative_common(2) - andcc r_TMP, 1, %g0 - bne load_half_unaligned - nop - retl - lduh [r_TMP], r_RESULT - -bpf_slow_path_byte_neg: - sethi %hi(SKF_MAX_NEG_OFF), r_TMP - cmp r_OFF, r_TMP - bl bpf_error - nop - .globl bpf_jit_load_byte_negative_offset -bpf_jit_load_byte_negative_offset: - bpf_negative_common(1) - retl - ldub [r_TMP], r_RESULT - -bpf_error: - /* Make the JIT program itself return zero. */ - ret - restore %g0, %g0, %o0 diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c index 48a25869349b..222785af550b 100644 --- a/arch/sparc/net/bpf_jit_comp_64.c +++ b/arch/sparc/net/bpf_jit_comp_64.c @@ -48,10 +48,6 @@ static void bpf_flush_icache(void *start_, void *end_) } } -#define SEEN_DATAREF 1 /* might call external helpers */ -#define SEEN_XREG 2 /* ebx is used */ -#define SEEN_MEM 4 /* use mem[] for temporary storage */ - #define S13(X) ((X) & 0x1fff) #define S5(X) ((X) & 0x1f) #define IMMED 0x00002000 @@ -198,7 +194,6 @@ struct jit_ctx { bool tmp_1_used; bool tmp_2_used; bool tmp_3_used; - bool saw_ld_abs_ind; bool saw_frame_pointer; bool saw_call; bool saw_tail_call; @@ -207,9 +202,7 @@ struct jit_ctx { #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) -#define SKB_HLEN_REG (MAX_BPF_JIT_REG + 2) -#define SKB_DATA_REG (MAX_BPF_JIT_REG + 3) -#define TMP_REG_3 (MAX_BPF_JIT_REG + 4) +#define TMP_REG_3 (MAX_BPF_JIT_REG + 2) /* Map BPF registers to SPARC registers */ static const int bpf2sparc[] = { @@ -238,9 +231,6 @@ static const int bpf2sparc[] = { [TMP_REG_1] = G1, [TMP_REG_2] = G2, [TMP_REG_3] = G3, - - [SKB_HLEN_REG] = L4, - [SKB_DATA_REG] = L5, }; static void emit(const u32 insn, struct jit_ctx *ctx) @@ -800,25 +790,6 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src, return 0; } -static void load_skb_regs(struct jit_ctx *ctx, u8 r_skb) -{ - const u8 r_headlen = bpf2sparc[SKB_HLEN_REG]; - const u8 r_data = bpf2sparc[SKB_DATA_REG]; - const u8 r_tmp = bpf2sparc[TMP_REG_1]; - unsigned int off; - - off = offsetof(struct sk_buff, len); - emit(LD32I | RS1(r_skb) | S13(off) | RD(r_headlen), ctx); - - off = offsetof(struct sk_buff, data_len); - emit(LD32I | RS1(r_skb) | S13(off) | RD(r_tmp), ctx); - - emit(SUB | RS1(r_headlen) | RS2(r_tmp) | RD(r_headlen), ctx); - - off = offsetof(struct sk_buff, data); - emit(LDPTRI | RS1(r_skb) | S13(off) | RD(r_data), ctx); -} - /* Just skip the save instruction and the ctx register move. */ #define BPF_TAILCALL_PROLOGUE_SKIP 16 #define BPF_TAILCALL_CNT_SP_OFF (STACK_BIAS + 128) @@ -857,9 +828,6 @@ static void build_prologue(struct jit_ctx *ctx) emit_reg_move(I0, O0, ctx); /* If you add anything here, adjust BPF_TAILCALL_PROLOGUE_SKIP above. 
*/ - - if (ctx->saw_ld_abs_ind) - load_skb_regs(ctx, bpf2sparc[BPF_REG_1]); } static void build_epilogue(struct jit_ctx *ctx) @@ -926,7 +894,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) const int i = insn - ctx->prog->insnsi; const s16 off = insn->off; const s32 imm = insn->imm; - u32 *func; if (insn->src_reg == BPF_REG_FP) ctx->saw_frame_pointer = true; @@ -1225,16 +1192,11 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) u8 *func = ((u8 *)__bpf_call_base) + imm; ctx->saw_call = true; - if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func)) - emit_reg_move(bpf2sparc[BPF_REG_1], L7, ctx); emit_call((u32 *)func, ctx); emit_nop(ctx); emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx); - - if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func)) - load_skb_regs(ctx, L7); break; } @@ -1412,43 +1374,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) emit_nop(ctx); break; } -#define CHOOSE_LOAD_FUNC(K, func) \ - ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) - - /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ - case BPF_LD | BPF_ABS | BPF_W: - func = CHOOSE_LOAD_FUNC(imm, bpf_jit_load_word); - goto common_load; - case BPF_LD | BPF_ABS | BPF_H: - func = CHOOSE_LOAD_FUNC(imm, bpf_jit_load_half); - goto common_load; - case BPF_LD | BPF_ABS | BPF_B: - func = CHOOSE_LOAD_FUNC(imm, bpf_jit_load_byte); - goto common_load; - /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */ - case BPF_LD | BPF_IND | BPF_W: - func = bpf_jit_load_word; - goto common_load; - case BPF_LD | BPF_IND | BPF_H: - func = bpf_jit_load_half; - goto common_load; - - case BPF_LD | BPF_IND | BPF_B: - func = bpf_jit_load_byte; - common_load: - ctx->saw_ld_abs_ind = true; - - emit_reg_move(bpf2sparc[BPF_REG_6], O0, ctx); - emit_loadimm(imm, O1, ctx); - - if (BPF_MODE(code) == BPF_IND) - emit_alu(ADD, src, O1, ctx); - - emit_call(func, ctx); - emit_alu_K(SRA, O1, 0, ctx); - - emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx); - break; default: pr_err_once("unknown opcode %02x\n", code); @@ -1583,12 +1508,11 @@ skip_init_ctx: build_epilogue(&ctx); if (bpf_jit_enable > 1) - pr_info("Pass %d: shrink = %d, seen = [%c%c%c%c%c%c%c]\n", pass, + pr_info("Pass %d: shrink = %d, seen = [%c%c%c%c%c%c]\n", pass, image_size - (ctx.idx * 4), ctx.tmp_1_used ? '1' : ' ', ctx.tmp_2_used ? '2' : ' ', ctx.tmp_3_used ? '3' : ' ', - ctx.saw_ld_abs_ind ? 'L' : ' ', ctx.saw_frame_pointer ? 'F' : ' ', ctx.saw_call ? 'C' : ' ', ctx.saw_tail_call ? 
'T' : ' '); diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common index c68add8df3ae..07f84c842cc3 100644 --- a/arch/um/Kconfig.common +++ b/arch/um/Kconfig.common @@ -54,10 +54,6 @@ config HZ int default 100 -config SUBARCH - string - option env="SUBARCH" - config NR_CPUS int range 1 1 diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index d4e8c497ae86..dcf5ea28a281 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -208,19 +208,6 @@ static int fake_ide_media_proc_show(struct seq_file *m, void *v) return 0; } -static int fake_ide_media_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, fake_ide_media_proc_show, NULL); -} - -static const struct file_operations fake_ide_media_proc_fops = { - .owner = THIS_MODULE, - .open = fake_ide_media_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static void make_ide_entries(const char *dev_name) { struct proc_dir_entry *dir, *ent; @@ -231,7 +218,8 @@ static void make_ide_entries(const char *dev_name) dir = proc_mkdir(dev_name, proc_ide); if(!dir) return; - ent = proc_create("media", S_IRUGO, dir, &fake_ide_media_proc_fops); + ent = proc_create_single("media", S_IRUGO, dir, + fake_ide_media_proc_show); if(!ent) return; snprintf(name, sizeof(name), "ide0/%s", dev_name); proc_symlink(dev_name, proc_ide_root, name); diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index bb5a196c3061..b10dde6cb793 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -1,6 +1,7 @@ generic-y += barrier.h generic-y += bpf_perf_event.h generic-y += bug.h +generic-y += compat.h generic-y += current.h generic-y += delay.h generic-y += device.h diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c index bc2a516c190f..1a1d88a4d940 100644 --- a/arch/um/kernel/ptrace.c +++ b/arch/um/kernel/ptrace.c @@ -115,17 +115,10 @@ long arch_ptrace(struct task_struct *child, long request, static void send_sigtrap(struct task_struct *tsk, struct uml_pt_regs *regs, int error_code) { - struct siginfo info; - - memset(&info, 0, sizeof(info)); - info.si_signo = SIGTRAP; - info.si_code = TRAP_BRKPT; - - /* User-mode eip? */ - info.si_addr = UPT_IS_USER(regs) ? (void __user *) UPT_IP(regs) : NULL; - /* Send us the fake SIGTRAP */ - force_sig_info(SIGTRAP, &info, tsk); + force_sig_fault(SIGTRAP, TRAP_BRKPT, + /* User-mode eip? */ + UPT_IS_USER(regs) ? 
(void __user *) UPT_IP(regs) : NULL, tsk); } /* diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index b2b02df9896e..ec9a42c14c56 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -162,13 +162,9 @@ static void show_segv_info(struct uml_pt_regs *regs) static void bad_segv(struct faultinfo fi, unsigned long ip) { - struct siginfo si; - - si.si_signo = SIGSEGV; - si.si_code = SEGV_ACCERR; - si.si_addr = (void __user *) FAULT_ADDRESS(fi); current->thread.arch.faultinfo = fi; - force_sig_info(SIGSEGV, &si, current); + force_sig_fault(SIGSEGV, SEGV_ACCERR, (void __user *) FAULT_ADDRESS(fi), + current); } void fatal_sigsegv(void) @@ -214,8 +210,8 @@ void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, struct uml_pt_regs *regs) { - struct siginfo si; jmp_buf *catcher; + int si_code; int err; int is_write = FAULT_WRITE(fi); unsigned long address = FAULT_ADDRESS(fi); @@ -239,7 +235,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, if (SEGV_IS_FIXABLE(&fi)) err = handle_page_fault(address, ip, is_write, is_user, - &si.si_code); + &si_code); else { err = -EFAULT; /* @@ -271,18 +267,14 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, show_segv_info(regs); if (err == -EACCES) { - si.si_signo = SIGBUS; - si.si_errno = 0; - si.si_code = BUS_ADRERR; - si.si_addr = (void __user *)address; current->thread.arch.faultinfo = fi; - force_sig_info(SIGBUS, &si, current); + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, + current); } else { BUG_ON(err != -EFAULT); - si.si_signo = SIGSEGV; - si.si_addr = (void __user *) address; current->thread.arch.faultinfo = fi; - force_sig_info(SIGSEGV, &si, current); + force_sig_fault(SIGSEGV, si_code, (void __user *) address, + current); } out: @@ -294,9 +286,7 @@ out: void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs) { - struct faultinfo *fi; - struct siginfo clean_si; - + int code, err; if (!UPT_IS_USER(regs)) { if (sig == SIGBUS) printk(KERN_ERR "Bus error - the host /dev/shm or /tmp " @@ -306,29 +296,21 @@ void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs) arch_examine_signal(sig, regs); - clear_siginfo(&clean_si); - clean_si.si_signo = si->si_signo; - clean_si.si_errno = si->si_errno; - clean_si.si_code = si->si_code; - switch (sig) { - case SIGILL: - case SIGFPE: - case SIGSEGV: - case SIGBUS: - case SIGTRAP: - fi = UPT_FAULTINFO(regs); - clean_si.si_addr = (void __user *) FAULT_ADDRESS(*fi); + /* Is the signal layout for the signal known? + * Signal data must be scrubbed to prevent information leaks. 
+ */ + code = si->si_code; + err = si->si_errno; + if ((err == 0) && (siginfo_layout(sig, code) == SIL_FAULT)) { + struct faultinfo *fi = UPT_FAULTINFO(regs); current->thread.arch.faultinfo = *fi; -#ifdef __ARCH_SI_TRAPNO - clean_si.si_trapno = si->si_trapno; -#endif - break; - default: - printk(KERN_ERR "Attempted to relay unknown signal %d (si_code = %d)\n", - sig, si->si_code); + force_sig_fault(sig, code, (void __user *)FAULT_ADDRESS(*fi), + current); + } else { + printk(KERN_ERR "Attempted to relay unknown signal %d (si_code = %d) with errno %d\n", + sig, code, err); + force_sig(sig, current); } - - force_sig_info(sig, &clean_si, current); } void bus_handler(int sig, struct siginfo *si, struct uml_pt_regs *regs) diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index 462e59a7ae78..03f991e44288 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -19,6 +19,8 @@ config UNICORE32 select ARCH_WANT_FRAME_POINTERS select GENERIC_IOMAP select MODULES_USE_ELF_REL + select NEED_DMA_MAP_STATE + select SWIOTLB help UniCore-32 is 32-bit Instruction Set Architecture, including a series of low-power-consumption RISC chip @@ -61,9 +63,6 @@ config ARCH_MAY_HAVE_PC_FDC config ZONE_DMA def_bool y -config NEED_DMA_MAP_STATE - def_bool y - source "init/Kconfig" source "kernel/Kconfig.freezer" diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 6f70c76c81fc..bfc7abe77905 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -1,5 +1,6 @@ generic-y += atomic.h generic-y += bugs.h +generic-y += compat.h generic-y += current.h generic-y += device.h generic-y += div64.h diff --git a/arch/unicore32/kernel/fpu-ucf64.c b/arch/unicore32/kernel/fpu-ucf64.c index 12c8c9527b8e..8594b168f25e 100644 --- a/arch/unicore32/kernel/fpu-ucf64.c +++ b/arch/unicore32/kernel/fpu-ucf64.c @@ -52,14 +52,14 @@ * Raise a SIGFPE for the current process. * sicode describes the signal being raised. */ -void ucf64_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) +void ucf64_raise_sigfpe(struct pt_regs *regs) { siginfo_t info; - memset(&info, 0, sizeof(info)); + clear_siginfo(&info); info.si_signo = SIGFPE; - info.si_code = sicode; + info.si_code = FPE_FLTUNK; info.si_addr = (void __user *)(instruction_pointer(regs) - 4); /* @@ -94,7 +94,7 @@ void ucf64_exchandler(u32 inst, u32 fpexc, struct pt_regs *regs) pr_debug("UniCore-F64 FPSCR 0x%08x INST 0x%08x\n", cff(FPSCR), inst); - ucf64_raise_sigfpe(0, regs); + ucf64_raise_sigfpe(regs); return; } diff --git a/arch/unicore32/mm/Kconfig b/arch/unicore32/mm/Kconfig index e9154a59d561..82759b6aba67 100644 --- a/arch/unicore32/mm/Kconfig +++ b/arch/unicore32/mm/Kconfig @@ -39,14 +39,3 @@ config CPU_TLB_SINGLE_ENTRY_DISABLE default y help Say Y here to disable the TLB single entry operations. 
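[note] The arch/um hunks above are part of the tree-wide siginfo cleanup: instead of stack-allocating a struct siginfo, zeroing it, and filling a few fields by hand before force_sig_info(), the fault paths now call the force_sig_fault() helper, which builds a fully initialized fault-style siginfo internally. A minimal sketch of the before/after pattern, using the same call signature the hunks rely on (an illustrative fragment, not a buildable translation unit on its own):

	#include <linux/sched/signal.h>

	static void report_user_fault(struct task_struct *tsk, void __user *addr)
	{
		/*
		 * Old style: open-coded siginfo; forgetting the memset()
		 * leaks uninitialized kernel stack to user space.
		 *
		 *	struct siginfo info;
		 *	memset(&info, 0, sizeof(info));
		 *	info.si_signo = SIGSEGV;
		 *	info.si_code  = SEGV_ACCERR;
		 *	info.si_addr  = addr;
		 *	force_sig_info(SIGSEGV, &info, tsk);
		 */

		/* New style: the helper zero-fills everything it does not set. */
		force_sig_fault(SIGSEGV, SEGV_ACCERR, addr, tsk);
	}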
- -config SWIOTLB - def_bool y - select DMA_DIRECT_OPS - -config IOMMU_HELPER - def_bool SWIOTLB - -config NEED_SG_DMA_LENGTH - def_bool SWIOTLB - diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c index bbefcc46a45e..381473412937 100644 --- a/arch/unicore32/mm/fault.c +++ b/arch/unicore32/mm/fault.c @@ -125,6 +125,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr, tsk->thread.address = addr; tsk->thread.error_code = fsr; tsk->thread.trap_no = 14; + clear_siginfo(&si); si.si_signo = sig; si.si_errno = 0; si.si_code = code; @@ -472,6 +473,7 @@ asmlinkage void do_DataAbort(unsigned long addr, unsigned int fsr, printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n", inf->name, fsr, addr); + clear_siginfo(&info); info.si_signo = inf->sig; info.si_errno = 0; info.si_code = inf->code; @@ -491,6 +493,7 @@ asmlinkage void do_PrefetchAbort(unsigned long addr, printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n", inf->name, ifsr, addr); + clear_siginfo(&info); info.si_signo = inf->sig; info.si_errno = 0; info.si_code = inf->code; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 6ca22706cd64..f182a4e8e5bd 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1,8 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 # Select 32 or 64 bit config 64BIT - bool "64-bit kernel" if ARCH = "x86" - default ARCH != "i386" + bool "64-bit kernel" if "$(ARCH)" = "x86" + default "$(ARCH)" != "i386" ---help--- Say yes to build a 64-bit kernel - formerly known as x86_64 Say no to build a 32-bit kernel - formerly known as i386 @@ -28,6 +28,8 @@ config X86_64 select ARCH_USE_CMPXCHG_LOCKREF select HAVE_ARCH_SOFT_DIRTY select MODULES_USE_ELF_RELA + select NEED_DMA_MAP_STATE + select SWIOTLB select X86_DEV_DMA_OPS select ARCH_HAS_SYSCALL_WRAPPER @@ -58,6 +60,7 @@ config X86 select ARCH_HAS_KCOV if X86_64 select ARCH_HAS_MEMBARRIER_SYNC_CORE select ARCH_HAS_PMEM_API if X86_64 + select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_REFCOUNT select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 select ARCH_HAS_UACCESS_MCSAFE if X86_64 @@ -135,11 +138,10 @@ config X86 select HAVE_C_RECORDMCOUNT select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_STACKOVERFLOW - select HAVE_DMA_API_DEBUG select HAVE_DMA_CONTIGUOUS select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS - select HAVE_EBPF_JIT if X86_64 + select HAVE_EBPF_JIT select HAVE_EFFICIENT_UNALIGNED_ACCESS select HAVE_EXIT_THREAD select HAVE_FENTRY if X86_64 || DYNAMIC_FTRACE @@ -185,6 +187,7 @@ config X86 select HAVE_UNSTABLE_SCHED_CLOCK select HAVE_USER_RETURN_NOTIFIER select IRQ_FORCED_THREADING + select NEED_SG_DMA_LENGTH select PCI_LOCKLESS_CONFIG select PERF_EVENTS select RTC_LIB @@ -237,13 +240,6 @@ config ARCH_MMAP_RND_COMPAT_BITS_MAX config SBUS bool -config NEED_DMA_MAP_STATE - def_bool y - depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG || SWIOTLB - -config NEED_SG_DMA_LENGTH - def_bool y - config GENERIC_ISA_DMA def_bool y depends on ISA_DMA_API @@ -876,6 +872,7 @@ config DMI config GART_IOMMU bool "Old AMD GART IOMMU support" + select IOMMU_HELPER select SWIOTLB depends on X86_64 && PCI && AMD_NB ---help--- @@ -897,6 +894,7 @@ config GART_IOMMU config CALGARY_IOMMU bool "IBM Calgary IOMMU support" + select IOMMU_HELPER select SWIOTLB depends on X86_64 && PCI ---help--- @@ -924,20 +922,6 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT Calgary anyway, pass 'iommu=calgary' on the kernel command line. If unsure, say Y. 
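[note] The clear_siginfo() calls added to the unicore32 fault handlers above address the same leak from the other direction: struct siginfo contains a union plus padding, so any bytes the handler does not explicitly assign would otherwise reach user space with stale stack contents. A sketch of the hardened fill pattern as used in __do_user_fault()/do_DataAbort() (field choices mirror those hunks; assumes the usual <linux/signal.h> definitions):

	#include <linux/signal.h>

	static void fill_fault_siginfo(struct siginfo *info, int sig, int code,
				       unsigned long addr)
	{
		clear_siginfo(info);	/* zero the whole union first */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code  = code;
		info->si_addr  = (void __user *)addr;
	}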
-# need this always selected by IOMMU for the VIA workaround -config SWIOTLB - def_bool y if X86_64 - ---help--- - Support for software bounce buffers used on x86-64 systems - which don't have a hardware IOMMU. Using this PCI devices - which can only access 32-bits of memory can be used on systems - with more than 3 GB of memory. - If unsure, say Y. - -config IOMMU_HELPER - def_bool y - depends on CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU - config MAXSMP bool "Enable Maximum number of SMP Processors and NUMA Nodes" depends on X86_64 && SMP && DEBUG_KERNEL @@ -1459,6 +1443,7 @@ config HIGHMEM config X86_PAE bool "PAE (Physical Address Extension) Support" depends on X86_32 && !HIGHMEM4G + select PHYS_ADDR_T_64BIT select SWIOTLB ---help--- PAE is required for NX support, and furthermore enables @@ -1486,14 +1471,6 @@ config X86_5LEVEL Say N if unsure. -config ARCH_PHYS_ADDR_T_64BIT - def_bool y - depends on X86_64 || X86_PAE - -config ARCH_DMA_ADDR_T_64BIT - def_bool y - depends on X86_64 || HIGHMEM64G - config X86_DIRECT_GBPAGES def_bool y depends on X86_64 && !DEBUG_PAGEALLOC diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 60135cbd905c..f0a6ea22429d 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -94,7 +94,7 @@ ifeq ($(CONFIG_X86_32),y) else BITS := 64 UTS_MACHINE := x86_64 - CHECKFLAGS += -D__x86_64__ -m64 + CHECKFLAGS += -D__x86_64__ biarch := -m64 KBUILD_AFLAGS += -m64 diff --git a/arch/x86/boot/compressed/cmdline.c b/arch/x86/boot/compressed/cmdline.c index 0cb325734cfb..af6cda0b7900 100644 --- a/arch/x86/boot/compressed/cmdline.c +++ b/arch/x86/boot/compressed/cmdline.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include "misc.h" -#if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE +#if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE || CONFIG_X86_5LEVEL static unsigned long fs; static inline void set_fs(unsigned long seg) diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 09f36c0d9d4f..a8a8642d2b0b 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -109,23 +109,34 @@ void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str) } static efi_status_t -__setup_efi_pci32(efi_pci_io_protocol_32 *pci, struct pci_setup_rom **__rom) +__setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom) { struct pci_setup_rom *rom = NULL; efi_status_t status; unsigned long size; - uint64_t attributes; + uint64_t attributes, romsize; + void *romimage; - status = efi_early->call(pci->attributes, pci, - EfiPciIoAttributeOperationGet, 0, 0, - &attributes); + status = efi_call_proto(efi_pci_io_protocol, attributes, pci, + EfiPciIoAttributeOperationGet, 0, 0, + &attributes); if (status != EFI_SUCCESS) return status; - if (!pci->romimage || !pci->romsize) + /* + * Some firmware images contain EFI function pointers at the place where the + * romimage and romsize fields are supposed to be. Typically the EFI + * code is mapped at high addresses, translating to an unrealistically + * large romsize. The UEFI spec limits the size of option ROMs to 16 + * MiB so we reject any ROMs over 16 MiB in size to catch this. 
+ */ + romimage = (void *)(unsigned long)efi_table_attr(efi_pci_io_protocol, + romimage, pci); + romsize = efi_table_attr(efi_pci_io_protocol, romsize, pci); + if (!romimage || !romsize || romsize > SZ_16M) return EFI_INVALID_PARAMETER; - size = pci->romsize + sizeof(*rom); + size = romsize + sizeof(*rom); status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom); if (status != EFI_SUCCESS) { @@ -141,30 +152,32 @@ __setup_efi_pci32(efi_pci_io_protocol_32 *pci, struct pci_setup_rom **__rom) rom->pcilen = pci->romsize; *__rom = rom; - status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16, - PCI_VENDOR_ID, 1, &(rom->vendor)); + status = efi_call_proto(efi_pci_io_protocol, pci.read, pci, + EfiPciIoWidthUint16, PCI_VENDOR_ID, 1, + &rom->vendor); if (status != EFI_SUCCESS) { efi_printk(sys_table, "Failed to read rom->vendor\n"); goto free_struct; } - status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16, - PCI_DEVICE_ID, 1, &(rom->devid)); + status = efi_call_proto(efi_pci_io_protocol, pci.read, pci, + EfiPciIoWidthUint16, PCI_DEVICE_ID, 1, + &rom->devid); if (status != EFI_SUCCESS) { efi_printk(sys_table, "Failed to read rom->devid\n"); goto free_struct; } - status = efi_early->call(pci->get_location, pci, &(rom->segment), - &(rom->bus), &(rom->device), &(rom->function)); + status = efi_call_proto(efi_pci_io_protocol, get_location, pci, + &rom->segment, &rom->bus, &rom->device, + &rom->function); if (status != EFI_SUCCESS) goto free_struct; - memcpy(rom->romdata, (void *)(unsigned long)pci->romimage, - pci->romsize); + memcpy(rom->romdata, romimage, romsize); return status; free_struct: @@ -176,7 +189,7 @@ static void setup_efi_pci32(struct boot_params *params, void **pci_handle, unsigned long size) { - efi_pci_io_protocol_32 *pci = NULL; + efi_pci_io_protocol_t *pci = NULL; efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID; u32 *handles = (u32 *)(unsigned long)pci_handle; efi_status_t status; @@ -203,7 +216,7 @@ setup_efi_pci32(struct boot_params *params, void **pci_handle, if (!pci) continue; - status = __setup_efi_pci32(pci, &rom); + status = __setup_efi_pci(pci, &rom); if (status != EFI_SUCCESS) continue; @@ -217,74 +230,11 @@ setup_efi_pci32(struct boot_params *params, void **pci_handle, } } -static efi_status_t -__setup_efi_pci64(efi_pci_io_protocol_64 *pci, struct pci_setup_rom **__rom) -{ - struct pci_setup_rom *rom; - efi_status_t status; - unsigned long size; - uint64_t attributes; - - status = efi_early->call(pci->attributes, pci, - EfiPciIoAttributeOperationGet, 0, - &attributes); - if (status != EFI_SUCCESS) - return status; - - if (!pci->romimage || !pci->romsize) - return EFI_INVALID_PARAMETER; - - size = pci->romsize + sizeof(*rom); - - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom); - if (status != EFI_SUCCESS) { - efi_printk(sys_table, "Failed to alloc mem for rom\n"); - return status; - } - - rom->data.type = SETUP_PCI; - rom->data.len = size - sizeof(struct setup_data); - rom->data.next = 0; - rom->pcilen = pci->romsize; - *__rom = rom; - - status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16, - PCI_VENDOR_ID, 1, &(rom->vendor)); - - if (status != EFI_SUCCESS) { - efi_printk(sys_table, "Failed to read rom->vendor\n"); - goto free_struct; - } - - status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16, - PCI_DEVICE_ID, 1, &(rom->devid)); - - if (status != EFI_SUCCESS) { - efi_printk(sys_table, "Failed to read rom->devid\n"); - goto free_struct; - } - - status = efi_early->call(pci->get_location, pci, 
&(rom->segment), - &(rom->bus), &(rom->device), &(rom->function)); - - if (status != EFI_SUCCESS) - goto free_struct; - - memcpy(rom->romdata, (void *)(unsigned long)pci->romimage, - pci->romsize); - return status; - -free_struct: - efi_call_early(free_pool, rom); - return status; - -} - static void setup_efi_pci64(struct boot_params *params, void **pci_handle, unsigned long size) { - efi_pci_io_protocol_64 *pci = NULL; + efi_pci_io_protocol_t *pci = NULL; efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID; u64 *handles = (u64 *)(unsigned long)pci_handle; efi_status_t status; @@ -311,7 +261,7 @@ setup_efi_pci64(struct boot_params *params, void **pci_handle, if (!pci) continue; - status = __setup_efi_pci64(pci, &rom); + status = __setup_efi_pci(pci, &rom); if (status != EFI_SUCCESS) continue; diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 8169e8b7a4dc..64037895b085 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -365,6 +365,7 @@ ENTRY(startup_64) * this function call. */ pushq %rsi + movq %rsi, %rdi /* real mode address */ call paging_prepare popq %rsi diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c index a0a50b91ecef..b87a7582853d 100644 --- a/arch/x86/boot/compressed/kaslr.c +++ b/arch/x86/boot/compressed/kaslr.c @@ -47,7 +47,7 @@ #include <linux/decompress/mm.h> #ifdef CONFIG_X86_5LEVEL -unsigned int pgtable_l5_enabled __ro_after_init; +unsigned int __pgtable_l5_enabled; unsigned int pgdir_shift __ro_after_init = 39; unsigned int ptrs_per_p4d __ro_after_init = 1; #endif @@ -734,7 +734,7 @@ void choose_random_location(unsigned long input, #ifdef CONFIG_X86_5LEVEL if (__read_cr4() & X86_CR4_LA57) { - pgtable_l5_enabled = 1; + __pgtable_l5_enabled = 1; pgdir_shift = 48; ptrs_per_p4d = 512; } diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 9e11be4cae19..a423bdb42686 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -12,10 +12,8 @@ #undef CONFIG_PARAVIRT_SPINLOCKS #undef CONFIG_KASAN -#ifdef CONFIG_X86_5LEVEL -/* cpu_feature_enabled() cannot be used that early */ -#define pgtable_l5_enabled __pgtable_l5_enabled -#endif +/* cpu_feature_enabled() cannot be used this early */ +#define USE_EARLY_PGTABLE_L5 #include <linux/linkage.h> #include <linux/screen_info.h> diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index a362fa0b849c..8c5107545251 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -31,16 +31,23 @@ static char trampoline_save[TRAMPOLINE_32BIT_SIZE]; */ unsigned long *trampoline_32bit __section(.data); -struct paging_config paging_prepare(void) +extern struct boot_params *boot_params; +int cmdline_find_option_bool(const char *option); + +struct paging_config paging_prepare(void *rmode) { struct paging_config paging_config = {}; unsigned long bios_start, ebda_start; + /* Initialize boot_params. Required for cmdline_find_option_bool(). */ + boot_params = rmode; + /* * Check if LA57 is desired and supported. 
* - * There are two parts to the check: + * There are several parts to the check: * - if the kernel supports 5-level paging: CONFIG_X86_5LEVEL=y + * - if user asked to disable 5-level paging: no5lvl in cmdline * - if the machine supports 5-level paging: * + CPUID leaf 7 is supported * + the leaf has the feature bit set @@ -48,6 +55,7 @@ struct paging_config paging_prepare(void) * That's substitute for boot_cpu_has() in early boot code. */ if (IS_ENABLED(CONFIG_X86_5LEVEL) && + !cmdline_find_option_bool("no5lvl") && native_cpuid_eax(0) >= 7 && (native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)))) { paging_config.l5_required = 1; @@ -130,7 +138,7 @@ void cleanup_trampoline(void *pgtable) { void *trampoline_pgtable; - trampoline_pgtable = trampoline_32bit + TRAMPOLINE_32BIT_PGTABLE_OFFSET; + trampoline_pgtable = trampoline_32bit + TRAMPOLINE_32BIT_PGTABLE_OFFSET / sizeof(unsigned long); /* * Move the top level page table out of trampoline memory, diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 5f07333bb224..a450ad573dcb 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -15,7 +15,6 @@ obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o -obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o @@ -24,7 +23,6 @@ obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o -obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha20-x86_64.o obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o @@ -38,6 +36,16 @@ obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o +obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o +obj-$(CONFIG_CRYPTO_AEGIS128L_AESNI_SSE2) += aegis128l-aesni.o +obj-$(CONFIG_CRYPTO_AEGIS256_AESNI_SSE2) += aegis256-aesni.o + +obj-$(CONFIG_CRYPTO_MORUS640_GLUE) += morus640_glue.o +obj-$(CONFIG_CRYPTO_MORUS1280_GLUE) += morus1280_glue.o + +obj-$(CONFIG_CRYPTO_MORUS640_SSE2) += morus640-sse2.o +obj-$(CONFIG_CRYPTO_MORUS1280_SSE2) += morus1280-sse2.o + # These modules require assembler to support AVX. 
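[note] The paging_prepare() hunk above now decides whether to enable 5-level paging from three inputs: CONFIG_X86_5LEVEL, the new "no5lvl" command-line override, and a raw CPUID probe of the LA57 bit — leaf 7, sub-leaf 0, ECX bit 16, since X86_FEATURE_LA57 & 31 evaluates to 16. As a stand-alone illustration of the CPUID part, here is a user-space probe that substitutes the compiler's <cpuid.h> helpers for the kernel's native_cpuid_*() wrappers (that substitution is an assumption made so the example compiles and runs on its own):

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* Leaf 7 must exist before its feature bits mean anything. */
		if (__get_cpuid_max(0, NULL) < 7 ||
		    !__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
			puts("CPUID leaf 7 not supported");
			return 0;
		}

		printf("LA57 (5-level paging): %s\n",
		       (ecx & (1u << 16)) ? "supported" : "not supported");
		return 0;
	}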
ifeq ($(avx_supported),yes) obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64) += \ @@ -55,11 +63,12 @@ ifeq ($(avx2_supported),yes) obj-$(CONFIG_CRYPTO_SHA1_MB) += sha1-mb/ obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb/ obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb/ + + obj-$(CONFIG_CRYPTO_MORUS1280_AVX2) += morus1280-avx2.o endif aes-i586-y := aes-i586-asm_32.o aes_glue.o twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o -salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o @@ -68,10 +77,16 @@ camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o -salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o chacha20-x86_64-y := chacha20-ssse3-x86_64.o chacha20_glue.o serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o +aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o +aegis128l-aesni-y := aegis128l-aesni-asm.o aegis128l-aesni-glue.o +aegis256-aesni-y := aegis256-aesni-asm.o aegis256-aesni-glue.o + +morus640-sse2-y := morus640-sse2-asm.o morus640-sse2-glue.o +morus1280-sse2-y := morus1280-sse2-asm.o morus1280-sse2-glue.o + ifeq ($(avx_supported),yes) camellia-aesni-avx-x86_64-y := camellia-aesni-avx-asm_64.o \ camellia_aesni_avx_glue.o @@ -87,6 +102,8 @@ ifeq ($(avx2_supported),yes) camellia-aesni-avx2-y := camellia-aesni-avx2-asm_64.o camellia_aesni_avx2_glue.o chacha20-x86_64-y += chacha20-avx2-x86_64.o serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o + + morus1280-avx2-y := morus1280-avx2-asm.o morus1280-avx2-glue.o endif aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S new file mode 100644 index 000000000000..9254e0b6cc06 --- /dev/null +++ b/arch/x86/crypto/aegis128-aesni-asm.S @@ -0,0 +1,749 @@ +/* + * AES-NI + SSE2 implementation of AEGIS-128 + * + * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com> + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#include <linux/linkage.h> +#include <asm/frame.h> + +#define STATE0 %xmm0 +#define STATE1 %xmm1 +#define STATE2 %xmm2 +#define STATE3 %xmm3 +#define STATE4 %xmm4 +#define KEY %xmm5 +#define MSG %xmm5 +#define T0 %xmm6 +#define T1 %xmm7 + +#define STATEP %rdi +#define LEN %rsi +#define SRC %rdx +#define DST %rcx + +.section .rodata.cst16.aegis128_const, "aM", @progbits, 32 +.align 16 +.Laegis128_const_0: + .byte 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d + .byte 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62 +.Laegis128_const_1: + .byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1 + .byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd + +.section .rodata.cst16.aegis128_counter, "aM", @progbits, 16 +.align 16 +.Laegis128_counter: + .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 + .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f + +.text + +/* + * aegis128_update + * input: + * STATE[0-4] - input state + * output: + * STATE[0-4] - output state (shifted positions) + * changed: + * T0 + */ +.macro aegis128_update + movdqa STATE4, T0 + aesenc STATE0, STATE4 + aesenc STATE1, STATE0 + aesenc STATE2, STATE1 + aesenc STATE3, STATE2 + aesenc T0, STATE3 +.endm + +/* + * __load_partial: internal ABI + * input: + * LEN - bytes + * SRC - src + * output: + * MSG - message block + * changed: + * T0 + * %r8 + * %r9 + */ +__load_partial: + xor %r9, %r9 + pxor MSG, MSG + + mov LEN, %r8 + and $0x1, %r8 + jz .Lld_partial_1 + + mov LEN, %r8 + and $0x1E, %r8 + add SRC, %r8 + mov (%r8), %r9b + +.Lld_partial_1: + mov LEN, %r8 + and $0x2, %r8 + jz .Lld_partial_2 + + mov LEN, %r8 + and $0x1C, %r8 + add SRC, %r8 + shl $0x10, %r9 + mov (%r8), %r9w + +.Lld_partial_2: + mov LEN, %r8 + and $0x4, %r8 + jz .Lld_partial_4 + + mov LEN, %r8 + and $0x18, %r8 + add SRC, %r8 + shl $32, %r9 + mov (%r8), %r8d + xor %r8, %r9 + +.Lld_partial_4: + movq %r9, MSG + + mov LEN, %r8 + and $0x8, %r8 + jz .Lld_partial_8 + + mov LEN, %r8 + and $0x10, %r8 + add SRC, %r8 + pslldq $8, MSG + movq (%r8), T0 + pxor T0, MSG + +.Lld_partial_8: + ret +ENDPROC(__load_partial) + +/* + * __store_partial: internal ABI + * input: + * LEN - bytes + * DST - dst + * output: + * T0 - message block + * changed: + * %r8 + * %r9 + * %r10 + */ +__store_partial: + mov LEN, %r8 + mov DST, %r9 + + movq T0, %r10 + + cmp $8, %r8 + jl .Lst_partial_8 + + mov %r10, (%r9) + psrldq $8, T0 + movq T0, %r10 + + sub $8, %r8 + add $8, %r9 + +.Lst_partial_8: + cmp $4, %r8 + jl .Lst_partial_4 + + mov %r10d, (%r9) + shr $32, %r10 + + sub $4, %r8 + add $4, %r9 + +.Lst_partial_4: + cmp $2, %r8 + jl .Lst_partial_2 + + mov %r10w, (%r9) + shr $0x10, %r10 + + sub $2, %r8 + add $2, %r9 + +.Lst_partial_2: + cmp $1, %r8 + jl .Lst_partial_1 + + mov %r10b, (%r9) + +.Lst_partial_1: + ret +ENDPROC(__store_partial) + +/* + * void crypto_aegis128_aesni_init(void *state, const void *key, const void *iv); + */ +ENTRY(crypto_aegis128_aesni_init) + FRAME_BEGIN + + /* load IV: */ + movdqu (%rdx), T1 + + /* load key: */ + movdqa (%rsi), KEY + pxor KEY, T1 + movdqa T1, STATE0 + movdqa KEY, STATE3 + movdqa KEY, STATE4 + + /* load the constants: */ + movdqa .Laegis128_const_0, STATE2 + movdqa .Laegis128_const_1, STATE1 + pxor STATE2, STATE3 + pxor STATE1, STATE4 + + /* update 10 times with KEY / KEY xor IV: */ + aegis128_update; pxor KEY, STATE4 + aegis128_update; pxor T1, STATE3 + aegis128_update; pxor KEY, STATE2 + aegis128_update; pxor T1, STATE1 + aegis128_update; pxor KEY, STATE0 + aegis128_update; pxor T1, STATE4 + aegis128_update; pxor KEY, STATE3 + aegis128_update; pxor 
T1, STATE2 + aegis128_update; pxor KEY, STATE1 + aegis128_update; pxor T1, STATE0 + + /* store the state: */ + movdqu STATE0, 0x00(STATEP) + movdqu STATE1, 0x10(STATEP) + movdqu STATE2, 0x20(STATEP) + movdqu STATE3, 0x30(STATEP) + movdqu STATE4, 0x40(STATEP) + + FRAME_END + ret +ENDPROC(crypto_aegis128_aesni_init) + +/* + * void crypto_aegis128_aesni_ad(void *state, unsigned int length, + * const void *data); + */ +ENTRY(crypto_aegis128_aesni_ad) + FRAME_BEGIN + + cmp $0x10, LEN + jb .Lad_out + + /* load the state: */ + movdqu 0x00(STATEP), STATE0 + movdqu 0x10(STATEP), STATE1 + movdqu 0x20(STATEP), STATE2 + movdqu 0x30(STATEP), STATE3 + movdqu 0x40(STATEP), STATE4 + + mov SRC, %r8 + and $0xF, %r8 + jnz .Lad_u_loop + +.align 8 +.Lad_a_loop: + movdqa 0x00(SRC), MSG + aegis128_update + pxor MSG, STATE4 + sub $0x10, LEN + cmp $0x10, LEN + jl .Lad_out_1 + + movdqa 0x10(SRC), MSG + aegis128_update + pxor MSG, STATE3 + sub $0x10, LEN + cmp $0x10, LEN + jl .Lad_out_2 + + movdqa 0x20(SRC), MSG + aegis128_update + pxor MSG, STATE2 + sub $0x10, LEN + cmp $0x10, LEN + jl .Lad_out_3 + + movdqa 0x30(SRC), MSG + aegis128_update + pxor MSG, STATE1 + sub $0x10, LEN + cmp $0x10, LEN + jl .Lad_out_4 + + movdqa 0x40(SRC), MSG + aegis128_update + pxor MSG, STATE0 + sub $0x10, LEN + cmp $0x10, LEN + jl .Lad_out_0 + + add $0x50, SRC + jmp .Lad_a_loop + +.align 8 +.Lad_u_loop: + movdqu 0x00(SRC), MSG + aegis128_update + pxor MSG, STATE4 + sub $0x10, LEN + cmp $0x10, LEN + jl .Lad_out_1 + + movdqu 0x10(SRC), MSG + aegis128_update + pxor MSG, STATE3 + sub $0x10, LEN + cmp $0x10, LEN + jl .Lad_out_2 + + movdqu 0x20(SRC), MSG + aegis128_update + pxor MSG, STATE2 + sub $0x10, LEN + cmp $0x10, LEN + jl .Lad_out_3 + + movdqu 0x30(SRC), MSG + aegis128_update + pxor MSG, STATE1 + sub $0x10, LEN + cmp $0x10, LEN + jl .Lad_out_4 + + movdqu 0x40(SRC), MSG + aegis128_update + pxor MSG, STATE0 + sub $0x10, LEN + cmp $0x10, LEN + jl .Lad_out_0 + + add $0x50, SRC + jmp .Lad_u_loop + + /* store the state: */ +.Lad_out_0: + movdqu STATE0, 0x00(STATEP) + movdqu STATE1, 0x10(STATEP) + movdqu STATE2, 0x20(STATEP) + movdqu STATE3, 0x30(STATEP) + movdqu STATE4, 0x40(STATEP) + FRAME_END + ret + +.Lad_out_1: + movdqu STATE4, 0x00(STATEP) + movdqu STATE0, 0x10(STATEP) + movdqu STATE1, 0x20(STATEP) + movdqu STATE2, 0x30(STATEP) + movdqu STATE3, 0x40(STATEP) + FRAME_END + ret + +.Lad_out_2: + movdqu STATE3, 0x00(STATEP) + movdqu STATE4, 0x10(STATEP) + movdqu STATE0, 0x20(STATEP) + movdqu STATE1, 0x30(STATEP) + movdqu STATE2, 0x40(STATEP) + FRAME_END + ret + +.Lad_out_3: + movdqu STATE2, 0x00(STATEP) + movdqu STATE3, 0x10(STATEP) + movdqu STATE4, 0x20(STATEP) + movdqu STATE0, 0x30(STATEP) + movdqu STATE1, 0x40(STATEP) + FRAME_END + ret + +.Lad_out_4: + movdqu STATE1, 0x00(STATEP) + movdqu STATE2, 0x10(STATEP) + movdqu STATE3, 0x20(STATEP) + movdqu STATE4, 0x30(STATEP) + movdqu STATE0, 0x40(STATEP) + FRAME_END + ret + +.Lad_out: + FRAME_END + ret +ENDPROC(crypto_aegis128_aesni_ad) + +.macro encrypt_block a s0 s1 s2 s3 s4 i + movdq\a (\i * 0x10)(SRC), MSG + movdqa MSG, T0 + pxor \s1, T0 + pxor \s4, T0 + movdqa \s2, T1 + pand \s3, T1 + pxor T1, T0 + movdq\a T0, (\i * 0x10)(DST) + + aegis128_update + pxor MSG, \s4 + + sub $0x10, LEN + cmp $0x10, LEN + jl .Lenc_out_\i +.endm + +/* + * void crypto_aegis128_aesni_enc(void *state, unsigned int length, + * const void *src, void *dst); + */ +ENTRY(crypto_aegis128_aesni_enc) + FRAME_BEGIN + + cmp $0x10, LEN + jb .Lenc_out + + /* load the state: */ + movdqu 0x00(STATEP), STATE0 + movdqu 0x10(STATEP), 
STATE1 + movdqu 0x20(STATEP), STATE2 + movdqu 0x30(STATEP), STATE3 + movdqu 0x40(STATEP), STATE4 + + mov SRC, %r8 + or DST, %r8 + and $0xF, %r8 + jnz .Lenc_u_loop + +.align 8 +.Lenc_a_loop: + encrypt_block a STATE0 STATE1 STATE2 STATE3 STATE4 0 + encrypt_block a STATE4 STATE0 STATE1 STATE2 STATE3 1 + encrypt_block a STATE3 STATE4 STATE0 STATE1 STATE2 2 + encrypt_block a STATE2 STATE3 STATE4 STATE0 STATE1 3 + encrypt_block a STATE1 STATE2 STATE3 STATE4 STATE0 4 + + add $0x50, SRC + add $0x50, DST + jmp .Lenc_a_loop + +.align 8 +.Lenc_u_loop: + encrypt_block u STATE0 STATE1 STATE2 STATE3 STATE4 0 + encrypt_block u STATE4 STATE0 STATE1 STATE2 STATE3 1 + encrypt_block u STATE3 STATE4 STATE0 STATE1 STATE2 2 + encrypt_block u STATE2 STATE3 STATE4 STATE0 STATE1 3 + encrypt_block u STATE1 STATE2 STATE3 STATE4 STATE0 4 + + add $0x50, SRC + add $0x50, DST + jmp .Lenc_u_loop + + /* store the state: */ +.Lenc_out_0: + movdqu STATE4, 0x00(STATEP) + movdqu STATE0, 0x10(STATEP) + movdqu STATE1, 0x20(STATEP) + movdqu STATE2, 0x30(STATEP) + movdqu STATE3, 0x40(STATEP) + FRAME_END + ret + +.Lenc_out_1: + movdqu STATE3, 0x00(STATEP) + movdqu STATE4, 0x10(STATEP) + movdqu STATE0, 0x20(STATEP) + movdqu STATE1, 0x30(STATEP) + movdqu STATE2, 0x40(STATEP) + FRAME_END + ret + +.Lenc_out_2: + movdqu STATE2, 0x00(STATEP) + movdqu STATE3, 0x10(STATEP) + movdqu STATE4, 0x20(STATEP) + movdqu STATE0, 0x30(STATEP) + movdqu STATE1, 0x40(STATEP) + FRAME_END + ret + +.Lenc_out_3: + movdqu STATE1, 0x00(STATEP) + movdqu STATE2, 0x10(STATEP) + movdqu STATE3, 0x20(STATEP) + movdqu STATE4, 0x30(STATEP) + movdqu STATE0, 0x40(STATEP) + FRAME_END + ret + +.Lenc_out_4: + movdqu STATE0, 0x00(STATEP) + movdqu STATE1, 0x10(STATEP) + movdqu STATE2, 0x20(STATEP) + movdqu STATE3, 0x30(STATEP) + movdqu STATE4, 0x40(STATEP) + FRAME_END + ret + +.Lenc_out: + FRAME_END + ret +ENDPROC(crypto_aegis128_aesni_enc) + +/* + * void crypto_aegis128_aesni_enc_tail(void *state, unsigned int length, + * const void *src, void *dst); + */ +ENTRY(crypto_aegis128_aesni_enc_tail) + FRAME_BEGIN + + /* load the state: */ + movdqu 0x00(STATEP), STATE0 + movdqu 0x10(STATEP), STATE1 + movdqu 0x20(STATEP), STATE2 + movdqu 0x30(STATEP), STATE3 + movdqu 0x40(STATEP), STATE4 + + /* encrypt message: */ + call __load_partial + + movdqa MSG, T0 + pxor STATE1, T0 + pxor STATE4, T0 + movdqa STATE2, T1 + pand STATE3, T1 + pxor T1, T0 + + call __store_partial + + aegis128_update + pxor MSG, STATE4 + + /* store the state: */ + movdqu STATE4, 0x00(STATEP) + movdqu STATE0, 0x10(STATEP) + movdqu STATE1, 0x20(STATEP) + movdqu STATE2, 0x30(STATEP) + movdqu STATE3, 0x40(STATEP) + + FRAME_END +ENDPROC(crypto_aegis128_aesni_enc_tail) + +.macro decrypt_block a s0 s1 s2 s3 s4 i + movdq\a (\i * 0x10)(SRC), MSG + pxor \s1, MSG + pxor \s4, MSG + movdqa \s2, T1 + pand \s3, T1 + pxor T1, MSG + movdq\a MSG, (\i * 0x10)(DST) + + aegis128_update + pxor MSG, \s4 + + sub $0x10, LEN + cmp $0x10, LEN + jl .Ldec_out_\i +.endm + +/* + * void crypto_aegis128_aesni_dec(void *state, unsigned int length, + * const void *src, void *dst); + */ +ENTRY(crypto_aegis128_aesni_dec) + FRAME_BEGIN + + cmp $0x10, LEN + jb .Ldec_out + + /* load the state: */ + movdqu 0x00(STATEP), STATE0 + movdqu 0x10(STATEP), STATE1 + movdqu 0x20(STATEP), STATE2 + movdqu 0x30(STATEP), STATE3 + movdqu 0x40(STATEP), STATE4 + + mov SRC, %r8 + or DST, %r8 + and $0xF, %r8 + jnz .Ldec_u_loop + +.align 8 +.Ldec_a_loop: + decrypt_block a STATE0 STATE1 STATE2 STATE3 STATE4 0 + decrypt_block a STATE4 STATE0 STATE1 STATE2 STATE3 1 + 
decrypt_block a STATE3 STATE4 STATE0 STATE1 STATE2 2 + decrypt_block a STATE2 STATE3 STATE4 STATE0 STATE1 3 + decrypt_block a STATE1 STATE2 STATE3 STATE4 STATE0 4 + + add $0x50, SRC + add $0x50, DST + jmp .Ldec_a_loop + +.align 8 +.Ldec_u_loop: + decrypt_block u STATE0 STATE1 STATE2 STATE3 STATE4 0 + decrypt_block u STATE4 STATE0 STATE1 STATE2 STATE3 1 + decrypt_block u STATE3 STATE4 STATE0 STATE1 STATE2 2 + decrypt_block u STATE2 STATE3 STATE4 STATE0 STATE1 3 + decrypt_block u STATE1 STATE2 STATE3 STATE4 STATE0 4 + + add $0x50, SRC + add $0x50, DST + jmp .Ldec_u_loop + + /* store the state: */ +.Ldec_out_0: + movdqu STATE4, 0x00(STATEP) + movdqu STATE0, 0x10(STATEP) + movdqu STATE1, 0x20(STATEP) + movdqu STATE2, 0x30(STATEP) + movdqu STATE3, 0x40(STATEP) + FRAME_END + ret + +.Ldec_out_1: + movdqu STATE3, 0x00(STATEP) + movdqu STATE4, 0x10(STATEP) + movdqu STATE0, 0x20(STATEP) + movdqu STATE1, 0x30(STATEP) + movdqu STATE2, 0x40(STATEP) + FRAME_END + ret + +.Ldec_out_2: + movdqu STATE2, 0x00(STATEP) + movdqu STATE3, 0x10(STATEP) + movdqu STATE4, 0x20(STATEP) + movdqu STATE0, 0x30(STATEP) + movdqu STATE1, 0x40(STATEP) + FRAME_END + ret + +.Ldec_out_3: + movdqu STATE1, 0x00(STATEP) + movdqu STATE2, 0x10(STATEP) + movdqu STATE3, 0x20(STATEP) + movdqu STATE4, 0x30(STATEP) + movdqu STATE0, 0x40(STATEP) + FRAME_END + ret + +.Ldec_out_4: + movdqu STATE0, 0x00(STATEP) + movdqu STATE1, 0x10(STATEP) + movdqu STATE2, 0x20(STATEP) + movdqu STATE3, 0x30(STATEP) + movdqu STATE4, 0x40(STATEP) + FRAME_END + ret + +.Ldec_out: + FRAME_END + ret +ENDPROC(crypto_aegis128_aesni_dec) + +/* + * void crypto_aegis128_aesni_dec_tail(void *state, unsigned int length, + * const void *src, void *dst); + */ +ENTRY(crypto_aegis128_aesni_dec_tail) + FRAME_BEGIN + + /* load the state: */ + movdqu 0x00(STATEP), STATE0 + movdqu 0x10(STATEP), STATE1 + movdqu 0x20(STATEP), STATE2 + movdqu 0x30(STATEP), STATE3 + movdqu 0x40(STATEP), STATE4 + + /* decrypt message: */ + call __load_partial + + pxor STATE1, MSG + pxor STATE4, MSG + movdqa STATE2, T1 + pand STATE3, T1 + pxor T1, MSG + + movdqa MSG, T0 + call __store_partial + + /* mask with byte count: */ + movq LEN, T0 + punpcklbw T0, T0 + punpcklbw T0, T0 + punpcklbw T0, T0 + punpcklbw T0, T0 + movdqa .Laegis128_counter, T1 + pcmpgtb T1, T0 + pand T0, MSG + + aegis128_update + pxor MSG, STATE4 + + /* store the state: */ + movdqu STATE4, 0x00(STATEP) + movdqu STATE0, 0x10(STATEP) + movdqu STATE1, 0x20(STATEP) + movdqu STATE2, 0x30(STATEP) + movdqu STATE3, 0x40(STATEP) + + FRAME_END + ret +ENDPROC(crypto_aegis128_aesni_dec_tail) + +/* + * void crypto_aegis128_aesni_final(void *state, void *tag_xor, + * u64 assoclen, u64 cryptlen); + */ +ENTRY(crypto_aegis128_aesni_final) + FRAME_BEGIN + + /* load the state: */ + movdqu 0x00(STATEP), STATE0 + movdqu 0x10(STATEP), STATE1 + movdqu 0x20(STATEP), STATE2 + movdqu 0x30(STATEP), STATE3 + movdqu 0x40(STATEP), STATE4 + + /* prepare length block: */ + movq %rdx, MSG + movq %rcx, T0 + pslldq $8, T0 + pxor T0, MSG + psllq $3, MSG /* multiply by 8 (to get bit count) */ + + pxor STATE3, MSG + + /* update state: */ + aegis128_update; pxor MSG, STATE4 + aegis128_update; pxor MSG, STATE3 + aegis128_update; pxor MSG, STATE2 + aegis128_update; pxor MSG, STATE1 + aegis128_update; pxor MSG, STATE0 + aegis128_update; pxor MSG, STATE4 + aegis128_update; pxor MSG, STATE3 + + /* xor tag: */ + movdqu (%rsi), MSG + + pxor STATE0, MSG + pxor STATE1, MSG + pxor STATE2, MSG + pxor STATE3, MSG + pxor STATE4, MSG + + movdqu MSG, (%rsi) + + FRAME_END + ret 
+ENDPROC(crypto_aegis128_aesni_final) diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c new file mode 100644 index 000000000000..5de7c0d46edf --- /dev/null +++ b/arch/x86/crypto/aegis128-aesni-glue.c @@ -0,0 +1,407 @@ +/* + * The AEGIS-128 Authenticated-Encryption Algorithm + * Glue for AES-NI + SSE2 implementation + * + * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com> + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include <crypto/cryptd.h> +#include <crypto/internal/aead.h> +#include <crypto/internal/skcipher.h> +#include <crypto/scatterwalk.h> +#include <linux/module.h> +#include <asm/fpu/api.h> +#include <asm/cpu_device_id.h> + +#define AEGIS128_BLOCK_ALIGN 16 +#define AEGIS128_BLOCK_SIZE 16 +#define AEGIS128_NONCE_SIZE 16 +#define AEGIS128_STATE_BLOCKS 5 +#define AEGIS128_KEY_SIZE 16 +#define AEGIS128_MIN_AUTH_SIZE 8 +#define AEGIS128_MAX_AUTH_SIZE 16 + +asmlinkage void crypto_aegis128_aesni_init(void *state, void *key, void *iv); + +asmlinkage void crypto_aegis128_aesni_ad( + void *state, unsigned int length, const void *data); + +asmlinkage void crypto_aegis128_aesni_enc( + void *state, unsigned int length, const void *src, void *dst); + +asmlinkage void crypto_aegis128_aesni_dec( + void *state, unsigned int length, const void *src, void *dst); + +asmlinkage void crypto_aegis128_aesni_enc_tail( + void *state, unsigned int length, const void *src, void *dst); + +asmlinkage void crypto_aegis128_aesni_dec_tail( + void *state, unsigned int length, const void *src, void *dst); + +asmlinkage void crypto_aegis128_aesni_final( + void *state, void *tag_xor, unsigned int cryptlen, + unsigned int assoclen); + +struct aegis_block { + u8 bytes[AEGIS128_BLOCK_SIZE] __aligned(AEGIS128_BLOCK_ALIGN); +}; + +struct aegis_state { + struct aegis_block blocks[AEGIS128_STATE_BLOCKS]; +}; + +struct aegis_ctx { + struct aegis_block key; +}; + +struct aegis_crypt_ops { + int (*skcipher_walk_init)(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); + + void (*crypt_blocks)(void *state, unsigned int length, const void *src, + void *dst); + void (*crypt_tail)(void *state, unsigned int length, const void *src, + void *dst); +}; + +static void crypto_aegis128_aesni_process_ad( + struct aegis_state *state, struct scatterlist *sg_src, + unsigned int assoclen) +{ + struct scatter_walk walk; + struct aegis_block buf; + unsigned int pos = 0; + + scatterwalk_start(&walk, sg_src); + while (assoclen != 0) { + unsigned int size = scatterwalk_clamp(&walk, assoclen); + unsigned int left = size; + void *mapped = scatterwalk_map(&walk); + const u8 *src = (const u8 *)mapped; + + if (pos + size >= AEGIS128_BLOCK_SIZE) { + if (pos > 0) { + unsigned int fill = AEGIS128_BLOCK_SIZE - pos; + memcpy(buf.bytes + pos, src, fill); + crypto_aegis128_aesni_ad(state, + AEGIS128_BLOCK_SIZE, + buf.bytes); + pos = 0; + left -= fill; + src += fill; + } + + crypto_aegis128_aesni_ad(state, left, src); + + src += left & ~(AEGIS128_BLOCK_SIZE - 1); + left &= AEGIS128_BLOCK_SIZE - 1; + } + + memcpy(buf.bytes + pos, src, left); + pos += left; + assoclen -= size; + + scatterwalk_unmap(mapped); + scatterwalk_advance(&walk, size); + scatterwalk_done(&walk, 0, assoclen); + } + + if (pos > 0) { + 
memset(buf.bytes + pos, 0, AEGIS128_BLOCK_SIZE - pos); + crypto_aegis128_aesni_ad(state, AEGIS128_BLOCK_SIZE, buf.bytes); + } +} + +static void crypto_aegis128_aesni_process_crypt( + struct aegis_state *state, struct aead_request *req, + const struct aegis_crypt_ops *ops) +{ + struct skcipher_walk walk; + u8 *src, *dst; + unsigned int chunksize, base; + + ops->skcipher_walk_init(&walk, req, false); + + while (walk.nbytes) { + src = walk.src.virt.addr; + dst = walk.dst.virt.addr; + chunksize = walk.nbytes; + + ops->crypt_blocks(state, chunksize, src, dst); + + base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1); + src += base; + dst += base; + chunksize &= AEGIS128_BLOCK_SIZE - 1; + + if (chunksize > 0) + ops->crypt_tail(state, chunksize, src, dst); + + skcipher_walk_done(&walk, 0); + } +} + +static struct aegis_ctx *crypto_aegis128_aesni_ctx(struct crypto_aead *aead) +{ + u8 *ctx = crypto_aead_ctx(aead); + ctx = PTR_ALIGN(ctx, __alignof__(struct aegis_ctx)); + return (void *)ctx; +} + +static int crypto_aegis128_aesni_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(aead); + + if (keylen != AEGIS128_KEY_SIZE) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE); + + return 0; +} + +static int crypto_aegis128_aesni_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + if (authsize > AEGIS128_MAX_AUTH_SIZE) + return -EINVAL; + if (authsize < AEGIS128_MIN_AUTH_SIZE) + return -EINVAL; + return 0; +} + +static void crypto_aegis128_aesni_crypt(struct aead_request *req, + struct aegis_block *tag_xor, + unsigned int cryptlen, + const struct aegis_crypt_ops *ops) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm); + struct aegis_state state; + + kernel_fpu_begin(); + + crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv); + crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen); + crypto_aegis128_aesni_process_crypt(&state, req, ops); + crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen); + + kernel_fpu_end(); +} + +static int crypto_aegis128_aesni_encrypt(struct aead_request *req) +{ + static const struct aegis_crypt_ops OPS = { + .skcipher_walk_init = skcipher_walk_aead_encrypt, + .crypt_blocks = crypto_aegis128_aesni_enc, + .crypt_tail = crypto_aegis128_aesni_enc_tail, + }; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aegis_block tag = {}; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen; + + crypto_aegis128_aesni_crypt(req, &tag, cryptlen, &OPS); + + scatterwalk_map_and_copy(tag.bytes, req->dst, + req->assoclen + cryptlen, authsize, 1); + return 0; +} + +static int crypto_aegis128_aesni_decrypt(struct aead_request *req) +{ + static const struct aegis_block zeros = {}; + + static const struct aegis_crypt_ops OPS = { + .skcipher_walk_init = skcipher_walk_aead_decrypt, + .crypt_blocks = crypto_aegis128_aesni_dec, + .crypt_tail = crypto_aegis128_aesni_dec_tail, + }; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aegis_block tag; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen - authsize; + + scatterwalk_map_and_copy(tag.bytes, req->src, + req->assoclen + cryptlen, authsize, 0); + + crypto_aegis128_aesni_crypt(req, &tag, cryptlen, &OPS); + + return crypto_memneq(tag.bytes, zeros.bytes, authsize) ? 
-EBADMSG : 0; +} + +static int crypto_aegis128_aesni_init_tfm(struct crypto_aead *aead) +{ + return 0; +} + +static void crypto_aegis128_aesni_exit_tfm(struct crypto_aead *aead) +{ +} + +static int cryptd_aegis128_aesni_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + return crypto_aead_setkey(&cryptd_tfm->base, key, keylen); +} + +static int cryptd_aegis128_aesni_setauthsize(struct crypto_aead *aead, + unsigned int authsize) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + return crypto_aead_setauthsize(&cryptd_tfm->base, authsize); +} + +static int cryptd_aegis128_aesni_encrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + aead = &cryptd_tfm->base; + if (irq_fpu_usable() && (!in_atomic() || + !cryptd_aead_queued(cryptd_tfm))) + aead = cryptd_aead_child(cryptd_tfm); + + aead_request_set_tfm(req, aead); + + return crypto_aead_encrypt(req); +} + +static int cryptd_aegis128_aesni_decrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + aead = &cryptd_tfm->base; + if (irq_fpu_usable() && (!in_atomic() || + !cryptd_aead_queued(cryptd_tfm))) + aead = cryptd_aead_child(cryptd_tfm); + + aead_request_set_tfm(req, aead); + + return crypto_aead_decrypt(req); +} + +static int cryptd_aegis128_aesni_init_tfm(struct crypto_aead *aead) +{ + struct cryptd_aead *cryptd_tfm; + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + + cryptd_tfm = cryptd_alloc_aead("__aegis128-aesni", CRYPTO_ALG_INTERNAL, + CRYPTO_ALG_INTERNAL); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); + + *ctx = cryptd_tfm; + crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); + return 0; +} + +static void cryptd_aegis128_aesni_exit_tfm(struct crypto_aead *aead) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + + cryptd_free_aead(*ctx); +} + +static struct aead_alg crypto_aegis128_aesni_alg[] = { + { + .setkey = crypto_aegis128_aesni_setkey, + .setauthsize = crypto_aegis128_aesni_setauthsize, + .encrypt = crypto_aegis128_aesni_encrypt, + .decrypt = crypto_aegis128_aesni_decrypt, + .init = crypto_aegis128_aesni_init_tfm, + .exit = crypto_aegis128_aesni_exit_tfm, + + .ivsize = AEGIS128_NONCE_SIZE, + .maxauthsize = AEGIS128_MAX_AUTH_SIZE, + .chunksize = AEGIS128_BLOCK_SIZE, + + .base = { + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct aegis_ctx) + + __alignof__(struct aegis_ctx), + .cra_alignmask = 0, + + .cra_name = "__aegis128", + .cra_driver_name = "__aegis128-aesni", + + .cra_module = THIS_MODULE, + } + }, { + .setkey = cryptd_aegis128_aesni_setkey, + .setauthsize = cryptd_aegis128_aesni_setauthsize, + .encrypt = cryptd_aegis128_aesni_encrypt, + .decrypt = cryptd_aegis128_aesni_decrypt, + .init = cryptd_aegis128_aesni_init_tfm, + .exit = cryptd_aegis128_aesni_exit_tfm, + + .ivsize = AEGIS128_NONCE_SIZE, + .maxauthsize = AEGIS128_MAX_AUTH_SIZE, + .chunksize = AEGIS128_BLOCK_SIZE, + + .base = { + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct cryptd_aead *), + .cra_alignmask = 0, + + .cra_priority = 400, + + .cra_name = "aegis128", + .cra_driver_name = "aegis128-aesni", + + .cra_module = 
THIS_MODULE, + } + } +}; + +static const struct x86_cpu_id aesni_cpu_id[] = { + X86_FEATURE_MATCH(X86_FEATURE_AES), + X86_FEATURE_MATCH(X86_FEATURE_XMM2), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id); + +static int __init crypto_aegis128_aesni_module_init(void) +{ + if (!x86_match_cpu(aesni_cpu_id)) + return -ENODEV; + + return crypto_register_aeads(crypto_aegis128_aesni_alg, + ARRAY_SIZE(crypto_aegis128_aesni_alg)); +} + +static void __exit crypto_aegis128_aesni_module_exit(void) +{ + crypto_unregister_aeads(crypto_aegis128_aesni_alg, + ARRAY_SIZE(crypto_aegis128_aesni_alg)); +} + +module_init(crypto_aegis128_aesni_module_init); +module_exit(crypto_aegis128_aesni_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>"); +MODULE_DESCRIPTION("AEGIS-128 AEAD algorithm -- AESNI+SSE2 implementation"); +MODULE_ALIAS_CRYPTO("aegis128"); +MODULE_ALIAS_CRYPTO("aegis128-aesni"); diff --git a/arch/x86/crypto/aegis128l-aesni-asm.S b/arch/x86/crypto/aegis128l-aesni-asm.S new file mode 100644 index 000000000000..9263c344f2c7 --- /dev/null +++ b/arch/x86/crypto/aegis128l-aesni-asm.S @@ -0,0 +1,825 @@ +/* + * AES-NI + SSE2 implementation of AEGIS-128L + * + * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com> + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include <linux/linkage.h> +#include <asm/frame.h> + +#define STATE0 %xmm0 +#define STATE1 %xmm1 +#define STATE2 %xmm2 +#define STATE3 %xmm3 +#define STATE4 %xmm4 +#define STATE5 %xmm5 +#define STATE6 %xmm6 +#define STATE7 %xmm7 +#define MSG0 %xmm8 +#define MSG1 %xmm9 +#define T0 %xmm10 +#define T1 %xmm11 +#define T2 %xmm12 +#define T3 %xmm13 + +#define STATEP %rdi +#define LEN %rsi +#define SRC %rdx +#define DST %rcx + +.section .rodata.cst16.aegis128l_const, "aM", @progbits, 32 +.align 16 +.Laegis128l_const_0: + .byte 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d + .byte 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62 +.Laegis128l_const_1: + .byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1 + .byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd + +.section .rodata.cst16.aegis128l_counter, "aM", @progbits, 16 +.align 16 +.Laegis128l_counter0: + .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 + .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f +.Laegis128l_counter1: + .byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 + .byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f + +.text + +/* + * __load_partial: internal ABI + * input: + * LEN - bytes + * SRC - src + * output: + * MSG0 - first message block + * MSG1 - second message block + * changed: + * T0 + * %r8 + * %r9 + */ +__load_partial: + xor %r9, %r9 + pxor MSG0, MSG0 + pxor MSG1, MSG1 + + mov LEN, %r8 + and $0x1, %r8 + jz .Lld_partial_1 + + mov LEN, %r8 + and $0x1E, %r8 + add SRC, %r8 + mov (%r8), %r9b + +.Lld_partial_1: + mov LEN, %r8 + and $0x2, %r8 + jz .Lld_partial_2 + + mov LEN, %r8 + and $0x1C, %r8 + add SRC, %r8 + shl $0x10, %r9 + mov (%r8), %r9w + +.Lld_partial_2: + mov LEN, %r8 + and $0x4, %r8 + jz .Lld_partial_4 + + mov LEN, %r8 + and $0x18, %r8 + add SRC, %r8 + shl $32, %r9 + mov (%r8), %r8d + xor %r8, %r9 + +.Lld_partial_4: + movq %r9, MSG0 + + mov LEN, %r8 + and $0x8, %r8 + jz .Lld_partial_8 + + mov LEN, %r8 + and $0x10, %r8 + add SRC, %r8 + pslldq $8, MSG0 + movq (%r8), T0 + pxor T0, MSG0 + 
+.Lld_partial_8: + mov LEN, %r8 + and $0x10, %r8 + jz .Lld_partial_16 + + movdqa MSG0, MSG1 + movdqu (SRC), MSG0 + +.Lld_partial_16: + ret +ENDPROC(__load_partial) + +/* + * __store_partial: internal ABI + * input: + * LEN - bytes + * DST - dst + * output: + * T0 - first message block + * T1 - second message block + * changed: + * %r8 + * %r9 + * %r10 + */ +__store_partial: + mov LEN, %r8 + mov DST, %r9 + + cmp $16, %r8 + jl .Lst_partial_16 + + movdqu T0, (%r9) + movdqa T1, T0 + + sub $16, %r8 + add $16, %r9 + +.Lst_partial_16: + movq T0, %r10 + + cmp $8, %r8 + jl .Lst_partial_8 + + mov %r10, (%r9) + psrldq $8, T0 + movq T0, %r10 + + sub $8, %r8 + add $8, %r9 + +.Lst_partial_8: + cmp $4, %r8 + jl .Lst_partial_4 + + mov %r10d, (%r9) + shr $32, %r10 + + sub $4, %r8 + add $4, %r9 + +.Lst_partial_4: + cmp $2, %r8 + jl .Lst_partial_2 + + mov %r10w, (%r9) + shr $0x10, %r10 + + sub $2, %r8 + add $2, %r9 + +.Lst_partial_2: + cmp $1, %r8 + jl .Lst_partial_1 + + mov %r10b, (%r9) + +.Lst_partial_1: + ret +ENDPROC(__store_partial) + +.macro update + movdqa STATE7, T0 + aesenc STATE0, STATE7 + aesenc STATE1, STATE0 + aesenc STATE2, STATE1 + aesenc STATE3, STATE2 + aesenc STATE4, STATE3 + aesenc STATE5, STATE4 + aesenc STATE6, STATE5 + aesenc T0, STATE6 +.endm + +.macro update0 + update + pxor MSG0, STATE7 + pxor MSG1, STATE3 +.endm + +.macro update1 + update + pxor MSG0, STATE6 + pxor MSG1, STATE2 +.endm + +.macro update2 + update + pxor MSG0, STATE5 + pxor MSG1, STATE1 +.endm + +.macro update3 + update + pxor MSG0, STATE4 + pxor MSG1, STATE0 +.endm + +.macro update4 + update + pxor MSG0, STATE3 + pxor MSG1, STATE7 +.endm + +.macro update5 + update + pxor MSG0, STATE2 + pxor MSG1, STATE6 +.endm + +.macro update6 + update + pxor MSG0, STATE1 + pxor MSG1, STATE5 +.endm + +.macro update7 + update + pxor MSG0, STATE0 + pxor MSG1, STATE4 +.endm + +.macro state_load + movdqu 0x00(STATEP), STATE0 + movdqu 0x10(STATEP), STATE1 + movdqu 0x20(STATEP), STATE2 + movdqu 0x30(STATEP), STATE3 + movdqu 0x40(STATEP), STATE4 + movdqu 0x50(STATEP), STATE5 + movdqu 0x60(STATEP), STATE6 + movdqu 0x70(STATEP), STATE7 +.endm + +.macro state_store s0 s1 s2 s3 s4 s5 s6 s7 + movdqu \s7, 0x00(STATEP) + movdqu \s0, 0x10(STATEP) + movdqu \s1, 0x20(STATEP) + movdqu \s2, 0x30(STATEP) + movdqu \s3, 0x40(STATEP) + movdqu \s4, 0x50(STATEP) + movdqu \s5, 0x60(STATEP) + movdqu \s6, 0x70(STATEP) +.endm + +.macro state_store0 + state_store STATE0 STATE1 STATE2 STATE3 STATE4 STATE5 STATE6 STATE7 +.endm + +.macro state_store1 + state_store STATE7 STATE0 STATE1 STATE2 STATE3 STATE4 STATE5 STATE6 +.endm + +.macro state_store2 + state_store STATE6 STATE7 STATE0 STATE1 STATE2 STATE3 STATE4 STATE5 +.endm + +.macro state_store3 + state_store STATE5 STATE6 STATE7 STATE0 STATE1 STATE2 STATE3 STATE4 +.endm + +.macro state_store4 + state_store STATE4 STATE5 STATE6 STATE7 STATE0 STATE1 STATE2 STATE3 +.endm + +.macro state_store5 + state_store STATE3 STATE4 STATE5 STATE6 STATE7 STATE0 STATE1 STATE2 +.endm + +.macro state_store6 + state_store STATE2 STATE3 STATE4 STATE5 STATE6 STATE7 STATE0 STATE1 +.endm + +.macro state_store7 + state_store STATE1 STATE2 STATE3 STATE4 STATE5 STATE6 STATE7 STATE0 +.endm + +/* + * void crypto_aegis128l_aesni_init(void *state, const void *key, const void *iv); + */ +ENTRY(crypto_aegis128l_aesni_init) + FRAME_BEGIN + + /* load key: */ + movdqa (%rsi), MSG1 + movdqa MSG1, STATE0 + movdqa MSG1, STATE4 + movdqa MSG1, STATE5 + movdqa MSG1, STATE6 + movdqa MSG1, STATE7 + + /* load IV: */ + movdqu (%rdx), MSG0 + pxor MSG0, STATE0 
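+	/*
+	 * After the remaining xors below, the initial state is:
+	 * S0 = S4 = key ^ IV, S1 = S3 = const1, S2 = const0,
+	 * S5 = S7 = key ^ const0, S6 = key ^ const1 (const0/const1
+	 * being the Fibonacci-mod-256 byte sequence loaded above).
+	 */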
+ pxor MSG0, STATE4 + + /* load the constants: */ + movdqa .Laegis128l_const_0, STATE2 + movdqa .Laegis128l_const_1, STATE1 + movdqa STATE1, STATE3 + pxor STATE2, STATE5 + pxor STATE1, STATE6 + pxor STATE2, STATE7 + + /* update 10 times with IV and KEY: */ + update0 + update1 + update2 + update3 + update4 + update5 + update6 + update7 + update0 + update1 + + state_store1 + + FRAME_END + ret +ENDPROC(crypto_aegis128l_aesni_init) + +.macro ad_block a i + movdq\a (\i * 0x20 + 0x00)(SRC), MSG0 + movdq\a (\i * 0x20 + 0x10)(SRC), MSG1 + update\i + sub $0x20, LEN + cmp $0x20, LEN + jl .Lad_out_\i +.endm + +/* + * void crypto_aegis128l_aesni_ad(void *state, unsigned int length, + * const void *data); + */ +ENTRY(crypto_aegis128l_aesni_ad) + FRAME_BEGIN + + cmp $0x20, LEN + jb .Lad_out + + state_load + + mov SRC, %r8 + and $0xf, %r8 + jnz .Lad_u_loop + +.align 8 +.Lad_a_loop: + ad_block a 0 + ad_block a 1 + ad_block a 2 + ad_block a 3 + ad_block a 4 + ad_block a 5 + ad_block a 6 + ad_block a 7 + + add $0x100, SRC + jmp .Lad_a_loop + +.align 8 +.Lad_u_loop: + ad_block u 0 + ad_block u 1 + ad_block u 2 + ad_block u 3 + ad_block u 4 + ad_block u 5 + ad_block u 6 + ad_block u 7 + + add $0x100, SRC + jmp .Lad_u_loop + +.Lad_out_0: + state_store0 + FRAME_END + ret + +.Lad_out_1: + state_store1 + FRAME_END + ret + +.Lad_out_2: + state_store2 + FRAME_END + ret + +.Lad_out_3: + state_store3 + FRAME_END + ret + +.Lad_out_4: + state_store4 + FRAME_END + ret + +.Lad_out_5: + state_store5 + FRAME_END + ret + +.Lad_out_6: + state_store6 + FRAME_END + ret + +.Lad_out_7: + state_store7 + FRAME_END + ret + +.Lad_out: + FRAME_END + ret +ENDPROC(crypto_aegis128l_aesni_ad) + +.macro crypt m0 m1 s0 s1 s2 s3 s4 s5 s6 s7 + pxor \s1, \m0 + pxor \s6, \m0 + movdqa \s2, T3 + pand \s3, T3 + pxor T3, \m0 + + pxor \s2, \m1 + pxor \s5, \m1 + movdqa \s6, T3 + pand \s7, T3 + pxor T3, \m1 +.endm + +.macro crypt0 m0 m1 + crypt \m0 \m1 STATE0 STATE1 STATE2 STATE3 STATE4 STATE5 STATE6 STATE7 +.endm + +.macro crypt1 m0 m1 + crypt \m0 \m1 STATE7 STATE0 STATE1 STATE2 STATE3 STATE4 STATE5 STATE6 +.endm + +.macro crypt2 m0 m1 + crypt \m0 \m1 STATE6 STATE7 STATE0 STATE1 STATE2 STATE3 STATE4 STATE5 +.endm + +.macro crypt3 m0 m1 + crypt \m0 \m1 STATE5 STATE6 STATE7 STATE0 STATE1 STATE2 STATE3 STATE4 +.endm + +.macro crypt4 m0 m1 + crypt \m0 \m1 STATE4 STATE5 STATE6 STATE7 STATE0 STATE1 STATE2 STATE3 +.endm + +.macro crypt5 m0 m1 + crypt \m0 \m1 STATE3 STATE4 STATE5 STATE6 STATE7 STATE0 STATE1 STATE2 +.endm + +.macro crypt6 m0 m1 + crypt \m0 \m1 STATE2 STATE3 STATE4 STATE5 STATE6 STATE7 STATE0 STATE1 +.endm + +.macro crypt7 m0 m1 + crypt \m0 \m1 STATE1 STATE2 STATE3 STATE4 STATE5 STATE6 STATE7 STATE0 +.endm + +.macro encrypt_block a i + movdq\a (\i * 0x20 + 0x00)(SRC), MSG0 + movdq\a (\i * 0x20 + 0x10)(SRC), MSG1 + movdqa MSG0, T0 + movdqa MSG1, T1 + crypt\i T0, T1 + movdq\a T0, (\i * 0x20 + 0x00)(DST) + movdq\a T1, (\i * 0x20 + 0x10)(DST) + + update\i + + sub $0x20, LEN + cmp $0x20, LEN + jl .Lenc_out_\i +.endm + +.macro decrypt_block a i + movdq\a (\i * 0x20 + 0x00)(SRC), MSG0 + movdq\a (\i * 0x20 + 0x10)(SRC), MSG1 + crypt\i MSG0, MSG1 + movdq\a MSG0, (\i * 0x20 + 0x00)(DST) + movdq\a MSG1, (\i * 0x20 + 0x10)(DST) + + update\i + + sub $0x20, LEN + cmp $0x20, LEN + jl .Ldec_out_\i +.endm + +/* + * void crypto_aegis128l_aesni_enc(void *state, unsigned int length, + * const void *src, void *dst); + */ +ENTRY(crypto_aegis128l_aesni_enc) + FRAME_BEGIN + + cmp $0x20, LEN + jb .Lenc_out + + state_load + + mov SRC, %r8 + or DST, %r8 + and $0xf, %r8 
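+	/*
+	 * %r8 = (SRC | DST) & 15 is non-zero iff either pointer is not
+	 * 16-byte aligned; one test selects the movdqa or movdqu loop.
+	 */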
+	jnz .Lenc_u_loop
+
+.align 8
+.Lenc_a_loop:
+	encrypt_block a 0
+	encrypt_block a 1
+	encrypt_block a 2
+	encrypt_block a 3
+	encrypt_block a 4
+	encrypt_block a 5
+	encrypt_block a 6
+	encrypt_block a 7
+
+	add $0x100, SRC
+	add $0x100, DST
+	jmp .Lenc_a_loop
+
+.align 8
+.Lenc_u_loop:
+	encrypt_block u 0
+	encrypt_block u 1
+	encrypt_block u 2
+	encrypt_block u 3
+	encrypt_block u 4
+	encrypt_block u 5
+	encrypt_block u 6
+	encrypt_block u 7
+
+	add $0x100, SRC
+	add $0x100, DST
+	jmp .Lenc_u_loop
+
+.Lenc_out_0:
+	state_store0
+	FRAME_END
+	ret
+
+.Lenc_out_1:
+	state_store1
+	FRAME_END
+	ret
+
+.Lenc_out_2:
+	state_store2
+	FRAME_END
+	ret
+
+.Lenc_out_3:
+	state_store3
+	FRAME_END
+	ret
+
+.Lenc_out_4:
+	state_store4
+	FRAME_END
+	ret
+
+.Lenc_out_5:
+	state_store5
+	FRAME_END
+	ret
+
+.Lenc_out_6:
+	state_store6
+	FRAME_END
+	ret
+
+.Lenc_out_7:
+	state_store7
+	FRAME_END
+	ret
+
+.Lenc_out:
+	FRAME_END
+	ret
+ENDPROC(crypto_aegis128l_aesni_enc)
+
+/*
+ * void crypto_aegis128l_aesni_enc_tail(void *state, unsigned int length,
+ *                                      const void *src, void *dst);
+ */
+ENTRY(crypto_aegis128l_aesni_enc_tail)
+	FRAME_BEGIN
+
+	state_load
+
+	/* encrypt message: */
+	call __load_partial
+
+	movdqa MSG0, T0
+	movdqa MSG1, T1
+	crypt0 T0, T1
+
+	call __store_partial
+
+	update0
+
+	state_store0
+
+	FRAME_END
+	ret
+ENDPROC(crypto_aegis128l_aesni_enc_tail)
+
+/*
+ * void crypto_aegis128l_aesni_dec(void *state, unsigned int length,
+ *                                 const void *src, void *dst);
+ */
+ENTRY(crypto_aegis128l_aesni_dec)
+	FRAME_BEGIN
+
+	cmp $0x20, LEN
+	jb .Ldec_out
+
+	state_load
+
+	mov SRC, %r8
+	or DST, %r8
+	and $0xF, %r8
+	jnz .Ldec_u_loop
+
+.align 8
+.Ldec_a_loop:
+	decrypt_block a 0
+	decrypt_block a 1
+	decrypt_block a 2
+	decrypt_block a 3
+	decrypt_block a 4
+	decrypt_block a 5
+	decrypt_block a 6
+	decrypt_block a 7
+
+	add $0x100, SRC
+	add $0x100, DST
+	jmp .Ldec_a_loop
+
+.align 8
+.Ldec_u_loop:
+	decrypt_block u 0
+	decrypt_block u 1
+	decrypt_block u 2
+	decrypt_block u 3
+	decrypt_block u 4
+	decrypt_block u 5
+	decrypt_block u 6
+	decrypt_block u 7
+
+	add $0x100, SRC
+	add $0x100, DST
+	jmp .Ldec_u_loop
+
+.Ldec_out_0:
+	state_store0
+	FRAME_END
+	ret
+
+.Ldec_out_1:
+	state_store1
+	FRAME_END
+	ret
+
+.Ldec_out_2:
+	state_store2
+	FRAME_END
+	ret
+
+.Ldec_out_3:
+	state_store3
+	FRAME_END
+	ret
+
+.Ldec_out_4:
+	state_store4
+	FRAME_END
+	ret
+
+.Ldec_out_5:
+	state_store5
+	FRAME_END
+	ret
+
+.Ldec_out_6:
+	state_store6
+	FRAME_END
+	ret
+
+.Ldec_out_7:
+	state_store7
+	FRAME_END
+	ret
+
+.Ldec_out:
+	FRAME_END
+	ret
+ENDPROC(crypto_aegis128l_aesni_dec)
+
+/*
+ * void crypto_aegis128l_aesni_dec_tail(void *state, unsigned int length,
+ *                                      const void *src, void *dst);
+ */
+ENTRY(crypto_aegis128l_aesni_dec_tail)
+	FRAME_BEGIN
+
+	state_load
+
+	/* decrypt message: */
+	call __load_partial
+
+	crypt0 MSG0, MSG1
+
+	movdqa MSG0, T0
+	movdqa MSG1, T1
+	call __store_partial
+
+	/* mask with byte count: */
+	movq LEN, T0
+	punpcklbw T0, T0
+	punpcklbw T0, T0
+	punpcklbw T0, T0
+	punpcklbw T0, T0
+	movdqa T0, T1
+	movdqa .Laegis128l_counter0, T2
+	movdqa .Laegis128l_counter1, T3
+	pcmpgtb T2, T0
+	pcmpgtb T3, T1
+	pand T0, MSG0
+	pand T1, MSG1
+
+	update0
+
+	state_store0
+
+	FRAME_END
+	ret
+ENDPROC(crypto_aegis128l_aesni_dec_tail)
+
+/*
+ * void crypto_aegis128l_aesni_final(void *state, void *tag_xor,
+ *                                   u64 assoclen, u64 cryptlen);
+ */
+ENTRY(crypto_aegis128l_aesni_final)
+	FRAME_BEGIN
+
+	state_load
+
+	/* prepare length block: */
+	movq %rdx, MSG0
+	movq %rcx, T0
+	pslldq $8, T0
+	pxor T0, MSG0
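+	/*
+	 * MSG0 now holds assoclen in its low qword and cryptlen in its
+	 * high qword; the 64-bit shift below turns both byte counts
+	 * into bit counts (x << 3 == x * 8) in one instruction.
+	 */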
+ psllq $3, MSG0 /* multiply by 8 (to get bit count) */ + + pxor STATE2, MSG0 + movdqa MSG0, MSG1 + + /* update state: */ + update0 + update1 + update2 + update3 + update4 + update5 + update6 + + /* xor tag: */ + movdqu (%rsi), T0 + + pxor STATE1, T0 + pxor STATE2, T0 + pxor STATE3, T0 + pxor STATE4, T0 + pxor STATE5, T0 + pxor STATE6, T0 + pxor STATE7, T0 + + movdqu T0, (%rsi) + + FRAME_END + ret +ENDPROC(crypto_aegis128l_aesni_final) diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c new file mode 100644 index 000000000000..876e4866e633 --- /dev/null +++ b/arch/x86/crypto/aegis128l-aesni-glue.c @@ -0,0 +1,407 @@ +/* + * The AEGIS-128L Authenticated-Encryption Algorithm + * Glue for AES-NI + SSE2 implementation + * + * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com> + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include <crypto/cryptd.h> +#include <crypto/internal/aead.h> +#include <crypto/internal/skcipher.h> +#include <crypto/scatterwalk.h> +#include <linux/module.h> +#include <asm/fpu/api.h> +#include <asm/cpu_device_id.h> + +#define AEGIS128L_BLOCK_ALIGN 16 +#define AEGIS128L_BLOCK_SIZE 32 +#define AEGIS128L_NONCE_SIZE 16 +#define AEGIS128L_STATE_BLOCKS 8 +#define AEGIS128L_KEY_SIZE 16 +#define AEGIS128L_MIN_AUTH_SIZE 8 +#define AEGIS128L_MAX_AUTH_SIZE 16 + +asmlinkage void crypto_aegis128l_aesni_init(void *state, void *key, void *iv); + +asmlinkage void crypto_aegis128l_aesni_ad( + void *state, unsigned int length, const void *data); + +asmlinkage void crypto_aegis128l_aesni_enc( + void *state, unsigned int length, const void *src, void *dst); + +asmlinkage void crypto_aegis128l_aesni_dec( + void *state, unsigned int length, const void *src, void *dst); + +asmlinkage void crypto_aegis128l_aesni_enc_tail( + void *state, unsigned int length, const void *src, void *dst); + +asmlinkage void crypto_aegis128l_aesni_dec_tail( + void *state, unsigned int length, const void *src, void *dst); + +asmlinkage void crypto_aegis128l_aesni_final( + void *state, void *tag_xor, unsigned int cryptlen, + unsigned int assoclen); + +struct aegis_block { + u8 bytes[AEGIS128L_BLOCK_SIZE] __aligned(AEGIS128L_BLOCK_ALIGN); +}; + +struct aegis_state { + struct aegis_block blocks[AEGIS128L_STATE_BLOCKS]; +}; + +struct aegis_ctx { + struct aegis_block key; +}; + +struct aegis_crypt_ops { + int (*skcipher_walk_init)(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); + + void (*crypt_blocks)(void *state, unsigned int length, const void *src, + void *dst); + void (*crypt_tail)(void *state, unsigned int length, const void *src, + void *dst); +}; + +static void crypto_aegis128l_aesni_process_ad( + struct aegis_state *state, struct scatterlist *sg_src, + unsigned int assoclen) +{ + struct scatter_walk walk; + struct aegis_block buf; + unsigned int pos = 0; + + scatterwalk_start(&walk, sg_src); + while (assoclen != 0) { + unsigned int size = scatterwalk_clamp(&walk, assoclen); + unsigned int left = size; + void *mapped = scatterwalk_map(&walk); + const u8 *src = (const u8 *)mapped; + + if (pos + size >= AEGIS128L_BLOCK_SIZE) { + if (pos > 0) { + unsigned int fill = AEGIS128L_BLOCK_SIZE - pos; + memcpy(buf.bytes + pos, src, fill); + crypto_aegis128l_aesni_ad(state, + 
AEGIS128L_BLOCK_SIZE, + buf.bytes); + pos = 0; + left -= fill; + src += fill; + } + + crypto_aegis128l_aesni_ad(state, left, src); + + src += left & ~(AEGIS128L_BLOCK_SIZE - 1); + left &= AEGIS128L_BLOCK_SIZE - 1; + } + + memcpy(buf.bytes + pos, src, left); + pos += left; + assoclen -= size; + + scatterwalk_unmap(mapped); + scatterwalk_advance(&walk, size); + scatterwalk_done(&walk, 0, assoclen); + } + + if (pos > 0) { + memset(buf.bytes + pos, 0, AEGIS128L_BLOCK_SIZE - pos); + crypto_aegis128l_aesni_ad(state, AEGIS128L_BLOCK_SIZE, buf.bytes); + } +} + +static void crypto_aegis128l_aesni_process_crypt( + struct aegis_state *state, struct aead_request *req, + const struct aegis_crypt_ops *ops) +{ + struct skcipher_walk walk; + u8 *src, *dst; + unsigned int chunksize, base; + + ops->skcipher_walk_init(&walk, req, false); + + while (walk.nbytes) { + src = walk.src.virt.addr; + dst = walk.dst.virt.addr; + chunksize = walk.nbytes; + + ops->crypt_blocks(state, chunksize, src, dst); + + base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1); + src += base; + dst += base; + chunksize &= AEGIS128L_BLOCK_SIZE - 1; + + if (chunksize > 0) + ops->crypt_tail(state, chunksize, src, dst); + + skcipher_walk_done(&walk, 0); + } +} + +static struct aegis_ctx *crypto_aegis128l_aesni_ctx(struct crypto_aead *aead) +{ + u8 *ctx = crypto_aead_ctx(aead); + ctx = PTR_ALIGN(ctx, __alignof__(struct aegis_ctx)); + return (void *)ctx; +} + +static int crypto_aegis128l_aesni_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(aead); + + if (keylen != AEGIS128L_KEY_SIZE) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->key.bytes, key, AEGIS128L_KEY_SIZE); + + return 0; +} + +static int crypto_aegis128l_aesni_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + if (authsize > AEGIS128L_MAX_AUTH_SIZE) + return -EINVAL; + if (authsize < AEGIS128L_MIN_AUTH_SIZE) + return -EINVAL; + return 0; +} + +static void crypto_aegis128l_aesni_crypt(struct aead_request *req, + struct aegis_block *tag_xor, + unsigned int cryptlen, + const struct aegis_crypt_ops *ops) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm); + struct aegis_state state; + + kernel_fpu_begin(); + + crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv); + crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen); + crypto_aegis128l_aesni_process_crypt(&state, req, ops); + crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen); + + kernel_fpu_end(); +} + +static int crypto_aegis128l_aesni_encrypt(struct aead_request *req) +{ + static const struct aegis_crypt_ops OPS = { + .skcipher_walk_init = skcipher_walk_aead_encrypt, + .crypt_blocks = crypto_aegis128l_aesni_enc, + .crypt_tail = crypto_aegis128l_aesni_enc_tail, + }; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aegis_block tag = {}; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen; + + crypto_aegis128l_aesni_crypt(req, &tag, cryptlen, &OPS); + + scatterwalk_map_and_copy(tag.bytes, req->dst, + req->assoclen + cryptlen, authsize, 1); + return 0; +} + +static int crypto_aegis128l_aesni_decrypt(struct aead_request *req) +{ + static const struct aegis_block zeros = {}; + + static const struct aegis_crypt_ops OPS = { + .skcipher_walk_init = skcipher_walk_aead_decrypt, + .crypt_blocks = crypto_aegis128l_aesni_dec, + .crypt_tail = 
crypto_aegis128l_aesni_dec_tail, + }; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aegis_block tag; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen - authsize; + + scatterwalk_map_and_copy(tag.bytes, req->src, + req->assoclen + cryptlen, authsize, 0); + + crypto_aegis128l_aesni_crypt(req, &tag, cryptlen, &OPS); + + return crypto_memneq(tag.bytes, zeros.bytes, authsize) ? -EBADMSG : 0; +} + +static int crypto_aegis128l_aesni_init_tfm(struct crypto_aead *aead) +{ + return 0; +} + +static void crypto_aegis128l_aesni_exit_tfm(struct crypto_aead *aead) +{ +} + +static int cryptd_aegis128l_aesni_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + return crypto_aead_setkey(&cryptd_tfm->base, key, keylen); +} + +static int cryptd_aegis128l_aesni_setauthsize(struct crypto_aead *aead, + unsigned int authsize) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + return crypto_aead_setauthsize(&cryptd_tfm->base, authsize); +} + +static int cryptd_aegis128l_aesni_encrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + aead = &cryptd_tfm->base; + if (irq_fpu_usable() && (!in_atomic() || + !cryptd_aead_queued(cryptd_tfm))) + aead = cryptd_aead_child(cryptd_tfm); + + aead_request_set_tfm(req, aead); + + return crypto_aead_encrypt(req); +} + +static int cryptd_aegis128l_aesni_decrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + aead = &cryptd_tfm->base; + if (irq_fpu_usable() && (!in_atomic() || + !cryptd_aead_queued(cryptd_tfm))) + aead = cryptd_aead_child(cryptd_tfm); + + aead_request_set_tfm(req, aead); + + return crypto_aead_decrypt(req); +} + +static int cryptd_aegis128l_aesni_init_tfm(struct crypto_aead *aead) +{ + struct cryptd_aead *cryptd_tfm; + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + + cryptd_tfm = cryptd_alloc_aead("__aegis128l-aesni", CRYPTO_ALG_INTERNAL, + CRYPTO_ALG_INTERNAL); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); + + *ctx = cryptd_tfm; + crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); + return 0; +} + +static void cryptd_aegis128l_aesni_exit_tfm(struct crypto_aead *aead) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + + cryptd_free_aead(*ctx); +} + +static struct aead_alg crypto_aegis128l_aesni_alg[] = { + { + .setkey = crypto_aegis128l_aesni_setkey, + .setauthsize = crypto_aegis128l_aesni_setauthsize, + .encrypt = crypto_aegis128l_aesni_encrypt, + .decrypt = crypto_aegis128l_aesni_decrypt, + .init = crypto_aegis128l_aesni_init_tfm, + .exit = crypto_aegis128l_aesni_exit_tfm, + + .ivsize = AEGIS128L_NONCE_SIZE, + .maxauthsize = AEGIS128L_MAX_AUTH_SIZE, + .chunksize = AEGIS128L_BLOCK_SIZE, + + .base = { + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct aegis_ctx) + + __alignof__(struct aegis_ctx), + .cra_alignmask = 0, + + .cra_name = "__aegis128l", + .cra_driver_name = "__aegis128l-aesni", + + .cra_module = THIS_MODULE, + } + }, { + .setkey = cryptd_aegis128l_aesni_setkey, + .setauthsize = cryptd_aegis128l_aesni_setauthsize, + .encrypt = cryptd_aegis128l_aesni_encrypt, + .decrypt = 
cryptd_aegis128l_aesni_decrypt,
+		.init = cryptd_aegis128l_aesni_init_tfm,
+		.exit = cryptd_aegis128l_aesni_exit_tfm,
+
+		.ivsize = AEGIS128L_NONCE_SIZE,
+		.maxauthsize = AEGIS128L_MAX_AUTH_SIZE,
+		.chunksize = AEGIS128L_BLOCK_SIZE,
+
+		.base = {
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = 1,
+			.cra_ctxsize = sizeof(struct cryptd_aead *),
+			.cra_alignmask = 0,
+
+			.cra_priority = 400,
+
+			.cra_name = "aegis128l",
+			.cra_driver_name = "aegis128l-aesni",
+
+			.cra_module = THIS_MODULE,
+		}
+	}
+};
+
+static const struct x86_cpu_id aesni_cpu_id[] = {
+	X86_FEATURE_MATCH(X86_FEATURE_AES),
+	X86_FEATURE_MATCH(X86_FEATURE_XMM2),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
+
+static int __init crypto_aegis128l_aesni_module_init(void)
+{
+	if (!x86_match_cpu(aesni_cpu_id))
+		return -ENODEV;
+
+	return crypto_register_aeads(crypto_aegis128l_aesni_alg,
+				     ARRAY_SIZE(crypto_aegis128l_aesni_alg));
+}
+
+static void __exit crypto_aegis128l_aesni_module_exit(void)
+{
+	crypto_unregister_aeads(crypto_aegis128l_aesni_alg,
+				ARRAY_SIZE(crypto_aegis128l_aesni_alg));
+}
+
+module_init(crypto_aegis128l_aesni_module_init);
+module_exit(crypto_aegis128l_aesni_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
+MODULE_DESCRIPTION("AEGIS-128L AEAD algorithm -- AESNI+SSE2 implementation");
+MODULE_ALIAS_CRYPTO("aegis128l");
+MODULE_ALIAS_CRYPTO("aegis128l-aesni");
diff --git a/arch/x86/crypto/aegis256-aesni-asm.S b/arch/x86/crypto/aegis256-aesni-asm.S
new file mode 100644
index 000000000000..1d977d515bf9
--- /dev/null
+++ b/arch/x86/crypto/aegis256-aesni-asm.S
@@ -0,0 +1,702 @@
+/*
+ * AES-NI + SSE2 implementation of AEGIS-256
+ *
+ * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
+ * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
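All four cryptd wrappers in the glue code above share one dispatch rule. A condensed sketch of that rule, mirroring cryptd_aegis128l_aesni_encrypt() (the helper name is hypothetical; the calls are the ones the patch itself uses):

static int simd_aead_dispatch(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	struct cryptd_aead *cryptd_tfm = *ctx;

	/* default: run asynchronously through the cryptd workqueue */
	aead = &cryptd_tfm->base;

	/*
	 * Fast path: take the SIMD inner algorithm directly when the
	 * FPU is usable, unless we are in atomic context with requests
	 * already queued (running ahead would reorder completions).
	 */
	if (irq_fpu_usable() && (!in_atomic() ||
				 !cryptd_aead_queued(cryptd_tfm)))
		aead = cryptd_aead_child(cryptd_tfm);

	aead_request_set_tfm(req, aead);
	return crypto_aead_encrypt(req);
}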
+ */ + +#include <linux/linkage.h> +#include <asm/frame.h> + +#define STATE0 %xmm0 +#define STATE1 %xmm1 +#define STATE2 %xmm2 +#define STATE3 %xmm3 +#define STATE4 %xmm4 +#define STATE5 %xmm5 +#define MSG %xmm6 +#define T0 %xmm7 +#define T1 %xmm8 +#define T2 %xmm9 +#define T3 %xmm10 + +#define STATEP %rdi +#define LEN %rsi +#define SRC %rdx +#define DST %rcx + +.section .rodata.cst16.aegis256_const, "aM", @progbits, 32 +.align 16 +.Laegis256_const_0: + .byte 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d + .byte 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62 +.Laegis256_const_1: + .byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1 + .byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd + +.section .rodata.cst16.aegis256_counter, "aM", @progbits, 16 +.align 16 +.Laegis256_counter: + .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 + .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f + +.text + +/* + * __load_partial: internal ABI + * input: + * LEN - bytes + * SRC - src + * output: + * MSG - message block + * changed: + * T0 + * %r8 + * %r9 + */ +__load_partial: + xor %r9, %r9 + pxor MSG, MSG + + mov LEN, %r8 + and $0x1, %r8 + jz .Lld_partial_1 + + mov LEN, %r8 + and $0x1E, %r8 + add SRC, %r8 + mov (%r8), %r9b + +.Lld_partial_1: + mov LEN, %r8 + and $0x2, %r8 + jz .Lld_partial_2 + + mov LEN, %r8 + and $0x1C, %r8 + add SRC, %r8 + shl $0x10, %r9 + mov (%r8), %r9w + +.Lld_partial_2: + mov LEN, %r8 + and $0x4, %r8 + jz .Lld_partial_4 + + mov LEN, %r8 + and $0x18, %r8 + add SRC, %r8 + shl $32, %r9 + mov (%r8), %r8d + xor %r8, %r9 + +.Lld_partial_4: + movq %r9, MSG + + mov LEN, %r8 + and $0x8, %r8 + jz .Lld_partial_8 + + mov LEN, %r8 + and $0x10, %r8 + add SRC, %r8 + pslldq $8, MSG + movq (%r8), T0 + pxor T0, MSG + +.Lld_partial_8: + ret +ENDPROC(__load_partial) + +/* + * __store_partial: internal ABI + * input: + * LEN - bytes + * DST - dst + * output: + * T0 - message block + * changed: + * %r8 + * %r9 + * %r10 + */ +__store_partial: + mov LEN, %r8 + mov DST, %r9 + + movq T0, %r10 + + cmp $8, %r8 + jl .Lst_partial_8 + + mov %r10, (%r9) + psrldq $8, T0 + movq T0, %r10 + + sub $8, %r8 + add $8, %r9 + +.Lst_partial_8: + cmp $4, %r8 + jl .Lst_partial_4 + + mov %r10d, (%r9) + shr $32, %r10 + + sub $4, %r8 + add $4, %r9 + +.Lst_partial_4: + cmp $2, %r8 + jl .Lst_partial_2 + + mov %r10w, (%r9) + shr $0x10, %r10 + + sub $2, %r8 + add $2, %r9 + +.Lst_partial_2: + cmp $1, %r8 + jl .Lst_partial_1 + + mov %r10b, (%r9) + +.Lst_partial_1: + ret +ENDPROC(__store_partial) + +.macro update + movdqa STATE5, T0 + aesenc STATE0, STATE5 + aesenc STATE1, STATE0 + aesenc STATE2, STATE1 + aesenc STATE3, STATE2 + aesenc STATE4, STATE3 + aesenc T0, STATE4 +.endm + +.macro update0 m + update + pxor \m, STATE5 +.endm + +.macro update1 m + update + pxor \m, STATE4 +.endm + +.macro update2 m + update + pxor \m, STATE3 +.endm + +.macro update3 m + update + pxor \m, STATE2 +.endm + +.macro update4 m + update + pxor \m, STATE1 +.endm + +.macro update5 m + update + pxor \m, STATE0 +.endm + +.macro state_load + movdqu 0x00(STATEP), STATE0 + movdqu 0x10(STATEP), STATE1 + movdqu 0x20(STATEP), STATE2 + movdqu 0x30(STATEP), STATE3 + movdqu 0x40(STATEP), STATE4 + movdqu 0x50(STATEP), STATE5 +.endm + +.macro state_store s0 s1 s2 s3 s4 s5 + movdqu \s5, 0x00(STATEP) + movdqu \s0, 0x10(STATEP) + movdqu \s1, 0x20(STATEP) + movdqu \s2, 0x30(STATEP) + movdqu \s3, 0x40(STATEP) + movdqu \s4, 0x50(STATEP) +.endm + +.macro state_store0 + state_store STATE0 STATE1 STATE2 STATE3 STATE4 STATE5 +.endm + +.macro state_store1 + state_store 
STATE5 STATE0 STATE1 STATE2 STATE3 STATE4
+.endm
+
+.macro state_store2
+	state_store STATE4 STATE5 STATE0 STATE1 STATE2 STATE3
+.endm
+
+.macro state_store3
+	state_store STATE3 STATE4 STATE5 STATE0 STATE1 STATE2
+.endm
+
+.macro state_store4
+	state_store STATE2 STATE3 STATE4 STATE5 STATE0 STATE1
+.endm
+
+.macro state_store5
+	state_store STATE1 STATE2 STATE3 STATE4 STATE5 STATE0
+.endm
+
+/*
+ * void crypto_aegis256_aesni_init(void *state, const void *key, const void *iv);
+ */
+ENTRY(crypto_aegis256_aesni_init)
+	FRAME_BEGIN
+
+	/* load key: */
+	movdqa 0x00(%rsi), MSG
+	movdqa 0x10(%rsi), T1
+	movdqa MSG, STATE4
+	movdqa T1, STATE5
+
+	/* load IV: */
+	movdqu 0x00(%rdx), T2
+	movdqu 0x10(%rdx), T3
+	pxor MSG, T2
+	pxor T1, T3
+	movdqa T2, STATE0
+	movdqa T3, STATE1
+
+	/* load the constants: */
+	movdqa .Laegis256_const_0, STATE3
+	movdqa .Laegis256_const_1, STATE2
+	pxor STATE3, STATE4
+	pxor STATE2, STATE5
+
+	/* update 16 times with IV and KEY: */
+	update0 MSG
+	update1 T1
+	update2 T2
+	update3 T3
+	update4 MSG
+	update5 T1
+	update0 T2
+	update1 T3
+	update2 MSG
+	update3 T1
+	update4 T2
+	update5 T3
+	update0 MSG
+	update1 T1
+	update2 T2
+	update3 T3
+
+	state_store3
+
+	FRAME_END
+	ret
+ENDPROC(crypto_aegis256_aesni_init)
+
+.macro ad_block a i
+	movdq\a (\i * 0x10)(SRC), MSG
+	update\i MSG
+	sub $0x10, LEN
+	cmp $0x10, LEN
+	jl .Lad_out_\i
+.endm
+
+/*
+ * void crypto_aegis256_aesni_ad(void *state, unsigned int length,
+ *                               const void *data);
+ */
+ENTRY(crypto_aegis256_aesni_ad)
+	FRAME_BEGIN
+
+	cmp $0x10, LEN
+	jb .Lad_out
+
+	state_load
+
+	mov SRC, %r8
+	and $0xf, %r8
+	jnz .Lad_u_loop
+
+.align 8
+.Lad_a_loop:
+	ad_block a 0
+	ad_block a 1
+	ad_block a 2
+	ad_block a 3
+	ad_block a 4
+	ad_block a 5
+
+	add $0x60, SRC
+	jmp .Lad_a_loop
+
+.align 8
+.Lad_u_loop:
+	ad_block u 0
+	ad_block u 1
+	ad_block u 2
+	ad_block u 3
+	ad_block u 4
+	ad_block u 5
+
+	add $0x60, SRC
+	jmp .Lad_u_loop
+
+.Lad_out_0:
+	state_store0
+	FRAME_END
+	ret
+
+.Lad_out_1:
+	state_store1
+	FRAME_END
+	ret
+
+.Lad_out_2:
+	state_store2
+	FRAME_END
+	ret
+
+.Lad_out_3:
+	state_store3
+	FRAME_END
+	ret
+
+.Lad_out_4:
+	state_store4
+	FRAME_END
+	ret
+
+.Lad_out_5:
+	state_store5
+	FRAME_END
+	ret
+
+.Lad_out:
+	FRAME_END
+	ret
+ENDPROC(crypto_aegis256_aesni_ad)
+
+.macro crypt m s0 s1 s2 s3 s4 s5
+	pxor \s1, \m
+	pxor \s4, \m
+	pxor \s5, \m
+	movdqa \s2, T3
+	pand \s3, T3
+	pxor T3, \m
+.endm
+
+.macro crypt0 m
+	crypt \m STATE0 STATE1 STATE2 STATE3 STATE4 STATE5
+.endm
+
+.macro crypt1 m
+	crypt \m STATE5 STATE0 STATE1 STATE2 STATE3 STATE4
+.endm
+
+.macro crypt2 m
+	crypt \m STATE4 STATE5 STATE0 STATE1 STATE2 STATE3
+.endm
+
+.macro crypt3 m
+	crypt \m STATE3 STATE4 STATE5 STATE0 STATE1 STATE2
+.endm
+
+.macro crypt4 m
+	crypt \m STATE2 STATE3 STATE4 STATE5 STATE0 STATE1
+.endm
+
+.macro crypt5 m
+	crypt \m STATE1 STATE2 STATE3 STATE4 STATE5 STATE0
+.endm
+
+.macro encrypt_block a i
+	movdq\a (\i * 0x10)(SRC), MSG
+	movdqa MSG, T0
+	crypt\i T0
+	movdq\a T0, (\i * 0x10)(DST)
+
+	update\i MSG
+
+	sub $0x10, LEN
+	cmp $0x10, LEN
+	jl .Lenc_out_\i
+.endm
+
+.macro decrypt_block a i
+	movdq\a (\i * 0x10)(SRC), MSG
+	crypt\i MSG
+	movdq\a MSG, (\i * 0x10)(DST)
+
+	update\i MSG
+
+	sub $0x10, LEN
+	cmp $0x10, LEN
+	jl .Ldec_out_\i
+.endm
+
+/*
+ * void crypto_aegis256_aesni_enc(void *state, unsigned int length,
+ *                                const void *src, void *dst);
+ */
+ENTRY(crypto_aegis256_aesni_enc)
+	FRAME_BEGIN
+
+	cmp $0x10, LEN
+	jb .Lenc_out
+
+	state_load
+
+	mov SRC, %r8
+	or DST, %r8
+	and $0xf, %r8
+	jnz .Lenc_u_loop
+
+.align 8
+.Lenc_a_loop:
+	encrypt_block a 0
+	encrypt_block a 1
+	encrypt_block a 2
+	encrypt_block a 3
+	encrypt_block a 4
+	encrypt_block a 5
+
+	add $0x60, SRC
+	add $0x60, DST
+	jmp .Lenc_a_loop
+
+.align 8
+.Lenc_u_loop:
+	encrypt_block u 0
+	encrypt_block u 1
+	encrypt_block u 2
+	encrypt_block u 3
+	encrypt_block u 4
+	encrypt_block u 5
+
+	add $0x60, SRC
+	add $0x60, DST
+	jmp .Lenc_u_loop
+
+.Lenc_out_0:
+	state_store0
+	FRAME_END
+	ret
+
+.Lenc_out_1:
+	state_store1
+	FRAME_END
+	ret
+
+.Lenc_out_2:
+	state_store2
+	FRAME_END
+	ret
+
+.Lenc_out_3:
+	state_store3
+	FRAME_END
+	ret
+
+.Lenc_out_4:
+	state_store4
+	FRAME_END
+	ret
+
+.Lenc_out_5:
+	state_store5
+	FRAME_END
+	ret
+
+.Lenc_out:
+	FRAME_END
+	ret
+ENDPROC(crypto_aegis256_aesni_enc)
+
+/*
+ * void crypto_aegis256_aesni_enc_tail(void *state, unsigned int length,
+ *                                     const void *src, void *dst);
+ */
+ENTRY(crypto_aegis256_aesni_enc_tail)
+	FRAME_BEGIN
+
+	state_load
+
+	/* encrypt message: */
+	call __load_partial
+
+	movdqa MSG, T0
+	crypt0 T0
+
+	call __store_partial
+
+	update0 MSG
+
+	state_store0
+
+	FRAME_END
+	ret
+ENDPROC(crypto_aegis256_aesni_enc_tail)
+
+/*
+ * void crypto_aegis256_aesni_dec(void *state, unsigned int length,
+ *                                const void *src, void *dst);
+ */
+ENTRY(crypto_aegis256_aesni_dec)
+	FRAME_BEGIN
+
+	cmp $0x10, LEN
+	jb .Ldec_out
+
+	state_load
+
+	mov SRC, %r8
+	or DST, %r8
+	and $0xF, %r8
+	jnz .Ldec_u_loop
+
+.align 8
+.Ldec_a_loop:
+	decrypt_block a 0
+	decrypt_block a 1
+	decrypt_block a 2
+	decrypt_block a 3
+	decrypt_block a 4
+	decrypt_block a 5
+
+	add $0x60, SRC
+	add $0x60, DST
+	jmp .Ldec_a_loop
+
+.align 8
+.Ldec_u_loop:
+	decrypt_block u 0
+	decrypt_block u 1
+	decrypt_block u 2
+	decrypt_block u 3
+	decrypt_block u 4
+	decrypt_block u 5
+
+	add $0x60, SRC
+	add $0x60, DST
+	jmp .Ldec_u_loop
+
+.Ldec_out_0:
+	state_store0
+	FRAME_END
+	ret
+
+.Ldec_out_1:
+	state_store1
+	FRAME_END
+	ret
+
+.Ldec_out_2:
+	state_store2
+	FRAME_END
+	ret
+
+.Ldec_out_3:
+	state_store3
+	FRAME_END
+	ret
+
+.Ldec_out_4:
+	state_store4
+	FRAME_END
+	ret
+
+.Ldec_out_5:
+	state_store5
+	FRAME_END
+	ret
+
+.Ldec_out:
+	FRAME_END
+	ret
+ENDPROC(crypto_aegis256_aesni_dec)
+
+/*
+ * void crypto_aegis256_aesni_dec_tail(void *state, unsigned int length,
+ *                                     const void *src, void *dst);
+ */
+ENTRY(crypto_aegis256_aesni_dec_tail)
+	FRAME_BEGIN
+
+	state_load
+
+	/* decrypt message: */
+	call __load_partial
+
+	crypt0 MSG
+
+	movdqa MSG, T0
+	call __store_partial
+
+	/* mask with byte count: */
+	movq LEN, T0
+	punpcklbw T0, T0
+	punpcklbw T0, T0
+	punpcklbw T0, T0
+	punpcklbw T0, T0
+	movdqa .Laegis256_counter, T1
+	pcmpgtb T1, T0
+	pand T0, MSG
+
+	update0 MSG
+
+	state_store0
+
+	FRAME_END
+	ret
+ENDPROC(crypto_aegis256_aesni_dec_tail)
+
+/*
+ * void crypto_aegis256_aesni_final(void *state, void *tag_xor,
+ *                                  u64 assoclen, u64 cryptlen);
+ */
+ENTRY(crypto_aegis256_aesni_final)
+	FRAME_BEGIN
+
+	state_load
+
+	/* prepare length block: */
+	movq %rdx, MSG
+	movq %rcx, T0
+	pslldq $8, T0
+	pxor T0, MSG
+	psllq $3, MSG /* multiply by 8 (to get bit count) */
+
+	pxor STATE3, MSG
+
+	/* update state: */
+	update0 MSG
+	update1 MSG
+	update2 MSG
+	update3 MSG
+	update4 MSG
+	update5 MSG
+	update0 MSG
+
+	/* xor tag: */
+	movdqu (%rsi), MSG
+
+	pxor STATE0, MSG
+	pxor STATE1, MSG
+	pxor STATE2, MSG
+	pxor STATE3, MSG
+	pxor STATE4, MSG
+	pxor STATE5, MSG
+
+	movdqu MSG, (%rsi)
+
+	FRAME_END
+	ret
+ENDPROC(crypto_aegis256_aesni_final)
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c
b/arch/x86/crypto/aegis256-aesni-glue.c new file mode 100644 index 000000000000..2b5dd3af8f4d --- /dev/null +++ b/arch/x86/crypto/aegis256-aesni-glue.c @@ -0,0 +1,407 @@ +/* + * The AEGIS-256 Authenticated-Encryption Algorithm + * Glue for AES-NI + SSE2 implementation + * + * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com> + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include <crypto/cryptd.h> +#include <crypto/internal/aead.h> +#include <crypto/internal/skcipher.h> +#include <crypto/scatterwalk.h> +#include <linux/module.h> +#include <asm/fpu/api.h> +#include <asm/cpu_device_id.h> + +#define AEGIS256_BLOCK_ALIGN 16 +#define AEGIS256_BLOCK_SIZE 16 +#define AEGIS256_NONCE_SIZE 32 +#define AEGIS256_STATE_BLOCKS 6 +#define AEGIS256_KEY_SIZE 32 +#define AEGIS256_MIN_AUTH_SIZE 8 +#define AEGIS256_MAX_AUTH_SIZE 16 + +asmlinkage void crypto_aegis256_aesni_init(void *state, void *key, void *iv); + +asmlinkage void crypto_aegis256_aesni_ad( + void *state, unsigned int length, const void *data); + +asmlinkage void crypto_aegis256_aesni_enc( + void *state, unsigned int length, const void *src, void *dst); + +asmlinkage void crypto_aegis256_aesni_dec( + void *state, unsigned int length, const void *src, void *dst); + +asmlinkage void crypto_aegis256_aesni_enc_tail( + void *state, unsigned int length, const void *src, void *dst); + +asmlinkage void crypto_aegis256_aesni_dec_tail( + void *state, unsigned int length, const void *src, void *dst); + +asmlinkage void crypto_aegis256_aesni_final( + void *state, void *tag_xor, unsigned int cryptlen, + unsigned int assoclen); + +struct aegis_block { + u8 bytes[AEGIS256_BLOCK_SIZE] __aligned(AEGIS256_BLOCK_ALIGN); +}; + +struct aegis_state { + struct aegis_block blocks[AEGIS256_STATE_BLOCKS]; +}; + +struct aegis_ctx { + struct aegis_block key[AEGIS256_KEY_SIZE / AEGIS256_BLOCK_SIZE]; +}; + +struct aegis_crypt_ops { + int (*skcipher_walk_init)(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); + + void (*crypt_blocks)(void *state, unsigned int length, const void *src, + void *dst); + void (*crypt_tail)(void *state, unsigned int length, const void *src, + void *dst); +}; + +static void crypto_aegis256_aesni_process_ad( + struct aegis_state *state, struct scatterlist *sg_src, + unsigned int assoclen) +{ + struct scatter_walk walk; + struct aegis_block buf; + unsigned int pos = 0; + + scatterwalk_start(&walk, sg_src); + while (assoclen != 0) { + unsigned int size = scatterwalk_clamp(&walk, assoclen); + unsigned int left = size; + void *mapped = scatterwalk_map(&walk); + const u8 *src = (const u8 *)mapped; + + if (pos + size >= AEGIS256_BLOCK_SIZE) { + if (pos > 0) { + unsigned int fill = AEGIS256_BLOCK_SIZE - pos; + memcpy(buf.bytes + pos, src, fill); + crypto_aegis256_aesni_ad(state, + AEGIS256_BLOCK_SIZE, + buf.bytes); + pos = 0; + left -= fill; + src += fill; + } + + crypto_aegis256_aesni_ad(state, left, src); + + src += left & ~(AEGIS256_BLOCK_SIZE - 1); + left &= AEGIS256_BLOCK_SIZE - 1; + } + + memcpy(buf.bytes + pos, src, left); + pos += left; + assoclen -= size; + + scatterwalk_unmap(mapped); + scatterwalk_advance(&walk, size); + scatterwalk_done(&walk, 0, assoclen); + } + + if (pos > 0) { + memset(buf.bytes + pos, 0, AEGIS256_BLOCK_SIZE - 
pos); + crypto_aegis256_aesni_ad(state, AEGIS256_BLOCK_SIZE, buf.bytes); + } +} + +static void crypto_aegis256_aesni_process_crypt( + struct aegis_state *state, struct aead_request *req, + const struct aegis_crypt_ops *ops) +{ + struct skcipher_walk walk; + u8 *src, *dst; + unsigned int chunksize, base; + + ops->skcipher_walk_init(&walk, req, false); + + while (walk.nbytes) { + src = walk.src.virt.addr; + dst = walk.dst.virt.addr; + chunksize = walk.nbytes; + + ops->crypt_blocks(state, chunksize, src, dst); + + base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1); + src += base; + dst += base; + chunksize &= AEGIS256_BLOCK_SIZE - 1; + + if (chunksize > 0) + ops->crypt_tail(state, chunksize, src, dst); + + skcipher_walk_done(&walk, 0); + } +} + +static struct aegis_ctx *crypto_aegis256_aesni_ctx(struct crypto_aead *aead) +{ + u8 *ctx = crypto_aead_ctx(aead); + ctx = PTR_ALIGN(ctx, __alignof__(struct aegis_ctx)); + return (void *)ctx; +} + +static int crypto_aegis256_aesni_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(aead); + + if (keylen != AEGIS256_KEY_SIZE) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->key, key, AEGIS256_KEY_SIZE); + + return 0; +} + +static int crypto_aegis256_aesni_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + if (authsize > AEGIS256_MAX_AUTH_SIZE) + return -EINVAL; + if (authsize < AEGIS256_MIN_AUTH_SIZE) + return -EINVAL; + return 0; +} + +static void crypto_aegis256_aesni_crypt(struct aead_request *req, + struct aegis_block *tag_xor, + unsigned int cryptlen, + const struct aegis_crypt_ops *ops) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm); + struct aegis_state state; + + kernel_fpu_begin(); + + crypto_aegis256_aesni_init(&state, ctx->key, req->iv); + crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen); + crypto_aegis256_aesni_process_crypt(&state, req, ops); + crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen); + + kernel_fpu_end(); +} + +static int crypto_aegis256_aesni_encrypt(struct aead_request *req) +{ + static const struct aegis_crypt_ops OPS = { + .skcipher_walk_init = skcipher_walk_aead_encrypt, + .crypt_blocks = crypto_aegis256_aesni_enc, + .crypt_tail = crypto_aegis256_aesni_enc_tail, + }; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aegis_block tag = {}; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen; + + crypto_aegis256_aesni_crypt(req, &tag, cryptlen, &OPS); + + scatterwalk_map_and_copy(tag.bytes, req->dst, + req->assoclen + cryptlen, authsize, 1); + return 0; +} + +static int crypto_aegis256_aesni_decrypt(struct aead_request *req) +{ + static const struct aegis_block zeros = {}; + + static const struct aegis_crypt_ops OPS = { + .skcipher_walk_init = skcipher_walk_aead_decrypt, + .crypt_blocks = crypto_aegis256_aesni_dec, + .crypt_tail = crypto_aegis256_aesni_dec_tail, + }; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aegis_block tag; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen - authsize; + + scatterwalk_map_and_copy(tag.bytes, req->src, + req->assoclen + cryptlen, authsize, 0); + + crypto_aegis256_aesni_crypt(req, &tag, cryptlen, &OPS); + + return crypto_memneq(tag.bytes, zeros.bytes, authsize) ? 
-EBADMSG : 0; +} + +static int crypto_aegis256_aesni_init_tfm(struct crypto_aead *aead) +{ + return 0; +} + +static void crypto_aegis256_aesni_exit_tfm(struct crypto_aead *aead) +{ +} + +static int cryptd_aegis256_aesni_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + return crypto_aead_setkey(&cryptd_tfm->base, key, keylen); +} + +static int cryptd_aegis256_aesni_setauthsize(struct crypto_aead *aead, + unsigned int authsize) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + return crypto_aead_setauthsize(&cryptd_tfm->base, authsize); +} + +static int cryptd_aegis256_aesni_encrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + aead = &cryptd_tfm->base; + if (irq_fpu_usable() && (!in_atomic() || + !cryptd_aead_queued(cryptd_tfm))) + aead = cryptd_aead_child(cryptd_tfm); + + aead_request_set_tfm(req, aead); + + return crypto_aead_encrypt(req); +} + +static int cryptd_aegis256_aesni_decrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + aead = &cryptd_tfm->base; + if (irq_fpu_usable() && (!in_atomic() || + !cryptd_aead_queued(cryptd_tfm))) + aead = cryptd_aead_child(cryptd_tfm); + + aead_request_set_tfm(req, aead); + + return crypto_aead_decrypt(req); +} + +static int cryptd_aegis256_aesni_init_tfm(struct crypto_aead *aead) +{ + struct cryptd_aead *cryptd_tfm; + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + + cryptd_tfm = cryptd_alloc_aead("__aegis256-aesni", CRYPTO_ALG_INTERNAL, + CRYPTO_ALG_INTERNAL); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); + + *ctx = cryptd_tfm; + crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); + return 0; +} + +static void cryptd_aegis256_aesni_exit_tfm(struct crypto_aead *aead) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + + cryptd_free_aead(*ctx); +} + +static struct aead_alg crypto_aegis256_aesni_alg[] = { + { + .setkey = crypto_aegis256_aesni_setkey, + .setauthsize = crypto_aegis256_aesni_setauthsize, + .encrypt = crypto_aegis256_aesni_encrypt, + .decrypt = crypto_aegis256_aesni_decrypt, + .init = crypto_aegis256_aesni_init_tfm, + .exit = crypto_aegis256_aesni_exit_tfm, + + .ivsize = AEGIS256_NONCE_SIZE, + .maxauthsize = AEGIS256_MAX_AUTH_SIZE, + .chunksize = AEGIS256_BLOCK_SIZE, + + .base = { + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct aegis_ctx) + + __alignof__(struct aegis_ctx), + .cra_alignmask = 0, + + .cra_name = "__aegis256", + .cra_driver_name = "__aegis256-aesni", + + .cra_module = THIS_MODULE, + } + }, { + .setkey = cryptd_aegis256_aesni_setkey, + .setauthsize = cryptd_aegis256_aesni_setauthsize, + .encrypt = cryptd_aegis256_aesni_encrypt, + .decrypt = cryptd_aegis256_aesni_decrypt, + .init = cryptd_aegis256_aesni_init_tfm, + .exit = cryptd_aegis256_aesni_exit_tfm, + + .ivsize = AEGIS256_NONCE_SIZE, + .maxauthsize = AEGIS256_MAX_AUTH_SIZE, + .chunksize = AEGIS256_BLOCK_SIZE, + + .base = { + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct cryptd_aead *), + .cra_alignmask = 0, + + .cra_priority = 400, + + .cra_name = "aegis256", + .cra_driver_name = "aegis256-aesni", + + .cra_module = 
THIS_MODULE, + } + } +}; + +static const struct x86_cpu_id aesni_cpu_id[] = { + X86_FEATURE_MATCH(X86_FEATURE_AES), + X86_FEATURE_MATCH(X86_FEATURE_XMM2), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id); + +static int __init crypto_aegis256_aesni_module_init(void) +{ + if (!x86_match_cpu(aesni_cpu_id)) + return -ENODEV; + + return crypto_register_aeads(crypto_aegis256_aesni_alg, + ARRAY_SIZE(crypto_aegis256_aesni_alg)); +} + +static void __exit crypto_aegis256_aesni_module_exit(void) +{ + crypto_unregister_aeads(crypto_aegis256_aesni_alg, + ARRAY_SIZE(crypto_aegis256_aesni_alg)); +} + +module_init(crypto_aegis256_aesni_module_init); +module_exit(crypto_aegis256_aesni_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>"); +MODULE_DESCRIPTION("AEGIS-256 AEAD algorithm -- AESNI+SSE2 implementation"); +MODULE_ALIAS_CRYPTO("aegis256"); +MODULE_ALIAS_CRYPTO("aegis256-aesni"); diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c index 0420bab19efb..2ddbe3a1868b 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_glue.c +++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c @@ -364,5 +364,5 @@ module_exit(ghash_pclmulqdqni_mod_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("GHASH Message Digest Algorithm, " - "acclerated by PCLMULQDQ-NI"); + "accelerated by PCLMULQDQ-NI"); MODULE_ALIAS_CRYPTO("ghash"); diff --git a/arch/x86/crypto/morus1280-avx2-asm.S b/arch/x86/crypto/morus1280-avx2-asm.S new file mode 100644 index 000000000000..37d422e77931 --- /dev/null +++ b/arch/x86/crypto/morus1280-avx2-asm.S @@ -0,0 +1,621 @@ +/* + * AVX2 implementation of MORUS-1280 + * + * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com> + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
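Note how every decrypt path in these glue files verifies the tag: final() XORs the freshly computed tag into the expected tag copied out of the request, so the result is all-zero exactly when the two tags match, and the glue only has to compare against a zero block with crypto_memneq(). A standalone C sketch of that property (hypothetical helper, illustrative only):

#include <stdint.h>

static int tag_is_valid(const uint8_t *xored_tag, unsigned int authsize)
{
	uint8_t acc = 0;
	unsigned int i;

	for (i = 0; i < authsize; i++)
		acc |= xored_tag[i];	/* non-zero iff some byte differs */

	return acc == 0;	/* no data-dependent branch, constant time */
}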
+ */ + +#include <linux/linkage.h> +#include <asm/frame.h> + +#define SHUFFLE_MASK(i0, i1, i2, i3) \ + (i0 | (i1 << 2) | (i2 << 4) | (i3 << 6)) + +#define MASK1 SHUFFLE_MASK(3, 0, 1, 2) +#define MASK2 SHUFFLE_MASK(2, 3, 0, 1) +#define MASK3 SHUFFLE_MASK(1, 2, 3, 0) + +#define STATE0 %ymm0 +#define STATE0_LOW %xmm0 +#define STATE1 %ymm1 +#define STATE2 %ymm2 +#define STATE3 %ymm3 +#define STATE4 %ymm4 +#define KEY %ymm5 +#define MSG %ymm5 +#define MSG_LOW %xmm5 +#define T0 %ymm6 +#define T0_LOW %xmm6 +#define T1 %ymm7 + +.section .rodata.cst32.morus1280_const, "aM", @progbits, 32 +.align 32 +.Lmorus1280_const: + .byte 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d + .byte 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62 + .byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1 + .byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd + +.section .rodata.cst32.morus1280_counter, "aM", @progbits, 32 +.align 32 +.Lmorus1280_counter: + .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 + .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f + .byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 + .byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f + +.text + +.macro morus1280_round s0, s1, s2, s3, s4, b, w + vpand \s1, \s2, T0 + vpxor T0, \s0, \s0 + vpxor \s3, \s0, \s0 + vpsllq $\b, \s0, T0 + vpsrlq $(64 - \b), \s0, \s0 + vpxor T0, \s0, \s0 + vpermq $\w, \s3, \s3 +.endm + +/* + * __morus1280_update: internal ABI + * input: + * STATE[0-4] - input state + * MSG - message block + * output: + * STATE[0-4] - output state + * changed: + * T0 + */ +__morus1280_update: + morus1280_round STATE0, STATE1, STATE2, STATE3, STATE4, 13, MASK1 + vpxor MSG, STATE1, STATE1 + morus1280_round STATE1, STATE2, STATE3, STATE4, STATE0, 46, MASK2 + vpxor MSG, STATE2, STATE2 + morus1280_round STATE2, STATE3, STATE4, STATE0, STATE1, 38, MASK3 + vpxor MSG, STATE3, STATE3 + morus1280_round STATE3, STATE4, STATE0, STATE1, STATE2, 7, MASK2 + vpxor MSG, STATE4, STATE4 + morus1280_round STATE4, STATE0, STATE1, STATE2, STATE3, 4, MASK1 + ret +ENDPROC(__morus1280_update) + +/* + * __morus1280_update_zero: internal ABI + * input: + * STATE[0-4] - input state + * output: + * STATE[0-4] - output state + * changed: + * T0 + */ +__morus1280_update_zero: + morus1280_round STATE0, STATE1, STATE2, STATE3, STATE4, 13, MASK1 + morus1280_round STATE1, STATE2, STATE3, STATE4, STATE0, 46, MASK2 + morus1280_round STATE2, STATE3, STATE4, STATE0, STATE1, 38, MASK3 + morus1280_round STATE3, STATE4, STATE0, STATE1, STATE2, 7, MASK2 + morus1280_round STATE4, STATE0, STATE1, STATE2, STATE3, 4, MASK1 + ret +ENDPROC(__morus1280_update_zero) + +/* + * __load_partial: internal ABI + * input: + * %rsi - src + * %rcx - bytes + * output: + * MSG - message block + * changed: + * %r8 + * %r9 + */ +__load_partial: + xor %r9, %r9 + vpxor MSG, MSG, MSG + + mov %rcx, %r8 + and $0x1, %r8 + jz .Lld_partial_1 + + mov %rcx, %r8 + and $0x1E, %r8 + add %rsi, %r8 + mov (%r8), %r9b + +.Lld_partial_1: + mov %rcx, %r8 + and $0x2, %r8 + jz .Lld_partial_2 + + mov %rcx, %r8 + and $0x1C, %r8 + add %rsi, %r8 + shl $16, %r9 + mov (%r8), %r9w + +.Lld_partial_2: + mov %rcx, %r8 + and $0x4, %r8 + jz .Lld_partial_4 + + mov %rcx, %r8 + and $0x18, %r8 + add %rsi, %r8 + shl $32, %r9 + mov (%r8), %r8d + xor %r8, %r9 + +.Lld_partial_4: + movq %r9, MSG_LOW + + mov %rcx, %r8 + and $0x8, %r8 + jz .Lld_partial_8 + + mov %rcx, %r8 + and $0x10, %r8 + add %rsi, %r8 + pshufd $MASK2, MSG_LOW, MSG_LOW + pinsrq $0, (%r8), MSG_LOW + +.Lld_partial_8: + mov %rcx, %r8 + and $0x10, %r8 + jz .Lld_partial_16 
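+	/*
+	 * Move the sub-16-byte pieces gathered so far into the high
+	 * 128-bit lane (MASK2 swaps the lanes) and load the 16-byte
+	 * piece into the low lane; the legacy (non-VEX) movdqu below
+	 * leaves the upper lane of MSG intact.
+	 */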
+ + vpermq $MASK2, MSG, MSG + movdqu (%rsi), MSG_LOW + +.Lld_partial_16: + ret +ENDPROC(__load_partial) + +/* + * __store_partial: internal ABI + * input: + * %rdx - dst + * %rcx - bytes + * output: + * T0 - message block + * changed: + * %r8 + * %r9 + * %r10 + */ +__store_partial: + mov %rcx, %r8 + mov %rdx, %r9 + + cmp $16, %r8 + jl .Lst_partial_16 + + movdqu T0_LOW, (%r9) + vpermq $MASK2, T0, T0 + + sub $16, %r8 + add $16, %r9 + +.Lst_partial_16: + movq T0_LOW, %r10 + + cmp $8, %r8 + jl .Lst_partial_8 + + mov %r10, (%r9) + pextrq $1, T0_LOW, %r10 + + sub $8, %r8 + add $8, %r9 + +.Lst_partial_8: + cmp $4, %r8 + jl .Lst_partial_4 + + mov %r10d, (%r9) + shr $32, %r10 + + sub $4, %r8 + add $4, %r9 + +.Lst_partial_4: + cmp $2, %r8 + jl .Lst_partial_2 + + mov %r10w, (%r9) + shr $16, %r10 + + sub $2, %r8 + add $2, %r9 + +.Lst_partial_2: + cmp $1, %r8 + jl .Lst_partial_1 + + mov %r10b, (%r9) + +.Lst_partial_1: + ret +ENDPROC(__store_partial) + +/* + * void crypto_morus1280_avx2_init(void *state, const void *key, + * const void *iv); + */ +ENTRY(crypto_morus1280_avx2_init) + FRAME_BEGIN + + /* load IV: */ + vpxor STATE0, STATE0, STATE0 + movdqu (%rdx), STATE0_LOW + /* load key: */ + vmovdqu (%rsi), KEY + vmovdqa KEY, STATE1 + /* load all ones: */ + vpcmpeqd STATE2, STATE2, STATE2 + /* load all zeros: */ + vpxor STATE3, STATE3, STATE3 + /* load the constant: */ + vmovdqa .Lmorus1280_const, STATE4 + + /* update 16 times with zero: */ + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + + /* xor-in the key again after updates: */ + vpxor KEY, STATE1, STATE1 + + /* store the state: */ + vmovdqu STATE0, (0 * 32)(%rdi) + vmovdqu STATE1, (1 * 32)(%rdi) + vmovdqu STATE2, (2 * 32)(%rdi) + vmovdqu STATE3, (3 * 32)(%rdi) + vmovdqu STATE4, (4 * 32)(%rdi) + + FRAME_END + ret +ENDPROC(crypto_morus1280_avx2_init) + +/* + * void crypto_morus1280_avx2_ad(void *state, const void *data, + * unsigned int length); + */ +ENTRY(crypto_morus1280_avx2_ad) + FRAME_BEGIN + + cmp $32, %rdx + jb .Lad_out + + /* load the state: */ + vmovdqu (0 * 32)(%rdi), STATE0 + vmovdqu (1 * 32)(%rdi), STATE1 + vmovdqu (2 * 32)(%rdi), STATE2 + vmovdqu (3 * 32)(%rdi), STATE3 + vmovdqu (4 * 32)(%rdi), STATE4 + + mov %rsi, %r8 + and $0x1F, %r8 + jnz .Lad_u_loop + +.align 4 +.Lad_a_loop: + vmovdqa (%rsi), MSG + call __morus1280_update + sub $32, %rdx + add $32, %rsi + cmp $32, %rdx + jge .Lad_a_loop + + jmp .Lad_cont +.align 4 +.Lad_u_loop: + vmovdqu (%rsi), MSG + call __morus1280_update + sub $32, %rdx + add $32, %rsi + cmp $32, %rdx + jge .Lad_u_loop + +.Lad_cont: + /* store the state: */ + vmovdqu STATE0, (0 * 32)(%rdi) + vmovdqu STATE1, (1 * 32)(%rdi) + vmovdqu STATE2, (2 * 32)(%rdi) + vmovdqu STATE3, (3 * 32)(%rdi) + vmovdqu STATE4, (4 * 32)(%rdi) + +.Lad_out: + FRAME_END + ret +ENDPROC(crypto_morus1280_avx2_ad) + +/* + * void crypto_morus1280_avx2_enc(void *state, const void *src, void *dst, + * unsigned int length); + */ +ENTRY(crypto_morus1280_avx2_enc) + FRAME_BEGIN + + cmp $32, %rcx + jb .Lenc_out + + /* load the state: */ + vmovdqu (0 * 32)(%rdi), STATE0 + vmovdqu (1 * 32)(%rdi), STATE1 + 
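+	/*
+	 * In the loops below the keystream block is
+	 * S0 ^ (S1 rotated one lane) ^ (S2 & S3); encryption and
+	 * decryption differ only in whether the XOR with it is applied
+	 * to a copy (T0) or to the loaded block itself.
+	 */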
vmovdqu (2 * 32)(%rdi), STATE2
+	vmovdqu (3 * 32)(%rdi), STATE3
+	vmovdqu (4 * 32)(%rdi), STATE4
+
+	mov %rsi, %r8
+	or %rdx, %r8
+	and $0x1F, %r8
+	jnz .Lenc_u_loop
+
+.align 4
+.Lenc_a_loop:
+	vmovdqa (%rsi), MSG
+	vmovdqa MSG, T0
+	vpxor STATE0, T0, T0
+	vpermq $MASK3, STATE1, T1
+	vpxor T1, T0, T0
+	vpand STATE2, STATE3, T1
+	vpxor T1, T0, T0
+	vmovdqa T0, (%rdx)
+
+	call __morus1280_update
+	sub $32, %rcx
+	add $32, %rsi
+	add $32, %rdx
+	cmp $32, %rcx
+	jge .Lenc_a_loop
+
+	jmp .Lenc_cont
+.align 4
+.Lenc_u_loop:
+	vmovdqu (%rsi), MSG
+	vmovdqa MSG, T0
+	vpxor STATE0, T0, T0
+	vpermq $MASK3, STATE1, T1
+	vpxor T1, T0, T0
+	vpand STATE2, STATE3, T1
+	vpxor T1, T0, T0
+	vmovdqu T0, (%rdx)
+
+	call __morus1280_update
+	sub $32, %rcx
+	add $32, %rsi
+	add $32, %rdx
+	cmp $32, %rcx
+	jge .Lenc_u_loop
+
+.Lenc_cont:
+	/* store the state: */
+	vmovdqu STATE0, (0 * 32)(%rdi)
+	vmovdqu STATE1, (1 * 32)(%rdi)
+	vmovdqu STATE2, (2 * 32)(%rdi)
+	vmovdqu STATE3, (3 * 32)(%rdi)
+	vmovdqu STATE4, (4 * 32)(%rdi)
+
+.Lenc_out:
+	FRAME_END
+	ret
+ENDPROC(crypto_morus1280_avx2_enc)
+
+/*
+ * void crypto_morus1280_avx2_enc_tail(void *state, const void *src, void *dst,
+ *                                     unsigned int length);
+ */
+ENTRY(crypto_morus1280_avx2_enc_tail)
+	FRAME_BEGIN
+
+	/* load the state: */
+	vmovdqu (0 * 32)(%rdi), STATE0
+	vmovdqu (1 * 32)(%rdi), STATE1
+	vmovdqu (2 * 32)(%rdi), STATE2
+	vmovdqu (3 * 32)(%rdi), STATE3
+	vmovdqu (4 * 32)(%rdi), STATE4
+
+	/* encrypt message: */
+	call __load_partial
+
+	vmovdqa MSG, T0
+	vpxor STATE0, T0, T0
+	vpermq $MASK3, STATE1, T1
+	vpxor T1, T0, T0
+	vpand STATE2, STATE3, T1
+	vpxor T1, T0, T0
+
+	call __store_partial
+
+	call __morus1280_update
+
+	/* store the state: */
+	vmovdqu STATE0, (0 * 32)(%rdi)
+	vmovdqu STATE1, (1 * 32)(%rdi)
+	vmovdqu STATE2, (2 * 32)(%rdi)
+	vmovdqu STATE3, (3 * 32)(%rdi)
+	vmovdqu STATE4, (4 * 32)(%rdi)
+
+	FRAME_END
+	ret
+ENDPROC(crypto_morus1280_avx2_enc_tail)
+
+/*
+ * void crypto_morus1280_avx2_dec(void *state, const void *src, void *dst,
+ *                                unsigned int length);
+ */
+ENTRY(crypto_morus1280_avx2_dec)
+	FRAME_BEGIN
+
+	cmp $32, %rcx
+	jb .Ldec_out
+
+	/* load the state: */
+	vmovdqu (0 * 32)(%rdi), STATE0
+	vmovdqu (1 * 32)(%rdi), STATE1
+	vmovdqu (2 * 32)(%rdi), STATE2
+	vmovdqu (3 * 32)(%rdi), STATE3
+	vmovdqu (4 * 32)(%rdi), STATE4
+
+	mov %rsi, %r8
+	or %rdx, %r8
+	and $0x1F, %r8
+	jnz .Ldec_u_loop
+
+.align 4
+.Ldec_a_loop:
+	vmovdqa (%rsi), MSG
+	vpxor STATE0, MSG, MSG
+	vpermq $MASK3, STATE1, T0
+	vpxor T0, MSG, MSG
+	vpand STATE2, STATE3, T0
+	vpxor T0, MSG, MSG
+	vmovdqa MSG, (%rdx)
+
+	call __morus1280_update
+	sub $32, %rcx
+	add $32, %rsi
+	add $32, %rdx
+	cmp $32, %rcx
+	jge .Ldec_a_loop
+
+	jmp .Ldec_cont
+.align 4
+.Ldec_u_loop:
+	vmovdqu (%rsi), MSG
+	vpxor STATE0, MSG, MSG
+	vpermq $MASK3, STATE1, T0
+	vpxor T0, MSG, MSG
+	vpand STATE2, STATE3, T0
+	vpxor T0, MSG, MSG
+	vmovdqu MSG, (%rdx)
+
+	call __morus1280_update
+	sub $32, %rcx
+	add $32, %rsi
+	add $32, %rdx
+	cmp $32, %rcx
+	jge .Ldec_u_loop
+
+.Ldec_cont:
+	/* store the state: */
+	vmovdqu STATE0, (0 * 32)(%rdi)
+	vmovdqu STATE1, (1 * 32)(%rdi)
+	vmovdqu STATE2, (2 * 32)(%rdi)
+	vmovdqu STATE3, (3 * 32)(%rdi)
+	vmovdqu STATE4, (4 * 32)(%rdi)
+
+.Ldec_out:
+	FRAME_END
+	ret
+ENDPROC(crypto_morus1280_avx2_dec)
+
+/*
+ * void crypto_morus1280_avx2_dec_tail(void *state, const void *src, void *dst,
+ *                                     unsigned int length);
+ */
+ENTRY(crypto_morus1280_avx2_dec_tail)
+	FRAME_BEGIN
+
+	/* load the state: */
+	vmovdqu (0 * 32)(%rdi), STATE0
+	vmovdqu (1 * 32)(%rdi), STATE1
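+	/*
+	 * Below, after the partial block is decrypted and stored, the
+	 * bytes at offsets >= the byte count are keystream garbage;
+	 * they are masked to zero before the state update so that the
+	 * absorbed block equals the zero-padded plaintext block the
+	 * encrypt side absorbed.
+	 */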
vmovdqu (2 * 32)(%rdi), STATE2 + vmovdqu (3 * 32)(%rdi), STATE3 + vmovdqu (4 * 32)(%rdi), STATE4 + + /* decrypt message: */ + call __load_partial + + vpxor STATE0, MSG, MSG + vpermq $MASK3, STATE1, T0 + vpxor T0, MSG, MSG + vpand STATE2, STATE3, T0 + vpxor T0, MSG, MSG + vmovdqa MSG, T0 + + call __store_partial + + /* mask with byte count: */ + movq %rcx, T0_LOW + vpbroadcastb T0_LOW, T0 + vmovdqa .Lmorus1280_counter, T1 + vpcmpgtb T1, T0, T0 + vpand T0, MSG, MSG + + call __morus1280_update + + /* store the state: */ + vmovdqu STATE0, (0 * 32)(%rdi) + vmovdqu STATE1, (1 * 32)(%rdi) + vmovdqu STATE2, (2 * 32)(%rdi) + vmovdqu STATE3, (3 * 32)(%rdi) + vmovdqu STATE4, (4 * 32)(%rdi) + + FRAME_END + ret +ENDPROC(crypto_morus1280_avx2_dec_tail) + +/* + * void crypto_morus1280_avx2_final(void *state, void *tag_xor, + * u64 assoclen, u64 cryptlen); + */ +ENTRY(crypto_morus1280_avx2_final) + FRAME_BEGIN + + /* load the state: */ + vmovdqu (0 * 32)(%rdi), STATE0 + vmovdqu (1 * 32)(%rdi), STATE1 + vmovdqu (2 * 32)(%rdi), STATE2 + vmovdqu (3 * 32)(%rdi), STATE3 + vmovdqu (4 * 32)(%rdi), STATE4 + + /* xor state[0] into state[4]: */ + vpxor STATE0, STATE4, STATE4 + + /* prepare length block: */ + vpxor MSG, MSG, MSG + vpinsrq $0, %rdx, MSG_LOW, MSG_LOW + vpinsrq $1, %rcx, MSG_LOW, MSG_LOW + vpsllq $3, MSG, MSG /* multiply by 8 (to get bit count) */ + + /* update state: */ + call __morus1280_update + call __morus1280_update + call __morus1280_update + call __morus1280_update + call __morus1280_update + call __morus1280_update + call __morus1280_update + call __morus1280_update + call __morus1280_update + call __morus1280_update + + /* xor tag: */ + vmovdqu (%rsi), MSG + + vpxor STATE0, MSG, MSG + vpermq $MASK3, STATE1, T0 + vpxor T0, MSG, MSG + vpand STATE2, STATE3, T0 + vpxor T0, MSG, MSG + vmovdqu MSG, (%rsi) + + FRAME_END + ret +ENDPROC(crypto_morus1280_avx2_final) diff --git a/arch/x86/crypto/morus1280-avx2-glue.c b/arch/x86/crypto/morus1280-avx2-glue.c new file mode 100644 index 000000000000..f111f36d26dc --- /dev/null +++ b/arch/x86/crypto/morus1280-avx2-glue.c @@ -0,0 +1,68 @@ +/* + * The MORUS-1280 Authenticated-Encryption Algorithm + * Glue for AVX2 implementation + * + * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com> + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#include <crypto/internal/aead.h> +#include <crypto/morus1280_glue.h> +#include <linux/module.h> +#include <asm/fpu/api.h> +#include <asm/cpu_device_id.h> + +asmlinkage void crypto_morus1280_avx2_init(void *state, const void *key, + const void *iv); +asmlinkage void crypto_morus1280_avx2_ad(void *state, const void *data, + unsigned int length); + +asmlinkage void crypto_morus1280_avx2_enc(void *state, const void *src, + void *dst, unsigned int length); +asmlinkage void crypto_morus1280_avx2_dec(void *state, const void *src, + void *dst, unsigned int length); + +asmlinkage void crypto_morus1280_avx2_enc_tail(void *state, const void *src, + void *dst, unsigned int length); +asmlinkage void crypto_morus1280_avx2_dec_tail(void *state, const void *src, + void *dst, unsigned int length); + +asmlinkage void crypto_morus1280_avx2_final(void *state, void *tag_xor, + u64 assoclen, u64 cryptlen); + +MORUS1280_DECLARE_ALGS(avx2, "morus1280-avx2", 400); + +static const struct x86_cpu_id avx2_cpu_id[] = { + X86_FEATURE_MATCH(X86_FEATURE_AVX2), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, avx2_cpu_id); + +static int __init crypto_morus1280_avx2_module_init(void) +{ + if (!x86_match_cpu(avx2_cpu_id)) + return -ENODEV; + + return crypto_register_aeads(crypto_morus1280_avx2_algs, + ARRAY_SIZE(crypto_morus1280_avx2_algs)); +} + +static void __exit crypto_morus1280_avx2_module_exit(void) +{ + crypto_unregister_aeads(crypto_morus1280_avx2_algs, + ARRAY_SIZE(crypto_morus1280_avx2_algs)); +} + +module_init(crypto_morus1280_avx2_module_init); +module_exit(crypto_morus1280_avx2_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>"); +MODULE_DESCRIPTION("MORUS-1280 AEAD algorithm -- AVX2 implementation"); +MODULE_ALIAS_CRYPTO("morus1280"); +MODULE_ALIAS_CRYPTO("morus1280-avx2"); diff --git a/arch/x86/crypto/morus1280-sse2-asm.S b/arch/x86/crypto/morus1280-sse2-asm.S new file mode 100644 index 000000000000..1fe637c7be9d --- /dev/null +++ b/arch/x86/crypto/morus1280-sse2-asm.S @@ -0,0 +1,895 @@ +/* + * SSE2 implementation of MORUS-1280 + * + * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com> + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#include <linux/linkage.h> +#include <asm/frame.h> + +#define SHUFFLE_MASK(i0, i1, i2, i3) \ + (i0 | (i1 << 2) | (i2 << 4) | (i3 << 6)) + +#define MASK2 SHUFFLE_MASK(2, 3, 0, 1) + +#define STATE0_LO %xmm0 +#define STATE0_HI %xmm1 +#define STATE1_LO %xmm2 +#define STATE1_HI %xmm3 +#define STATE2_LO %xmm4 +#define STATE2_HI %xmm5 +#define STATE3_LO %xmm6 +#define STATE3_HI %xmm7 +#define STATE4_LO %xmm8 +#define STATE4_HI %xmm9 +#define KEY_LO %xmm10 +#define KEY_HI %xmm11 +#define MSG_LO %xmm10 +#define MSG_HI %xmm11 +#define T0_LO %xmm12 +#define T0_HI %xmm13 +#define T1_LO %xmm14 +#define T1_HI %xmm15 + +.section .rodata.cst16.morus640_const, "aM", @progbits, 16 +.align 16 +.Lmorus640_const_0: + .byte 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d + .byte 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62 +.Lmorus640_const_1: + .byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1 + .byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd + +.section .rodata.cst16.morus640_counter, "aM", @progbits, 16 +.align 16 +.Lmorus640_counter_0: + .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 + .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f +.Lmorus640_counter_1: + .byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 + .byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f + +.text + +.macro rol1 hi, lo + /* + * HI_1 | HI_0 || LO_1 | LO_0 + * ==> + * HI_0 | HI_1 || LO_1 | LO_0 + * ==> + * HI_0 | LO_1 || LO_0 | HI_1 + */ + pshufd $MASK2, \hi, \hi + movdqa \hi, T0_LO + punpcklqdq \lo, T0_LO + punpckhqdq \hi, \lo + movdqa \lo, \hi + movdqa T0_LO, \lo +.endm + +.macro rol2 hi, lo + movdqa \lo, T0_LO + movdqa \hi, \lo + movdqa T0_LO, \hi +.endm + +.macro rol3 hi, lo + /* + * HI_1 | HI_0 || LO_1 | LO_0 + * ==> + * HI_0 | HI_1 || LO_1 | LO_0 + * ==> + * LO_0 | HI_1 || HI_0 | LO_1 + */ + pshufd $MASK2, \hi, \hi + movdqa \lo, T0_LO + punpckhqdq \hi, T0_LO + punpcklqdq \lo, \hi + movdqa T0_LO, \lo +.endm + +.macro morus1280_round s0_l, s0_h, s1_l, s1_h, s2_l, s2_h, s3_l, s3_h, s4_l, s4_h, b, w + movdqa \s1_l, T0_LO + pand \s2_l, T0_LO + pxor T0_LO, \s0_l + + movdqa \s1_h, T0_LO + pand \s2_h, T0_LO + pxor T0_LO, \s0_h + + pxor \s3_l, \s0_l + pxor \s3_h, \s0_h + + movdqa \s0_l, T0_LO + psllq $\b, T0_LO + psrlq $(64 - \b), \s0_l + pxor T0_LO, \s0_l + + movdqa \s0_h, T0_LO + psllq $\b, T0_LO + psrlq $(64 - \b), \s0_h + pxor T0_LO, \s0_h + + \w \s3_h, \s3_l +.endm + +/* + * __morus1280_update: internal ABI + * input: + * STATE[0-4] - input state + * MSG - message block + * output: + * STATE[0-4] - output state + * changed: + * T0 + */ +__morus1280_update: + morus1280_round \ + STATE0_LO, STATE0_HI, \ + STATE1_LO, STATE1_HI, \ + STATE2_LO, STATE2_HI, \ + STATE3_LO, STATE3_HI, \ + STATE4_LO, STATE4_HI, \ + 13, rol1 + pxor MSG_LO, STATE1_LO + pxor MSG_HI, STATE1_HI + morus1280_round \ + STATE1_LO, STATE1_HI, \ + STATE2_LO, STATE2_HI, \ + STATE3_LO, STATE3_HI, \ + STATE4_LO, STATE4_HI, \ + STATE0_LO, STATE0_HI, \ + 46, rol2 + pxor MSG_LO, STATE2_LO + pxor MSG_HI, STATE2_HI + morus1280_round \ + STATE2_LO, STATE2_HI, \ + STATE3_LO, STATE3_HI, \ + STATE4_LO, STATE4_HI, \ + STATE0_LO, STATE0_HI, \ + STATE1_LO, STATE1_HI, \ + 38, rol3 + pxor MSG_LO, STATE3_LO + pxor MSG_HI, STATE3_HI + morus1280_round \ + STATE3_LO, STATE3_HI, \ + STATE4_LO, STATE4_HI, \ + STATE0_LO, STATE0_HI, \ + STATE1_LO, STATE1_HI, \ + STATE2_LO, STATE2_HI, \ + 7, rol2 + pxor MSG_LO, STATE4_LO + pxor MSG_HI, STATE4_HI + morus1280_round \ + STATE4_LO, STATE4_HI, \ + STATE0_LO, STATE0_HI, \ + STATE1_LO, STATE1_HI, \ + STATE2_LO, STATE2_HI, \ 
+ STATE3_LO, STATE3_HI, \ + 4, rol1 + ret +ENDPROC(__morus1280_update) + +/* + * __morus1280_update_zero: internal ABI + * input: + * STATE[0-4] - input state + * output: + * STATE[0-4] - output state + * changed: + * T0 + */ +__morus1280_update_zero: + morus1280_round \ + STATE0_LO, STATE0_HI, \ + STATE1_LO, STATE1_HI, \ + STATE2_LO, STATE2_HI, \ + STATE3_LO, STATE3_HI, \ + STATE4_LO, STATE4_HI, \ + 13, rol1 + morus1280_round \ + STATE1_LO, STATE1_HI, \ + STATE2_LO, STATE2_HI, \ + STATE3_LO, STATE3_HI, \ + STATE4_LO, STATE4_HI, \ + STATE0_LO, STATE0_HI, \ + 46, rol2 + morus1280_round \ + STATE2_LO, STATE2_HI, \ + STATE3_LO, STATE3_HI, \ + STATE4_LO, STATE4_HI, \ + STATE0_LO, STATE0_HI, \ + STATE1_LO, STATE1_HI, \ + 38, rol3 + morus1280_round \ + STATE3_LO, STATE3_HI, \ + STATE4_LO, STATE4_HI, \ + STATE0_LO, STATE0_HI, \ + STATE1_LO, STATE1_HI, \ + STATE2_LO, STATE2_HI, \ + 7, rol2 + morus1280_round \ + STATE4_LO, STATE4_HI, \ + STATE0_LO, STATE0_HI, \ + STATE1_LO, STATE1_HI, \ + STATE2_LO, STATE2_HI, \ + STATE3_LO, STATE3_HI, \ + 4, rol1 + ret +ENDPROC(__morus1280_update_zero) + +/* + * __load_partial: internal ABI + * input: + * %rsi - src + * %rcx - bytes + * output: + * MSG - message block + * changed: + * %r8 + * %r9 + */ +__load_partial: + xor %r9, %r9 + pxor MSG_LO, MSG_LO + pxor MSG_HI, MSG_HI + + mov %rcx, %r8 + and $0x1, %r8 + jz .Lld_partial_1 + + mov %rcx, %r8 + and $0x1E, %r8 + add %rsi, %r8 + mov (%r8), %r9b + +.Lld_partial_1: + mov %rcx, %r8 + and $0x2, %r8 + jz .Lld_partial_2 + + mov %rcx, %r8 + and $0x1C, %r8 + add %rsi, %r8 + shl $16, %r9 + mov (%r8), %r9w + +.Lld_partial_2: + mov %rcx, %r8 + and $0x4, %r8 + jz .Lld_partial_4 + + mov %rcx, %r8 + and $0x18, %r8 + add %rsi, %r8 + shl $32, %r9 + mov (%r8), %r8d + xor %r8, %r9 + +.Lld_partial_4: + movq %r9, MSG_LO + + mov %rcx, %r8 + and $0x8, %r8 + jz .Lld_partial_8 + + mov %rcx, %r8 + and $0x10, %r8 + add %rsi, %r8 + pslldq $8, MSG_LO + movq (%r8), T0_LO + pxor T0_LO, MSG_LO + +.Lld_partial_8: + mov %rcx, %r8 + and $0x10, %r8 + jz .Lld_partial_16 + + movdqa MSG_LO, MSG_HI + movdqu (%rsi), MSG_LO + +.Lld_partial_16: + ret +ENDPROC(__load_partial) + +/* + * __store_partial: internal ABI + * input: + * %rdx - dst + * %rcx - bytes + * output: + * T0 - message block + * changed: + * %r8 + * %r9 + * %r10 + */ +__store_partial: + mov %rcx, %r8 + mov %rdx, %r9 + + cmp $16, %r8 + jl .Lst_partial_16 + + movdqu T0_LO, (%r9) + movdqa T0_HI, T0_LO + + sub $16, %r8 + add $16, %r9 + +.Lst_partial_16: + movq T0_LO, %r10 + + cmp $8, %r8 + jl .Lst_partial_8 + + mov %r10, (%r9) + psrldq $8, T0_LO + movq T0_LO, %r10 + + sub $8, %r8 + add $8, %r9 + +.Lst_partial_8: + cmp $4, %r8 + jl .Lst_partial_4 + + mov %r10d, (%r9) + shr $32, %r10 + + sub $4, %r8 + add $4, %r9 + +.Lst_partial_4: + cmp $2, %r8 + jl .Lst_partial_2 + + mov %r10w, (%r9) + shr $16, %r10 + + sub $2, %r8 + add $2, %r9 + +.Lst_partial_2: + cmp $1, %r8 + jl .Lst_partial_1 + + mov %r10b, (%r9) + +.Lst_partial_1: + ret +ENDPROC(__store_partial) + +/* + * void crypto_morus1280_sse2_init(void *state, const void *key, + * const void *iv); + */ +ENTRY(crypto_morus1280_sse2_init) + FRAME_BEGIN + + /* load IV: */ + pxor STATE0_HI, STATE0_HI + movdqu (%rdx), STATE0_LO + /* load key: */ + movdqu 0(%rsi), KEY_LO + movdqu 16(%rsi), KEY_HI + movdqa KEY_LO, STATE1_LO + movdqa KEY_HI, STATE1_HI + /* load all ones: */ + pcmpeqd STATE2_LO, STATE2_LO + pcmpeqd STATE2_HI, STATE2_HI + /* load all zeros: */ + pxor STATE3_LO, STATE3_LO + pxor STATE3_HI, STATE3_HI + /* load the constant: */ + movdqa 
.Lmorus640_const_0, STATE4_LO + movdqa .Lmorus640_const_1, STATE4_HI + + /* update 16 times with zero: */ + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + call __morus1280_update_zero + + /* xor-in the key again after updates: */ + pxor KEY_LO, STATE1_LO + pxor KEY_HI, STATE1_HI + + /* store the state: */ + movdqu STATE0_LO, (0 * 16)(%rdi) + movdqu STATE0_HI, (1 * 16)(%rdi) + movdqu STATE1_LO, (2 * 16)(%rdi) + movdqu STATE1_HI, (3 * 16)(%rdi) + movdqu STATE2_LO, (4 * 16)(%rdi) + movdqu STATE2_HI, (5 * 16)(%rdi) + movdqu STATE3_LO, (6 * 16)(%rdi) + movdqu STATE3_HI, (7 * 16)(%rdi) + movdqu STATE4_LO, (8 * 16)(%rdi) + movdqu STATE4_HI, (9 * 16)(%rdi) + + FRAME_END + ret +ENDPROC(crypto_morus1280_sse2_init) + +/* + * void crypto_morus1280_sse2_ad(void *state, const void *data, + * unsigned int length); + */ +ENTRY(crypto_morus1280_sse2_ad) + FRAME_BEGIN + + cmp $32, %rdx + jb .Lad_out + + /* load the state: */ + movdqu (0 * 16)(%rdi), STATE0_LO + movdqu (1 * 16)(%rdi), STATE0_HI + movdqu (2 * 16)(%rdi), STATE1_LO + movdqu (3 * 16)(%rdi), STATE1_HI + movdqu (4 * 16)(%rdi), STATE2_LO + movdqu (5 * 16)(%rdi), STATE2_HI + movdqu (6 * 16)(%rdi), STATE3_LO + movdqu (7 * 16)(%rdi), STATE3_HI + movdqu (8 * 16)(%rdi), STATE4_LO + movdqu (9 * 16)(%rdi), STATE4_HI + + mov %rsi, %r8 + and $0xF, %r8 + jnz .Lad_u_loop + +.align 4 +.Lad_a_loop: + movdqa 0(%rsi), MSG_LO + movdqa 16(%rsi), MSG_HI + call __morus1280_update + sub $32, %rdx + add $32, %rsi + cmp $32, %rdx + jge .Lad_a_loop + + jmp .Lad_cont +.align 4 +.Lad_u_loop: + movdqu 0(%rsi), MSG_LO + movdqu 16(%rsi), MSG_HI + call __morus1280_update + sub $32, %rdx + add $32, %rsi + cmp $32, %rdx + jge .Lad_u_loop + +.Lad_cont: + /* store the state: */ + movdqu STATE0_LO, (0 * 16)(%rdi) + movdqu STATE0_HI, (1 * 16)(%rdi) + movdqu STATE1_LO, (2 * 16)(%rdi) + movdqu STATE1_HI, (3 * 16)(%rdi) + movdqu STATE2_LO, (4 * 16)(%rdi) + movdqu STATE2_HI, (5 * 16)(%rdi) + movdqu STATE3_LO, (6 * 16)(%rdi) + movdqu STATE3_HI, (7 * 16)(%rdi) + movdqu STATE4_LO, (8 * 16)(%rdi) + movdqu STATE4_HI, (9 * 16)(%rdi) + +.Lad_out: + FRAME_END + ret +ENDPROC(crypto_morus1280_sse2_ad) + +/* + * void crypto_morus1280_sse2_enc(void *state, const void *src, void *dst, + * unsigned int length); + */ +ENTRY(crypto_morus1280_sse2_enc) + FRAME_BEGIN + + cmp $32, %rcx + jb .Lenc_out + + /* load the state: */ + movdqu (0 * 16)(%rdi), STATE0_LO + movdqu (1 * 16)(%rdi), STATE0_HI + movdqu (2 * 16)(%rdi), STATE1_LO + movdqu (3 * 16)(%rdi), STATE1_HI + movdqu (4 * 16)(%rdi), STATE2_LO + movdqu (5 * 16)(%rdi), STATE2_HI + movdqu (6 * 16)(%rdi), STATE3_LO + movdqu (7 * 16)(%rdi), STATE3_HI + movdqu (8 * 16)(%rdi), STATE4_LO + movdqu (9 * 16)(%rdi), STATE4_HI + + mov %rsi, %r8 + or %rdx, %r8 + and $0xF, %r8 + jnz .Lenc_u_loop + +.align 4 +.Lenc_a_loop: + movdqa 0(%rsi), MSG_LO + movdqa 16(%rsi), MSG_HI + movdqa STATE1_LO, T1_LO + movdqa STATE1_HI, T1_HI + rol3 T1_HI, T1_LO + movdqa MSG_LO, T0_LO + movdqa MSG_HI, T0_HI + pxor T1_LO, T0_LO + pxor T1_HI, T0_HI + pxor STATE0_LO, T0_LO + pxor STATE0_HI, T0_HI + movdqa STATE2_LO, T1_LO + movdqa STATE2_HI, T1_HI + pand STATE3_LO, T1_LO + pand 
STATE3_HI, T1_HI + pxor T1_LO, T0_LO + pxor T1_HI, T0_HI + movdqa T0_LO, 0(%rdx) + movdqa T0_HI, 16(%rdx) + + call __morus1280_update + sub $32, %rcx + add $32, %rsi + add $32, %rdx + cmp $32, %rcx + jge .Lenc_a_loop + + jmp .Lenc_cont +.align 4 +.Lenc_u_loop: + movdqu 0(%rsi), MSG_LO + movdqu 16(%rsi), MSG_HI + movdqa STATE1_LO, T1_LO + movdqa STATE1_HI, T1_HI + rol3 T1_HI, T1_LO + movdqa MSG_LO, T0_LO + movdqa MSG_HI, T0_HI + pxor T1_LO, T0_LO + pxor T1_HI, T0_HI + pxor STATE0_LO, T0_LO + pxor STATE0_HI, T0_HI + movdqa STATE2_LO, T1_LO + movdqa STATE2_HI, T1_HI + pand STATE3_LO, T1_LO + pand STATE3_HI, T1_HI + pxor T1_LO, T0_LO + pxor T1_HI, T0_HI + movdqu T0_LO, 0(%rdx) + movdqu T0_HI, 16(%rdx) + + call __morus1280_update + sub $32, %rcx + add $32, %rsi + add $32, %rdx + cmp $32, %rcx + jge .Lenc_u_loop + +.Lenc_cont: + /* store the state: */ + movdqu STATE0_LO, (0 * 16)(%rdi) + movdqu STATE0_HI, (1 * 16)(%rdi) + movdqu STATE1_LO, (2 * 16)(%rdi) + movdqu STATE1_HI, (3 * 16)(%rdi) + movdqu STATE2_LO, (4 * 16)(%rdi) + movdqu STATE2_HI, (5 * 16)(%rdi) + movdqu STATE3_LO, (6 * 16)(%rdi) + movdqu STATE3_HI, (7 * 16)(%rdi) + movdqu STATE4_LO, (8 * 16)(%rdi) + movdqu STATE4_HI, (9 * 16)(%rdi) + +.Lenc_out: + FRAME_END + ret +ENDPROC(crypto_morus1280_sse2_enc) + +/* + * void crypto_morus1280_sse2_enc_tail(void *state, const void *src, void *dst, + * unsigned int length); + */ +ENTRY(crypto_morus1280_sse2_enc_tail) + FRAME_BEGIN + + /* load the state: */ + movdqu (0 * 16)(%rdi), STATE0_LO + movdqu (1 * 16)(%rdi), STATE0_HI + movdqu (2 * 16)(%rdi), STATE1_LO + movdqu (3 * 16)(%rdi), STATE1_HI + movdqu (4 * 16)(%rdi), STATE2_LO + movdqu (5 * 16)(%rdi), STATE2_HI + movdqu (6 * 16)(%rdi), STATE3_LO + movdqu (7 * 16)(%rdi), STATE3_HI + movdqu (8 * 16)(%rdi), STATE4_LO + movdqu (9 * 16)(%rdi), STATE4_HI + + /* encrypt message: */ + call __load_partial + + movdqa STATE1_LO, T1_LO + movdqa STATE1_HI, T1_HI + rol3 T1_HI, T1_LO + movdqa MSG_LO, T0_LO + movdqa MSG_HI, T0_HI + pxor T1_LO, T0_LO + pxor T1_HI, T0_HI + pxor STATE0_LO, T0_LO + pxor STATE0_HI, T0_HI + movdqa STATE2_LO, T1_LO + movdqa STATE2_HI, T1_HI + pand STATE3_LO, T1_LO + pand STATE3_HI, T1_HI + pxor T1_LO, T0_LO + pxor T1_HI, T0_HI + + call __store_partial + + call __morus1280_update + + /* store the state: */ + movdqu STATE0_LO, (0 * 16)(%rdi) + movdqu STATE0_HI, (1 * 16)(%rdi) + movdqu STATE1_LO, (2 * 16)(%rdi) + movdqu STATE1_HI, (3 * 16)(%rdi) + movdqu STATE2_LO, (4 * 16)(%rdi) + movdqu STATE2_HI, (5 * 16)(%rdi) + movdqu STATE3_LO, (6 * 16)(%rdi) + movdqu STATE3_HI, (7 * 16)(%rdi) + movdqu STATE4_LO, (8 * 16)(%rdi) + movdqu STATE4_HI, (9 * 16)(%rdi) + + FRAME_END +ENDPROC(crypto_morus1280_sse2_enc_tail) + +/* + * void crypto_morus1280_sse2_dec(void *state, const void *src, void *dst, + * unsigned int length); + */ +ENTRY(crypto_morus1280_sse2_dec) + FRAME_BEGIN + + cmp $32, %rcx + jb .Ldec_out + + /* load the state: */ + movdqu (0 * 16)(%rdi), STATE0_LO + movdqu (1 * 16)(%rdi), STATE0_HI + movdqu (2 * 16)(%rdi), STATE1_LO + movdqu (3 * 16)(%rdi), STATE1_HI + movdqu (4 * 16)(%rdi), STATE2_LO + movdqu (5 * 16)(%rdi), STATE2_HI + movdqu (6 * 16)(%rdi), STATE3_LO + movdqu (7 * 16)(%rdi), STATE3_HI + movdqu (8 * 16)(%rdi), STATE4_LO + movdqu (9 * 16)(%rdi), STATE4_HI + + mov %rsi, %r8 + or %rdx, %r8 + and $0xF, %r8 + jnz .Ldec_u_loop + +.align 4 +.Ldec_a_loop: + movdqa 0(%rsi), MSG_LO + movdqa 16(%rsi), MSG_HI + pxor STATE0_LO, MSG_LO + pxor STATE0_HI, MSG_HI + movdqa STATE1_LO, T1_LO + movdqa STATE1_HI, T1_HI + rol3 T1_HI, T1_LO + pxor 
T1_LO, MSG_LO + pxor T1_HI, MSG_HI + movdqa STATE2_LO, T1_LO + movdqa STATE2_HI, T1_HI + pand STATE3_LO, T1_LO + pand STATE3_HI, T1_HI + pxor T1_LO, MSG_LO + pxor T1_HI, MSG_HI + movdqa MSG_LO, 0(%rdx) + movdqa MSG_HI, 16(%rdx) + + call __morus1280_update + sub $32, %rcx + add $32, %rsi + add $32, %rdx + cmp $32, %rcx + jge .Ldec_a_loop + + jmp .Ldec_cont +.align 4 +.Ldec_u_loop: + movdqu 0(%rsi), MSG_LO + movdqu 16(%rsi), MSG_HI + pxor STATE0_LO, MSG_LO + pxor STATE0_HI, MSG_HI + movdqa STATE1_LO, T1_LO + movdqa STATE1_HI, T1_HI + rol3 T1_HI, T1_LO + pxor T1_LO, MSG_LO + pxor T1_HI, MSG_HI + movdqa STATE2_LO, T1_LO + movdqa STATE2_HI, T1_HI + pand STATE3_LO, T1_LO + pand STATE3_HI, T1_HI + pxor T1_LO, MSG_LO + pxor T1_HI, MSG_HI + movdqu MSG_LO, 0(%rdx) + movdqu MSG_HI, 16(%rdx) + + call __morus1280_update + sub $32, %rcx + add $32, %rsi + add $32, %rdx + cmp $32, %rcx + jge .Ldec_u_loop + +.Ldec_cont: + /* store the state: */ + movdqu STATE0_LO, (0 * 16)(%rdi) + movdqu STATE0_HI, (1 * 16)(%rdi) + movdqu STATE1_LO, (2 * 16)(%rdi) + movdqu STATE1_HI, (3 * 16)(%rdi) + movdqu STATE2_LO, (4 * 16)(%rdi) + movdqu STATE2_HI, (5 * 16)(%rdi) + movdqu STATE3_LO, (6 * 16)(%rdi) + movdqu STATE3_HI, (7 * 16)(%rdi) + movdqu STATE4_LO, (8 * 16)(%rdi) + movdqu STATE4_HI, (9 * 16)(%rdi) + +.Ldec_out: + FRAME_END + ret +ENDPROC(crypto_morus1280_sse2_dec) + +/* + * void crypto_morus1280_sse2_dec_tail(void *state, const void *src, void *dst, + * unsigned int length); + */ +ENTRY(crypto_morus1280_sse2_dec_tail) + FRAME_BEGIN + + /* load the state: */ + movdqu (0 * 16)(%rdi), STATE0_LO + movdqu (1 * 16)(%rdi), STATE0_HI + movdqu (2 * 16)(%rdi), STATE1_LO + movdqu (3 * 16)(%rdi), STATE1_HI + movdqu (4 * 16)(%rdi), STATE2_LO + movdqu (5 * 16)(%rdi), STATE2_HI + movdqu (6 * 16)(%rdi), STATE3_LO + movdqu (7 * 16)(%rdi), STATE3_HI + movdqu (8 * 16)(%rdi), STATE4_LO + movdqu (9 * 16)(%rdi), STATE4_HI + + /* decrypt message: */ + call __load_partial + + pxor STATE0_LO, MSG_LO + pxor STATE0_HI, MSG_HI + movdqa STATE1_LO, T1_LO + movdqa STATE1_HI, T1_HI + rol3 T1_HI, T1_LO + pxor T1_LO, MSG_LO + pxor T1_HI, MSG_HI + movdqa STATE2_LO, T1_LO + movdqa STATE2_HI, T1_HI + pand STATE3_LO, T1_LO + pand STATE3_HI, T1_HI + pxor T1_LO, MSG_LO + pxor T1_HI, MSG_HI + movdqa MSG_LO, T0_LO + movdqa MSG_HI, T0_HI + + call __store_partial + + /* mask with byte count: */ + movq %rcx, T0_LO + punpcklbw T0_LO, T0_LO + punpcklbw T0_LO, T0_LO + punpcklbw T0_LO, T0_LO + punpcklbw T0_LO, T0_LO + movdqa T0_LO, T0_HI + movdqa .Lmorus640_counter_0, T1_LO + movdqa .Lmorus640_counter_1, T1_HI + pcmpgtb T1_LO, T0_LO + pcmpgtb T1_HI, T0_HI + pand T0_LO, MSG_LO + pand T0_HI, MSG_HI + + call __morus1280_update + + /* store the state: */ + movdqu STATE0_LO, (0 * 16)(%rdi) + movdqu STATE0_HI, (1 * 16)(%rdi) + movdqu STATE1_LO, (2 * 16)(%rdi) + movdqu STATE1_HI, (3 * 16)(%rdi) + movdqu STATE2_LO, (4 * 16)(%rdi) + movdqu STATE2_HI, (5 * 16)(%rdi) + movdqu STATE3_LO, (6 * 16)(%rdi) + movdqu STATE3_HI, (7 * 16)(%rdi) + movdqu STATE4_LO, (8 * 16)(%rdi) + movdqu STATE4_HI, (9 * 16)(%rdi) + + FRAME_END + ret +ENDPROC(crypto_morus1280_sse2_dec_tail) + +/* + * void crypto_morus1280_sse2_final(void *state, void *tag_xor, + * u64 assoclen, u64 cryptlen); + */ +ENTRY(crypto_morus1280_sse2_final) + FRAME_BEGIN + + /* load the state: */ + movdqu (0 * 16)(%rdi), STATE0_LO + movdqu (1 * 16)(%rdi), STATE0_HI + movdqu (2 * 16)(%rdi), STATE1_LO + movdqu (3 * 16)(%rdi), STATE1_HI + movdqu (4 * 16)(%rdi), STATE2_LO + movdqu (5 * 16)(%rdi), STATE2_HI + movdqu (6 * 
16)(%rdi), STATE3_LO + movdqu (7 * 16)(%rdi), STATE3_HI + movdqu (8 * 16)(%rdi), STATE4_LO + movdqu (9 * 16)(%rdi), STATE4_HI + + /* xor state[0] into state[4]: */ + pxor STATE0_LO, STATE4_LO + pxor STATE0_HI, STATE4_HI + + /* prepare length block: */ + movq %rdx, MSG_LO + movq %rcx, T0_LO + pslldq $8, T0_LO + pxor T0_LO, MSG_LO + psllq $3, MSG_LO /* multiply by 8 (to get bit count) */ + pxor MSG_HI, MSG_HI + + /* update state: */ + call __morus1280_update + call __morus1280_update + call __morus1280_update + call __morus1280_update + call __morus1280_update + call __morus1280_update + call __morus1280_update + call __morus1280_update + call __morus1280_update + call __morus1280_update + + /* xor tag: */ + movdqu 0(%rsi), MSG_LO + movdqu 16(%rsi), MSG_HI + + pxor STATE0_LO, MSG_LO + pxor STATE0_HI, MSG_HI + movdqa STATE1_LO, T0_LO + movdqa STATE1_HI, T0_HI + rol3 T0_HI, T0_LO + pxor T0_LO, MSG_LO + pxor T0_HI, MSG_HI + movdqa STATE2_LO, T0_LO + movdqa STATE2_HI, T0_HI + pand STATE3_LO, T0_LO + pand STATE3_HI, T0_HI + pxor T0_LO, MSG_LO + pxor T0_HI, MSG_HI + + movdqu MSG_LO, 0(%rsi) + movdqu MSG_HI, 16(%rsi) + + FRAME_END + ret +ENDPROC(crypto_morus1280_sse2_final) diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c new file mode 100644 index 000000000000..839270aa713c --- /dev/null +++ b/arch/x86/crypto/morus1280-sse2-glue.c @@ -0,0 +1,68 @@ +/* + * The MORUS-1280 Authenticated-Encryption Algorithm + * Glue for SSE2 implementation + * + * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com> + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#include <crypto/internal/aead.h> +#include <crypto/morus1280_glue.h> +#include <linux/module.h> +#include <asm/fpu/api.h> +#include <asm/cpu_device_id.h> + +asmlinkage void crypto_morus1280_sse2_init(void *state, const void *key, + const void *iv); +asmlinkage void crypto_morus1280_sse2_ad(void *state, const void *data, + unsigned int length); + +asmlinkage void crypto_morus1280_sse2_enc(void *state, const void *src, + void *dst, unsigned int length); +asmlinkage void crypto_morus1280_sse2_dec(void *state, const void *src, + void *dst, unsigned int length); + +asmlinkage void crypto_morus1280_sse2_enc_tail(void *state, const void *src, + void *dst, unsigned int length); +asmlinkage void crypto_morus1280_sse2_dec_tail(void *state, const void *src, + void *dst, unsigned int length); + +asmlinkage void crypto_morus1280_sse2_final(void *state, void *tag_xor, + u64 assoclen, u64 cryptlen); + +MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350); + +static const struct x86_cpu_id sse2_cpu_id[] = { + X86_FEATURE_MATCH(X86_FEATURE_XMM2), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, sse2_cpu_id); + +static int __init crypto_morus1280_sse2_module_init(void) +{ + if (!x86_match_cpu(sse2_cpu_id)) + return -ENODEV; + + return crypto_register_aeads(crypto_morus1280_sse2_algs, + ARRAY_SIZE(crypto_morus1280_sse2_algs)); +} + +static void __exit crypto_morus1280_sse2_module_exit(void) +{ + crypto_unregister_aeads(crypto_morus1280_sse2_algs, + ARRAY_SIZE(crypto_morus1280_sse2_algs)); +} + +module_init(crypto_morus1280_sse2_module_init); +module_exit(crypto_morus1280_sse2_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>"); +MODULE_DESCRIPTION("MORUS-1280 AEAD algorithm -- SSE2 implementation"); +MODULE_ALIAS_CRYPTO("morus1280"); +MODULE_ALIAS_CRYPTO("morus1280-sse2"); diff --git a/arch/x86/crypto/morus1280_glue.c b/arch/x86/crypto/morus1280_glue.c new file mode 100644 index 000000000000..0dccdda1eb3a --- /dev/null +++ b/arch/x86/crypto/morus1280_glue.c @@ -0,0 +1,302 @@ +/* + * The MORUS-1280 Authenticated-Encryption Algorithm + * Common x86 SIMD glue skeleton + * + * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com> + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#include <crypto/cryptd.h> +#include <crypto/internal/aead.h> +#include <crypto/internal/skcipher.h> +#include <crypto/morus1280_glue.h> +#include <crypto/scatterwalk.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/scatterlist.h> +#include <asm/fpu/api.h> + +struct morus1280_state { + struct morus1280_block s[MORUS_STATE_BLOCKS]; +}; + +struct morus1280_ops { + int (*skcipher_walk_init)(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); + + void (*crypt_blocks)(void *state, const void *src, void *dst, + unsigned int length); + void (*crypt_tail)(void *state, const void *src, void *dst, + unsigned int length); +}; + +static void crypto_morus1280_glue_process_ad( + struct morus1280_state *state, + const struct morus1280_glue_ops *ops, + struct scatterlist *sg_src, unsigned int assoclen) +{ + struct scatter_walk walk; + struct morus1280_block buf; + unsigned int pos = 0; + + scatterwalk_start(&walk, sg_src); + while (assoclen != 0) { + unsigned int size = scatterwalk_clamp(&walk, assoclen); + unsigned int left = size; + void *mapped = scatterwalk_map(&walk); + const u8 *src = (const u8 *)mapped; + + if (pos + size >= MORUS1280_BLOCK_SIZE) { + if (pos > 0) { + unsigned int fill = MORUS1280_BLOCK_SIZE - pos; + memcpy(buf.bytes + pos, src, fill); + ops->ad(state, buf.bytes, MORUS1280_BLOCK_SIZE); + pos = 0; + left -= fill; + src += fill; + } + + ops->ad(state, src, left); + src += left & ~(MORUS1280_BLOCK_SIZE - 1); + left &= MORUS1280_BLOCK_SIZE - 1; + } + + memcpy(buf.bytes + pos, src, left); + + pos += left; + assoclen -= size; + scatterwalk_unmap(mapped); + scatterwalk_advance(&walk, size); + scatterwalk_done(&walk, 0, assoclen); + } + + if (pos > 0) { + memset(buf.bytes + pos, 0, MORUS1280_BLOCK_SIZE - pos); + ops->ad(state, buf.bytes, MORUS1280_BLOCK_SIZE); + } +} + +static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state, + struct morus1280_ops ops, + struct aead_request *req) +{ + struct skcipher_walk walk; + u8 *cursor_src, *cursor_dst; + unsigned int chunksize, base; + + ops.skcipher_walk_init(&walk, req, false); + + while (walk.nbytes) { + cursor_src = walk.src.virt.addr; + cursor_dst = walk.dst.virt.addr; + chunksize = walk.nbytes; + + ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize); + + base = chunksize & ~(MORUS1280_BLOCK_SIZE - 1); + cursor_src += base; + cursor_dst += base; + chunksize &= MORUS1280_BLOCK_SIZE - 1; + + if (chunksize > 0) + ops.crypt_tail(state, cursor_src, cursor_dst, + chunksize); + + skcipher_walk_done(&walk, 0); + } +} + +int crypto_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct morus1280_ctx *ctx = crypto_aead_ctx(aead); + + if (keylen == MORUS1280_BLOCK_SIZE) { + memcpy(ctx->key.bytes, key, MORUS1280_BLOCK_SIZE); + } else if (keylen == MORUS1280_BLOCK_SIZE / 2) { + memcpy(ctx->key.bytes, key, keylen); + memcpy(ctx->key.bytes + keylen, key, keylen); + } else { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_morus1280_glue_setkey); + +int crypto_morus1280_glue_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + return (authsize <= MORUS_MAX_AUTH_SIZE) ? 
0 : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(crypto_morus1280_glue_setauthsize);
+
+static void crypto_morus1280_glue_crypt(struct aead_request *req,
+					struct morus1280_ops ops,
+					unsigned int cryptlen,
+					struct morus1280_block *tag_xor)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
+	struct morus1280_state state;
+
+	kernel_fpu_begin();
+
+	ctx->ops->init(&state, &ctx->key, req->iv);
+	crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src,
+					 req->assoclen);
+	crypto_morus1280_glue_process_crypt(&state, ops, req);
+	ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
+
+	kernel_fpu_end();
+}
+
+int crypto_morus1280_glue_encrypt(struct aead_request *req)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
+	struct morus1280_ops ops = {
+		.skcipher_walk_init = skcipher_walk_aead_encrypt,
+		.crypt_blocks = ctx->ops->enc,
+		.crypt_tail = ctx->ops->enc_tail,
+	};
+
+	struct morus1280_block tag = {};
+	unsigned int authsize = crypto_aead_authsize(tfm);
+	unsigned int cryptlen = req->cryptlen;
+
+	crypto_morus1280_glue_crypt(req, ops, cryptlen, &tag);
+
+	scatterwalk_map_and_copy(tag.bytes, req->dst,
+				 req->assoclen + cryptlen, authsize, 1);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_morus1280_glue_encrypt);
+
+int crypto_morus1280_glue_decrypt(struct aead_request *req)
+{
+	static const u8 zeros[MORUS1280_BLOCK_SIZE] = {};
+
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
+	struct morus1280_ops ops = {
+		.skcipher_walk_init = skcipher_walk_aead_decrypt,
+		.crypt_blocks = ctx->ops->dec,
+		.crypt_tail = ctx->ops->dec_tail,
+	};
+
+	struct morus1280_block tag;
+	unsigned int authsize = crypto_aead_authsize(tfm);
+	unsigned int cryptlen = req->cryptlen - authsize;
+
+	scatterwalk_map_and_copy(tag.bytes, req->src,
+				 req->assoclen + cryptlen, authsize, 0);
+
+	crypto_morus1280_glue_crypt(req, ops, cryptlen, &tag);
+
+	return crypto_memneq(tag.bytes, zeros, authsize) ?
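+	/*
+	 * final() xored the freshly computed tag over the tag copied out
+	 * of the request above, so a valid message leaves all-zero bytes
+	 * here; crypto_memneq() keeps the comparison constant-time.
+	 */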
-EBADMSG : 0; +} +EXPORT_SYMBOL_GPL(crypto_morus1280_glue_decrypt); + +void crypto_morus1280_glue_init_ops(struct crypto_aead *aead, + const struct morus1280_glue_ops *ops) +{ + struct morus1280_ctx *ctx = crypto_aead_ctx(aead); + ctx->ops = ops; +} +EXPORT_SYMBOL_GPL(crypto_morus1280_glue_init_ops); + +int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + return crypto_aead_setkey(&cryptd_tfm->base, key, keylen); +} +EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setkey); + +int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead, + unsigned int authsize) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + return crypto_aead_setauthsize(&cryptd_tfm->base, authsize); +} +EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setauthsize); + +int cryptd_morus1280_glue_encrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + aead = &cryptd_tfm->base; + if (irq_fpu_usable() && (!in_atomic() || + !cryptd_aead_queued(cryptd_tfm))) + aead = cryptd_aead_child(cryptd_tfm); + + aead_request_set_tfm(req, aead); + + return crypto_aead_encrypt(req); +} +EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_encrypt); + +int cryptd_morus1280_glue_decrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + aead = &cryptd_tfm->base; + if (irq_fpu_usable() && (!in_atomic() || + !cryptd_aead_queued(cryptd_tfm))) + aead = cryptd_aead_child(cryptd_tfm); + + aead_request_set_tfm(req, aead); + + return crypto_aead_decrypt(req); +} +EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_decrypt); + +int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead) +{ + struct cryptd_aead *cryptd_tfm; + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + const char *name = crypto_aead_alg(aead)->base.cra_driver_name; + char internal_name[CRYPTO_MAX_ALG_NAME]; + + if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name) + >= CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL, + CRYPTO_ALG_INTERNAL); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); + + *ctx = cryptd_tfm; + crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); + return 0; +} +EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_init_tfm); + +void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + + cryptd_free_aead(*ctx); +} +EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_exit_tfm); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>"); +MODULE_DESCRIPTION("MORUS-1280 AEAD mode -- glue for x86 optimizations"); diff --git a/arch/x86/crypto/morus640-sse2-asm.S b/arch/x86/crypto/morus640-sse2-asm.S new file mode 100644 index 000000000000..71c72a0a0862 --- /dev/null +++ b/arch/x86/crypto/morus640-sse2-asm.S @@ -0,0 +1,614 @@ +/* + * SSE2 implementation of MORUS-640 + * + * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com> + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#include <linux/linkage.h> +#include <asm/frame.h> + +#define SHUFFLE_MASK(i0, i1, i2, i3) \ + (i0 | (i1 << 2) | (i2 << 4) | (i3 << 6)) + +#define MASK1 SHUFFLE_MASK(3, 0, 1, 2) +#define MASK2 SHUFFLE_MASK(2, 3, 0, 1) +#define MASK3 SHUFFLE_MASK(1, 2, 3, 0) + +#define STATE0 %xmm0 +#define STATE1 %xmm1 +#define STATE2 %xmm2 +#define STATE3 %xmm3 +#define STATE4 %xmm4 +#define KEY %xmm5 +#define MSG %xmm5 +#define T0 %xmm6 +#define T1 %xmm7 + +.section .rodata.cst16.morus640_const, "aM", @progbits, 32 +.align 16 +.Lmorus640_const_0: + .byte 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d + .byte 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62 +.Lmorus640_const_1: + .byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1 + .byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd + +.section .rodata.cst16.morus640_counter, "aM", @progbits, 16 +.align 16 +.Lmorus640_counter: + .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 + .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f + +.text + +.macro morus640_round s0, s1, s2, s3, s4, b, w + movdqa \s1, T0 + pand \s2, T0 + pxor T0, \s0 + pxor \s3, \s0 + movdqa \s0, T0 + pslld $\b, T0 + psrld $(32 - \b), \s0 + pxor T0, \s0 + pshufd $\w, \s3, \s3 +.endm + +/* + * __morus640_update: internal ABI + * input: + * STATE[0-4] - input state + * MSG - message block + * output: + * STATE[0-4] - output state + * changed: + * T0 + */ +__morus640_update: + morus640_round STATE0, STATE1, STATE2, STATE3, STATE4, 5, MASK1 + pxor MSG, STATE1 + morus640_round STATE1, STATE2, STATE3, STATE4, STATE0, 31, MASK2 + pxor MSG, STATE2 + morus640_round STATE2, STATE3, STATE4, STATE0, STATE1, 7, MASK3 + pxor MSG, STATE3 + morus640_round STATE3, STATE4, STATE0, STATE1, STATE2, 22, MASK2 + pxor MSG, STATE4 + morus640_round STATE4, STATE0, STATE1, STATE2, STATE3, 13, MASK1 + ret +ENDPROC(__morus640_update) + + +/* + * __morus640_update_zero: internal ABI + * input: + * STATE[0-4] - input state + * output: + * STATE[0-4] - output state + * changed: + * T0 + */ +__morus640_update_zero: + morus640_round STATE0, STATE1, STATE2, STATE3, STATE4, 5, MASK1 + morus640_round STATE1, STATE2, STATE3, STATE4, STATE0, 31, MASK2 + morus640_round STATE2, STATE3, STATE4, STATE0, STATE1, 7, MASK3 + morus640_round STATE3, STATE4, STATE0, STATE1, STATE2, 22, MASK2 + morus640_round STATE4, STATE0, STATE1, STATE2, STATE3, 13, MASK1 + ret +ENDPROC(__morus640_update_zero) + +/* + * __load_partial: internal ABI + * input: + * %rsi - src + * %rcx - bytes + * output: + * MSG - message block + * changed: + * T0 + * %r8 + * %r9 + */ +__load_partial: + xor %r9, %r9 + pxor MSG, MSG + + mov %rcx, %r8 + and $0x1, %r8 + jz .Lld_partial_1 + + mov %rcx, %r8 + and $0x1E, %r8 + add %rsi, %r8 + mov (%r8), %r9b + +.Lld_partial_1: + mov %rcx, %r8 + and $0x2, %r8 + jz .Lld_partial_2 + + mov %rcx, %r8 + and $0x1C, %r8 + add %rsi, %r8 + shl $16, %r9 + mov (%r8), %r9w + +.Lld_partial_2: + mov %rcx, %r8 + and $0x4, %r8 + jz .Lld_partial_4 + + mov %rcx, %r8 + and $0x18, %r8 + add %rsi, %r8 + shl $32, %r9 + mov (%r8), %r8d + xor %r8, %r9 + +.Lld_partial_4: + movq %r9, MSG + + mov %rcx, %r8 + and $0x8, %r8 + jz .Lld_partial_8 + + mov %rcx, %r8 + and $0x10, %r8 + add %rsi, %r8 + pslldq $8, MSG + movq (%r8), T0 + pxor T0, MSG + +.Lld_partial_8: + ret +ENDPROC(__load_partial) + +/* + * __store_partial: internal ABI + * input: + * %rdx - dst + * %rcx - bytes + * output: + * T0 - message block + * changed: + * %r8 + * %r9 + * %r10 + */ +__store_partial: + mov %rcx, %r8 + mov %rdx, %r9 + + movq T0, %r10 + + cmp $8, %r8 + jl 
.Lst_partial_8 + + mov %r10, (%r9) + psrldq $8, T0 + movq T0, %r10 + + sub $8, %r8 + add $8, %r9 + +.Lst_partial_8: + cmp $4, %r8 + jl .Lst_partial_4 + + mov %r10d, (%r9) + shr $32, %r10 + + sub $4, %r8 + add $4, %r9 + +.Lst_partial_4: + cmp $2, %r8 + jl .Lst_partial_2 + + mov %r10w, (%r9) + shr $16, %r10 + + sub $2, %r8 + add $2, %r9 + +.Lst_partial_2: + cmp $1, %r8 + jl .Lst_partial_1 + + mov %r10b, (%r9) + +.Lst_partial_1: + ret +ENDPROC(__store_partial) + +/* + * void crypto_morus640_sse2_init(void *state, const void *key, const void *iv); + */ +ENTRY(crypto_morus640_sse2_init) + FRAME_BEGIN + + /* load IV: */ + movdqu (%rdx), STATE0 + /* load key: */ + movdqu (%rsi), KEY + movdqa KEY, STATE1 + /* load all ones: */ + pcmpeqd STATE2, STATE2 + /* load the constants: */ + movdqa .Lmorus640_const_0, STATE3 + movdqa .Lmorus640_const_1, STATE4 + + /* update 16 times with zero: */ + call __morus640_update_zero + call __morus640_update_zero + call __morus640_update_zero + call __morus640_update_zero + call __morus640_update_zero + call __morus640_update_zero + call __morus640_update_zero + call __morus640_update_zero + call __morus640_update_zero + call __morus640_update_zero + call __morus640_update_zero + call __morus640_update_zero + call __morus640_update_zero + call __morus640_update_zero + call __morus640_update_zero + call __morus640_update_zero + + /* xor-in the key again after updates: */ + pxor KEY, STATE1 + + /* store the state: */ + movdqu STATE0, (0 * 16)(%rdi) + movdqu STATE1, (1 * 16)(%rdi) + movdqu STATE2, (2 * 16)(%rdi) + movdqu STATE3, (3 * 16)(%rdi) + movdqu STATE4, (4 * 16)(%rdi) + + FRAME_END + ret +ENDPROC(crypto_morus640_sse2_init) + +/* + * void crypto_morus640_sse2_ad(void *state, const void *data, + * unsigned int length); + */ +ENTRY(crypto_morus640_sse2_ad) + FRAME_BEGIN + + cmp $16, %rdx + jb .Lad_out + + /* load the state: */ + movdqu (0 * 16)(%rdi), STATE0 + movdqu (1 * 16)(%rdi), STATE1 + movdqu (2 * 16)(%rdi), STATE2 + movdqu (3 * 16)(%rdi), STATE3 + movdqu (4 * 16)(%rdi), STATE4 + + mov %rsi, %r8 + and $0xF, %r8 + jnz .Lad_u_loop + +.align 4 +.Lad_a_loop: + movdqa (%rsi), MSG + call __morus640_update + sub $16, %rdx + add $16, %rsi + cmp $16, %rdx + jge .Lad_a_loop + + jmp .Lad_cont +.align 4 +.Lad_u_loop: + movdqu (%rsi), MSG + call __morus640_update + sub $16, %rdx + add $16, %rsi + cmp $16, %rdx + jge .Lad_u_loop + +.Lad_cont: + /* store the state: */ + movdqu STATE0, (0 * 16)(%rdi) + movdqu STATE1, (1 * 16)(%rdi) + movdqu STATE2, (2 * 16)(%rdi) + movdqu STATE3, (3 * 16)(%rdi) + movdqu STATE4, (4 * 16)(%rdi) + +.Lad_out: + FRAME_END + ret +ENDPROC(crypto_morus640_sse2_ad) + +/* + * void crypto_morus640_sse2_enc(void *state, const void *src, void *dst, + * unsigned int length); + */ +ENTRY(crypto_morus640_sse2_enc) + FRAME_BEGIN + + cmp $16, %rcx + jb .Lenc_out + + /* load the state: */ + movdqu (0 * 16)(%rdi), STATE0 + movdqu (1 * 16)(%rdi), STATE1 + movdqu (2 * 16)(%rdi), STATE2 + movdqu (3 * 16)(%rdi), STATE3 + movdqu (4 * 16)(%rdi), STATE4 + + mov %rsi, %r8 + or %rdx, %r8 + and $0xF, %r8 + jnz .Lenc_u_loop + +.align 4 +.Lenc_a_loop: + movdqa (%rsi), MSG + movdqa MSG, T0 + pxor STATE0, T0 + pshufd $MASK3, STATE1, T1 + pxor T1, T0 + movdqa STATE2, T1 + pand STATE3, T1 + pxor T1, T0 + movdqa T0, (%rdx) + + call __morus640_update + sub $16, %rcx + add $16, %rsi + add $16, %rdx + cmp $16, %rcx + jge .Lenc_a_loop + + jmp .Lenc_cont +.align 4 +.Lenc_u_loop: + movdqu (%rsi), MSG + movdqa MSG, T0 + pxor STATE0, T0 + pshufd $MASK3, STATE1, T1 + pxor T1, T0 + 
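+	/*
+	 * At this point T0 holds M ^ STATE0 ^ (STATE1 rotated by one
+	 * 32-bit word via pshufd $MASK3); the next three instructions
+	 * fold in the final STATE2 & STATE3 keystream term before the
+	 * ciphertext is written out.
+	 */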
movdqa STATE2, T1 + pand STATE3, T1 + pxor T1, T0 + movdqu T0, (%rdx) + + call __morus640_update + sub $16, %rcx + add $16, %rsi + add $16, %rdx + cmp $16, %rcx + jge .Lenc_u_loop + +.Lenc_cont: + /* store the state: */ + movdqu STATE0, (0 * 16)(%rdi) + movdqu STATE1, (1 * 16)(%rdi) + movdqu STATE2, (2 * 16)(%rdi) + movdqu STATE3, (3 * 16)(%rdi) + movdqu STATE4, (4 * 16)(%rdi) + +.Lenc_out: + FRAME_END + ret +ENDPROC(crypto_morus640_sse2_enc) + +/* + * void crypto_morus640_sse2_enc_tail(void *state, const void *src, void *dst, + * unsigned int length); + */ +ENTRY(crypto_morus640_sse2_enc_tail) + FRAME_BEGIN + + /* load the state: */ + movdqu (0 * 16)(%rdi), STATE0 + movdqu (1 * 16)(%rdi), STATE1 + movdqu (2 * 16)(%rdi), STATE2 + movdqu (3 * 16)(%rdi), STATE3 + movdqu (4 * 16)(%rdi), STATE4 + + /* encrypt message: */ + call __load_partial + + movdqa MSG, T0 + pxor STATE0, T0 + pshufd $MASK3, STATE1, T1 + pxor T1, T0 + movdqa STATE2, T1 + pand STATE3, T1 + pxor T1, T0 + + call __store_partial + + call __morus640_update + + /* store the state: */ + movdqu STATE0, (0 * 16)(%rdi) + movdqu STATE1, (1 * 16)(%rdi) + movdqu STATE2, (2 * 16)(%rdi) + movdqu STATE3, (3 * 16)(%rdi) + movdqu STATE4, (4 * 16)(%rdi) + + FRAME_END +ENDPROC(crypto_morus640_sse2_enc_tail) + +/* + * void crypto_morus640_sse2_dec(void *state, const void *src, void *dst, + * unsigned int length); + */ +ENTRY(crypto_morus640_sse2_dec) + FRAME_BEGIN + + cmp $16, %rcx + jb .Ldec_out + + /* load the state: */ + movdqu (0 * 16)(%rdi), STATE0 + movdqu (1 * 16)(%rdi), STATE1 + movdqu (2 * 16)(%rdi), STATE2 + movdqu (3 * 16)(%rdi), STATE3 + movdqu (4 * 16)(%rdi), STATE4 + + mov %rsi, %r8 + or %rdx, %r8 + and $0xF, %r8 + jnz .Ldec_u_loop + +.align 4 +.Ldec_a_loop: + movdqa (%rsi), MSG + pxor STATE0, MSG + pshufd $MASK3, STATE1, T0 + pxor T0, MSG + movdqa STATE2, T0 + pand STATE3, T0 + pxor T0, MSG + movdqa MSG, (%rdx) + + call __morus640_update + sub $16, %rcx + add $16, %rsi + add $16, %rdx + cmp $16, %rcx + jge .Ldec_a_loop + + jmp .Ldec_cont +.align 4 +.Ldec_u_loop: + movdqu (%rsi), MSG + pxor STATE0, MSG + pshufd $MASK3, STATE1, T0 + pxor T0, MSG + movdqa STATE2, T0 + pand STATE3, T0 + pxor T0, MSG + movdqu MSG, (%rdx) + + call __morus640_update + sub $16, %rcx + add $16, %rsi + add $16, %rdx + cmp $16, %rcx + jge .Ldec_u_loop + +.Ldec_cont: + /* store the state: */ + movdqu STATE0, (0 * 16)(%rdi) + movdqu STATE1, (1 * 16)(%rdi) + movdqu STATE2, (2 * 16)(%rdi) + movdqu STATE3, (3 * 16)(%rdi) + movdqu STATE4, (4 * 16)(%rdi) + +.Ldec_out: + FRAME_END + ret +ENDPROC(crypto_morus640_sse2_dec) + +/* + * void crypto_morus640_sse2_dec_tail(void *state, const void *src, void *dst, + * unsigned int length); + */ +ENTRY(crypto_morus640_sse2_dec_tail) + FRAME_BEGIN + + /* load the state: */ + movdqu (0 * 16)(%rdi), STATE0 + movdqu (1 * 16)(%rdi), STATE1 + movdqu (2 * 16)(%rdi), STATE2 + movdqu (3 * 16)(%rdi), STATE3 + movdqu (4 * 16)(%rdi), STATE4 + + /* decrypt message: */ + call __load_partial + + pxor STATE0, MSG + pshufd $MASK3, STATE1, T0 + pxor T0, MSG + movdqa STATE2, T0 + pand STATE3, T0 + pxor T0, MSG + movdqa MSG, T0 + + call __store_partial + + /* mask with byte count: */ + movq %rcx, T0 + punpcklbw T0, T0 + punpcklbw T0, T0 + punpcklbw T0, T0 + punpcklbw T0, T0 + movdqa .Lmorus640_counter, T1 + pcmpgtb T1, T0 + pand T0, MSG + + call __morus640_update + + /* store the state: */ + movdqu STATE0, (0 * 16)(%rdi) + movdqu STATE1, (1 * 16)(%rdi) + movdqu STATE2, (2 * 16)(%rdi) + movdqu STATE3, (3 * 16)(%rdi) + movdqu STATE4, (4 * 
16)(%rdi) + + FRAME_END + ret +ENDPROC(crypto_morus640_sse2_dec_tail) + +/* + * void crypto_morus640_sse2_final(void *state, void *tag_xor, + * u64 assoclen, u64 cryptlen); + */ +ENTRY(crypto_morus640_sse2_final) + FRAME_BEGIN + + /* load the state: */ + movdqu (0 * 16)(%rdi), STATE0 + movdqu (1 * 16)(%rdi), STATE1 + movdqu (2 * 16)(%rdi), STATE2 + movdqu (3 * 16)(%rdi), STATE3 + movdqu (4 * 16)(%rdi), STATE4 + + /* xor state[0] into state[4]: */ + pxor STATE0, STATE4 + + /* prepare length block: */ + movq %rdx, MSG + movq %rcx, T0 + pslldq $8, T0 + pxor T0, MSG + psllq $3, MSG /* multiply by 8 (to get bit count) */ + + /* update state: */ + call __morus640_update + call __morus640_update + call __morus640_update + call __morus640_update + call __morus640_update + call __morus640_update + call __morus640_update + call __morus640_update + call __morus640_update + call __morus640_update + + /* xor tag: */ + movdqu (%rsi), MSG + + pxor STATE0, MSG + pshufd $MASK3, STATE1, T0 + pxor T0, MSG + movdqa STATE2, T0 + pand STATE3, T0 + pxor T0, MSG + + movdqu MSG, (%rsi) + + FRAME_END + ret +ENDPROC(crypto_morus640_sse2_final) diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c new file mode 100644 index 000000000000..26b47e2db8d2 --- /dev/null +++ b/arch/x86/crypto/morus640-sse2-glue.c @@ -0,0 +1,68 @@ +/* + * The MORUS-640 Authenticated-Encryption Algorithm + * Glue for SSE2 implementation + * + * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com> + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#include <crypto/internal/aead.h> +#include <crypto/morus640_glue.h> +#include <linux/module.h> +#include <asm/fpu/api.h> +#include <asm/cpu_device_id.h> + +asmlinkage void crypto_morus640_sse2_init(void *state, const void *key, + const void *iv); +asmlinkage void crypto_morus640_sse2_ad(void *state, const void *data, + unsigned int length); + +asmlinkage void crypto_morus640_sse2_enc(void *state, const void *src, + void *dst, unsigned int length); +asmlinkage void crypto_morus640_sse2_dec(void *state, const void *src, + void *dst, unsigned int length); + +asmlinkage void crypto_morus640_sse2_enc_tail(void *state, const void *src, + void *dst, unsigned int length); +asmlinkage void crypto_morus640_sse2_dec_tail(void *state, const void *src, + void *dst, unsigned int length); + +asmlinkage void crypto_morus640_sse2_final(void *state, void *tag_xor, + u64 assoclen, u64 cryptlen); + +MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400); + +static const struct x86_cpu_id sse2_cpu_id[] = { + X86_FEATURE_MATCH(X86_FEATURE_XMM2), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, sse2_cpu_id); + +static int __init crypto_morus640_sse2_module_init(void) +{ + if (!x86_match_cpu(sse2_cpu_id)) + return -ENODEV; + + return crypto_register_aeads(crypto_morus640_sse2_algs, + ARRAY_SIZE(crypto_morus640_sse2_algs)); +} + +static void __exit crypto_morus640_sse2_module_exit(void) +{ + crypto_unregister_aeads(crypto_morus640_sse2_algs, + ARRAY_SIZE(crypto_morus640_sse2_algs)); +} + +module_init(crypto_morus640_sse2_module_init); +module_exit(crypto_morus640_sse2_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>"); +MODULE_DESCRIPTION("MORUS-640 AEAD algorithm -- SSE2 implementation"); +MODULE_ALIAS_CRYPTO("morus640"); +MODULE_ALIAS_CRYPTO("morus640-sse2"); diff --git a/arch/x86/crypto/morus640_glue.c b/arch/x86/crypto/morus640_glue.c new file mode 100644 index 000000000000..7b58fe4d9bd1 --- /dev/null +++ b/arch/x86/crypto/morus640_glue.c @@ -0,0 +1,298 @@ +/* + * The MORUS-640 Authenticated-Encryption Algorithm + * Common x86 SIMD glue skeleton + * + * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com> + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#include <crypto/cryptd.h> +#include <crypto/internal/aead.h> +#include <crypto/internal/skcipher.h> +#include <crypto/morus640_glue.h> +#include <crypto/scatterwalk.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/scatterlist.h> +#include <asm/fpu/api.h> + +struct morus640_state { + struct morus640_block s[MORUS_STATE_BLOCKS]; +}; + +struct morus640_ops { + int (*skcipher_walk_init)(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); + + void (*crypt_blocks)(void *state, const void *src, void *dst, + unsigned int length); + void (*crypt_tail)(void *state, const void *src, void *dst, + unsigned int length); +}; + +static void crypto_morus640_glue_process_ad( + struct morus640_state *state, + const struct morus640_glue_ops *ops, + struct scatterlist *sg_src, unsigned int assoclen) +{ + struct scatter_walk walk; + struct morus640_block buf; + unsigned int pos = 0; + + scatterwalk_start(&walk, sg_src); + while (assoclen != 0) { + unsigned int size = scatterwalk_clamp(&walk, assoclen); + unsigned int left = size; + void *mapped = scatterwalk_map(&walk); + const u8 *src = (const u8 *)mapped; + + if (pos + size >= MORUS640_BLOCK_SIZE) { + if (pos > 0) { + unsigned int fill = MORUS640_BLOCK_SIZE - pos; + memcpy(buf.bytes + pos, src, fill); + ops->ad(state, buf.bytes, MORUS640_BLOCK_SIZE); + pos = 0; + left -= fill; + src += fill; + } + + ops->ad(state, src, left); + src += left & ~(MORUS640_BLOCK_SIZE - 1); + left &= MORUS640_BLOCK_SIZE - 1; + } + + memcpy(buf.bytes + pos, src, left); + + pos += left; + assoclen -= size; + scatterwalk_unmap(mapped); + scatterwalk_advance(&walk, size); + scatterwalk_done(&walk, 0, assoclen); + } + + if (pos > 0) { + memset(buf.bytes + pos, 0, MORUS640_BLOCK_SIZE - pos); + ops->ad(state, buf.bytes, MORUS640_BLOCK_SIZE); + } +} + +static void crypto_morus640_glue_process_crypt(struct morus640_state *state, + struct morus640_ops ops, + struct aead_request *req) +{ + struct skcipher_walk walk; + u8 *cursor_src, *cursor_dst; + unsigned int chunksize, base; + + ops.skcipher_walk_init(&walk, req, false); + + while (walk.nbytes) { + cursor_src = walk.src.virt.addr; + cursor_dst = walk.dst.virt.addr; + chunksize = walk.nbytes; + + ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize); + + base = chunksize & ~(MORUS640_BLOCK_SIZE - 1); + cursor_src += base; + cursor_dst += base; + chunksize &= MORUS640_BLOCK_SIZE - 1; + + if (chunksize > 0) + ops.crypt_tail(state, cursor_src, cursor_dst, + chunksize); + + skcipher_walk_done(&walk, 0); + } +} + +int crypto_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct morus640_ctx *ctx = crypto_aead_ctx(aead); + + if (keylen != MORUS640_BLOCK_SIZE) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->key.bytes, key, MORUS640_BLOCK_SIZE); + return 0; +} +EXPORT_SYMBOL_GPL(crypto_morus640_glue_setkey); + +int crypto_morus640_glue_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + return (authsize <= MORUS_MAX_AUTH_SIZE) ? 
0 : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(crypto_morus640_glue_setauthsize);
+
+static void crypto_morus640_glue_crypt(struct aead_request *req,
+				       struct morus640_ops ops,
+				       unsigned int cryptlen,
+				       struct morus640_block *tag_xor)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
+	struct morus640_state state;
+
+	kernel_fpu_begin();
+
+	ctx->ops->init(&state, &ctx->key, req->iv);
+	crypto_morus640_glue_process_ad(&state, ctx->ops, req->src,
+					req->assoclen);
+	crypto_morus640_glue_process_crypt(&state, ops, req);
+	ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
+
+	kernel_fpu_end();
+}
+
+int crypto_morus640_glue_encrypt(struct aead_request *req)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
+	struct morus640_ops ops = {
+		.skcipher_walk_init = skcipher_walk_aead_encrypt,
+		.crypt_blocks = ctx->ops->enc,
+		.crypt_tail = ctx->ops->enc_tail,
+	};
+
+	struct morus640_block tag = {};
+	unsigned int authsize = crypto_aead_authsize(tfm);
+	unsigned int cryptlen = req->cryptlen;
+
+	crypto_morus640_glue_crypt(req, ops, cryptlen, &tag);
+
+	scatterwalk_map_and_copy(tag.bytes, req->dst,
+				 req->assoclen + cryptlen, authsize, 1);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_morus640_glue_encrypt);
+
+int crypto_morus640_glue_decrypt(struct aead_request *req)
+{
+	static const u8 zeros[MORUS640_BLOCK_SIZE] = {};
+
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
+	struct morus640_ops ops = {
+		.skcipher_walk_init = skcipher_walk_aead_decrypt,
+		.crypt_blocks = ctx->ops->dec,
+		.crypt_tail = ctx->ops->dec_tail,
+	};
+
+	struct morus640_block tag;
+	unsigned int authsize = crypto_aead_authsize(tfm);
+	unsigned int cryptlen = req->cryptlen - authsize;
+
+	scatterwalk_map_and_copy(tag.bytes, req->src,
+				 req->assoclen + cryptlen, authsize, 0);
+
+	crypto_morus640_glue_crypt(req, ops, cryptlen, &tag);
+
+	return crypto_memneq(tag.bytes, zeros, authsize) ?
-EBADMSG : 0; +} +EXPORT_SYMBOL_GPL(crypto_morus640_glue_decrypt); + +void crypto_morus640_glue_init_ops(struct crypto_aead *aead, + const struct morus640_glue_ops *ops) +{ + struct morus640_ctx *ctx = crypto_aead_ctx(aead); + ctx->ops = ops; +} +EXPORT_SYMBOL_GPL(crypto_morus640_glue_init_ops); + +int cryptd_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + return crypto_aead_setkey(&cryptd_tfm->base, key, keylen); +} +EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setkey); + +int cryptd_morus640_glue_setauthsize(struct crypto_aead *aead, + unsigned int authsize) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + return crypto_aead_setauthsize(&cryptd_tfm->base, authsize); +} +EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setauthsize); + +int cryptd_morus640_glue_encrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + aead = &cryptd_tfm->base; + if (irq_fpu_usable() && (!in_atomic() || + !cryptd_aead_queued(cryptd_tfm))) + aead = cryptd_aead_child(cryptd_tfm); + + aead_request_set_tfm(req, aead); + + return crypto_aead_encrypt(req); +} +EXPORT_SYMBOL_GPL(cryptd_morus640_glue_encrypt); + +int cryptd_morus640_glue_decrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + aead = &cryptd_tfm->base; + if (irq_fpu_usable() && (!in_atomic() || + !cryptd_aead_queued(cryptd_tfm))) + aead = cryptd_aead_child(cryptd_tfm); + + aead_request_set_tfm(req, aead); + + return crypto_aead_decrypt(req); +} +EXPORT_SYMBOL_GPL(cryptd_morus640_glue_decrypt); + +int cryptd_morus640_glue_init_tfm(struct crypto_aead *aead) +{ + struct cryptd_aead *cryptd_tfm; + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + const char *name = crypto_aead_alg(aead)->base.cra_driver_name; + char internal_name[CRYPTO_MAX_ALG_NAME]; + + if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name) + >= CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL, + CRYPTO_ALG_INTERNAL); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); + + *ctx = cryptd_tfm; + crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); + return 0; +} +EXPORT_SYMBOL_GPL(cryptd_morus640_glue_init_tfm); + +void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + + cryptd_free_aead(*ctx); +} +EXPORT_SYMBOL_GPL(cryptd_morus640_glue_exit_tfm); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>"); +MODULE_DESCRIPTION("MORUS-640 AEAD mode -- glue for x86 optimizations"); diff --git a/arch/x86/crypto/salsa20-i586-asm_32.S b/arch/x86/crypto/salsa20-i586-asm_32.S deleted file mode 100644 index 6014b7b9e52a..000000000000 --- a/arch/x86/crypto/salsa20-i586-asm_32.S +++ /dev/null @@ -1,938 +0,0 @@ -# Derived from: -# salsa20_pm.s version 20051229 -# D. J. Bernstein -# Public domain. 
- -#include <linux/linkage.h> - -.text - -# enter salsa20_encrypt_bytes -ENTRY(salsa20_encrypt_bytes) - mov %esp,%eax - and $31,%eax - add $256,%eax - sub %eax,%esp - # eax_stack = eax - movl %eax,80(%esp) - # ebx_stack = ebx - movl %ebx,84(%esp) - # esi_stack = esi - movl %esi,88(%esp) - # edi_stack = edi - movl %edi,92(%esp) - # ebp_stack = ebp - movl %ebp,96(%esp) - # x = arg1 - movl 4(%esp,%eax),%edx - # m = arg2 - movl 8(%esp,%eax),%esi - # out = arg3 - movl 12(%esp,%eax),%edi - # bytes = arg4 - movl 16(%esp,%eax),%ebx - # bytes -= 0 - sub $0,%ebx - # goto done if unsigned<= - jbe ._done -._start: - # in0 = *(uint32 *) (x + 0) - movl 0(%edx),%eax - # in1 = *(uint32 *) (x + 4) - movl 4(%edx),%ecx - # in2 = *(uint32 *) (x + 8) - movl 8(%edx),%ebp - # j0 = in0 - movl %eax,164(%esp) - # in3 = *(uint32 *) (x + 12) - movl 12(%edx),%eax - # j1 = in1 - movl %ecx,168(%esp) - # in4 = *(uint32 *) (x + 16) - movl 16(%edx),%ecx - # j2 = in2 - movl %ebp,172(%esp) - # in5 = *(uint32 *) (x + 20) - movl 20(%edx),%ebp - # j3 = in3 - movl %eax,176(%esp) - # in6 = *(uint32 *) (x + 24) - movl 24(%edx),%eax - # j4 = in4 - movl %ecx,180(%esp) - # in7 = *(uint32 *) (x + 28) - movl 28(%edx),%ecx - # j5 = in5 - movl %ebp,184(%esp) - # in8 = *(uint32 *) (x + 32) - movl 32(%edx),%ebp - # j6 = in6 - movl %eax,188(%esp) - # in9 = *(uint32 *) (x + 36) - movl 36(%edx),%eax - # j7 = in7 - movl %ecx,192(%esp) - # in10 = *(uint32 *) (x + 40) - movl 40(%edx),%ecx - # j8 = in8 - movl %ebp,196(%esp) - # in11 = *(uint32 *) (x + 44) - movl 44(%edx),%ebp - # j9 = in9 - movl %eax,200(%esp) - # in12 = *(uint32 *) (x + 48) - movl 48(%edx),%eax - # j10 = in10 - movl %ecx,204(%esp) - # in13 = *(uint32 *) (x + 52) - movl 52(%edx),%ecx - # j11 = in11 - movl %ebp,208(%esp) - # in14 = *(uint32 *) (x + 56) - movl 56(%edx),%ebp - # j12 = in12 - movl %eax,212(%esp) - # in15 = *(uint32 *) (x + 60) - movl 60(%edx),%eax - # j13 = in13 - movl %ecx,216(%esp) - # j14 = in14 - movl %ebp,220(%esp) - # j15 = in15 - movl %eax,224(%esp) - # x_backup = x - movl %edx,64(%esp) -._bytesatleast1: - # bytes - 64 - cmp $64,%ebx - # goto nocopy if unsigned>= - jae ._nocopy - # ctarget = out - movl %edi,228(%esp) - # out = &tmp - leal 0(%esp),%edi - # i = bytes - mov %ebx,%ecx - # while (i) { *out++ = *m++; --i } - rep movsb - # out = &tmp - leal 0(%esp),%edi - # m = &tmp - leal 0(%esp),%esi -._nocopy: - # out_backup = out - movl %edi,72(%esp) - # m_backup = m - movl %esi,68(%esp) - # bytes_backup = bytes - movl %ebx,76(%esp) - # in0 = j0 - movl 164(%esp),%eax - # in1 = j1 - movl 168(%esp),%ecx - # in2 = j2 - movl 172(%esp),%edx - # in3 = j3 - movl 176(%esp),%ebx - # x0 = in0 - movl %eax,100(%esp) - # x1 = in1 - movl %ecx,104(%esp) - # x2 = in2 - movl %edx,108(%esp) - # x3 = in3 - movl %ebx,112(%esp) - # in4 = j4 - movl 180(%esp),%eax - # in5 = j5 - movl 184(%esp),%ecx - # in6 = j6 - movl 188(%esp),%edx - # in7 = j7 - movl 192(%esp),%ebx - # x4 = in4 - movl %eax,116(%esp) - # x5 = in5 - movl %ecx,120(%esp) - # x6 = in6 - movl %edx,124(%esp) - # x7 = in7 - movl %ebx,128(%esp) - # in8 = j8 - movl 196(%esp),%eax - # in9 = j9 - movl 200(%esp),%ecx - # in10 = j10 - movl 204(%esp),%edx - # in11 = j11 - movl 208(%esp),%ebx - # x8 = in8 - movl %eax,132(%esp) - # x9 = in9 - movl %ecx,136(%esp) - # x10 = in10 - movl %edx,140(%esp) - # x11 = in11 - movl %ebx,144(%esp) - # in12 = j12 - movl 212(%esp),%eax - # in13 = j13 - movl 216(%esp),%ecx - # in14 = j14 - movl 220(%esp),%edx - # in15 = j15 - movl 224(%esp),%ebx - # x12 = in12 - movl %eax,148(%esp) - # x13 = in13 - 
movl %ecx,152(%esp) - # x14 = in14 - movl %edx,156(%esp) - # x15 = in15 - movl %ebx,160(%esp) - # i = 20 - mov $20,%ebp - # p = x0 - movl 100(%esp),%eax - # s = x5 - movl 120(%esp),%ecx - # t = x10 - movl 140(%esp),%edx - # w = x15 - movl 160(%esp),%ebx -._mainloop: - # x0 = p - movl %eax,100(%esp) - # x10 = t - movl %edx,140(%esp) - # p += x12 - addl 148(%esp),%eax - # x5 = s - movl %ecx,120(%esp) - # t += x6 - addl 124(%esp),%edx - # x15 = w - movl %ebx,160(%esp) - # r = x1 - movl 104(%esp),%esi - # r += s - add %ecx,%esi - # v = x11 - movl 144(%esp),%edi - # v += w - add %ebx,%edi - # p <<<= 7 - rol $7,%eax - # p ^= x4 - xorl 116(%esp),%eax - # t <<<= 7 - rol $7,%edx - # t ^= x14 - xorl 156(%esp),%edx - # r <<<= 7 - rol $7,%esi - # r ^= x9 - xorl 136(%esp),%esi - # v <<<= 7 - rol $7,%edi - # v ^= x3 - xorl 112(%esp),%edi - # x4 = p - movl %eax,116(%esp) - # x14 = t - movl %edx,156(%esp) - # p += x0 - addl 100(%esp),%eax - # x9 = r - movl %esi,136(%esp) - # t += x10 - addl 140(%esp),%edx - # x3 = v - movl %edi,112(%esp) - # p <<<= 9 - rol $9,%eax - # p ^= x8 - xorl 132(%esp),%eax - # t <<<= 9 - rol $9,%edx - # t ^= x2 - xorl 108(%esp),%edx - # s += r - add %esi,%ecx - # s <<<= 9 - rol $9,%ecx - # s ^= x13 - xorl 152(%esp),%ecx - # w += v - add %edi,%ebx - # w <<<= 9 - rol $9,%ebx - # w ^= x7 - xorl 128(%esp),%ebx - # x8 = p - movl %eax,132(%esp) - # x2 = t - movl %edx,108(%esp) - # p += x4 - addl 116(%esp),%eax - # x13 = s - movl %ecx,152(%esp) - # t += x14 - addl 156(%esp),%edx - # x7 = w - movl %ebx,128(%esp) - # p <<<= 13 - rol $13,%eax - # p ^= x12 - xorl 148(%esp),%eax - # t <<<= 13 - rol $13,%edx - # t ^= x6 - xorl 124(%esp),%edx - # r += s - add %ecx,%esi - # r <<<= 13 - rol $13,%esi - # r ^= x1 - xorl 104(%esp),%esi - # v += w - add %ebx,%edi - # v <<<= 13 - rol $13,%edi - # v ^= x11 - xorl 144(%esp),%edi - # x12 = p - movl %eax,148(%esp) - # x6 = t - movl %edx,124(%esp) - # p += x8 - addl 132(%esp),%eax - # x1 = r - movl %esi,104(%esp) - # t += x2 - addl 108(%esp),%edx - # x11 = v - movl %edi,144(%esp) - # p <<<= 18 - rol $18,%eax - # p ^= x0 - xorl 100(%esp),%eax - # t <<<= 18 - rol $18,%edx - # t ^= x10 - xorl 140(%esp),%edx - # s += r - add %esi,%ecx - # s <<<= 18 - rol $18,%ecx - # s ^= x5 - xorl 120(%esp),%ecx - # w += v - add %edi,%ebx - # w <<<= 18 - rol $18,%ebx - # w ^= x15 - xorl 160(%esp),%ebx - # x0 = p - movl %eax,100(%esp) - # x10 = t - movl %edx,140(%esp) - # p += x3 - addl 112(%esp),%eax - # p <<<= 7 - rol $7,%eax - # x5 = s - movl %ecx,120(%esp) - # t += x9 - addl 136(%esp),%edx - # x15 = w - movl %ebx,160(%esp) - # r = x4 - movl 116(%esp),%esi - # r += s - add %ecx,%esi - # v = x14 - movl 156(%esp),%edi - # v += w - add %ebx,%edi - # p ^= x1 - xorl 104(%esp),%eax - # t <<<= 7 - rol $7,%edx - # t ^= x11 - xorl 144(%esp),%edx - # r <<<= 7 - rol $7,%esi - # r ^= x6 - xorl 124(%esp),%esi - # v <<<= 7 - rol $7,%edi - # v ^= x12 - xorl 148(%esp),%edi - # x1 = p - movl %eax,104(%esp) - # x11 = t - movl %edx,144(%esp) - # p += x0 - addl 100(%esp),%eax - # x6 = r - movl %esi,124(%esp) - # t += x10 - addl 140(%esp),%edx - # x12 = v - movl %edi,148(%esp) - # p <<<= 9 - rol $9,%eax - # p ^= x2 - xorl 108(%esp),%eax - # t <<<= 9 - rol $9,%edx - # t ^= x8 - xorl 132(%esp),%edx - # s += r - add %esi,%ecx - # s <<<= 9 - rol $9,%ecx - # s ^= x7 - xorl 128(%esp),%ecx - # w += v - add %edi,%ebx - # w <<<= 9 - rol $9,%ebx - # w ^= x13 - xorl 152(%esp),%ebx - # x2 = p - movl %eax,108(%esp) - # x8 = t - movl %edx,132(%esp) - # p += x1 - addl 104(%esp),%eax - # x7 = s - movl 
%ecx,128(%esp) - # t += x11 - addl 144(%esp),%edx - # x13 = w - movl %ebx,152(%esp) - # p <<<= 13 - rol $13,%eax - # p ^= x3 - xorl 112(%esp),%eax - # t <<<= 13 - rol $13,%edx - # t ^= x9 - xorl 136(%esp),%edx - # r += s - add %ecx,%esi - # r <<<= 13 - rol $13,%esi - # r ^= x4 - xorl 116(%esp),%esi - # v += w - add %ebx,%edi - # v <<<= 13 - rol $13,%edi - # v ^= x14 - xorl 156(%esp),%edi - # x3 = p - movl %eax,112(%esp) - # x9 = t - movl %edx,136(%esp) - # p += x2 - addl 108(%esp),%eax - # x4 = r - movl %esi,116(%esp) - # t += x8 - addl 132(%esp),%edx - # x14 = v - movl %edi,156(%esp) - # p <<<= 18 - rol $18,%eax - # p ^= x0 - xorl 100(%esp),%eax - # t <<<= 18 - rol $18,%edx - # t ^= x10 - xorl 140(%esp),%edx - # s += r - add %esi,%ecx - # s <<<= 18 - rol $18,%ecx - # s ^= x5 - xorl 120(%esp),%ecx - # w += v - add %edi,%ebx - # w <<<= 18 - rol $18,%ebx - # w ^= x15 - xorl 160(%esp),%ebx - # x0 = p - movl %eax,100(%esp) - # x10 = t - movl %edx,140(%esp) - # p += x12 - addl 148(%esp),%eax - # x5 = s - movl %ecx,120(%esp) - # t += x6 - addl 124(%esp),%edx - # x15 = w - movl %ebx,160(%esp) - # r = x1 - movl 104(%esp),%esi - # r += s - add %ecx,%esi - # v = x11 - movl 144(%esp),%edi - # v += w - add %ebx,%edi - # p <<<= 7 - rol $7,%eax - # p ^= x4 - xorl 116(%esp),%eax - # t <<<= 7 - rol $7,%edx - # t ^= x14 - xorl 156(%esp),%edx - # r <<<= 7 - rol $7,%esi - # r ^= x9 - xorl 136(%esp),%esi - # v <<<= 7 - rol $7,%edi - # v ^= x3 - xorl 112(%esp),%edi - # x4 = p - movl %eax,116(%esp) - # x14 = t - movl %edx,156(%esp) - # p += x0 - addl 100(%esp),%eax - # x9 = r - movl %esi,136(%esp) - # t += x10 - addl 140(%esp),%edx - # x3 = v - movl %edi,112(%esp) - # p <<<= 9 - rol $9,%eax - # p ^= x8 - xorl 132(%esp),%eax - # t <<<= 9 - rol $9,%edx - # t ^= x2 - xorl 108(%esp),%edx - # s += r - add %esi,%ecx - # s <<<= 9 - rol $9,%ecx - # s ^= x13 - xorl 152(%esp),%ecx - # w += v - add %edi,%ebx - # w <<<= 9 - rol $9,%ebx - # w ^= x7 - xorl 128(%esp),%ebx - # x8 = p - movl %eax,132(%esp) - # x2 = t - movl %edx,108(%esp) - # p += x4 - addl 116(%esp),%eax - # x13 = s - movl %ecx,152(%esp) - # t += x14 - addl 156(%esp),%edx - # x7 = w - movl %ebx,128(%esp) - # p <<<= 13 - rol $13,%eax - # p ^= x12 - xorl 148(%esp),%eax - # t <<<= 13 - rol $13,%edx - # t ^= x6 - xorl 124(%esp),%edx - # r += s - add %ecx,%esi - # r <<<= 13 - rol $13,%esi - # r ^= x1 - xorl 104(%esp),%esi - # v += w - add %ebx,%edi - # v <<<= 13 - rol $13,%edi - # v ^= x11 - xorl 144(%esp),%edi - # x12 = p - movl %eax,148(%esp) - # x6 = t - movl %edx,124(%esp) - # p += x8 - addl 132(%esp),%eax - # x1 = r - movl %esi,104(%esp) - # t += x2 - addl 108(%esp),%edx - # x11 = v - movl %edi,144(%esp) - # p <<<= 18 - rol $18,%eax - # p ^= x0 - xorl 100(%esp),%eax - # t <<<= 18 - rol $18,%edx - # t ^= x10 - xorl 140(%esp),%edx - # s += r - add %esi,%ecx - # s <<<= 18 - rol $18,%ecx - # s ^= x5 - xorl 120(%esp),%ecx - # w += v - add %edi,%ebx - # w <<<= 18 - rol $18,%ebx - # w ^= x15 - xorl 160(%esp),%ebx - # x0 = p - movl %eax,100(%esp) - # x10 = t - movl %edx,140(%esp) - # p += x3 - addl 112(%esp),%eax - # p <<<= 7 - rol $7,%eax - # x5 = s - movl %ecx,120(%esp) - # t += x9 - addl 136(%esp),%edx - # x15 = w - movl %ebx,160(%esp) - # r = x4 - movl 116(%esp),%esi - # r += s - add %ecx,%esi - # v = x14 - movl 156(%esp),%edi - # v += w - add %ebx,%edi - # p ^= x1 - xorl 104(%esp),%eax - # t <<<= 7 - rol $7,%edx - # t ^= x11 - xorl 144(%esp),%edx - # r <<<= 7 - rol $7,%esi - # r ^= x6 - xorl 124(%esp),%esi - # v <<<= 7 - rol $7,%edi - # v ^= x12 - xorl 
148(%esp),%edi - # x1 = p - movl %eax,104(%esp) - # x11 = t - movl %edx,144(%esp) - # p += x0 - addl 100(%esp),%eax - # x6 = r - movl %esi,124(%esp) - # t += x10 - addl 140(%esp),%edx - # x12 = v - movl %edi,148(%esp) - # p <<<= 9 - rol $9,%eax - # p ^= x2 - xorl 108(%esp),%eax - # t <<<= 9 - rol $9,%edx - # t ^= x8 - xorl 132(%esp),%edx - # s += r - add %esi,%ecx - # s <<<= 9 - rol $9,%ecx - # s ^= x7 - xorl 128(%esp),%ecx - # w += v - add %edi,%ebx - # w <<<= 9 - rol $9,%ebx - # w ^= x13 - xorl 152(%esp),%ebx - # x2 = p - movl %eax,108(%esp) - # x8 = t - movl %edx,132(%esp) - # p += x1 - addl 104(%esp),%eax - # x7 = s - movl %ecx,128(%esp) - # t += x11 - addl 144(%esp),%edx - # x13 = w - movl %ebx,152(%esp) - # p <<<= 13 - rol $13,%eax - # p ^= x3 - xorl 112(%esp),%eax - # t <<<= 13 - rol $13,%edx - # t ^= x9 - xorl 136(%esp),%edx - # r += s - add %ecx,%esi - # r <<<= 13 - rol $13,%esi - # r ^= x4 - xorl 116(%esp),%esi - # v += w - add %ebx,%edi - # v <<<= 13 - rol $13,%edi - # v ^= x14 - xorl 156(%esp),%edi - # x3 = p - movl %eax,112(%esp) - # x9 = t - movl %edx,136(%esp) - # p += x2 - addl 108(%esp),%eax - # x4 = r - movl %esi,116(%esp) - # t += x8 - addl 132(%esp),%edx - # x14 = v - movl %edi,156(%esp) - # p <<<= 18 - rol $18,%eax - # p ^= x0 - xorl 100(%esp),%eax - # t <<<= 18 - rol $18,%edx - # t ^= x10 - xorl 140(%esp),%edx - # s += r - add %esi,%ecx - # s <<<= 18 - rol $18,%ecx - # s ^= x5 - xorl 120(%esp),%ecx - # w += v - add %edi,%ebx - # w <<<= 18 - rol $18,%ebx - # w ^= x15 - xorl 160(%esp),%ebx - # i -= 4 - sub $4,%ebp - # goto mainloop if unsigned > - ja ._mainloop - # x0 = p - movl %eax,100(%esp) - # x5 = s - movl %ecx,120(%esp) - # x10 = t - movl %edx,140(%esp) - # x15 = w - movl %ebx,160(%esp) - # out = out_backup - movl 72(%esp),%edi - # m = m_backup - movl 68(%esp),%esi - # in0 = x0 - movl 100(%esp),%eax - # in1 = x1 - movl 104(%esp),%ecx - # in0 += j0 - addl 164(%esp),%eax - # in1 += j1 - addl 168(%esp),%ecx - # in0 ^= *(uint32 *) (m + 0) - xorl 0(%esi),%eax - # in1 ^= *(uint32 *) (m + 4) - xorl 4(%esi),%ecx - # *(uint32 *) (out + 0) = in0 - movl %eax,0(%edi) - # *(uint32 *) (out + 4) = in1 - movl %ecx,4(%edi) - # in2 = x2 - movl 108(%esp),%eax - # in3 = x3 - movl 112(%esp),%ecx - # in2 += j2 - addl 172(%esp),%eax - # in3 += j3 - addl 176(%esp),%ecx - # in2 ^= *(uint32 *) (m + 8) - xorl 8(%esi),%eax - # in3 ^= *(uint32 *) (m + 12) - xorl 12(%esi),%ecx - # *(uint32 *) (out + 8) = in2 - movl %eax,8(%edi) - # *(uint32 *) (out + 12) = in3 - movl %ecx,12(%edi) - # in4 = x4 - movl 116(%esp),%eax - # in5 = x5 - movl 120(%esp),%ecx - # in4 += j4 - addl 180(%esp),%eax - # in5 += j5 - addl 184(%esp),%ecx - # in4 ^= *(uint32 *) (m + 16) - xorl 16(%esi),%eax - # in5 ^= *(uint32 *) (m + 20) - xorl 20(%esi),%ecx - # *(uint32 *) (out + 16) = in4 - movl %eax,16(%edi) - # *(uint32 *) (out + 20) = in5 - movl %ecx,20(%edi) - # in6 = x6 - movl 124(%esp),%eax - # in7 = x7 - movl 128(%esp),%ecx - # in6 += j6 - addl 188(%esp),%eax - # in7 += j7 - addl 192(%esp),%ecx - # in6 ^= *(uint32 *) (m + 24) - xorl 24(%esi),%eax - # in7 ^= *(uint32 *) (m + 28) - xorl 28(%esi),%ecx - # *(uint32 *) (out + 24) = in6 - movl %eax,24(%edi) - # *(uint32 *) (out + 28) = in7 - movl %ecx,28(%edi) - # in8 = x8 - movl 132(%esp),%eax - # in9 = x9 - movl 136(%esp),%ecx - # in8 += j8 - addl 196(%esp),%eax - # in9 += j9 - addl 200(%esp),%ecx - # in8 ^= *(uint32 *) (m + 32) - xorl 32(%esi),%eax - # in9 ^= *(uint32 *) (m + 36) - xorl 36(%esi),%ecx - # *(uint32 *) (out + 32) = in8 - movl %eax,32(%edi) - # *(uint32 *) 
(out + 36) = in9 - movl %ecx,36(%edi) - # in10 = x10 - movl 140(%esp),%eax - # in11 = x11 - movl 144(%esp),%ecx - # in10 += j10 - addl 204(%esp),%eax - # in11 += j11 - addl 208(%esp),%ecx - # in10 ^= *(uint32 *) (m + 40) - xorl 40(%esi),%eax - # in11 ^= *(uint32 *) (m + 44) - xorl 44(%esi),%ecx - # *(uint32 *) (out + 40) = in10 - movl %eax,40(%edi) - # *(uint32 *) (out + 44) = in11 - movl %ecx,44(%edi) - # in12 = x12 - movl 148(%esp),%eax - # in13 = x13 - movl 152(%esp),%ecx - # in12 += j12 - addl 212(%esp),%eax - # in13 += j13 - addl 216(%esp),%ecx - # in12 ^= *(uint32 *) (m + 48) - xorl 48(%esi),%eax - # in13 ^= *(uint32 *) (m + 52) - xorl 52(%esi),%ecx - # *(uint32 *) (out + 48) = in12 - movl %eax,48(%edi) - # *(uint32 *) (out + 52) = in13 - movl %ecx,52(%edi) - # in14 = x14 - movl 156(%esp),%eax - # in15 = x15 - movl 160(%esp),%ecx - # in14 += j14 - addl 220(%esp),%eax - # in15 += j15 - addl 224(%esp),%ecx - # in14 ^= *(uint32 *) (m + 56) - xorl 56(%esi),%eax - # in15 ^= *(uint32 *) (m + 60) - xorl 60(%esi),%ecx - # *(uint32 *) (out + 56) = in14 - movl %eax,56(%edi) - # *(uint32 *) (out + 60) = in15 - movl %ecx,60(%edi) - # bytes = bytes_backup - movl 76(%esp),%ebx - # in8 = j8 - movl 196(%esp),%eax - # in9 = j9 - movl 200(%esp),%ecx - # in8 += 1 - add $1,%eax - # in9 += 0 + carry - adc $0,%ecx - # j8 = in8 - movl %eax,196(%esp) - # j9 = in9 - movl %ecx,200(%esp) - # bytes - 64 - cmp $64,%ebx - # goto bytesatleast65 if unsigned> - ja ._bytesatleast65 - # goto bytesatleast64 if unsigned>= - jae ._bytesatleast64 - # m = out - mov %edi,%esi - # out = ctarget - movl 228(%esp),%edi - # i = bytes - mov %ebx,%ecx - # while (i) { *out++ = *m++; --i } - rep movsb -._bytesatleast64: - # x = x_backup - movl 64(%esp),%eax - # in8 = j8 - movl 196(%esp),%ecx - # in9 = j9 - movl 200(%esp),%edx - # *(uint32 *) (x + 32) = in8 - movl %ecx,32(%eax) - # *(uint32 *) (x + 36) = in9 - movl %edx,36(%eax) -._done: - # eax = eax_stack - movl 80(%esp),%eax - # ebx = ebx_stack - movl 84(%esp),%ebx - # esi = esi_stack - movl 88(%esp),%esi - # edi = edi_stack - movl 92(%esp),%edi - # ebp = ebp_stack - movl 96(%esp),%ebp - # leave - add %eax,%esp - ret -._bytesatleast65: - # bytes -= 64 - sub $64,%ebx - # out += 64 - add $64,%edi - # m += 64 - add $64,%esi - # goto bytesatleast1 - jmp ._bytesatleast1 -ENDPROC(salsa20_encrypt_bytes) diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S deleted file mode 100644 index 03a4918f41ee..000000000000 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S +++ /dev/null @@ -1,805 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#include <linux/linkage.h> - -# enter salsa20_encrypt_bytes -ENTRY(salsa20_encrypt_bytes) - mov %rsp,%r11 - and $31,%r11 - add $256,%r11 - sub %r11,%rsp - # x = arg1 - mov %rdi,%r8 - # m = arg2 - mov %rsi,%rsi - # out = arg3 - mov %rdx,%rdi - # bytes = arg4 - mov %rcx,%rdx - # unsigned>? 
bytes - 0 - cmp $0,%rdx - # comment:fp stack unchanged by jump - # goto done if !unsigned> - jbe ._done - # comment:fp stack unchanged by fallthrough -# start: -._start: - # r11_stack = r11 - movq %r11,0(%rsp) - # r12_stack = r12 - movq %r12,8(%rsp) - # r13_stack = r13 - movq %r13,16(%rsp) - # r14_stack = r14 - movq %r14,24(%rsp) - # r15_stack = r15 - movq %r15,32(%rsp) - # rbx_stack = rbx - movq %rbx,40(%rsp) - # rbp_stack = rbp - movq %rbp,48(%rsp) - # in0 = *(uint64 *) (x + 0) - movq 0(%r8),%rcx - # in2 = *(uint64 *) (x + 8) - movq 8(%r8),%r9 - # in4 = *(uint64 *) (x + 16) - movq 16(%r8),%rax - # in6 = *(uint64 *) (x + 24) - movq 24(%r8),%r10 - # in8 = *(uint64 *) (x + 32) - movq 32(%r8),%r11 - # in10 = *(uint64 *) (x + 40) - movq 40(%r8),%r12 - # in12 = *(uint64 *) (x + 48) - movq 48(%r8),%r13 - # in14 = *(uint64 *) (x + 56) - movq 56(%r8),%r14 - # j0 = in0 - movq %rcx,56(%rsp) - # j2 = in2 - movq %r9,64(%rsp) - # j4 = in4 - movq %rax,72(%rsp) - # j6 = in6 - movq %r10,80(%rsp) - # j8 = in8 - movq %r11,88(%rsp) - # j10 = in10 - movq %r12,96(%rsp) - # j12 = in12 - movq %r13,104(%rsp) - # j14 = in14 - movq %r14,112(%rsp) - # x_backup = x - movq %r8,120(%rsp) -# bytesatleast1: -._bytesatleast1: - # unsigned<? bytes - 64 - cmp $64,%rdx - # comment:fp stack unchanged by jump - # goto nocopy if !unsigned< - jae ._nocopy - # ctarget = out - movq %rdi,128(%rsp) - # out = &tmp - leaq 192(%rsp),%rdi - # i = bytes - mov %rdx,%rcx - # while (i) { *out++ = *m++; --i } - rep movsb - # out = &tmp - leaq 192(%rsp),%rdi - # m = &tmp - leaq 192(%rsp),%rsi - # comment:fp stack unchanged by fallthrough -# nocopy: -._nocopy: - # out_backup = out - movq %rdi,136(%rsp) - # m_backup = m - movq %rsi,144(%rsp) - # bytes_backup = bytes - movq %rdx,152(%rsp) - # x1 = j0 - movq 56(%rsp),%rdi - # x0 = x1 - mov %rdi,%rdx - # (uint64) x1 >>= 32 - shr $32,%rdi - # x3 = j2 - movq 64(%rsp),%rsi - # x2 = x3 - mov %rsi,%rcx - # (uint64) x3 >>= 32 - shr $32,%rsi - # x5 = j4 - movq 72(%rsp),%r8 - # x4 = x5 - mov %r8,%r9 - # (uint64) x5 >>= 32 - shr $32,%r8 - # x5_stack = x5 - movq %r8,160(%rsp) - # x7 = j6 - movq 80(%rsp),%r8 - # x6 = x7 - mov %r8,%rax - # (uint64) x7 >>= 32 - shr $32,%r8 - # x9 = j8 - movq 88(%rsp),%r10 - # x8 = x9 - mov %r10,%r11 - # (uint64) x9 >>= 32 - shr $32,%r10 - # x11 = j10 - movq 96(%rsp),%r12 - # x10 = x11 - mov %r12,%r13 - # x10_stack = x10 - movq %r13,168(%rsp) - # (uint64) x11 >>= 32 - shr $32,%r12 - # x13 = j12 - movq 104(%rsp),%r13 - # x12 = x13 - mov %r13,%r14 - # (uint64) x13 >>= 32 - shr $32,%r13 - # x15 = j14 - movq 112(%rsp),%r15 - # x14 = x15 - mov %r15,%rbx - # (uint64) x15 >>= 32 - shr $32,%r15 - # x15_stack = x15 - movq %r15,176(%rsp) - # i = 20 - mov $20,%r15 -# mainloop: -._mainloop: - # i_backup = i - movq %r15,184(%rsp) - # x5 = x5_stack - movq 160(%rsp),%r15 - # a = x12 + x0 - lea (%r14,%rdx),%rbp - # (uint32) a <<<= 7 - rol $7,%ebp - # x4 ^= a - xor %rbp,%r9 - # b = x1 + x5 - lea (%rdi,%r15),%rbp - # (uint32) b <<<= 7 - rol $7,%ebp - # x9 ^= b - xor %rbp,%r10 - # a = x0 + x4 - lea (%rdx,%r9),%rbp - # (uint32) a <<<= 9 - rol $9,%ebp - # x8 ^= a - xor %rbp,%r11 - # b = x5 + x9 - lea (%r15,%r10),%rbp - # (uint32) b <<<= 9 - rol $9,%ebp - # x13 ^= b - xor %rbp,%r13 - # a = x4 + x8 - lea (%r9,%r11),%rbp - # (uint32) a <<<= 13 - rol $13,%ebp - # x12 ^= a - xor %rbp,%r14 - # b = x9 + x13 - lea (%r10,%r13),%rbp - # (uint32) b <<<= 13 - rol $13,%ebp - # x1 ^= b - xor %rbp,%rdi - # a = x8 + x12 - lea (%r11,%r14),%rbp - # (uint32) a <<<= 18 - rol $18,%ebp - # x0 ^= a - xor %rbp,%rdx - # b 
= x13 + x1 - lea (%r13,%rdi),%rbp - # (uint32) b <<<= 18 - rol $18,%ebp - # x5 ^= b - xor %rbp,%r15 - # x10 = x10_stack - movq 168(%rsp),%rbp - # x5_stack = x5 - movq %r15,160(%rsp) - # c = x6 + x10 - lea (%rax,%rbp),%r15 - # (uint32) c <<<= 7 - rol $7,%r15d - # x14 ^= c - xor %r15,%rbx - # c = x10 + x14 - lea (%rbp,%rbx),%r15 - # (uint32) c <<<= 9 - rol $9,%r15d - # x2 ^= c - xor %r15,%rcx - # c = x14 + x2 - lea (%rbx,%rcx),%r15 - # (uint32) c <<<= 13 - rol $13,%r15d - # x6 ^= c - xor %r15,%rax - # c = x2 + x6 - lea (%rcx,%rax),%r15 - # (uint32) c <<<= 18 - rol $18,%r15d - # x10 ^= c - xor %r15,%rbp - # x15 = x15_stack - movq 176(%rsp),%r15 - # x10_stack = x10 - movq %rbp,168(%rsp) - # d = x11 + x15 - lea (%r12,%r15),%rbp - # (uint32) d <<<= 7 - rol $7,%ebp - # x3 ^= d - xor %rbp,%rsi - # d = x15 + x3 - lea (%r15,%rsi),%rbp - # (uint32) d <<<= 9 - rol $9,%ebp - # x7 ^= d - xor %rbp,%r8 - # d = x3 + x7 - lea (%rsi,%r8),%rbp - # (uint32) d <<<= 13 - rol $13,%ebp - # x11 ^= d - xor %rbp,%r12 - # d = x7 + x11 - lea (%r8,%r12),%rbp - # (uint32) d <<<= 18 - rol $18,%ebp - # x15 ^= d - xor %rbp,%r15 - # x15_stack = x15 - movq %r15,176(%rsp) - # x5 = x5_stack - movq 160(%rsp),%r15 - # a = x3 + x0 - lea (%rsi,%rdx),%rbp - # (uint32) a <<<= 7 - rol $7,%ebp - # x1 ^= a - xor %rbp,%rdi - # b = x4 + x5 - lea (%r9,%r15),%rbp - # (uint32) b <<<= 7 - rol $7,%ebp - # x6 ^= b - xor %rbp,%rax - # a = x0 + x1 - lea (%rdx,%rdi),%rbp - # (uint32) a <<<= 9 - rol $9,%ebp - # x2 ^= a - xor %rbp,%rcx - # b = x5 + x6 - lea (%r15,%rax),%rbp - # (uint32) b <<<= 9 - rol $9,%ebp - # x7 ^= b - xor %rbp,%r8 - # a = x1 + x2 - lea (%rdi,%rcx),%rbp - # (uint32) a <<<= 13 - rol $13,%ebp - # x3 ^= a - xor %rbp,%rsi - # b = x6 + x7 - lea (%rax,%r8),%rbp - # (uint32) b <<<= 13 - rol $13,%ebp - # x4 ^= b - xor %rbp,%r9 - # a = x2 + x3 - lea (%rcx,%rsi),%rbp - # (uint32) a <<<= 18 - rol $18,%ebp - # x0 ^= a - xor %rbp,%rdx - # b = x7 + x4 - lea (%r8,%r9),%rbp - # (uint32) b <<<= 18 - rol $18,%ebp - # x5 ^= b - xor %rbp,%r15 - # x10 = x10_stack - movq 168(%rsp),%rbp - # x5_stack = x5 - movq %r15,160(%rsp) - # c = x9 + x10 - lea (%r10,%rbp),%r15 - # (uint32) c <<<= 7 - rol $7,%r15d - # x11 ^= c - xor %r15,%r12 - # c = x10 + x11 - lea (%rbp,%r12),%r15 - # (uint32) c <<<= 9 - rol $9,%r15d - # x8 ^= c - xor %r15,%r11 - # c = x11 + x8 - lea (%r12,%r11),%r15 - # (uint32) c <<<= 13 - rol $13,%r15d - # x9 ^= c - xor %r15,%r10 - # c = x8 + x9 - lea (%r11,%r10),%r15 - # (uint32) c <<<= 18 - rol $18,%r15d - # x10 ^= c - xor %r15,%rbp - # x15 = x15_stack - movq 176(%rsp),%r15 - # x10_stack = x10 - movq %rbp,168(%rsp) - # d = x14 + x15 - lea (%rbx,%r15),%rbp - # (uint32) d <<<= 7 - rol $7,%ebp - # x12 ^= d - xor %rbp,%r14 - # d = x15 + x12 - lea (%r15,%r14),%rbp - # (uint32) d <<<= 9 - rol $9,%ebp - # x13 ^= d - xor %rbp,%r13 - # d = x12 + x13 - lea (%r14,%r13),%rbp - # (uint32) d <<<= 13 - rol $13,%ebp - # x14 ^= d - xor %rbp,%rbx - # d = x13 + x14 - lea (%r13,%rbx),%rbp - # (uint32) d <<<= 18 - rol $18,%ebp - # x15 ^= d - xor %rbp,%r15 - # x15_stack = x15 - movq %r15,176(%rsp) - # x5 = x5_stack - movq 160(%rsp),%r15 - # a = x12 + x0 - lea (%r14,%rdx),%rbp - # (uint32) a <<<= 7 - rol $7,%ebp - # x4 ^= a - xor %rbp,%r9 - # b = x1 + x5 - lea (%rdi,%r15),%rbp - # (uint32) b <<<= 7 - rol $7,%ebp - # x9 ^= b - xor %rbp,%r10 - # a = x0 + x4 - lea (%rdx,%r9),%rbp - # (uint32) a <<<= 9 - rol $9,%ebp - # x8 ^= a - xor %rbp,%r11 - # b = x5 + x9 - lea (%r15,%r10),%rbp - # (uint32) b <<<= 9 - rol $9,%ebp - # x13 ^= b - xor %rbp,%r13 - # a = x4 + x8 - lea 
(%r9,%r11),%rbp - # (uint32) a <<<= 13 - rol $13,%ebp - # x12 ^= a - xor %rbp,%r14 - # b = x9 + x13 - lea (%r10,%r13),%rbp - # (uint32) b <<<= 13 - rol $13,%ebp - # x1 ^= b - xor %rbp,%rdi - # a = x8 + x12 - lea (%r11,%r14),%rbp - # (uint32) a <<<= 18 - rol $18,%ebp - # x0 ^= a - xor %rbp,%rdx - # b = x13 + x1 - lea (%r13,%rdi),%rbp - # (uint32) b <<<= 18 - rol $18,%ebp - # x5 ^= b - xor %rbp,%r15 - # x10 = x10_stack - movq 168(%rsp),%rbp - # x5_stack = x5 - movq %r15,160(%rsp) - # c = x6 + x10 - lea (%rax,%rbp),%r15 - # (uint32) c <<<= 7 - rol $7,%r15d - # x14 ^= c - xor %r15,%rbx - # c = x10 + x14 - lea (%rbp,%rbx),%r15 - # (uint32) c <<<= 9 - rol $9,%r15d - # x2 ^= c - xor %r15,%rcx - # c = x14 + x2 - lea (%rbx,%rcx),%r15 - # (uint32) c <<<= 13 - rol $13,%r15d - # x6 ^= c - xor %r15,%rax - # c = x2 + x6 - lea (%rcx,%rax),%r15 - # (uint32) c <<<= 18 - rol $18,%r15d - # x10 ^= c - xor %r15,%rbp - # x15 = x15_stack - movq 176(%rsp),%r15 - # x10_stack = x10 - movq %rbp,168(%rsp) - # d = x11 + x15 - lea (%r12,%r15),%rbp - # (uint32) d <<<= 7 - rol $7,%ebp - # x3 ^= d - xor %rbp,%rsi - # d = x15 + x3 - lea (%r15,%rsi),%rbp - # (uint32) d <<<= 9 - rol $9,%ebp - # x7 ^= d - xor %rbp,%r8 - # d = x3 + x7 - lea (%rsi,%r8),%rbp - # (uint32) d <<<= 13 - rol $13,%ebp - # x11 ^= d - xor %rbp,%r12 - # d = x7 + x11 - lea (%r8,%r12),%rbp - # (uint32) d <<<= 18 - rol $18,%ebp - # x15 ^= d - xor %rbp,%r15 - # x15_stack = x15 - movq %r15,176(%rsp) - # x5 = x5_stack - movq 160(%rsp),%r15 - # a = x3 + x0 - lea (%rsi,%rdx),%rbp - # (uint32) a <<<= 7 - rol $7,%ebp - # x1 ^= a - xor %rbp,%rdi - # b = x4 + x5 - lea (%r9,%r15),%rbp - # (uint32) b <<<= 7 - rol $7,%ebp - # x6 ^= b - xor %rbp,%rax - # a = x0 + x1 - lea (%rdx,%rdi),%rbp - # (uint32) a <<<= 9 - rol $9,%ebp - # x2 ^= a - xor %rbp,%rcx - # b = x5 + x6 - lea (%r15,%rax),%rbp - # (uint32) b <<<= 9 - rol $9,%ebp - # x7 ^= b - xor %rbp,%r8 - # a = x1 + x2 - lea (%rdi,%rcx),%rbp - # (uint32) a <<<= 13 - rol $13,%ebp - # x3 ^= a - xor %rbp,%rsi - # b = x6 + x7 - lea (%rax,%r8),%rbp - # (uint32) b <<<= 13 - rol $13,%ebp - # x4 ^= b - xor %rbp,%r9 - # a = x2 + x3 - lea (%rcx,%rsi),%rbp - # (uint32) a <<<= 18 - rol $18,%ebp - # x0 ^= a - xor %rbp,%rdx - # b = x7 + x4 - lea (%r8,%r9),%rbp - # (uint32) b <<<= 18 - rol $18,%ebp - # x5 ^= b - xor %rbp,%r15 - # x10 = x10_stack - movq 168(%rsp),%rbp - # x5_stack = x5 - movq %r15,160(%rsp) - # c = x9 + x10 - lea (%r10,%rbp),%r15 - # (uint32) c <<<= 7 - rol $7,%r15d - # x11 ^= c - xor %r15,%r12 - # c = x10 + x11 - lea (%rbp,%r12),%r15 - # (uint32) c <<<= 9 - rol $9,%r15d - # x8 ^= c - xor %r15,%r11 - # c = x11 + x8 - lea (%r12,%r11),%r15 - # (uint32) c <<<= 13 - rol $13,%r15d - # x9 ^= c - xor %r15,%r10 - # c = x8 + x9 - lea (%r11,%r10),%r15 - # (uint32) c <<<= 18 - rol $18,%r15d - # x10 ^= c - xor %r15,%rbp - # x15 = x15_stack - movq 176(%rsp),%r15 - # x10_stack = x10 - movq %rbp,168(%rsp) - # d = x14 + x15 - lea (%rbx,%r15),%rbp - # (uint32) d <<<= 7 - rol $7,%ebp - # x12 ^= d - xor %rbp,%r14 - # d = x15 + x12 - lea (%r15,%r14),%rbp - # (uint32) d <<<= 9 - rol $9,%ebp - # x13 ^= d - xor %rbp,%r13 - # d = x12 + x13 - lea (%r14,%r13),%rbp - # (uint32) d <<<= 13 - rol $13,%ebp - # x14 ^= d - xor %rbp,%rbx - # d = x13 + x14 - lea (%r13,%rbx),%rbp - # (uint32) d <<<= 18 - rol $18,%ebp - # x15 ^= d - xor %rbp,%r15 - # x15_stack = x15 - movq %r15,176(%rsp) - # i = i_backup - movq 184(%rsp),%r15 - # unsigned>? 
i -= 4 - sub $4,%r15 - # comment:fp stack unchanged by jump - # goto mainloop if unsigned> - ja ._mainloop - # (uint32) x2 += j2 - addl 64(%rsp),%ecx - # x3 <<= 32 - shl $32,%rsi - # x3 += j2 - addq 64(%rsp),%rsi - # (uint64) x3 >>= 32 - shr $32,%rsi - # x3 <<= 32 - shl $32,%rsi - # x2 += x3 - add %rsi,%rcx - # (uint32) x6 += j6 - addl 80(%rsp),%eax - # x7 <<= 32 - shl $32,%r8 - # x7 += j6 - addq 80(%rsp),%r8 - # (uint64) x7 >>= 32 - shr $32,%r8 - # x7 <<= 32 - shl $32,%r8 - # x6 += x7 - add %r8,%rax - # (uint32) x8 += j8 - addl 88(%rsp),%r11d - # x9 <<= 32 - shl $32,%r10 - # x9 += j8 - addq 88(%rsp),%r10 - # (uint64) x9 >>= 32 - shr $32,%r10 - # x9 <<= 32 - shl $32,%r10 - # x8 += x9 - add %r10,%r11 - # (uint32) x12 += j12 - addl 104(%rsp),%r14d - # x13 <<= 32 - shl $32,%r13 - # x13 += j12 - addq 104(%rsp),%r13 - # (uint64) x13 >>= 32 - shr $32,%r13 - # x13 <<= 32 - shl $32,%r13 - # x12 += x13 - add %r13,%r14 - # (uint32) x0 += j0 - addl 56(%rsp),%edx - # x1 <<= 32 - shl $32,%rdi - # x1 += j0 - addq 56(%rsp),%rdi - # (uint64) x1 >>= 32 - shr $32,%rdi - # x1 <<= 32 - shl $32,%rdi - # x0 += x1 - add %rdi,%rdx - # x5 = x5_stack - movq 160(%rsp),%rdi - # (uint32) x4 += j4 - addl 72(%rsp),%r9d - # x5 <<= 32 - shl $32,%rdi - # x5 += j4 - addq 72(%rsp),%rdi - # (uint64) x5 >>= 32 - shr $32,%rdi - # x5 <<= 32 - shl $32,%rdi - # x4 += x5 - add %rdi,%r9 - # x10 = x10_stack - movq 168(%rsp),%r8 - # (uint32) x10 += j10 - addl 96(%rsp),%r8d - # x11 <<= 32 - shl $32,%r12 - # x11 += j10 - addq 96(%rsp),%r12 - # (uint64) x11 >>= 32 - shr $32,%r12 - # x11 <<= 32 - shl $32,%r12 - # x10 += x11 - add %r12,%r8 - # x15 = x15_stack - movq 176(%rsp),%rdi - # (uint32) x14 += j14 - addl 112(%rsp),%ebx - # x15 <<= 32 - shl $32,%rdi - # x15 += j14 - addq 112(%rsp),%rdi - # (uint64) x15 >>= 32 - shr $32,%rdi - # x15 <<= 32 - shl $32,%rdi - # x14 += x15 - add %rdi,%rbx - # out = out_backup - movq 136(%rsp),%rdi - # m = m_backup - movq 144(%rsp),%rsi - # x0 ^= *(uint64 *) (m + 0) - xorq 0(%rsi),%rdx - # *(uint64 *) (out + 0) = x0 - movq %rdx,0(%rdi) - # x2 ^= *(uint64 *) (m + 8) - xorq 8(%rsi),%rcx - # *(uint64 *) (out + 8) = x2 - movq %rcx,8(%rdi) - # x4 ^= *(uint64 *) (m + 16) - xorq 16(%rsi),%r9 - # *(uint64 *) (out + 16) = x4 - movq %r9,16(%rdi) - # x6 ^= *(uint64 *) (m + 24) - xorq 24(%rsi),%rax - # *(uint64 *) (out + 24) = x6 - movq %rax,24(%rdi) - # x8 ^= *(uint64 *) (m + 32) - xorq 32(%rsi),%r11 - # *(uint64 *) (out + 32) = x8 - movq %r11,32(%rdi) - # x10 ^= *(uint64 *) (m + 40) - xorq 40(%rsi),%r8 - # *(uint64 *) (out + 40) = x10 - movq %r8,40(%rdi) - # x12 ^= *(uint64 *) (m + 48) - xorq 48(%rsi),%r14 - # *(uint64 *) (out + 48) = x12 - movq %r14,48(%rdi) - # x14 ^= *(uint64 *) (m + 56) - xorq 56(%rsi),%rbx - # *(uint64 *) (out + 56) = x14 - movq %rbx,56(%rdi) - # bytes = bytes_backup - movq 152(%rsp),%rdx - # in8 = j8 - movq 88(%rsp),%rcx - # in8 += 1 - add $1,%rcx - # j8 = in8 - movq %rcx,88(%rsp) - # unsigned>? unsigned<? 
bytes - 64 - cmp $64,%rdx - # comment:fp stack unchanged by jump - # goto bytesatleast65 if unsigned> - ja ._bytesatleast65 - # comment:fp stack unchanged by jump - # goto bytesatleast64 if !unsigned< - jae ._bytesatleast64 - # m = out - mov %rdi,%rsi - # out = ctarget - movq 128(%rsp),%rdi - # i = bytes - mov %rdx,%rcx - # while (i) { *out++ = *m++; --i } - rep movsb - # comment:fp stack unchanged by fallthrough -# bytesatleast64: -._bytesatleast64: - # x = x_backup - movq 120(%rsp),%rdi - # in8 = j8 - movq 88(%rsp),%rsi - # *(uint64 *) (x + 32) = in8 - movq %rsi,32(%rdi) - # r11 = r11_stack - movq 0(%rsp),%r11 - # r12 = r12_stack - movq 8(%rsp),%r12 - # r13 = r13_stack - movq 16(%rsp),%r13 - # r14 = r14_stack - movq 24(%rsp),%r14 - # r15 = r15_stack - movq 32(%rsp),%r15 - # rbx = rbx_stack - movq 40(%rsp),%rbx - # rbp = rbp_stack - movq 48(%rsp),%rbp - # comment:fp stack unchanged by fallthrough -# done: -._done: - # leave - add %r11,%rsp - mov %rdi,%rax - mov %rsi,%rdx - ret -# bytesatleast65: -._bytesatleast65: - # bytes -= 64 - sub $64,%rdx - # out += 64 - add $64,%rdi - # m += 64 - add $64,%rsi - # comment:fp stack unchanged by jump - # goto bytesatleast1 - jmp ._bytesatleast1 -ENDPROC(salsa20_encrypt_bytes) diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c deleted file mode 100644 index b07d7d959806..000000000000 --- a/arch/x86/crypto/salsa20_glue.c +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Glue code for optimized assembly version of Salsa20. - * - * Copyright (c) 2007 Tan Swee Heng <thesweeheng@gmail.com> - * - * The assembly codes are public domain assembly codes written by Daniel. J. - * Bernstein <djb@cr.yp.to>. The codes are modified to include indentation - * and to remove extraneous comments and functions that are not needed. - * - i586 version, renamed as salsa20-i586-asm_32.S - * available from <http://cr.yp.to/snuffle/salsa20/x86-pm/salsa20.s> - * - x86-64 version, renamed as salsa20-x86_64-asm_64.S - * available from <http://cr.yp.to/snuffle/salsa20/amd64-3/salsa20.s> - * - * Also modified to set up the initial state using the generic C code rather - * than in assembly. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
- * - */ - -#include <asm/unaligned.h> -#include <crypto/internal/skcipher.h> -#include <crypto/salsa20.h> -#include <linux/module.h> - -asmlinkage void salsa20_encrypt_bytes(u32 state[16], const u8 *src, u8 *dst, - u32 bytes); - -static int salsa20_asm_crypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - u32 state[16]; - int err; - - err = skcipher_walk_virt(&walk, req, true); - - crypto_salsa20_init(state, ctx, walk.iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = round_down(nbytes, walk.stride); - - salsa20_encrypt_bytes(state, walk.src.virt.addr, - walk.dst.virt.addr, nbytes); - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - } - - return err; -} - -static struct skcipher_alg alg = { - .base.cra_name = "salsa20", - .base.cra_driver_name = "salsa20-asm", - .base.cra_priority = 200, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct salsa20_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = SALSA20_MIN_KEY_SIZE, - .max_keysize = SALSA20_MAX_KEY_SIZE, - .ivsize = SALSA20_IV_SIZE, - .chunksize = SALSA20_BLOCK_SIZE, - .setkey = crypto_salsa20_setkey, - .encrypt = salsa20_asm_crypt, - .decrypt = salsa20_asm_crypt, -}; - -static int __init init(void) -{ - return crypto_register_skcipher(&alg); -} - -static void __exit fini(void) -{ - crypto_unregister_skcipher(&alg); -} - -module_init(init); -module_exit(fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)"); -MODULE_ALIAS_CRYPTO("salsa20"); -MODULE_ALIAS_CRYPTO("salsa20-asm"); diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index d6b27dab1b30..14a2f996e543 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -396,3 +396,4 @@ 382 i386 pkey_free sys_pkey_free __ia32_sys_pkey_free 383 i386 statx sys_statx __ia32_sys_statx 384 i386 arch_prctl sys_arch_prctl __ia32_compat_sys_arch_prctl +385 i386 io_pgetevents sys_io_pgetevents __ia32_compat_sys_io_pgetevents diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index 4dfe42666d0c..cd36232ab62f 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -341,6 +341,7 @@ 330 common pkey_alloc __x64_sys_pkey_alloc 331 common pkey_free __x64_sys_pkey_free 332 common statx __x64_sys_statx +333 common io_pgetevents __x64_sys_io_pgetevents # # x32-specific system call numbers start at 512 to avoid cache impact diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index d998a487c9b1..261802b1cc50 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -44,14 +44,14 @@ obj-y += $(vdso_img_objs) targets += $(vdso_img_cfiles) targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so) -export CPPFLAGS_vdso.lds += -P -C +CPPFLAGS_vdso.lds += -P -C VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \ -Wl,--no-undefined \ -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 \ $(DISABLE_LTO) -$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE +$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE $(call if_changed,vdso) HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(srctree)/arch/$(SUBARCH)/include/uapi @@ -100,11 +100,8 @@ VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \ 
-Wl,-z,max-page-size=4096 \ -Wl,-z,common-page-size=4096 -# 64-bit objects to re-brand as x32 -vobjs64-for-x32 := $(filter-out $(vobjs-nox32),$(vobjs-y)) - # x32-rebranded versions -vobjx32s-y := $(vobjs64-for-x32:.o=-x32.o) +vobjx32s-y := $(vobjs-y:.o=-x32.o) # same thing, but in the output directory vobjx32s := $(foreach F,$(vobjx32s-y),$(obj)/$F) @@ -122,7 +119,7 @@ $(obj)/%.so: OBJCOPYFLAGS := -S $(obj)/%.so: $(obj)/%.so.dbg $(call if_changed,objcopy) -$(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE +$(obj)/vdsox32.so.dbg: $(obj)/vdsox32.lds $(vobjx32s) FORCE $(call if_changed,vdso) CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds) diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index 70b7845434cb..7782cdbcd67d 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -107,7 +107,7 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size) thread->cr2 = ptr; thread->trap_nr = X86_TRAP_PF; - memset(&info, 0, sizeof(info)); + clear_siginfo(&info); info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = SEGV_MAPERR; diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index 786fd875de92..4b98101209a1 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -889,7 +889,7 @@ static void force_ibs_eilvt_setup(void) if (!ibs_eilvt_valid()) goto out; - pr_info("IBS: LVT offset %d assigned\n", offset); + pr_info("LVT offset %d assigned\n", offset); return; out: diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index f5cbbba99283..981ba5e8241b 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -19,6 +19,7 @@ #include <asm/cpufeature.h> #include <asm/perf_event.h> #include <asm/msr.h> +#include <asm/smp.h> #define NUM_COUNTERS_NB 4 #define NUM_COUNTERS_L2 4 @@ -399,26 +400,8 @@ static int amd_uncore_cpu_starting(unsigned int cpu) } if (amd_uncore_llc) { - unsigned int apicid = cpu_data(cpu).apicid; - unsigned int nshared, subleaf, prev_eax = 0; - uncore = *per_cpu_ptr(amd_uncore_llc, cpu); - /* - * Iterate over Cache Topology Definition leaves until no - * more cache descriptions are available. 
- */ - for (subleaf = 0; subleaf < 5; subleaf++) { - cpuid_count(0x8000001d, subleaf, &eax, &ebx, &ecx, &edx); - - /* EAX[0:4] gives type of cache */ - if (!(eax & 0x1f)) - break; - - prev_eax = eax; - } - nshared = ((prev_eax >> 14) & 0xfff) + 1; - - uncore->id = apicid - (apicid % nshared); + uncore->id = per_cpu(cpu_llc_id, cpu); uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc); *per_cpu_ptr(amd_uncore_llc, cpu) = uncore; diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 45b2b1c93d04..6e461fb1e0d4 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2397,7 +2397,7 @@ static unsigned long get_segment_base(unsigned int segment) #ifdef CONFIG_IA32_EMULATION -#include <asm/compat.h> +#include <linux/compat.h> static inline int perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry) diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 3b993942a0e4..8d016ce5b80d 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -1194,7 +1194,7 @@ static int pt_event_addr_filters_validate(struct list_head *filters) filter->action == PERF_ADDR_FILTER_ACTION_START) return -EOPNOTSUPP; - if (!filter->inode) { + if (!filter->path.dentry) { if (!valid_kernel_ip(filter->offset)) return -EINVAL; @@ -1221,7 +1221,7 @@ static void pt_event_addr_filters_sync(struct perf_event *event) return; list_for_each_entry(filter, &head->list, entry) { - if (filter->inode && !offs[range]) { + if (filter->path.dentry && !offs[range]) { msr_a = msr_b = 0; } else { /* apply the offset */ diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index a7956fc7ca1d..15b07379e72d 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -203,7 +203,7 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box, hwc->idx = idx; hwc->last_tag = ++box->tags[idx]; - if (hwc->idx == UNCORE_PMC_IDX_FIXED) { + if (uncore_pmc_fixed(hwc->idx)) { hwc->event_base = uncore_fixed_ctr(box); hwc->config_base = uncore_fixed_ctl(box); return; @@ -218,7 +218,9 @@ void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *e u64 prev_count, new_count, delta; int shift; - if (event->hw.idx >= UNCORE_PMC_IDX_FIXED) + if (uncore_pmc_freerunning(event->hw.idx)) + shift = 64 - uncore_freerunning_bits(box, event); + else if (uncore_pmc_fixed(event->hw.idx)) shift = 64 - uncore_fixed_ctr_bits(box); else shift = 64 - uncore_perf_ctr_bits(box); @@ -449,15 +451,30 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int return ret ? -EINVAL : 0; } -static void uncore_pmu_event_start(struct perf_event *event, int flags) +void uncore_pmu_event_start(struct perf_event *event, int flags) { struct intel_uncore_box *box = uncore_event_to_box(event); int idx = event->hw.idx; - if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX)) return; - if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX)) + /* + * Free running counter is read-only and always active. + * Use the current counter value as start point. + * There is no overflow interrupt for free running counter. + * Use hrtimer to periodically poll the counter to avoid overflow. 
+ */ + if (uncore_pmc_freerunning(event->hw.idx)) { + list_add_tail(&event->active_entry, &box->active_list); + local64_set(&event->hw.prev_count, + uncore_read_counter(box, event)); + if (box->n_active++ == 0) + uncore_pmu_start_hrtimer(box); + return; + } + + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) return; event->hw.state = 0; @@ -474,11 +491,20 @@ static void uncore_pmu_event_start(struct perf_event *event, int flags) } } -static void uncore_pmu_event_stop(struct perf_event *event, int flags) +void uncore_pmu_event_stop(struct perf_event *event, int flags) { struct intel_uncore_box *box = uncore_event_to_box(event); struct hw_perf_event *hwc = &event->hw; + /* Cannot disable free running counter which is read-only */ + if (uncore_pmc_freerunning(hwc->idx)) { + list_del(&event->active_entry); + if (--box->n_active == 0) + uncore_pmu_cancel_hrtimer(box); + uncore_perf_event_update(box, event); + return; + } + if (__test_and_clear_bit(hwc->idx, box->active_mask)) { uncore_disable_event(box, event); box->n_active--; @@ -502,7 +528,7 @@ static void uncore_pmu_event_stop(struct perf_event *event, int flags) } } -static int uncore_pmu_event_add(struct perf_event *event, int flags) +int uncore_pmu_event_add(struct perf_event *event, int flags) { struct intel_uncore_box *box = uncore_event_to_box(event); struct hw_perf_event *hwc = &event->hw; @@ -512,6 +538,17 @@ static int uncore_pmu_event_add(struct perf_event *event, int flags) if (!box) return -ENODEV; + /* + * The free running counter is assigned in event_init(). + * The free running counter event and free running counter + * are 1:1 mapped. It doesn't need to be tracked in event_list. + */ + if (uncore_pmc_freerunning(hwc->idx)) { + if (flags & PERF_EF_START) + uncore_pmu_event_start(event, 0); + return 0; + } + ret = n = uncore_collect_events(box, event, false); if (ret < 0) return ret; @@ -563,13 +600,21 @@ static int uncore_pmu_event_add(struct perf_event *event, int flags) return 0; } -static void uncore_pmu_event_del(struct perf_event *event, int flags) +void uncore_pmu_event_del(struct perf_event *event, int flags) { struct intel_uncore_box *box = uncore_event_to_box(event); int i; uncore_pmu_event_stop(event, PERF_EF_UPDATE); + /* + * The event for free running counter is not tracked by event_list. + * It doesn't need to force event->hw.idx = -1 to reassign the counter. + * Because the event and the free running counter are 1:1 mapped. + */ + if (uncore_pmc_freerunning(event->hw.idx)) + return; + for (i = 0; i < box->n_events; i++) { if (event == box->event_list[i]) { uncore_put_event_constraint(box, event); @@ -603,6 +648,10 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu, struct intel_uncore_box *fake_box; int ret = -EINVAL, n; + /* The free running counter is always active. */ + if (uncore_pmc_freerunning(event->hw.idx)) + return 0; + fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE); if (!fake_box) return -ENOMEM; @@ -690,6 +739,17 @@ static int uncore_pmu_event_init(struct perf_event *event) /* fixed counters have event field hardcoded to zero */ hwc->config = 0ULL; + } else if (is_freerunning_event(event)) { + if (!check_valid_freerunning_event(box, event)) + return -EINVAL; + event->hw.idx = UNCORE_PMC_IDX_FREERUNNING; + /* + * The free running counter event and free running counter + * are always 1:1 mapped. + * The free running counter is always active. + * Assign the free running counter here.
+ */ + event->hw.event_base = uncore_freerunning_counter(box, event); + } else { + hwc->config = event->attr.config & (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32)); diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index 414dc7e7c950..c9e1e0bef3c3 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -12,8 +12,13 @@ #define UNCORE_FIXED_EVENT 0xff #define UNCORE_PMC_IDX_MAX_GENERIC 8 +#define UNCORE_PMC_IDX_MAX_FIXED 1 +#define UNCORE_PMC_IDX_MAX_FREERUNNING 1 #define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC -#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1) +#define UNCORE_PMC_IDX_FREERUNNING (UNCORE_PMC_IDX_FIXED + \ + UNCORE_PMC_IDX_MAX_FIXED) +#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FREERUNNING + \ + UNCORE_PMC_IDX_MAX_FREERUNNING) #define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx) \ ((dev << 24) | (func << 16) | (type << 8) | idx) @@ -35,6 +40,7 @@ struct intel_uncore_ops; struct intel_uncore_pmu; struct intel_uncore_box; struct uncore_event_desc; +struct freerunning_counters; struct intel_uncore_type { const char *name; @@ -42,6 +48,7 @@ struct intel_uncore_type { int num_boxes; int perf_ctr_bits; int fixed_ctr_bits; + int num_freerunning_types; unsigned perf_ctr; unsigned event_ctl; unsigned event_mask; @@ -59,6 +66,7 @@ struct intel_uncore_type { struct intel_uncore_pmu *pmus; struct intel_uncore_ops *ops; struct uncore_event_desc *event_descs; + struct freerunning_counters *freerunning; const struct attribute_group *attr_groups[4]; struct pmu *pmu; /* for custom pmu ops */ }; @@ -129,6 +137,14 @@ struct uncore_event_desc { const char *config; }; +struct freerunning_counters { + unsigned int counter_base; + unsigned int counter_offset; + unsigned int box_offset; + unsigned int num_counters; + unsigned int bits; +}; + struct pci2phy_map { struct list_head list; int segment; @@ -157,6 +173,16 @@ static ssize_t __uncore_##_var##_show(struct kobject *kobj, \ static struct kobj_attribute format_attr_##_var = \ __ATTR(_name, 0444, __uncore_##_var##_show, NULL) +static inline bool uncore_pmc_fixed(int idx) +{ + return idx == UNCORE_PMC_IDX_FIXED; +} + +static inline bool uncore_pmc_freerunning(int idx) +{ + return idx == UNCORE_PMC_IDX_FREERUNNING; +} + static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box) { return box->pmu->type->box_ctl; } @@ -214,6 +240,60 @@ static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box) return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); } + +/* + * In the uncore document, there is no event-code assigned to free running + * counters. Some events need to be defined to indicate the free running + * counters. The events are encoded as event-code + umask-code. + * + * The event-code for all free running counters is 0xff, which is the same as + * the fixed counters. + * + * The umask-code is used to distinguish a fixed counter and a free running + * counter, and different types of free running counters. + * - For fixed counters, the umask-code is 0x0X. + * X indicates the index of the fixed counter, which starts from 0. + * - For free running counters, the umask-code uses the rest of the space. + * It would bear the format of 0xXY. + * X stands for the type of free running counters, which starts from 1. + * Y stands for the index of free running counters of same type, which + * starts from 0.
+ * + * For example, there are three types of IIO free running counters on Skylake + * server, IO CLOCKS counters, BANDWIDTH counters and UTILIZATION counters. + * The event-code for all the free running counters is 0xff. + * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type, + * which umask-code starts from 0x10. + * So 'ioclk' is encoded as event=0xff,umask=0x10 + * 'bw_in_port2' is the third counter of BANDWIDTH counters. BANDWIDTH is + * the second type, which umask-code starts from 0x20. + * So 'bw_in_port2' is encoded as event=0xff,umask=0x22 + */ +static inline unsigned int uncore_freerunning_idx(u64 config) +{ + return ((config >> 8) & 0xf); +} + +#define UNCORE_FREERUNNING_UMASK_START 0x10 + +static inline unsigned int uncore_freerunning_type(u64 config) +{ + return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf); +} + +static inline +unsigned int uncore_freerunning_counter(struct intel_uncore_box *box, + struct perf_event *event) +{ + unsigned int type = uncore_freerunning_type(event->attr.config); + unsigned int idx = uncore_freerunning_idx(event->attr.config); + struct intel_uncore_pmu *pmu = box->pmu; + + return pmu->type->freerunning[type].counter_base + + pmu->type->freerunning[type].counter_offset * idx + + pmu->type->freerunning[type].box_offset * pmu->pmu_idx; +} + static inline unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx) { @@ -276,11 +356,52 @@ static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box) return box->pmu->type->fixed_ctr_bits; } +static inline +unsigned int uncore_freerunning_bits(struct intel_uncore_box *box, + struct perf_event *event) +{ + unsigned int type = uncore_freerunning_type(event->attr.config); + + return box->pmu->type->freerunning[type].bits; +} + +static inline int uncore_num_freerunning(struct intel_uncore_box *box, + struct perf_event *event) +{ + unsigned int type = uncore_freerunning_type(event->attr.config); + + return box->pmu->type->freerunning[type].num_counters; +} + +static inline int uncore_num_freerunning_types(struct intel_uncore_box *box, + struct perf_event *event) +{ + return box->pmu->type->num_freerunning_types; +} + +static inline bool check_valid_freerunning_event(struct intel_uncore_box *box, + struct perf_event *event) +{ + unsigned int type = uncore_freerunning_type(event->attr.config); + unsigned int idx = uncore_freerunning_idx(event->attr.config); + + return (type < uncore_num_freerunning_types(box, event)) && + (idx < uncore_num_freerunning(box, event)); +} + static inline int uncore_num_counters(struct intel_uncore_box *box) { return box->pmu->type->num_counters; } +static inline bool is_freerunning_event(struct perf_event *event) +{ + u64 cfg = event->attr.config; + + return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) && + (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START); +} + static inline void uncore_disable_box(struct intel_uncore_box *box) { if (box->pmu->type->ops->disable_box) @@ -346,6 +467,10 @@ struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event); void uncore_pmu_start_hrtimer(struct intel_uncore_box *box); void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box); +void uncore_pmu_event_start(struct perf_event *event, int flags); +void uncore_pmu_event_stop(struct perf_event *event, int flags); +int uncore_pmu_event_add(struct perf_event *event, int flags); +void uncore_pmu_event_del(struct perf_event 
*event, int flags); void uncore_pmu_event_read(struct perf_event *event); void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event); struct event_constraint * diff --git a/arch/x86/events/intel/uncore_nhmex.c b/arch/x86/events/intel/uncore_nhmex.c index 93e7a8397cde..173e2674be6e 100644 --- a/arch/x86/events/intel/uncore_nhmex.c +++ b/arch/x86/events/intel/uncore_nhmex.c @@ -246,7 +246,7 @@ static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct p { struct hw_perf_event *hwc = &event->hw; - if (hwc->idx >= UNCORE_PMC_IDX_FIXED) + if (hwc->idx == UNCORE_PMC_IDX_FIXED) wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0); else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0) wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index aee5e8496be4..8527c3e1038b 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -285,6 +285,15 @@ static struct uncore_event_desc snb_uncore_imc_events[] = { #define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054 #define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE +enum perf_snb_uncore_imc_freerunning_types { + SNB_PCI_UNCORE_IMC_DATA = 0, + SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX, +}; + +static struct freerunning_counters snb_uncore_imc_freerunning[] = { + [SNB_PCI_UNCORE_IMC_DATA] = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, 0x4, 0x0, 2, 32 }, +}; + static struct attribute *snb_uncore_imc_formats_attr[] = { &format_attr_event.attr, NULL, @@ -341,9 +350,8 @@ static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf } /* - * custom event_init() function because we define our own fixed, free - * running counters, so we do not want to conflict with generic uncore - * logic. Also simplifies processing + * Keep the custom event_init() function compatible with old event + * encoding for free running counters. 
*/ static int snb_uncore_imc_event_init(struct perf_event *event) { @@ -405,11 +413,11 @@ static int snb_uncore_imc_event_init(struct perf_event *event) switch (cfg) { case SNB_UNCORE_PCI_IMC_DATA_READS: base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE; - idx = UNCORE_PMC_IDX_FIXED; + idx = UNCORE_PMC_IDX_FREERUNNING; break; case SNB_UNCORE_PCI_IMC_DATA_WRITES: base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE; - idx = UNCORE_PMC_IDX_FIXED + 1; + idx = UNCORE_PMC_IDX_FREERUNNING; break; default: return -EINVAL; @@ -430,75 +438,6 @@ static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_ev return 0; } -static void snb_uncore_imc_event_start(struct perf_event *event, int flags) -{ - struct intel_uncore_box *box = uncore_event_to_box(event); - u64 count; - - if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) - return; - - event->hw.state = 0; - box->n_active++; - - list_add_tail(&event->active_entry, &box->active_list); - - count = snb_uncore_imc_read_counter(box, event); - local64_set(&event->hw.prev_count, count); - - if (box->n_active == 1) - uncore_pmu_start_hrtimer(box); -} - -static void snb_uncore_imc_event_stop(struct perf_event *event, int flags) -{ - struct intel_uncore_box *box = uncore_event_to_box(event); - struct hw_perf_event *hwc = &event->hw; - - if (!(hwc->state & PERF_HES_STOPPED)) { - box->n_active--; - - WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); - hwc->state |= PERF_HES_STOPPED; - - list_del(&event->active_entry); - - if (box->n_active == 0) - uncore_pmu_cancel_hrtimer(box); - } - - if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { - /* - * Drain the remaining delta count out of a event - * that we are disabling: - */ - uncore_perf_event_update(box, event); - hwc->state |= PERF_HES_UPTODATE; - } -} - -static int snb_uncore_imc_event_add(struct perf_event *event, int flags) -{ - struct intel_uncore_box *box = uncore_event_to_box(event); - struct hw_perf_event *hwc = &event->hw; - - if (!box) - return -ENODEV; - - hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; - if (!(flags & PERF_EF_START)) - hwc->state |= PERF_HES_ARCH; - - snb_uncore_imc_event_start(event, 0); - - return 0; -} - -static void snb_uncore_imc_event_del(struct perf_event *event, int flags) -{ - snb_uncore_imc_event_stop(event, PERF_EF_UPDATE); -} - int snb_pci2phy_map_init(int devid) { struct pci_dev *dev = NULL; @@ -530,10 +469,10 @@ int snb_pci2phy_map_init(int devid) static struct pmu snb_uncore_imc_pmu = { .task_ctx_nr = perf_invalid_context, .event_init = snb_uncore_imc_event_init, - .add = snb_uncore_imc_event_add, - .del = snb_uncore_imc_event_del, - .start = snb_uncore_imc_event_start, - .stop = snb_uncore_imc_event_stop, + .add = uncore_pmu_event_add, + .del = uncore_pmu_event_del, + .start = uncore_pmu_event_start, + .stop = uncore_pmu_event_stop, .read = uncore_pmu_event_read, }; @@ -552,12 +491,10 @@ static struct intel_uncore_type snb_uncore_imc = { .name = "imc", .num_counters = 2, .num_boxes = 1, - .fixed_ctr_bits = 32, - .fixed_ctr = SNB_UNCORE_PCI_IMC_CTR_BASE, + .num_freerunning_types = SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX, + .freerunning = snb_uncore_imc_freerunning, .event_descs = snb_uncore_imc_events, .format_group = &snb_uncore_imc_format_group, - .perf_ctr = SNB_UNCORE_PCI_IMC_DATA_READS_BASE, - .event_mask = SNB_UNCORE_PCI_IMC_EVENT_MASK, .ops = &snb_uncore_imc_ops, .pmu = &snb_uncore_imc_pmu, }; diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 77076a102e34..87dc0263a2e1 100644 --- 
a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -3522,6 +3522,87 @@ static struct intel_uncore_type skx_uncore_iio = { .format_group = &skx_uncore_iio_format_group, }; +enum perf_uncore_iio_freerunning_type_id { + SKX_IIO_MSR_IOCLK = 0, + SKX_IIO_MSR_BW = 1, + SKX_IIO_MSR_UTIL = 2, + + SKX_IIO_FREERUNNING_TYPE_MAX, +}; + + +static struct freerunning_counters skx_iio_freerunning[] = { + [SKX_IIO_MSR_IOCLK] = { 0xa45, 0x1, 0x20, 1, 36 }, + [SKX_IIO_MSR_BW] = { 0xb00, 0x1, 0x10, 8, 36 }, + [SKX_IIO_MSR_UTIL] = { 0xb08, 0x1, 0x10, 8, 36 }, +}; + +static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = { + /* Free-Running IO CLOCKS Counter */ + INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"), + /* Free-Running IIO BANDWIDTH Counters */ + INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"), + INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"), + INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"), + INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"), + INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x24"), + INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x25"), + INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x26"), + INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x27"), + INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"), + /* Free-running IIO UTILIZATION Counters */ + INTEL_UNCORE_EVENT_DESC(util_in_port0, "event=0xff,umask=0x30"), + INTEL_UNCORE_EVENT_DESC(util_out_port0, "event=0xff,umask=0x31"), + INTEL_UNCORE_EVENT_DESC(util_in_port1, "event=0xff,umask=0x32"), + INTEL_UNCORE_EVENT_DESC(util_out_port1, "event=0xff,umask=0x33"), + INTEL_UNCORE_EVENT_DESC(util_in_port2, "event=0xff,umask=0x34"), + INTEL_UNCORE_EVENT_DESC(util_out_port2, "event=0xff,umask=0x35"), + INTEL_UNCORE_EVENT_DESC(util_in_port3, "event=0xff,umask=0x36"), + INTEL_UNCORE_EVENT_DESC(util_out_port3, "event=0xff,umask=0x37"), + { /* end: all zeroes */ }, +}; + +static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = { + .read_counter = uncore_msr_read_counter, +}; + +static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + NULL, +}; + +static const struct attribute_group skx_uncore_iio_freerunning_format_group = { + .name = "format", + .attrs = skx_uncore_iio_freerunning_formats_attr, +}; + +static struct intel_uncore_type skx_uncore_iio_free_running = { + .name = "iio_free_running", + .num_counters = 17, + .num_boxes = 6, + .num_freerunning_types = SKX_IIO_FREERUNNING_TYPE_MAX, + .freerunning = skx_iio_freerunning, + .ops = 
&skx_uncore_iio_freerunning_ops, + .event_descs = skx_uncore_iio_freerunning_events, + .format_group = &skx_uncore_iio_freerunning_format_group, +}; + static struct attribute *skx_uncore_formats_attr[] = { &format_attr_event.attr, &format_attr_umask.attr, @@ -3595,6 +3676,7 @@ static struct intel_uncore_type *skx_msr_uncores[] = { &skx_uncore_ubox, &skx_uncore_chabox, &skx_uncore_iio, + &skx_uncore_iio_free_running, &skx_uncore_irp, &skx_uncore_pcu, NULL, diff --git a/arch/x86/hyperv/Makefile b/arch/x86/hyperv/Makefile index 367a8203cfcf..b173d404e3df 100644 --- a/arch/x86/hyperv/Makefile +++ b/arch/x86/hyperv/Makefile @@ -1 +1,2 @@ -obj-y := hv_init.o mmu.o +obj-y := hv_init.o mmu.o +obj-$(CONFIG_X86_64) += hv_apic.o diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c new file mode 100644 index 000000000000..f68855499391 --- /dev/null +++ b/arch/x86/hyperv/hv_apic.c @@ -0,0 +1,256 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Hyper-V specific APIC code. + * + * Copyright (C) 2018, Microsoft, Inc. + * + * Author : K. Y. Srinivasan <kys@microsoft.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + */ + +#include <linux/types.h> +#include <linux/version.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/clockchips.h> +#include <linux/hyperv.h> +#include <linux/slab.h> +#include <linux/cpuhotplug.h> +#include <asm/hypervisor.h> +#include <asm/mshyperv.h> +#include <asm/apic.h> + +static struct apic orig_apic; + +static u64 hv_apic_icr_read(void) +{ + u64 reg_val; + + rdmsrl(HV_X64_MSR_ICR, reg_val); + return reg_val; +} + +static void hv_apic_icr_write(u32 low, u32 id) +{ + u64 reg_val; + + reg_val = SET_APIC_DEST_FIELD(id); + reg_val = reg_val << 32; + reg_val |= low; + + wrmsrl(HV_X64_MSR_ICR, reg_val); +} + +static u32 hv_apic_read(u32 reg) +{ + u32 reg_val, hi; + + switch (reg) { + case APIC_EOI: + rdmsr(HV_X64_MSR_EOI, reg_val, hi); + return reg_val; + case APIC_TASKPRI: + rdmsr(HV_X64_MSR_TPR, reg_val, hi); + return reg_val; + + default: + return native_apic_mem_read(reg); + } +} + +static void hv_apic_write(u32 reg, u32 val) +{ + switch (reg) { + case APIC_EOI: + wrmsr(HV_X64_MSR_EOI, val, 0); + break; + case APIC_TASKPRI: + wrmsr(HV_X64_MSR_TPR, val, 0); + break; + default: + native_apic_mem_write(reg, val); + } +} + +static void hv_apic_eoi_write(u32 reg, u32 val) +{ + wrmsr(HV_X64_MSR_EOI, val, 0); +} + +/* + * IPI implementation on Hyper-V. 
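+ *
+ * The __send_ipi_*() helpers below return false on any failure so that
+ * their hv_send_ipi*() wrappers can fall back to the native APIC path
+ * saved in orig_apic, roughly:
+ *
+ *	if (!__send_ipi_mask(mask, vector))
+ *		orig_apic.send_IPI_mask(mask, vector);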
+ */ +static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) +{ + struct ipi_arg_ex **arg; + struct ipi_arg_ex *ipi_arg; + unsigned long flags; + int nr_bank = 0; + int ret = 1; + + local_irq_save(flags); + arg = (struct ipi_arg_ex **)this_cpu_ptr(hyperv_pcpu_input_arg); + + ipi_arg = *arg; + if (unlikely(!ipi_arg)) + goto ipi_mask_ex_done; + + ipi_arg->vector = vector; + ipi_arg->reserved = 0; + ipi_arg->vp_set.valid_bank_mask = 0; + + if (!cpumask_equal(mask, cpu_present_mask)) { + ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K; + nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask); + } + if (!nr_bank) + ipi_arg->vp_set.format = HV_GENERIC_SET_ALL; + + ret = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank, + ipi_arg, NULL); + +ipi_mask_ex_done: + local_irq_restore(flags); + return ((ret == 0) ? true : false); +} + +static bool __send_ipi_mask(const struct cpumask *mask, int vector) +{ + int cur_cpu, vcpu; + struct ipi_arg_non_ex **arg; + struct ipi_arg_non_ex *ipi_arg; + int ret = 1; + unsigned long flags; + + if (cpumask_empty(mask)) + return true; + + if (!hv_hypercall_pg) + return false; + + if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR)) + return false; + + if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) + return __send_ipi_mask_ex(mask, vector); + + local_irq_save(flags); + arg = (struct ipi_arg_non_ex **)this_cpu_ptr(hyperv_pcpu_input_arg); + + ipi_arg = *arg; + if (unlikely(!ipi_arg)) + goto ipi_mask_done; + + ipi_arg->vector = vector; + ipi_arg->reserved = 0; + ipi_arg->cpu_mask = 0; + + for_each_cpu(cur_cpu, mask) { + vcpu = hv_cpu_number_to_vp_number(cur_cpu); + /* + * This particular version of the IPI hypercall can + * only target up to 64 CPUs. + */ + if (vcpu >= 64) + goto ipi_mask_done; + + __set_bit(vcpu, (unsigned long *)&ipi_arg->cpu_mask); + } + + ret = hv_do_hypercall(HVCALL_SEND_IPI, ipi_arg, NULL); + +ipi_mask_done: + local_irq_restore(flags); + return ((ret == 0) ? true : false); +} + +static bool __send_ipi_one(int cpu, int vector) +{ + struct cpumask mask = CPU_MASK_NONE; + + cpumask_set_cpu(cpu, &mask); + return __send_ipi_mask(&mask, vector); +} + +static void hv_send_ipi(int cpu, int vector) +{ + if (!__send_ipi_one(cpu, vector)) + orig_apic.send_IPI(cpu, vector); +} + +static void hv_send_ipi_mask(const struct cpumask *mask, int vector) +{ + if (!__send_ipi_mask(mask, vector)) + orig_apic.send_IPI_mask(mask, vector); +} + +static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector) +{ + unsigned int this_cpu = smp_processor_id(); + struct cpumask new_mask; + const struct cpumask *local_mask; + + cpumask_copy(&new_mask, mask); + cpumask_clear_cpu(this_cpu, &new_mask); + local_mask = &new_mask; + if (!__send_ipi_mask(local_mask, vector)) + orig_apic.send_IPI_mask_allbutself(mask, vector); +} + +static void hv_send_ipi_allbutself(int vector) +{ + hv_send_ipi_mask_allbutself(cpu_online_mask, vector); +} + +static void hv_send_ipi_all(int vector) +{ + if (!__send_ipi_mask(cpu_online_mask, vector)) + orig_apic.send_IPI_all(vector); +} + +static void hv_send_ipi_self(int vector) +{ + if (!__send_ipi_one(smp_processor_id(), vector)) + orig_apic.send_IPI_self(vector); +} + +void __init hv_apic_init(void) +{ + if (ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) { + if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) + pr_info("Hyper-V: Using ext hypercalls for IPI\n"); + else + pr_info("Hyper-V: Using IPI hypercalls\n"); + /* + * Set the IPI entry points.
+ */ + orig_apic = *apic; + + apic->send_IPI = hv_send_ipi; + apic->send_IPI_mask = hv_send_ipi_mask; + apic->send_IPI_mask_allbutself = hv_send_ipi_mask_allbutself; + apic->send_IPI_allbutself = hv_send_ipi_allbutself; + apic->send_IPI_all = hv_send_ipi_all; + apic->send_IPI_self = hv_send_ipi_self; + } + + if (ms_hyperv.hints & HV_X64_APIC_ACCESS_RECOMMENDED) { + pr_info("Hyper-V: Using MSR based APIC access\n"); + apic_set_eoi_write(hv_apic_eoi_write); + apic->read = hv_apic_read; + apic->write = hv_apic_write; + apic->icr_write = hv_apic_icr_write; + apic->icr_read = hv_apic_icr_read; + } +} diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index cfecc2272f2d..4c431e1c1eff 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -91,12 +91,19 @@ EXPORT_SYMBOL_GPL(hv_vp_index); struct hv_vp_assist_page **hv_vp_assist_page; EXPORT_SYMBOL_GPL(hv_vp_assist_page); +void __percpu **hyperv_pcpu_input_arg; +EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg); + u32 hv_max_vp_index; static int hv_cpu_init(unsigned int cpu) { u64 msr_vp_index; struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()]; + void **input_arg; + + input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg); + *input_arg = page_address(alloc_page(GFP_KERNEL)); hv_get_vp_index(msr_vp_index); @@ -217,6 +224,16 @@ static int hv_cpu_die(unsigned int cpu) { struct hv_reenlightenment_control re_ctrl; unsigned int new_cpu; + unsigned long flags; + void **input_arg; + void *input_pg = NULL; + + local_irq_save(flags); + input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg); + input_pg = *input_arg; + *input_arg = NULL; + local_irq_restore(flags); + free_page((unsigned long)input_pg); if (hv_vp_assist_page && hv_vp_assist_page[cpu]) wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, 0); @@ -242,8 +259,9 @@ static int hv_cpu_die(unsigned int cpu) * * 1. Setup the hypercall page. * 2. Register Hyper-V specific clocksource. + * 3. Setup Hyper-V specific APIC entry points. */ -void hyperv_init(void) +void __init hyperv_init(void) { u64 guest_id, required_msrs; union hv_x64_msr_hypercall_contents hypercall_msr; @@ -259,6 +277,16 @@ void hyperv_init(void) if ((ms_hyperv.features & required_msrs) != required_msrs) return; + /* + * Allocate the per-CPU state for the hypercall input arg. + * If this allocation fails, we will not be able to setup + * (per-CPU) hypercall input page and thus this failure is + * fatal on Hyper-V. + */ + hyperv_pcpu_input_arg = alloc_percpu(void *); + + BUG_ON(hyperv_pcpu_input_arg == NULL); + /* Allocate percpu VP index */ hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index), GFP_KERNEL); @@ -296,7 +324,7 @@ void hyperv_init(void) hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg); wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); - hyper_alloc_mmu(); + hv_apic_init(); /* * Register Hyper-V specific clocksource. 
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c index 56c9ebac946f..5f053d7d1bd9 100644 --- a/arch/x86/hyperv/mmu.c +++ b/arch/x86/hyperv/mmu.c @@ -25,20 +25,13 @@ struct hv_flush_pcpu { struct hv_flush_pcpu_ex { u64 address_space; u64 flags; - struct { - u64 format; - u64 valid_bank_mask; - u64 bank_contents[]; - } hv_vp_set; + struct hv_vpset hv_vp_set; u64 gva_list[]; }; /* Each gva in gva_list encodes up to 4096 pages to flush */ #define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE) -static struct hv_flush_pcpu __percpu **pcpu_flush; - -static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex; /* * Fills in gva_list starting from offset. Returns the number of items added. @@ -70,41 +63,6 @@ static inline int fill_gva_list(u64 gva_list[], int offset, return gva_n - offset; } -/* Return the number of banks in the resulting vp_set */ -static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush, - const struct cpumask *cpus) -{ - int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1; - - /* valid_bank_mask can represent up to 64 banks */ - if (hv_max_vp_index / 64 >= 64) - return 0; - - /* - * Clear all banks up to the maximum possible bank as hv_flush_pcpu_ex - * structs are not cleared between calls, we risk flushing unneeded - * vCPUs otherwise. - */ - for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++) - flush->hv_vp_set.bank_contents[vcpu_bank] = 0; - - /* - * Some banks may end up being empty but this is acceptable. - */ - for_each_cpu(cpu, cpus) { - vcpu = hv_cpu_number_to_vp_number(cpu); - vcpu_bank = vcpu / 64; - vcpu_offset = vcpu % 64; - __set_bit(vcpu_offset, (unsigned long *) - &flush->hv_vp_set.bank_contents[vcpu_bank]); - if (vcpu_bank >= nr_bank) - nr_bank = vcpu_bank + 1; - } - flush->hv_vp_set.valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0); - - return nr_bank; -} - static void hyperv_flush_tlb_others(const struct cpumask *cpus, const struct flush_tlb_info *info) { @@ -116,7 +74,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus, trace_hyperv_mmu_flush_tlb_others(cpus, info); - if (!pcpu_flush || !hv_hypercall_pg) + if (!hv_hypercall_pg) goto do_native; if (cpumask_empty(cpus)) @@ -124,10 +82,8 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus, local_irq_save(flags); - flush_pcpu = this_cpu_ptr(pcpu_flush); - - if (unlikely(!*flush_pcpu)) - *flush_pcpu = page_address(alloc_page(GFP_ATOMIC)); + flush_pcpu = (struct hv_flush_pcpu **) + this_cpu_ptr(hyperv_pcpu_input_arg); flush = *flush_pcpu; @@ -203,7 +159,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus, trace_hyperv_mmu_flush_tlb_others(cpus, info); - if (!pcpu_flush_ex || !hv_hypercall_pg) + if (!hv_hypercall_pg) goto do_native; if (cpumask_empty(cpus)) @@ -211,10 +167,8 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus, local_irq_save(flags); - flush_pcpu = this_cpu_ptr(pcpu_flush_ex); - - if (unlikely(!*flush_pcpu)) - *flush_pcpu = page_address(alloc_page(GFP_ATOMIC)); + flush_pcpu = (struct hv_flush_pcpu_ex **) + this_cpu_ptr(hyperv_pcpu_input_arg); flush = *flush_pcpu; @@ -239,8 +193,8 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus, flush->hv_vp_set.valid_bank_mask = 0; if (!cpumask_equal(cpus, cpu_present_mask)) { - flush->hv_vp_set.format = HV_GENERIC_SET_SPARCE_4K; - nr_bank = cpumask_to_vp_set(flush, cpus); + flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K; + nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus); } if (!nr_bank) { @@ -296,14 +250,3 @@ void hyperv_setup_mmu_ops(void) 
pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others_ex; } } - -void hyper_alloc_mmu(void) -{ - if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED)) - return; - - if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) - pcpu_flush = alloc_percpu(struct hv_flush_pcpu *); - else - pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *); -} diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h new file mode 100644 index 000000000000..e958e28f7ab5 --- /dev/null +++ b/arch/x86/include/asm/cacheinfo.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_CACHEINFO_H +#define _ASM_X86_CACHEINFO_H + +void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id); + +#endif /* _ASM_X86_CACHEINFO_H */ diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h index e1c8dab86670..fb97cf7c4137 100644 --- a/arch/x86/include/asm/compat.h +++ b/arch/x86/include/asm/compat.h @@ -17,7 +17,6 @@ typedef u32 compat_size_t; typedef s32 compat_ssize_t; -typedef s32 compat_time_t; typedef s32 compat_clock_t; typedef s32 compat_pid_t; typedef u16 __compat_uid_t; @@ -46,16 +45,6 @@ typedef u32 compat_u32; typedef u64 __attribute__((aligned(4))) compat_u64; typedef u32 compat_uptr_t; -struct compat_timespec { - compat_time_t tv_sec; - s32 tv_nsec; -}; - -struct compat_timeval { - compat_time_t tv_sec; - s32 tv_usec; -}; - struct compat_stat { compat_dev_t st_dev; u16 __pad1; @@ -145,10 +134,10 @@ struct compat_ipc64_perm { struct compat_semid64_ds { struct compat_ipc64_perm sem_perm; - compat_time_t sem_otime; - compat_ulong_t __unused1; - compat_time_t sem_ctime; - compat_ulong_t __unused2; + compat_ulong_t sem_otime; + compat_ulong_t sem_otime_high; + compat_ulong_t sem_ctime; + compat_ulong_t sem_ctime_high; compat_ulong_t sem_nsems; compat_ulong_t __unused3; compat_ulong_t __unused4; @@ -156,12 +145,12 @@ struct compat_semid64_ds { struct compat_msqid64_ds { struct compat_ipc64_perm msg_perm; - compat_time_t msg_stime; - compat_ulong_t __unused1; - compat_time_t msg_rtime; - compat_ulong_t __unused2; - compat_time_t msg_ctime; - compat_ulong_t __unused3; + compat_ulong_t msg_stime; + compat_ulong_t msg_stime_high; + compat_ulong_t msg_rtime; + compat_ulong_t msg_rtime_high; + compat_ulong_t msg_ctime; + compat_ulong_t msg_ctime_high; compat_ulong_t msg_cbytes; compat_ulong_t msg_qnum; compat_ulong_t msg_qbytes; @@ -174,12 +163,12 @@ struct compat_msqid64_ds { struct compat_shmid64_ds { struct compat_ipc64_perm shm_perm; compat_size_t shm_segsz; - compat_time_t shm_atime; - compat_ulong_t __unused1; - compat_time_t shm_dtime; - compat_ulong_t __unused2; - compat_time_t shm_ctime; - compat_ulong_t __unused3; + compat_ulong_t shm_atime; + compat_ulong_t shm_atime_high; + compat_ulong_t shm_dtime; + compat_ulong_t shm_dtime_high; + compat_ulong_t shm_ctime; + compat_ulong_t shm_ctime_high; compat_pid_t shm_cpid; compat_pid_t shm_lpid; compat_ulong_t shm_nattch; diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 89ce4bfd241f..ce4d176b3d13 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -30,10 +30,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return dma_ops; } -int arch_dma_supported(struct device *dev, u64 mask); -#define arch_dma_supported arch_dma_supported - -bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); +bool arch_dma_alloc_attrs(struct device **dev); #define arch_dma_alloc_attrs 
arch_dma_alloc_attrs #endif diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index cc8f8fcf9b4a..c18ed65287d5 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -63,7 +63,7 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name #ifndef COMPILE_OFFSETS #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION) -#include <asm/compat.h> +#include <linux/compat.h> /* * Because ia32 syscalls do not map to x86_64 syscall numbers diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index 5ea2afd4c871..740a428acf1e 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -50,14 +50,6 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); #define inc_irq_stat(member) this_cpu_inc(irq_stat.member) -#define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending) - -#define __ARCH_SET_SOFTIRQ_PENDING - -#define set_softirq_pending(x) \ - this_cpu_write(irq_stat.__softirq_pending, (x)) -#define or_softirq_pending(x) this_cpu_or(irq_stat.__softirq_pending, (x)) - extern void ack_bad_irq(unsigned int irq); extern u64 arch_irq_stat_cpu(unsigned int cpu); diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h index 416cb0e0c496..3bfa92c2793c 100644 --- a/arch/x86/include/asm/hyperv-tlfs.h +++ b/arch/x86/include/asm/hyperv-tlfs.h @@ -164,6 +164,11 @@ */ #define HV_X64_DEPRECATING_AEOI_RECOMMENDED (1 << 9) +/* + * Recommend using cluster IPI hypercalls. + */ +#define HV_X64_CLUSTER_IPI_RECOMMENDED (1 << 10) + /* Recommend using the newer ExProcessorMasks interface */ #define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11) @@ -329,12 +334,17 @@ struct hv_tsc_emulation_status { #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \ (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1)) +#define HV_IPI_LOW_VECTOR 0x10 +#define HV_IPI_HIGH_VECTOR 0xff + /* Declare the various hypercall operations. 
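 * The HVCALL_SEND_IPI and HVCALL_SEND_IPI_EX codes added below back
 * the cluster-IPI fast path introduced in hv_apic.c.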
*/ #define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002 #define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003 #define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008 +#define HVCALL_SEND_IPI 0x000b #define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013 #define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014 +#define HVCALL_SEND_IPI_EX 0x0015 #define HVCALL_POST_MESSAGE 0x005c #define HVCALL_SIGNAL_EVENT 0x005d @@ -360,7 +370,7 @@ struct hv_tsc_emulation_status { #define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3) enum HV_GENERIC_SET_FORMAT { - HV_GENERIC_SET_SPARCE_4K, + HV_GENERIC_SET_SPARSE_4K, HV_GENERIC_SET_ALL, }; @@ -706,4 +716,22 @@ struct hv_enlightened_vmcs { #define HV_STIMER_AUTOENABLE (1ULL << 3) #define HV_STIMER_SINT(config) (__u8)(((config) >> 16) & 0x0F) +struct ipi_arg_non_ex { + u32 vector; + u32 reserved; + u64 cpu_mask; +}; + +struct hv_vpset { + u64 format; + u64 valid_bank_mask; + u64 bank_contents[]; +}; + +struct ipi_arg_ex { + u32 vector; + u32 reserved; + struct hv_vpset vp_set; +}; + #endif diff --git a/arch/x86/include/asm/intel_mid_vrtc.h b/arch/x86/include/asm/intel_mid_vrtc.h index 35555016b1be..0b44b1abe4d9 100644 --- a/arch/x86/include/asm/intel_mid_vrtc.h +++ b/arch/x86/include/asm/intel_mid_vrtc.h @@ -4,7 +4,7 @@ extern unsigned char vrtc_cmos_read(unsigned char reg); extern void vrtc_cmos_write(unsigned char val, unsigned char reg); -extern void vrtc_get_time(struct timespec *now); -extern int vrtc_set_mmss(const struct timespec *now); +extern void vrtc_get_time(struct timespec64 *now); +extern int vrtc_set_mmss(const struct timespec64 *now); #endif diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index f6e5b9375d8c..6de64840dd22 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -94,10 +94,10 @@ build_mmio_write(__writel, "l", unsigned int, "r", ) #ifdef CONFIG_X86_64 -build_mmio_read(readq, "q", unsigned long, "=r", :"memory") -build_mmio_read(__readq, "q", unsigned long, "=r", ) -build_mmio_write(writeq, "q", unsigned long, "r", :"memory") -build_mmio_write(__writeq, "q", unsigned long, "r", ) +build_mmio_read(readq, "q", u64, "=r", :"memory") +build_mmio_read(__readq, "q", u64, "=r", ) +build_mmio_write(writeq, "q", u64, "r", :"memory") +build_mmio_write(__writeq, "q", u64, "r", ) #define readq_relaxed(a) __readq(a) #define writeq_relaxed(v, a) __writeq(v, a) diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h index 1775a32f7ea6..97198001e567 100644 --- a/arch/x86/include/asm/mc146818rtc.h +++ b/arch/x86/include/asm/mc146818rtc.h @@ -95,8 +95,8 @@ static inline unsigned char current_lock_cmos_reg(void) unsigned char rtc_cmos_read(unsigned char addr); void rtc_cmos_write(unsigned char val, unsigned char addr); -extern int mach_set_rtc_mmss(const struct timespec *now); -extern void mach_get_cmos_time(struct timespec *now); +extern int mach_set_rtc_mmss(const struct timespec64 *now); +extern void mach_get_cmos_time(struct timespec64 *now); #define RTC_IRQ 8 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index cf9911b5a53c..bbc796eb0a3b 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -288,21 +288,6 @@ static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma, mpx_notify_unmap(mm, vma, start, end); } -#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS -static inline int vma_pkey(struct vm_area_struct *vma) -{ - unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 | - VM_PKEY_BIT2 | VM_PKEY_BIT3; - - return 
(vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT; -} -#else -static inline int vma_pkey(struct vm_area_struct *vma) -{ - return 0; -} -#endif - /* * We only want to enforce protection keys on the current process * because we effectively have no access to PKRU for other diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index b90e79610cf7..997192131b7b 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -122,6 +122,7 @@ static inline void hv_disable_stimer0_percpu_irq(int irq) {} #if IS_ENABLED(CONFIG_HYPERV) extern struct clocksource *hyperv_cs; extern void *hv_hypercall_pg; +extern void __percpu **hyperv_pcpu_input_arg; static inline u64 hv_do_hypercall(u64 control, void *input, void *output) { @@ -258,9 +259,41 @@ static inline int hv_cpu_number_to_vp_number(int cpu_number) return hv_vp_index[cpu_number]; } -void hyperv_init(void); +static inline int cpumask_to_vpset(struct hv_vpset *vpset, + const struct cpumask *cpus) +{ + int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1; + + /* valid_bank_mask can represent up to 64 banks */ + if (hv_max_vp_index / 64 >= 64) + return 0; + + /* + * Clear all banks up to the maximum possible bank as hv_flush_pcpu_ex + * structs are not cleared between calls, we risk flushing unneeded + * vCPUs otherwise. + */ + for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++) + vpset->bank_contents[vcpu_bank] = 0; + + /* + * Some banks may end up being empty but this is acceptable. + */ + for_each_cpu(cpu, cpus) { + vcpu = hv_cpu_number_to_vp_number(cpu); + vcpu_bank = vcpu / 64; + vcpu_offset = vcpu % 64; + __set_bit(vcpu_offset, (unsigned long *) + &vpset->bank_contents[vcpu_bank]); + if (vcpu_bank >= nr_bank) + nr_bank = vcpu_bank + 1; + } + vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0); + return nr_bank; +} + +void __init hyperv_init(void); void hyperv_setup_mmu_ops(void); -void hyper_alloc_mmu(void); void hyperv_report_panic(struct pt_regs *regs, long err); bool hv_is_hyperv_initialized(void); void hyperv_cleanup(void); @@ -269,6 +302,13 @@ void hyperv_reenlightenment_intr(struct pt_regs *regs); void set_hv_tscchange_cb(void (*cb)(void)); void clear_hv_tscchange_cb(void); void hyperv_stop_tsc_emulation(void); + +#ifdef CONFIG_X86_64 +void hv_apic_init(void); +#else +static inline void hv_apic_init(void) {} +#endif + #else /* CONFIG_HYPERV */ static inline void hyperv_init(void) {} static inline bool hv_is_hyperv_initialized(void) { return false; } diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index fda2114197b3..68b2c3150de1 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -62,8 +62,8 @@ #define NHM_C3_AUTO_DEMOTE (1UL << 25) #define NHM_C1_AUTO_DEMOTE (1UL << 26) #define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25) -#define SNB_C1_AUTO_UNDEMOTE (1UL << 27) -#define SNB_C3_AUTO_UNDEMOTE (1UL << 28) +#define SNB_C3_AUTO_UNDEMOTE (1UL << 27) +#define SNB_C1_AUTO_UNDEMOTE (1UL << 28) #define MSR_MTRRcap 0x000000fe diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 8b38df98548e..f6f6c63da62f 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -308,16 +308,20 @@ do { \ * lfence * jmp spec_trap * do_rop: - * mov %rax,(%rsp) + * mov %rax,(%rsp) for x86_64 + * mov %edx,(%esp) for x86_32 * retq * * Without retpolines configured: * - * jmp *%rax + * jmp *%rax for x86_64 + * jmp *%edx for x86_32 */ #ifdef CONFIG_RETPOLINE -# define 
RETPOLINE_RAX_BPF_JIT_SIZE 17 -# define RETPOLINE_RAX_BPF_JIT() \ +# ifdef CONFIG_X86_64 +# define RETPOLINE_RAX_BPF_JIT_SIZE 17 +# define RETPOLINE_RAX_BPF_JIT() \ +do { \ EMIT1_off32(0xE8, 7); /* callq do_rop */ \ /* spec_trap: */ \ EMIT2(0xF3, 0x90); /* pause */ \ @@ -325,11 +329,30 @@ do { \ EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \ /* do_rop: */ \ EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */ \ - EMIT1(0xC3); /* retq */ -#else -# define RETPOLINE_RAX_BPF_JIT_SIZE 2 -# define RETPOLINE_RAX_BPF_JIT() \ - EMIT2(0xFF, 0xE0); /* jmp *%rax */ + EMIT1(0xC3); /* retq */ \ +} while (0) +# else /* !CONFIG_X86_64 */ +# define RETPOLINE_EDX_BPF_JIT() \ +do { \ + EMIT1_off32(0xE8, 7); /* call do_rop */ \ + /* spec_trap: */ \ + EMIT2(0xF3, 0x90); /* pause */ \ + EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \ + EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \ + /* do_rop: */ \ + EMIT3(0x89, 0x14, 0x24); /* mov %edx,(%esp) */ \ + EMIT1(0xC3); /* ret */ \ +} while (0) +# endif +#else /* !CONFIG_RETPOLINE */ +# ifdef CONFIG_X86_64 +# define RETPOLINE_RAX_BPF_JIT_SIZE 2 +# define RETPOLINE_RAX_BPF_JIT() \ + EMIT2(0xFF, 0xE0); /* jmp *%rax */ +# else /* !CONFIG_X86_64 */ +# define RETPOLINE_EDX_BPF_JIT() \ + EMIT2(0xFF, 0xE2) /* jmp *%edx */ +# endif #endif #endif /* _ASM_X86_NOSPEC_BRANCH_H_ */ diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 2c5a966dc222..6afac386a434 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -53,7 +53,7 @@ #define __PHYSICAL_MASK_SHIFT 52 #ifdef CONFIG_X86_5LEVEL -#define __VIRTUAL_MASK_SHIFT (pgtable_l5_enabled ? 56 : 47) +#define __VIRTUAL_MASK_SHIFT (pgtable_l5_enabled() ? 56 : 47) #else #define __VIRTUAL_MASK_SHIFT 47 #endif diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 9be2bf13825b..d49bbf4bb5c8 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -574,14 +574,14 @@ static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd) } #define set_pgd(pgdp, pgdval) do { \ - if (pgtable_l5_enabled) \ + if (pgtable_l5_enabled()) \ __set_pgd(pgdp, pgdval); \ else \ set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd }); \ } while (0) #define pgd_clear(pgdp) do { \ - if (pgtable_l5_enabled) \ + if (pgtable_l5_enabled()) \ set_pgd(pgdp, __pgd(0)); \ } while (0) diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index d32175e30259..662963681ea6 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h @@ -117,9 +117,6 @@ void native_restore_msi_irqs(struct pci_dev *dev); #define native_setup_msi_irqs NULL #define native_teardown_msi_irq NULL #endif - -#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) - #endif /* __KERNEL__ */ #ifdef CONFIG_X86_64 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index 263c142a6a6c..ada6410fd2ec 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -167,7 +167,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, #if CONFIG_PGTABLE_LEVELS > 4 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d) { - if (!pgtable_l5_enabled) + if (!pgtable_l5_enabled()) return; paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT); set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d))); @@ -193,7 +193,7 @@ extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d); static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, unsigned long address) { - if (pgtable_l5_enabled) + if (pgtable_l5_enabled()) 
___p4d_free_tlb(tlb, p4d); } diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index f1633de5a675..99ecde23c3ec 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -65,7 +65,7 @@ extern pmdval_t early_pmd_flags; #ifndef __PAGETABLE_P4D_FOLDED #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd) -#define pgd_clear(pgd) (pgtable_l5_enabled ? native_pgd_clear(pgd) : 0) +#define pgd_clear(pgd) (pgtable_l5_enabled() ? native_pgd_clear(pgd) : 0) #endif #ifndef set_p4d @@ -881,7 +881,7 @@ static inline unsigned long p4d_index(unsigned long address) #if CONFIG_PGTABLE_LEVELS > 4 static inline int pgd_present(pgd_t pgd) { - if (!pgtable_l5_enabled) + if (!pgtable_l5_enabled()) return 1; return pgd_flags(pgd) & _PAGE_PRESENT; } @@ -898,9 +898,9 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd) #define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd)) /* to find an entry in a page-table-directory. */ -static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) +static __always_inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) { - if (!pgtable_l5_enabled) + if (!pgtable_l5_enabled()) return (p4d_t *)pgd; return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address); } @@ -909,7 +909,7 @@ static inline int pgd_bad(pgd_t pgd) { unsigned long ignore_flags = _PAGE_USER; - if (!pgtable_l5_enabled) + if (!pgtable_l5_enabled()) return 0; if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) @@ -920,7 +920,7 @@ static inline int pgd_bad(pgd_t pgd) static inline int pgd_none(pgd_t pgd) { - if (!pgtable_l5_enabled) + if (!pgtable_l5_enabled()) return 0; /* * There is no need to do a workaround for the KNL stray diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h index e3225e83db7d..d9a001a4a872 100644 --- a/arch/x86/include/asm/pgtable_32_types.h +++ b/arch/x86/include/asm/pgtable_32_types.h @@ -15,7 +15,7 @@ # include <asm/pgtable-2level_types.h> #endif -#define pgtable_l5_enabled 0 +#define pgtable_l5_enabled() 0 #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE - 1)) diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 877bc27718ae..3c5385f9a88f 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -220,7 +220,7 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d) { pgd_t pgd; - if (pgtable_l5_enabled || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) { + if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) { *p4dp = p4d; return; } diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index adb47552e6bb..054765ab2da2 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -22,12 +22,23 @@ typedef struct { pteval_t pte; } pte_t; #ifdef CONFIG_X86_5LEVEL extern unsigned int __pgtable_l5_enabled; -#ifndef pgtable_l5_enabled -#define pgtable_l5_enabled cpu_feature_enabled(X86_FEATURE_LA57) -#endif + +#ifdef USE_EARLY_PGTABLE_L5 +/* + * cpu_feature_enabled() is not available in early boot code. + * Use variable instead. 
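+ * Early boot code opts in by defining USE_EARLY_PGTABLE_L5 before
+ * including this header, and then reads __pgtable_l5_enabled directly.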
+ */ +static inline bool pgtable_l5_enabled(void) +{ + return __pgtable_l5_enabled; +} #else -#define pgtable_l5_enabled 0 -#endif +#define pgtable_l5_enabled() cpu_feature_enabled(X86_FEATURE_LA57) +#endif /* USE_EARLY_PGTABLE_L5 */ + +#else +#define pgtable_l5_enabled() 0 +#endif /* CONFIG_X86_5LEVEL */ extern unsigned int pgdir_shift; extern unsigned int ptrs_per_p4d; @@ -102,7 +113,7 @@ extern unsigned int ptrs_per_p4d; #define LDT_PGD_ENTRY_L4 -3UL #define LDT_PGD_ENTRY_L5 -112UL -#define LDT_PGD_ENTRY (pgtable_l5_enabled ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4) +#define LDT_PGD_ENTRY (pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4) #define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) #define __VMALLOC_BASE_L4 0xffffc90000000000UL @@ -116,7 +127,7 @@ extern unsigned int ptrs_per_p4d; #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT # define VMALLOC_START vmalloc_base -# define VMALLOC_SIZE_TB (pgtable_l5_enabled ? VMALLOC_SIZE_TB_L5 : VMALLOC_SIZE_TB_L4) +# define VMALLOC_SIZE_TB (pgtable_l5_enabled() ? VMALLOC_SIZE_TB_L5 : VMALLOC_SIZE_TB_L4) # define VMEMMAP_START vmemmap_base #else # define VMALLOC_START __VMALLOC_BASE_L4 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 1e5a40673953..99fff853c944 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -65,7 +65,6 @@ #define _PAGE_PKEY_BIT2 (_AT(pteval_t, 0)) #define _PAGE_PKEY_BIT3 (_AT(pteval_t, 0)) #endif -#define __HAVE_ARCH_PTE_SPECIAL #define _PAGE_PKEY_MASK (_PAGE_PKEY_BIT0 | \ _PAGE_PKEY_BIT1 | \ diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h index 851c04b7a092..19b137f1b3be 100644 --- a/arch/x86/include/asm/pkeys.h +++ b/arch/x86/include/asm/pkeys.h @@ -9,6 +9,11 @@ extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long init_val); +static inline bool arch_pkeys_enabled(void) +{ + return boot_cpu_has(X86_FEATURE_OSPKE); +} + /* * Try to dedicate one of the protection keys to be used as an * execute-only protection key. 
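 * (The hunk below also moves vma_pkey() into this header; it simply
 * shifts the VM_PKEY_BIT* bits of vma->vm_flags down by VM_PKEY_SHIFT.)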
@@ -116,4 +121,12 @@ extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long init_val); extern void copy_init_pkru_to_fpregs(void); +static inline int vma_pkey(struct vm_area_struct *vma) +{ + unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 | + VM_PKEY_BIT2 | VM_PKEY_BIT3; + + return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT; +} + #endif /*_ASM_X86_PKEYS_H */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 21a114914ba4..e28add6b791f 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -186,15 +186,6 @@ extern void identify_boot_cpu(void); extern void identify_secondary_cpu(struct cpuinfo_x86 *); extern void print_cpu_info(struct cpuinfo_x86 *); void print_cpu_msr(struct cpuinfo_x86 *); -extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); -extern u32 get_scattered_cpuid_leaf(unsigned int level, - unsigned int sub_leaf, - enum cpuid_regs_idx reg); -extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); -extern void init_amd_cacheinfo(struct cpuinfo_x86 *c); - -extern void detect_extended_topology(struct cpuinfo_x86 *c); -extern void detect_ht(struct cpuinfo_x86 *c); #ifdef CONFIG_X86_32 extern int have_cpuid_p(void); diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h index a7471dcd2205..b6033680d458 100644 --- a/arch/x86/include/asm/pvclock.h +++ b/arch/x86/include/asm/pvclock.h @@ -12,7 +12,7 @@ void pvclock_set_flags(u8 flags); unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src); void pvclock_read_wallclock(struct pvclock_wall_clock *wall, struct pvclock_vcpu_time_info *vcpu, - struct timespec *ts); + struct timespec64 *ts); void pvclock_resume(void); void pvclock_touch_watchdogs(void); diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h index 5e16b5d40d32..3e70bed8a978 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h @@ -7,6 +7,14 @@ #include <asm-generic/qspinlock_types.h> #include <asm/paravirt.h> +#define _Q_PENDING_LOOPS (1 << 9) + +#ifdef CONFIG_PARAVIRT_SPINLOCKS +extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +extern void __pv_init_lock_hash(void); +extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock); + #define queued_spin_unlock queued_spin_unlock /** * queued_spin_unlock - release a queued spinlock @@ -16,15 +24,9 @@ */ static inline void native_queued_spin_unlock(struct qspinlock *lock) { - smp_store_release((u8 *)lock, 0); + smp_store_release(&lock->locked, 0); } -#ifdef CONFIG_PARAVIRT_SPINLOCKS -extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); -extern void __pv_init_lock_hash(void); -extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); -extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock); - static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) { pv_queued_spin_lock_slowpath(lock, val); @@ -40,11 +42,6 @@ static inline bool vcpu_is_preempted(long cpu) { return pv_vcpu_is_preempted(cpu); } -#else -static inline void queued_spin_unlock(struct qspinlock *lock) -{ - native_queued_spin_unlock(lock); -} #endif #ifdef CONFIG_PARAVIRT diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h index 923307ea11c7..9ef5ee03d2d7 100644 --- 
a/arch/x86/include/asm/qspinlock_paravirt.h +++ b/arch/x86/include/asm/qspinlock_paravirt.h @@ -22,8 +22,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath); * * void __pv_queued_spin_unlock(struct qspinlock *lock) * { - * struct __qspinlock *l = (void *)lock; - * u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0); + * u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0); * * if (likely(lockval == _Q_LOCKED_VAL)) * return; diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index f75bff8f9d82..547c4fe50711 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -171,7 +171,6 @@ static inline int wbinvd_on_all_cpus(void) wbinvd(); return 0; } -#define smp_num_siblings 1 #endif /* CONFIG_SMP */ extern unsigned disabled_cpus; diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h index 4617a2bf123c..199218719a86 100644 --- a/arch/x86/include/asm/sparsemem.h +++ b/arch/x86/include/asm/sparsemem.h @@ -27,8 +27,8 @@ # endif #else /* CONFIG_X86_32 */ # define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */ -# define MAX_PHYSADDR_BITS (pgtable_l5_enabled ? 52 : 44) -# define MAX_PHYSMEM_BITS (pgtable_l5_enabled ? 52 : 46) +# define MAX_PHYSADDR_BITS (pgtable_l5_enabled() ? 52 : 44) +# define MAX_PHYSMEM_BITS (pgtable_l5_enabled() ? 52 : 46) #endif #endif /* CONFIG_SPARSEMEM */ diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index 133d9425fced..b6dc698f992a 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h @@ -111,4 +111,6 @@ static inline unsigned long caller_frame_pointer(void) return (unsigned long)frame; } +void show_opcodes(u8 *rip, const char *loglvl); +void show_ip(struct pt_regs *regs, const char *loglvl); #endif /* _ASM_X86_STACKTRACE_H */ diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index ce8b4da07e35..2d27236c16a3 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -170,7 +170,7 @@ struct x86_cpuinit_ops { void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node); }; -struct timespec; +struct timespec64; /** * struct x86_legacy_devices - legacy x86 devices @@ -264,8 +264,8 @@ struct x86_hyper_runtime { struct x86_platform_ops { unsigned long (*calibrate_cpu)(void); unsigned long (*calibrate_tsc)(void); - void (*get_wallclock)(struct timespec *ts); - int (*set_wallclock)(const struct timespec *ts); + void (*get_wallclock)(struct timespec64 *ts); + int (*set_wallclock)(const struct timespec64 *ts); void (*iommu_shutdown)(void); bool (*is_untracked_pat_range)(u64 start, u64 end); void (*nmi_init)(void); diff --git a/arch/x86/include/uapi/asm/sembuf.h b/arch/x86/include/uapi/asm/sembuf.h index cabd7476bd6c..89de6cd9f0a7 100644 --- a/arch/x86/include/uapi/asm/sembuf.h +++ b/arch/x86/include/uapi/asm/sembuf.h @@ -8,15 +8,24 @@ * between kernel and user space. * * Pad space is left for: - * - 64-bit time_t to solve y2038 problem * - 2 miscellaneous 32-bit values + * + * x86_64 and x32 incorrectly added padding here, so the structures + * are still incompatible with the padding on x86. */ struct semid64_ds { struct ipc64_perm sem_perm; /* permissions .. 
see ipc.h */ +#ifdef __i386__ + unsigned long sem_otime; /* last semop time */ + unsigned long sem_otime_high; + unsigned long sem_ctime; /* last change time */ + unsigned long sem_ctime_high; +#else __kernel_time_t sem_otime; /* last semop time */ __kernel_ulong_t __unused1; __kernel_time_t sem_ctime; /* last change time */ __kernel_ulong_t __unused2; +#endif __kernel_ulong_t sem_nsems; /* no. of semaphores in array */ __kernel_ulong_t __unused3; __kernel_ulong_t __unused4; diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index dfcbe6924eaf..5d0de79fdab0 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -1715,19 +1715,6 @@ static int proc_apm_show(struct seq_file *m, void *v) return 0; } -static int proc_apm_open(struct inode *inode, struct file *file) -{ - return single_open(file, proc_apm_show, NULL); -} - -static const struct file_operations apm_file_ops = { - .owner = THIS_MODULE, - .open = proc_apm_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static int apm(void *unused) { unsigned short bx; @@ -2360,7 +2347,7 @@ static int __init apm_init(void) set_desc_base(&gdt[APM_DS >> 3], (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4)); - proc_create("apm", 0, NULL, &apm_file_ops); + proc_create_single("apm", 0, NULL, proc_apm_show); kapmd_task = kthread_create(apm, NULL, "kapmd"); if (IS_ERR(kapmd_task)) { @@ -2446,7 +2433,7 @@ MODULE_PARM_DESC(idle_threshold, "System idle percentage above which to make APM BIOS idle calls"); module_param(idle_period, int, 0444); MODULE_PARM_DESC(idle_period, - "Period (in sec/100) over which to caculate the idle percentage"); + "Period (in sec/100) over which to calculate the idle percentage"); module_param(smp, bool, 0444); MODULE_PARM_DESC(smp, "Set this to enable APM use on an SMP platform. Use with caution on older systems"); diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index a66229f51b12..7a40196967cb 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -17,7 +17,7 @@ KCOV_INSTRUMENT_perf_event.o := n nostackp := $(call cc-option, -fno-stack-protector) CFLAGS_common.o := $(nostackp) -obj-y := intel_cacheinfo.o scattered.o topology.o +obj-y := cacheinfo.o scattered.o topology.o obj-y += common.o obj-y += rdrand.o obj-y += match.o diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 1b18be3f35a8..082d7875cef8 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -9,6 +9,7 @@ #include <linux/random.h> #include <asm/processor.h> #include <asm/apic.h> +#include <asm/cacheinfo.h> #include <asm/cpu.h> #include <asm/spec-ctrl.h> #include <asm/smp.h> @@ -298,7 +299,6 @@ static int nearby_node(int apicid) } #endif -#ifdef CONFIG_SMP /* * Fix up cpu_core_id for pre-F17h systems to be in the * [0 .. cores_per_node - 1] range. Not really needed but @@ -328,6 +328,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c) /* get information required for multi-node processors */ if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { + int err; u32 eax, ebx, ecx, edx; cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); @@ -346,21 +347,15 @@ static void amd_get_topology(struct cpuinfo_x86 *c) } /* - * We may have multiple LLCs if L3 caches exist, so check if we - * have an L3 cache by looking at the L3 cache CPUID leaf. + * In case leaf B is available, use it to derive + * topology information. */ - if (cpuid_edx(0x80000006)) { - if (c->x86 == 0x17) { - /* - * LLC is at the core complex level. 
- * Core complex id is ApicId[3]. - */ - per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; - } else { - /* LLC is at the node level. */ - per_cpu(cpu_llc_id, cpu) = node_id; - } - } + err = detect_extended_topology(c); + if (!err) + c->x86_coreid_bits = get_count_order(c->x86_max_cores); + + cacheinfo_amd_init_llc_id(c, cpu, node_id); + } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { u64 value; @@ -376,7 +371,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c) legacy_fixup_core_id(c); } } -#endif /* * On a AMD dual core setup the lower bits of the APIC id distinguish the cores. @@ -384,7 +378,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c) */ static void amd_detect_cmp(struct cpuinfo_x86 *c) { -#ifdef CONFIG_SMP unsigned bits; int cpu = smp_processor_id(); @@ -395,17 +388,11 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c) c->phys_proc_id = c->initial_apicid >> bits; /* use socket ID also for last level cache */ per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; - amd_get_topology(c); -#endif } u16 amd_get_nb_id(int cpu) { - u16 id = 0; -#ifdef CONFIG_SMP - id = per_cpu(cpu_llc_id, cpu); -#endif - return id; + return per_cpu(cpu_llc_id, cpu); } EXPORT_SYMBOL_GPL(amd_get_nb_id); @@ -864,6 +851,7 @@ static void init_amd(struct cpuinfo_x86 *c) /* Multi core CPU? */ if (c->extended_cpuid_level >= 0x80000008) { amd_detect_cmp(c); + amd_get_topology(c); srat_detect_node(c); } diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index 54d04d574148..38354c66df81 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -20,6 +20,8 @@ #include <asm/amd_nb.h> #include <asm/smp.h> +#include "cpu.h" + #define LVL_1_INST 1 #define LVL_1_DATA 2 #define LVL_2 3 @@ -637,6 +639,45 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c) return i; } +void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id) +{ + /* + * We may have multiple LLCs if L3 caches exist, so check if we + * have an L3 cache by looking at the L3 cache CPUID leaf. + */ + if (!cpuid_edx(0x80000006)) + return; + + if (c->x86 < 0x17) { + /* LLC is at the node level. */ + per_cpu(cpu_llc_id, cpu) = node_id; + } else if (c->x86 == 0x17 && + c->x86_model >= 0 && c->x86_model <= 0x1F) { + /* + * LLC is at the core complex level. + * Core complex ID is ApicId[3] for these processors. + */ + per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; + } else { + /* + * LLC ID is calculated from the number of threads sharing the + * cache. + */ + u32 eax, ebx, ecx, edx, num_sharing_cache = 0; + u32 llc_index = find_num_cache_leaves(c) - 1; + + cpuid_count(0x8000001d, llc_index, &eax, &ebx, &ecx, &edx); + if (eax) + num_sharing_cache = ((eax >> 14) & 0xfff) + 1; + + if (num_sharing_cache) { + int bits = get_count_order(num_sharing_cache) - 1; + + per_cpu(cpu_llc_id, cpu) = c->apicid >> bits; + } + } +} + void init_amd_cacheinfo(struct cpuinfo_x86 *c) { @@ -650,7 +691,7 @@ void init_amd_cacheinfo(struct cpuinfo_x86 *c) } } -unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c) +void init_intel_cacheinfo(struct cpuinfo_x86 *c) { /* Cache sizes */ unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; @@ -802,7 +843,8 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c) c->x86_cache_size = l3 ? l3 : (l2 ?
l2 : (l1i+l1d)); - return l2; + if (!l2) + cpu_detect_cache_sizes(c); } static int __cache_amd_cpumap_setup(unsigned int cpu, int index, diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index e5ec0f11c0de..14433ff5b828 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -18,6 +18,13 @@ #define RNG_ENABLED (1 << 3) #define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */ +#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000 +#define X86_VMX_FEATURE_PROC_CTLS_VNMI 0x00400000 +#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS 0x80000000 +#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001 +#define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002 +#define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020 + static void init_c3(struct cpuinfo_x86 *c) { u32 lo, hi; @@ -112,6 +119,31 @@ static void early_init_centaur(struct cpuinfo_x86 *c) } } +static void centaur_detect_vmx_virtcap(struct cpuinfo_x86 *c) +{ + u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2; + + rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high); + msr_ctl = vmx_msr_high | vmx_msr_low; + + if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW) + set_cpu_cap(c, X86_FEATURE_TPR_SHADOW); + if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI) + set_cpu_cap(c, X86_FEATURE_VNMI); + if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) { + rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, + vmx_msr_low, vmx_msr_high); + msr_ctl2 = vmx_msr_high | vmx_msr_low; + if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) && + (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)) + set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); + if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) + set_cpu_cap(c, X86_FEATURE_EPT); + if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID) + set_cpu_cap(c, X86_FEATURE_VPID); + } +} + static void init_centaur(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_32 @@ -128,6 +160,24 @@ static void init_centaur(struct cpuinfo_x86 *c) clear_cpu_cap(c, 0*32+31); #endif early_init_centaur(c); + init_intel_cacheinfo(c); + detect_num_cpu_cores(c); +#ifdef CONFIG_X86_32 + detect_ht(c); +#endif + + if (c->cpuid_level > 9) { + unsigned int eax = cpuid_eax(10); + + /* + * Check for version and the number of counters + * Version(eax[7:0]) can't be 0; + * Counters(eax[15:8]) should be greater than 1; + */ + if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1)) + set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); + } + switch (c->x86) { #ifdef CONFIG_X86_32 case 5: @@ -199,6 +249,9 @@ static void init_centaur(struct cpuinfo_x86 *c) #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); #endif + + if (cpu_has(c, X86_FEATURE_VMX)) + centaur_detect_vmx_virtcap(c); } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 38276f58d3bf..95c8e507580d 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -66,6 +66,13 @@ cpumask_var_t cpu_callin_mask; /* representing cpus for which sibling maps can be computed */ cpumask_var_t cpu_sibling_setup_mask; +/* Number of siblings per CPU package */ +int smp_num_siblings = 1; +EXPORT_SYMBOL(smp_num_siblings); + +/* Last level cache ID of each logical CPU */ +DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID; + /* correctly size the local cpu masks */ void __init setup_cpu_local_masks(void) { @@ -577,6 +584,19 @@ static void get_model_name(struct cpuinfo_x86 *c) *(s + 1) = '\0'; } +void detect_num_cpu_cores(struct cpuinfo_x86 *c) +{ + unsigned int eax, ebx, ecx, edx; + + c->x86_max_cores = 1; + if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4) 
+ return; + + cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); + if (eax & 0x1f) + c->x86_max_cores = (eax >> 26) + 1; +} + void cpu_detect_cache_sizes(struct cpuinfo_x86 *c) { unsigned int n, dummy, ebx, ecx, edx, l2size; @@ -1044,6 +1064,21 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) */ setup_clear_cpu_cap(X86_FEATURE_PCID); #endif + + /* + * Later in the boot process pgtable_l5_enabled() relies on + * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not + * enabled by this point we need to clear the feature bit to avoid + * false-positives at the later stage. + * + * pgtable_l5_enabled() can be false here for several reasons: + * - 5-level paging is disabled compile-time; + * - it's 32-bit kernel; + * - machine doesn't support 5-level paging; + * - user specified 'no5lvl' in kernel command line. + */ + if (!pgtable_l5_enabled()) + setup_clear_cpu_cap(X86_FEATURE_LA57); } void __init early_cpu_init(void) diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 37672d299e35..38216f678fc3 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -47,6 +47,16 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[], extern void get_cpu_cap(struct cpuinfo_x86 *c); extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); +extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); +extern u32 get_scattered_cpuid_leaf(unsigned int level, + unsigned int sub_leaf, + enum cpuid_regs_idx reg); +extern void init_intel_cacheinfo(struct cpuinfo_x86 *c); +extern void init_amd_cacheinfo(struct cpuinfo_x86 *c); + +extern void detect_num_cpu_cores(struct cpuinfo_x86 *c); +extern int detect_extended_topology(struct cpuinfo_x86 *c); +extern void detect_ht(struct cpuinfo_x86 *c); unsigned int aperfmperf_get_khz(int cpu); diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 577e7f7ae273..eb75564f2d25 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -456,24 +456,6 @@ static void srat_detect_node(struct cpuinfo_x86 *c) #endif } -/* - * find out the number of processor cores on the die - */ -static int intel_num_cpu_cores(struct cpuinfo_x86 *c) -{ - unsigned int eax, ebx, ecx, edx; - - if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4) - return 1; - - /* Intel has a non-standard dependency on %ecx for this CPUID level. */ - cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); - if (eax & 0x1f) - return (eax >> 26) + 1; - else - return 1; -} - static void detect_vmx_virtcap(struct cpuinfo_x86 *c) { /* Intel VMX MSR indicated features */ @@ -656,8 +638,6 @@ static void init_intel_misc_features(struct cpuinfo_x86 *c) static void init_intel(struct cpuinfo_x86 *c) { - unsigned int l2 = 0; - early_init_intel(c); intel_workarounds(c); @@ -674,19 +654,13 @@ static void init_intel(struct cpuinfo_x86 *c) * let's use the legacy cpuid vector 0x1 and 0x4 for topology * detection. 
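 * The leaf-4 core count logic itself now lives in the shared
 * detect_num_cpu_cores() helper in common.c, which centaur.c reuses.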
*/ - c->x86_max_cores = intel_num_cpu_cores(c); + detect_num_cpu_cores(c); #ifdef CONFIG_X86_32 detect_ht(c); #endif } - l2 = init_intel_cacheinfo(c); - - /* Detect legacy cache sizes if init_intel_cacheinfo did not */ - if (l2 == 0) { - cpu_detect_cache_sizes(c); - l2 = c->x86_cache_size; - } + init_intel_cacheinfo(c); if (c->cpuid_level > 9) { unsigned eax = cpuid_eax(10); @@ -699,7 +673,8 @@ static void init_intel(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); if (boot_cpu_has(X86_FEATURE_DS)) { - unsigned int l1; + unsigned int l1, l2; + rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); if (!(l1 & (1<<11))) set_cpu_cap(c, X86_FEATURE_BTS); @@ -727,6 +702,7 @@ static void init_intel(struct cpuinfo_x86 *c) * Dixon is NOT a Celeron. */ if (c->x86 == 6) { + unsigned int l2 = c->x86_cache_size; char *p = NULL; switch (c->x86_model) { diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c index 589b948e6e01..24bfa63e86cf 100644 --- a/arch/x86/kernel/cpu/intel_rdt.c +++ b/arch/x86/kernel/cpu/intel_rdt.c @@ -33,8 +33,8 @@ #include <asm/intel_rdt_sched.h> #include "intel_rdt.h" -#define MAX_MBA_BW 100u #define MBA_IS_LINEAR 0x4 +#define MBA_MAX_MBPS U32_MAX /* Mutex to protect rdtgroup access. */ DEFINE_MUTEX(rdtgroup_mutex); @@ -178,7 +178,7 @@ struct rdt_resource rdt_resources_all[] = { .msr_update = mba_wrmsr, .cache_level = 3, .parse_ctrlval = parse_bw, - .format_str = "%d=%*d", + .format_str = "%d=%*u", .fflags = RFTYPE_RES_MB, }, }; @@ -230,6 +230,14 @@ static inline void cache_alloc_hsw_probe(void) rdt_alloc_capable = true; } +bool is_mba_sc(struct rdt_resource *r) +{ + if (!r) + return rdt_resources_all[RDT_RESOURCE_MBA].membw.mba_sc; + + return r->membw.mba_sc; +} + /* * rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values * exposed to user interface and the h/w understandable delay values. @@ -341,7 +349,7 @@ static int get_cache_id(int cpu, int level) * that can be written to QOS_MSRs. * There are currently no SKUs which support non linear delay values. */ -static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r) +u32 delay_bw_map(unsigned long bw, struct rdt_resource *r) { if (r->membw.delay_linear) return MAX_MBA_BW - bw; @@ -431,25 +439,40 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, return NULL; } +void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm) +{ + int i; + + /* + * Initialize the Control MSRs to having no control. + * For Cache Allocation: Set all bits in cbm + * For Memory Allocation: Set b/w requested to 100% + * and the bandwidth in MBps to U32_MAX + */ + for (i = 0; i < r->num_closid; i++, dc++, dm++) { + *dc = r->default_ctrl; + *dm = MBA_MAX_MBPS; + } +} + static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d) { struct msr_param m; - u32 *dc; - int i; + u32 *dc, *dm; dc = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL); if (!dc) return -ENOMEM; - d->ctrl_val = dc; + dm = kmalloc_array(r->num_closid, sizeof(*d->mbps_val), GFP_KERNEL); + if (!dm) { + kfree(dc); + return -ENOMEM; + } - /* - * Initialize the Control MSRs to having no control. 
- * For Cache Allocation: Set all bits in cbm - * For Memory Allocation: Set b/w requested to 100 - */ - for (i = 0; i < r->num_closid; i++, dc++) - *dc = r->default_ctrl; + d->ctrl_val = dc; + d->mbps_val = dm; + setup_default_ctrlval(r, dc, dm); m.low = 0; m.high = r->num_closid; @@ -588,6 +611,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) } kfree(d->ctrl_val); + kfree(d->mbps_val); kfree(d->rmid_busy_llc); kfree(d->mbm_total); kfree(d->mbm_local); diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h index 3fd7a70ee04a..39752825e376 100644 --- a/arch/x86/kernel/cpu/intel_rdt.h +++ b/arch/x86/kernel/cpu/intel_rdt.h @@ -28,6 +28,7 @@ #define MBM_CNTR_WIDTH 24 #define MBM_OVERFLOW_INTERVAL 1000 +#define MAX_MBA_BW 100u #define RMID_VAL_ERROR BIT_ULL(63) #define RMID_VAL_UNAVAIL BIT_ULL(62) @@ -180,10 +181,20 @@ struct rftype { * struct mbm_state - status for each MBM counter in each domain * @chunks: Total data moved (multiply by rdt_group.mon_scale to get bytes) * @prev_msr: Value of IA32_QM_CTR for this RMID last time we read it + * @chunks_bw: Total local data moved. Used for bandwidth calculation + * @prev_bw_msr: Value of previous IA32_QM_CTR for bandwidth counting + * @prev_bw: The most recent bandwidth in MBps + * @delta_bw: Difference between the current and previous bandwidth + * @delta_comp: Indicates whether to compute the delta_bw */ struct mbm_state { u64 chunks; u64 prev_msr; + u64 chunks_bw; + u64 prev_bw_msr; + u32 prev_bw; + u32 delta_bw; + bool delta_comp; }; /** @@ -202,6 +213,7 @@ struct mbm_state { * @cqm_work_cpu: * worker cpu for CQM h/w counters * @ctrl_val: array of cache or mem ctrl values (indexed by CLOSID) + * @mbps_val: When mba_sc is enabled, this holds the bandwidth in MBps * @new_ctrl: new ctrl value to be loaded * @have_new_ctrl: did user provide new_ctrl for this domain */ @@ -217,6 +229,7 @@ struct rdt_domain { int mbm_work_cpu; int cqm_work_cpu; u32 *ctrl_val; + u32 *mbps_val; u32 new_ctrl; bool have_new_ctrl; }; @@ -259,6 +272,7 @@ struct rdt_cache { * @min_bw: Minimum memory bandwidth percentage user can request * @bw_gran: Granularity at which the memory bandwidth is allocated * @delay_linear: True if memory B/W delay is in linear scale + * @mba_sc: True if MBA software controller (mba_sc) is enabled * @mb_map: Mapping of memory B/W percentage to memory B/W delay */ struct rdt_membw { @@ -266,6 +280,7 @@ struct rdt_membw { u32 min_bw; u32 bw_gran; u32 delay_linear; + bool mba_sc; u32 *mb_map; }; @@ -445,6 +460,9 @@ void mon_event_read(struct rmid_read *rr, struct rdt_domain *d, void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms); void mbm_handle_overflow(struct work_struct *work); +bool is_mba_sc(struct rdt_resource *r); +void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm); +u32 delay_bw_map(unsigned long bw, struct rdt_resource *r); void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms); void cqm_handle_limbo(struct work_struct *work); bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d); diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c index 23e1d5c249c6..116d57b248d3 100644 --- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c +++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c @@ -53,7 +53,8 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r) return false; } - if (bw < r->membw.min_bw || bw > r->default_ctrl) { + if ((bw < r->membw.min_bw || bw >
r->default_ctrl) && + !is_mba_sc(r)) { rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw, r->membw.min_bw, r->default_ctrl); return false; @@ -179,6 +180,8 @@ static int update_domains(struct rdt_resource *r, int closid) struct msr_param msr_param; cpumask_var_t cpu_mask; struct rdt_domain *d; + bool mba_sc; + u32 *dc; int cpu; if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) @@ -188,13 +191,20 @@ static int update_domains(struct rdt_resource *r, int closid) msr_param.high = msr_param.low + 1; msr_param.res = r; + mba_sc = is_mba_sc(r); list_for_each_entry(d, &r->domains, list) { - if (d->have_new_ctrl && d->new_ctrl != d->ctrl_val[closid]) { + dc = !mba_sc ? d->ctrl_val : d->mbps_val; + if (d->have_new_ctrl && d->new_ctrl != dc[closid]) { cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); - d->ctrl_val[closid] = d->new_ctrl; + dc[closid] = d->new_ctrl; } } - if (cpumask_empty(cpu_mask)) + + /* + * Avoid writing the control msr with control values when + * MBA software controller is enabled + */ + if (cpumask_empty(cpu_mask) || mba_sc) goto done; cpu = get_cpu(); /* Update CBM on this cpu if it's in cpu_mask. */ @@ -282,13 +292,17 @@ static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid) { struct rdt_domain *dom; bool sep = false; + u32 ctrl_val; seq_printf(s, "%*s:", max_name_width, r->name); list_for_each_entry(dom, &r->domains, list) { if (sep) seq_puts(s, ";"); + + ctrl_val = (!is_mba_sc(r) ? dom->ctrl_val[closid] : + dom->mbps_val[closid]); seq_printf(s, r->format_str, dom->id, max_data_width, - dom->ctrl_val[closid]); + ctrl_val); sep = true; } seq_puts(s, "\n"); diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/intel_rdt_monitor.c index 681450eee428..b0f3aed76b75 100644 --- a/arch/x86/kernel/cpu/intel_rdt_monitor.c +++ b/arch/x86/kernel/cpu/intel_rdt_monitor.c @@ -225,10 +225,18 @@ void free_rmid(u32 rmid) list_add_tail(&entry->list, &rmid_free_lru); } +static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr) +{ + u64 shift = 64 - MBM_CNTR_WIDTH, chunks; + + chunks = (cur_msr << shift) - (prev_msr << shift); + return chunks >>= shift; +} + static int __mon_event_count(u32 rmid, struct rmid_read *rr) { - u64 chunks, shift, tval; struct mbm_state *m; + u64 chunks, tval; tval = __rmid_read(rmid, rr->evtid); if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) { @@ -254,14 +262,12 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr) } if (rr->first) { - m->prev_msr = tval; - m->chunks = 0; + memset(m, 0, sizeof(struct mbm_state)); + m->prev_bw_msr = m->prev_msr = tval; return 0; } - shift = 64 - MBM_CNTR_WIDTH; - chunks = (tval << shift) - (m->prev_msr << shift); - chunks >>= shift; + chunks = mbm_overflow_count(m->prev_msr, tval); m->chunks += chunks; m->prev_msr = tval; @@ -270,6 +276,32 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr) } /* + * Supporting function to calculate the memory bandwidth + * and delta bandwidth in MBps. 
+ */ +static void mbm_bw_count(u32 rmid, struct rmid_read *rr) +{ + struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3]; + struct mbm_state *m = &rr->d->mbm_local[rmid]; + u64 tval, cur_bw, chunks; + + tval = __rmid_read(rmid, rr->evtid); + if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) + return; + + chunks = mbm_overflow_count(m->prev_bw_msr, tval); + m->chunks_bw += chunks; + m->chunks = m->chunks_bw; + cur_bw = (chunks * r->mon_scale) >> 20; + + if (m->delta_comp) + m->delta_bw = abs(cur_bw - m->prev_bw); + m->delta_comp = false; + m->prev_bw = cur_bw; + m->prev_bw_msr = tval; +} + +/* * This is called via IPI to read the CQM/MBM counters * on a domain. */ @@ -297,6 +329,118 @@ void mon_event_count(void *info) } } +/* + * Feedback loop for MBA software controller (mba_sc) + * + * mba_sc is a feedback loop where we periodically read MBM counters and + * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so + * that: + * + * current bandwidth (cur_bw) < user-specified bandwidth (user_bw) + * + * This uses the MBM counters to measure the bandwidth and MBA throttle + * MSRs to control the bandwidth for a particular rdtgrp. It builds on the + * fact that resctrl rdtgroups have both monitoring and control. + * + * The frequency of the checks is 1s and we just tag along the MBM overflow + * timer. Having a 1s interval makes the calculation of bandwidth simpler. + * + * Although MBA's goal is to restrict the bandwidth to a maximum, there may + * be a need to increase the bandwidth to avoid unnecessarily restricting + * the L2 <-> L3 traffic. + * + * Since MBA controls the L2 external bandwidth whereas MBM measures the + * L3 external bandwidth, the following sequence could lead to such a + * situation. + * + * Consider an rdtgroup which had high L3 <-> memory traffic in its initial + * phases -> mba_sc kicks in and reduces bandwidth percentage values -> but + * after some time the rdtgroup has mostly L2 <-> L3 traffic. + * + * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its + * throttle MSRs already have low percentage values. To avoid + * unnecessarily restricting such rdtgroups, we also increase the bandwidth. + */ +static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) +{ + u32 closid, rmid, cur_msr, cur_msr_val, new_msr_val; + struct mbm_state *pmbm_data, *cmbm_data; + u32 cur_bw, delta_bw, user_bw; + struct rdt_resource *r_mba; + struct rdt_domain *dom_mba; + struct list_head *head; + struct rdtgroup *entry; + + r_mba = &rdt_resources_all[RDT_RESOURCE_MBA]; + closid = rgrp->closid; + rmid = rgrp->mon.rmid; + pmbm_data = &dom_mbm->mbm_local[rmid]; + + dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba); + if (!dom_mba) { + pr_warn_once("Failure to get domain for MBA update\n"); + return; + } + + cur_bw = pmbm_data->prev_bw; + user_bw = dom_mba->mbps_val[closid]; + delta_bw = pmbm_data->delta_bw; + cur_msr_val = dom_mba->ctrl_val[closid]; + + /* + * For Ctrl groups read data from child monitor groups. + */ + head = &rgrp->mon.crdtgrp_list; + list_for_each_entry(entry, head, mon.crdtgrp_list) { + cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid]; + cur_bw += cmbm_data->prev_bw; + delta_bw += cmbm_data->delta_bw; + } + + /* + * Scale up/down the bandwidth linearly for the ctrl group. The + * bandwidth step is the bandwidth granularity specified by the + * hardware. + * + * The delta_bw is used when increasing the bandwidth so that we + * don't alternately increase and decrease the control values + * continuously.
+ * + * For example: consider cur_bw = 90MBps, user_bw = 100MBps and if + * the bandwidth step is 20MBps (> user_bw - cur_bw), we would keep + * switching between 90 and 110 continuously if we only check + * cur_bw < user_bw. + */ + if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) { + new_msr_val = cur_msr_val - r_mba->membw.bw_gran; + } else if (cur_msr_val < MAX_MBA_BW && + (user_bw > (cur_bw + delta_bw))) { + new_msr_val = cur_msr_val + r_mba->membw.bw_gran; + } else { + return; + } + + cur_msr = r_mba->msr_base + closid; + wrmsrl(cur_msr, delay_bw_map(new_msr_val, r_mba)); + dom_mba->ctrl_val[closid] = new_msr_val; + + /* + * Delta values are updated dynamically package-wise for each + * rdtgrp every time the throttle MSR changes value. + * + * This is because (1) the increase in bandwidth is not perfectly + * linear and only "approximately" linear even when the hardware + * says it is linear. (2) Also, since MBA is a core-specific + * mechanism, the delta values vary based on the number of cores used + * by the rdtgrp. + */ + pmbm_data->delta_comp = true; + list_for_each_entry(entry, head, mon.crdtgrp_list) { + cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid]; + cmbm_data->delta_comp = true; + } +} + static void mbm_update(struct rdt_domain *d, int rmid) { struct rmid_read rr; @@ -314,7 +458,16 @@ static void mbm_update(struct rdt_domain *d, int rmid) } if (is_mbm_local_enabled()) { rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID; + + /* + * Call the MBA software controller only for the + * control groups and when the user has enabled + * the software controller explicitly. + */ + if (!is_mba_sc(NULL)) + __mon_event_count(rmid, &rr); + else + mbm_bw_count(rmid, &rr); } } @@ -385,6 +538,9 @@ void mbm_handle_overflow(struct work_struct *work) head = &prgrp->mon.crdtgrp_list; list_for_each_entry(crgrp, head, mon.crdtgrp_list) mbm_update(d, crgrp->mon.rmid); + + if (is_mba_sc(NULL)) + update_mba_bw(prgrp, d); } schedule_delayed_work_on(cpu, &d->mbm_over, delay); diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index fca759d272a1..749856a2e736 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -1005,6 +1005,11 @@ static void l2_qos_cfg_update(void *arg) wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); } +static inline bool is_mba_linear(void) +{ + return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear; +} + static int set_cache_qos_cfg(int level, bool enable) { void (*update)(void *arg); @@ -1041,6 +1046,28 @@ static int set_cache_qos_cfg(int level, bool enable) return 0; } +/* + * Enable or disable the MBA software controller, + * which helps the user specify bandwidth in MBps. + * The MBA software controller is supported only if + * MBM is supported and MBA is in linear scale.
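Condensed, the per-group decision that update_mba_bw() makes once a second is the following (a free-standing paraphrase with illustrative names, not kernel API; delta_bw is the hysteresis term discussed in the comment above). Userspace opts into the whole mechanism with the mba_MBps mount option added just below.

/* Sketch of the mba_sc feedback step: cur_msr_val is the current
 * throttle percentage; cur_bw, user_bw and delta_bw are in MBps. */
static unsigned int mba_sc_step(unsigned int cur_msr_val, unsigned int cur_bw,
				unsigned int user_bw, unsigned int delta_bw,
				unsigned int min_bw, unsigned int bw_gran,
				unsigned int max_bw)
{
	if (cur_msr_val > min_bw && user_bw < cur_bw)
		return cur_msr_val - bw_gran;	/* over budget: throttle */
	if (cur_msr_val < max_bw && user_bw > cur_bw + delta_bw)
		return cur_msr_val + bw_gran;	/* clearly under: open up */
	return cur_msr_val;			/* inside the hysteresis band */
}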
+ */ +static int set_mba_sc(bool mba_sc) +{ + struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA]; + struct rdt_domain *d; + + if (!is_mbm_enabled() || !is_mba_linear() || + mba_sc == is_mba_sc(r)) + return -EINVAL; + + r->membw.mba_sc = mba_sc; + list_for_each_entry(d, &r->domains, list) + setup_default_ctrlval(r, d->ctrl_val, d->mbps_val); + + return 0; +} + static int cdp_enable(int level, int data_type, int code_type) { struct rdt_resource *r_ldata = &rdt_resources_all[data_type]; @@ -1123,6 +1150,10 @@ static int parse_rdtgroupfs_options(char *data) ret = cdpl2_enable(); if (ret) goto out; + } else if (!strcmp(token, "mba_MBps")) { + ret = set_mba_sc(true); + if (ret) + goto out; } else { ret = -EINVAL; goto out; @@ -1445,6 +1476,8 @@ static void rdt_kill_sb(struct super_block *sb) cpus_read_lock(); mutex_lock(&rdtgroup_mutex); + set_mba_sc(false); + /* Put everything back to default values. */ for_each_alloc_enabled_rdt_resource(r) reset_all_ctrls(r); diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c index 475cb4f5f14f..c805a06e14c3 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c @@ -48,7 +48,7 @@ static struct dentry *dfs_inj; static u8 n_banks; -#define MAX_FLAG_OPT_SIZE 3 +#define MAX_FLAG_OPT_SIZE 4 #define NBCFG 0x44 enum injection_type { diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 42cf2880d0ed..cd76380af79f 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1727,6 +1727,21 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) } } +static void mce_centaur_feature_init(struct cpuinfo_x86 *c) +{ + struct mca_config *cfg = &mca_cfg; + + /* + * All newer Centaur CPUs support MCE broadcasting. Enable + * synchronization with a one-second timeout. + */ + if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) || + c->x86 > 6) { + if (cfg->monarch_timeout < 0) + cfg->monarch_timeout = USEC_PER_SEC; + } +} + static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) { switch (c->x86_vendor) { @@ -1739,6 +1754,9 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) mce_amd_feature_init(c); break; } + case X86_VENDOR_CENTAUR: + mce_centaur_feature_init(c); + break; default: break; diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index c8e038800591..f591b01930db 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -436,8 +436,7 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c) wrmsr(MSR_CU_DEF_ERR, low, high); } -static u32 smca_get_block_address(unsigned int cpu, unsigned int bank, - unsigned int block) +static u32 smca_get_block_address(unsigned int bank, unsigned int block) { u32 low, high; u32 addr = 0; @@ -456,13 +455,13 @@ static u32 smca_get_block_address(unsigned int cpu, unsigned int bank, * For SMCA enabled processors, BLKPTR field of the first MISC register * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
*/ - if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) + if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) goto out; if (!(low & MCI_CONFIG_MCAX)) goto out; - if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) && + if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) && (low & MASK_BLKPTR_LO)) addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); @@ -471,7 +470,7 @@ out: return addr; } -static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high, +static u32 get_block_address(u32 current_addr, u32 low, u32 high, unsigned int bank, unsigned int block) { u32 addr = 0, offset = 0; @@ -480,7 +479,7 @@ static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 hi return addr; if (mce_flags.smca) - return smca_get_block_address(cpu, bank, block); + return smca_get_block_address(bank, block); /* Fall back to method we used for older processors: */ switch (block) { @@ -558,7 +557,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) smca_configure(bank, cpu); for (block = 0; block < NR_BLOCKS; ++block) { - address = get_block_address(cpu, address, low, high, bank, block); + address = get_block_address(address, low, high, bank, block); if (!address) break; @@ -1175,7 +1174,7 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank, if (err) goto out_free; recurse: - address = get_block_address(cpu, address, low, high, bank, ++block); + address = get_block_address(address, low, high, bank, ++block); if (!address) return 0; diff --git a/arch/x86/kernel/cpu/mtrr/Makefile b/arch/x86/kernel/cpu/mtrr/Makefile index ad9e5ed81181..2ad9107ee980 100644 --- a/arch/x86/kernel/cpu/mtrr/Makefile +++ b/arch/x86/kernel/cpu/mtrr/Makefile @@ -1,3 +1,3 @@ -obj-y := main.o if.o generic.o cleanup.o +obj-y := mtrr.o if.o generic.o cleanup.o obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/mtrr.c index 7468de429087..9a19c800fe40 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/mtrr.c @@ -46,6 +46,7 @@ #include <linux/pci.h> #include <linux/smp.h> #include <linux/syscore_ops.h> +#include <linux/rcupdate.h> #include <asm/cpufeature.h> #include <asm/e820/api.h> @@ -100,7 +101,7 @@ static int have_wrcomb(void) if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS && dev->device == PCI_DEVICE_ID_SERVERWORKS_LE && dev->revision <= 5) { - pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n"); + pr_info("Serverworks LE rev < 6 detected. Write-combining disabled.\n"); pci_dev_put(dev); return 0; } @@ -110,7 +111,7 @@ static int have_wrcomb(void) */ if (dev->vendor == PCI_VENDOR_ID_INTEL && dev->device == PCI_DEVICE_ID_INTEL_82451NX) { - pr_info("mtrr: Intel 450NX MMC detected. Write-combining disabled.\n"); + pr_info("Intel 450NX MMC detected. 
Write-combining disabled.\n"); pci_dev_put(dev); return 0; } @@ -312,24 +313,24 @@ int mtrr_add_page(unsigned long base, unsigned long size, return error; if (type >= MTRR_NUM_TYPES) { - pr_warn("mtrr: type: %u invalid\n", type); + pr_warn("type: %u invalid\n", type); return -EINVAL; } /* If the type is WC, check that this processor supports it */ if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) { - pr_warn("mtrr: your processor doesn't support write-combining\n"); + pr_warn("your processor doesn't support write-combining\n"); return -ENOSYS; } if (!size) { - pr_warn("mtrr: zero sized request\n"); + pr_warn("zero sized request\n"); return -EINVAL; } if ((base | (base + size - 1)) >> (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) { - pr_warn("mtrr: base or size exceeds the MTRR width\n"); + pr_warn("base or size exceeds the MTRR width\n"); return -EINVAL; } @@ -360,8 +361,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, } else if (types_compatible(type, ltype)) continue; } - pr_warn("mtrr: 0x%lx000,0x%lx000 overlaps existing" - " 0x%lx000,0x%lx000\n", base, size, lbase, + pr_warn("0x%lx000,0x%lx000 overlaps existing 0x%lx000,0x%lx000\n", base, size, lbase, lsize); goto out; } @@ -369,7 +369,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, if (ltype != type) { if (types_compatible(type, ltype)) continue; - pr_warn("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n", + pr_warn("type mismatch for %lx000,%lx000 old: %s new: %s\n", base, size, mtrr_attrib_to_str(ltype), mtrr_attrib_to_str(type)); goto out; @@ -395,7 +395,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, } } } else { - pr_info("mtrr: no more MTRRs available\n"); + pr_info("no more MTRRs available\n"); } error = i; out: @@ -407,8 +407,8 @@ int mtrr_add_page(unsigned long base, unsigned long size, static int mtrr_check(unsigned long base, unsigned long size) { if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { - pr_warn("mtrr: size and base must be multiples of 4 kiB\n"); - pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base); + pr_warn("size and base must be multiples of 4 kiB\n"); + pr_debug("size: 0x%lx base: 0x%lx\n", size, base); dump_stack(); return -1; } @@ -499,22 +499,22 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) } } if (reg < 0) { - pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n", + pr_debug("no MTRR for %lx000,%lx000 found\n", base, size); goto out; } } if (reg >= max) { - pr_warn("mtrr: register: %d too big\n", reg); + pr_warn("register: %d too big\n", reg); goto out; } mtrr_if->get(reg, &lbase, &lsize, <ype); if (lsize < 1) { - pr_warn("mtrr: MTRR %d not used\n", reg); + pr_warn("MTRR %d not used\n", reg); goto out; } if (mtrr_usage_table[reg] < 1) { - pr_warn("mtrr: reg: %d has count=0\n", reg); + pr_warn("reg: %d has count=0\n", reg); goto out; } if (--mtrr_usage_table[reg] < 1) @@ -775,7 +775,7 @@ void __init mtrr_bp_init(void) } if (!mtrr_enabled()) { - pr_info("MTRR: Disabled\n"); + pr_info("Disabled\n"); /* * PAT initialization relies on MTRR's rendezvous handler. 
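The pr_info()/pr_warn() strings above lose their hand-written "mtrr: " prefixes. The usual kernel idiom behind such a cleanup is a pr_fmt() definition at the top of the file, so the prefix is applied uniformly; that hunk is not visible in this excerpt, so the define below is an assumption about how the renamed mtrr.c does it:

/* Assumed companion hunk: pr_fmt() must be defined before any include
 * that pulls in printk.h; every pr_*() call in the file then picks up
 * the prefix automatically. */
#define pr_fmt(fmt) "mtrr: " fmt

#include <linux/printk.h>

static void example(void)
{
	pr_info("Disabled\n");	/* emits "mtrr: Disabled" */
}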
@@ -793,6 +793,9 @@ void mtrr_ap_init(void) if (!use_intel() || mtrr_aps_delayed_init) return; + + rcu_cpu_starting(smp_processor_id()); + /* * Ideally we should hold mtrr_mutex here to avoid mtrr entries * being changed, but this routine will be called at cpu boot time, diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index b099024d339c..81c0afb39d0a 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c @@ -27,7 +27,7 @@ * exists, use it for populating initial_apicid and cpu topology * detection. */ -void detect_extended_topology(struct cpuinfo_x86 *c) +int detect_extended_topology(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP unsigned int eax, ebx, ecx, edx, sub_index; @@ -36,7 +36,7 @@ void detect_extended_topology(struct cpuinfo_x86 *c) static bool printed; if (c->cpuid_level < 0xb) - return; + return -1; cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx); @@ -44,7 +44,7 @@ * check if the cpuid leaf 0xb is actually implemented. */ if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE)) - return; + return -1; set_cpu_cap(c, X86_FEATURE_XTOPOLOGY); @@ -95,6 +95,6 @@ void detect_extended_topology(struct cpuinfo_x86 *c) c->cpu_core_id); printed = 1; } - return; #endif + return 0; } diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 18fa9d74c182..666a284116ac 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -22,11 +22,14 @@ #include <asm/stacktrace.h> #include <asm/unwind.h> +#define OPCODE_BUFSIZE 64 + int panic_on_unrecovered_nmi; int panic_on_io_nmi; -static unsigned int code_bytes = 64; static int die_counter; +static struct pt_regs exec_summary_regs; + bool in_task_stack(unsigned long *stack, struct task_struct *task, struct stack_info *info) { @@ -69,9 +72,62 @@ static void printk_stack_address(unsigned long address, int reliable, printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address); } +/* + * There are a couple of reasons for the 2/3rds prologue, courtesy of Linus: + * + * In the case where we don't have the exact kernel image (which, if we did, we can + * simply disassemble and navigate to the RIP), the purpose of the bigger + * prologue is to have more context and to be able to correlate the code from + * the different toolchains better. + * + * In addition, it helps in recreating the register allocation of the failing + * kernel and thus in making sense of the register dump. + * + * What is more, the additional complication of a variable-length insn arch like + * x86 warrants having a longer byte sequence before rIP so that the disassembler + * can "sync" up properly and find instruction boundaries when decoding the + * opcode bytes. + * + * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random + * guesstimate in an attempt to achieve all of the above.
+ */ +void show_opcodes(u8 *rip, const char *loglvl) +{ + unsigned int code_prologue = OPCODE_BUFSIZE * 2 / 3; + u8 opcodes[OPCODE_BUFSIZE]; + u8 *ip; + int i; + + printk("%sCode: ", loglvl); + + ip = (u8 *)rip - code_prologue; + if (probe_kernel_read(opcodes, ip, OPCODE_BUFSIZE)) { + pr_cont("Bad RIP value.\n"); + return; + } + + for (i = 0; i < OPCODE_BUFSIZE; i++, ip++) { + if (ip == rip) + pr_cont("<%02x> ", opcodes[i]); + else + pr_cont("%02x ", opcodes[i]); + } + pr_cont("\n"); +} + +void show_ip(struct pt_regs *regs, const char *loglvl) +{ +#ifdef CONFIG_X86_32 + printk("%sEIP: %pS\n", loglvl, (void *)regs->ip); +#else + printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip); +#endif + show_opcodes((u8 *)regs->ip, loglvl); +} + void show_iret_regs(struct pt_regs *regs) { - printk(KERN_DEFAULT "RIP: %04x:%pS\n", (int)regs->cs, (void *)regs->ip); + show_ip(regs, KERN_DEFAULT); printk(KERN_DEFAULT "RSP: %04x:%016lx EFLAGS: %08lx", (int)regs->ss, regs->sp, regs->flags); } @@ -267,7 +323,6 @@ unsigned long oops_begin(void) bust_spinlocks(1); return flags; } -EXPORT_SYMBOL_GPL(oops_begin); NOKPROBE_SYMBOL(oops_begin); void __noreturn rewind_stack_do_exit(int signr); @@ -287,6 +342,9 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr) raw_local_irq_restore(flags); oops_exit(); + /* Executive summary in case the oops scrolled away */ + __show_regs(&exec_summary_regs, true); + if (!signr) return; if (in_interrupt()) @@ -305,10 +363,10 @@ NOKPROBE_SYMBOL(oops_end); int __die(const char *str, struct pt_regs *regs, long err) { -#ifdef CONFIG_X86_32 - unsigned short ss; - unsigned long sp; -#endif + /* Save the regs of the first oops for the executive summary later. */ + if (!die_counter) + exec_summary_regs = *regs; + printk(KERN_DEFAULT "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter, IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "", @@ -318,26 +376,13 @@ int __die(const char *str, struct pt_regs *regs, long err) IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ? (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : ""); + show_regs(regs); + print_modules(); + if (notify_die(DIE_OOPS, str, regs, err, current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP) return 1; - print_modules(); - show_regs(regs); -#ifdef CONFIG_X86_32 - if (user_mode(regs)) { - sp = regs->sp; - ss = regs->ss; - } else { - sp = kernel_stack_pointer(regs); - savesegment(ss, ss); - } - printk(KERN_EMERG "EIP: %pS SS:ESP: %04x:%08lx\n", - (void *)regs->ip, ss, sp); -#else - /* Executive summary in case the oops scrolled away */ - printk(KERN_ALERT "RIP: %pS RSP: %016lx\n", (void *)regs->ip, regs->sp); -#endif return 0; } NOKPROBE_SYMBOL(__die); @@ -356,30 +401,9 @@ void die(const char *str, struct pt_regs *regs, long err) oops_end(flags, regs, sig); } -static int __init code_bytes_setup(char *s) -{ - ssize_t ret; - unsigned long val; - - if (!s) - return -EINVAL; - - ret = kstrtoul(s, 0, &val); - if (ret) - return ret; - - code_bytes = val; - if (code_bytes > 8192) - code_bytes = 8192; - - return 1; -} -__setup("code_bytes=", code_bytes_setup); - void show_regs(struct pt_regs *regs) { bool all = true; - int i; show_regs_print_info(KERN_DEFAULT); @@ -389,36 +413,8 @@ void show_regs(struct pt_regs *regs) __show_regs(regs, all); /* - * When in-kernel, we also print out the stack and code at the - * time of the fault.. + * When in-kernel, we also print out the stack at the time of the fault.
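The windowing in show_opcodes() is easy to verify in isolation: with an OPCODE_BUFSIZE of 64, the prologue is 64 * 2 / 3 = 42 bytes before the instruction pointer, leaving 21 bytes after the marked byte. A stand-alone demo of the dump format (hypothetical buffer, no kernel dependencies):

#include <stdio.h>

#define OPCODE_BUFSIZE 64

/* Mirror of the show_opcodes() loop: 42 bytes before "rip", then the
 * rip byte wrapped in <>, then the remaining 21 bytes. */
static void dump_window(const unsigned char *rip)
{
	const unsigned char *ip = rip - OPCODE_BUFSIZE * 2 / 3;
	int i;

	for (i = 0; i < OPCODE_BUFSIZE; i++, ip++)
		printf(ip == rip ? "<%02x> " : "%02x ", *ip);
	printf("\n");
}

int main(void)
{
	static const unsigned char text[128] = { [64] = 0xcc };

	dump_window(&text[64]);	/* the 0xcc byte prints as <cc> */
	return 0;
}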
*/ - if (!user_mode(regs)) { - unsigned int code_prologue = code_bytes * 43 / 64; - unsigned int code_len = code_bytes; - unsigned char c; - u8 *ip; - + if (!user_mode(regs)) show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT); - - printk(KERN_DEFAULT "Code: "); - - ip = (u8 *)regs->ip - code_prologue; - if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { - /* try starting at IP */ - ip = (u8 *)regs->ip; - code_len = code_len - code_prologue + 1; - } - for (i = 0; i < code_len; i++, ip++) { - if (ip < (u8 *)PAGE_OFFSET || - probe_kernel_address(ip, c)) { - pr_cont(" Bad RIP value."); - break; - } - if (ip == (u8 *)regs->ip) - pr_cont("<%02x> ", c); - else - pr_cont("%02x ", c); - } - } - pr_cont("\n"); } diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 6a2cb1442e05..d1f25c831447 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -155,7 +155,8 @@ static void __init __e820__range_add(struct e820_table *table, u64 start, u64 si int x = table->nr_entries; if (x >= ARRAY_SIZE(table->entries)) { - pr_err("e820: too many entries; ignoring [mem %#010llx-%#010llx]\n", start, start + size - 1); + pr_err("too many entries; ignoring [mem %#010llx-%#010llx]\n", + start, start + size - 1); return; } @@ -190,9 +191,10 @@ void __init e820__print_table(char *who) int i; for (i = 0; i < e820_table->nr_entries; i++) { - pr_info("%s: [mem %#018Lx-%#018Lx] ", who, - e820_table->entries[i].addr, - e820_table->entries[i].addr + e820_table->entries[i].size - 1); + pr_info("%s: [mem %#018Lx-%#018Lx] ", + who, + e820_table->entries[i].addr, + e820_table->entries[i].addr + e820_table->entries[i].size - 1); e820_print_type(e820_table->entries[i].type); pr_cont("\n"); @@ -574,7 +576,7 @@ void __init e820__update_table_print(void) if (e820__update_table(e820_table)) return; - pr_info("e820: modified physical RAM map:\n"); + pr_info("modified physical RAM map:\n"); e820__print_table("modified"); } @@ -636,9 +638,8 @@ __init void e820__setup_pci_gap(void) if (!found) { #ifdef CONFIG_X86_64 gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024; - pr_err( - "e820: Cannot find an available gap in the 32-bit address range\n" - "e820: PCI devices with unassigned 32-bit BARs may not work!\n"); + pr_err("Cannot find an available gap in the 32-bit address range\n"); + pr_err("PCI devices with unassigned 32-bit BARs may not work!\n"); #else gapstart = 0x10000000; #endif @@ -649,7 +650,8 @@ __init void e820__setup_pci_gap(void) */ pci_mem_start = gapstart; - pr_info("e820: [mem %#010lx-%#010lx] available for PCI devices\n", gapstart, gapstart + gapsize - 1); + pr_info("[mem %#010lx-%#010lx] available for PCI devices\n", + gapstart, gapstart + gapsize - 1); } /* @@ -711,7 +713,7 @@ void __init e820__memory_setup_extended(u64 phys_addr, u32 data_len) memcpy(e820_table_firmware, e820_table, sizeof(*e820_table_firmware)); early_memunmap(sdata, data_len); - pr_info("e820: extended physical RAM map:\n"); + pr_info("extended physical RAM map:\n"); e820__print_table("extended"); } @@ -780,7 +782,7 @@ u64 __init e820__memblock_alloc_reserved(u64 size, u64 align) addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); if (addr) { e820__range_update_kexec(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED); - pr_info("e820: update e820_table_kexec for e820__memblock_alloc_reserved()\n"); + pr_info("update e820_table_kexec for e820__memblock_alloc_reserved()\n"); e820__update_table_kexec(); } @@ -830,8 +832,8 @@ static unsigned long __init e820_end_pfn(unsigned long limit_pfn, enum e820_type if 
(last_pfn > max_arch_pfn) last_pfn = max_arch_pfn; - pr_info("e820: last_pfn = %#lx max_arch_pfn = %#lx\n", - last_pfn, max_arch_pfn); + pr_info("last_pfn = %#lx max_arch_pfn = %#lx\n", + last_pfn, max_arch_pfn); return last_pfn; } @@ -1005,7 +1007,7 @@ void __init e820__finish_early_params(void) if (e820__update_table(e820_table) < 0) early_panic("Invalid user supplied memory map"); - pr_info("e820: user-defined physical RAM map:\n"); + pr_info("user-defined physical RAM map:\n"); e820__print_table("user"); } } @@ -1238,7 +1240,7 @@ void __init e820__memory_setup(void) memcpy(e820_table_kexec, e820_table, sizeof(*e820_table_kexec)); memcpy(e820_table_firmware, e820_table, sizeof(*e820_table_firmware)); - pr_info("e820: BIOS-provided physical RAM map:\n"); + pr_info("BIOS-provided physical RAM map:\n"); e820__print_table(who); } diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index bae0d32e327b..da5d8ac60062 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -28,8 +28,6 @@ #include <asm/irq_remapping.h> #include <asm/early_ioremap.h> -#define dev_err(msg) pr_err("pci 0000:%02x:%02x.%d: %s", bus, slot, func, msg) - static void __init fix_hypertransport_config(int num, int slot, int func) { u32 htcfg; @@ -617,7 +615,8 @@ static void __init apple_airport_reset(int bus, int slot, int func) pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL); if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) { - dev_err("Cannot power up Apple AirPort card\n"); + pr_err("pci 0000:%02x:%02x.%d: Cannot power up Apple AirPort card\n", + bus, slot, func); return; } } @@ -628,7 +627,8 @@ static void __init apple_airport_reset(int bus, int slot, int func) mmio = early_ioremap(addr, BCM4331_MMIO_SIZE); if (!mmio) { - dev_err("Cannot iomap Apple AirPort card\n"); + pr_err("pci 0000:%02x:%02x.%d: Cannot iomap Apple AirPort card\n", + bus, slot, func); return; } diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 2d29e47c056e..a21d6ace648e 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -6,6 +6,10 @@ */ #define DISABLE_BRANCH_PROFILING + +/* cpu_feature_enabled() cannot be used this early */ +#define USE_EARLY_PGTABLE_L5 + #include <linux/init.h> #include <linux/linkage.h> #include <linux/types.h> @@ -32,11 +36,6 @@ #include <asm/microcode.h> #include <asm/kasan.h> -#ifdef CONFIG_X86_5LEVEL -#undef pgtable_l5_enabled -#define pgtable_l5_enabled __pgtable_l5_enabled -#endif - /* * Manage page tables very early on. */ @@ -45,8 +44,7 @@ static unsigned int __initdata next_early_pgt; pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); #ifdef CONFIG_X86_5LEVEL -unsigned int __pgtable_l5_enabled __ro_after_init; -EXPORT_SYMBOL(__pgtable_l5_enabled); +unsigned int __pgtable_l5_enabled __initdata; unsigned int pgdir_shift __ro_after_init = 39; EXPORT_SYMBOL(pgdir_shift); unsigned int ptrs_per_p4d __ro_after_init = 1; @@ -82,13 +80,14 @@ static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr) static bool __head check_la57_support(unsigned long physaddr) { - if (native_cpuid_eax(0) < 7) - return false; - - if (!(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)))) + /* + * 5-level paging is detected and enabled at kernel decompression + * stage. Only check if it has been enabled there.
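CR4.LA57 is bit 12 of CR4. Once the decompressor has switched to 5-level paging, testing that bit is all the kernel proper needs; re-running the CPUID check could even disagree with what was actually enabled. A minimal illustration with the constant spelled out:

#define X86_CR4_LA57	(1UL << 12)	/* 57-bit linear addresses */

/* 5-level paging is active iff CR4.LA57 is set; no CPUID needed once
 * the decompressor has made the decision. */
static inline int la57_active(unsigned long cr4)
{
	return !!(cr4 & X86_CR4_LA57);
}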
+ */ + if (!(native_read_cr4() & X86_CR4_LA57)) return false; - *fixup_int(&pgtable_l5_enabled, physaddr) = 1; + *fixup_int(&__pgtable_l5_enabled, physaddr) = 1; *fixup_int(&pgdir_shift, physaddr) = 48; *fixup_int(&ptrs_per_p4d, physaddr) = 512; *fixup_long(&page_offset_base, physaddr) = __PAGE_OFFSET_BASE_L5; @@ -281,7 +280,7 @@ again: * critical -- __PAGE_OFFSET would point us back into the dynamic * range and we might end up looping forever... */ - if (!pgtable_l5_enabled) + if (!pgtable_l5_enabled()) p4d_p = pgd_p; else if (pgd) p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base); diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 8ce4212e2b8d..b6be34ee88e9 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -975,8 +975,7 @@ int __init hpet_enable(void) cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY); hpet_writel(cfg, HPET_CFG); if (cfg) - pr_warn("HPET: Unrecognized bits %#x set in global cfg\n", - cfg); + pr_warn("Unrecognized bits %#x set in global cfg\n", cfg); for (i = 0; i <= last; ++i) { cfg = hpet_readl(HPET_Tn_CFG(i)); @@ -988,7 +987,7 @@ int __init hpet_enable(void) | HPET_TN_64BIT_CAP | HPET_TN_32BIT | HPET_TN_ROUTE | HPET_TN_FSB | HPET_TN_FSB_CAP); if (cfg) - pr_warn("HPET: Unrecognized bits %#x set in cfg#%u\n", + pr_warn("Unrecognized bits %#x set in cfg#%u\n", cfg, i); } hpet_print_config(); diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c index a15fe0e92cf9..108c48d0d40e 100644 --- a/arch/x86/kernel/jailhouse.c +++ b/arch/x86/kernel/jailhouse.c @@ -37,7 +37,7 @@ static uint32_t __init jailhouse_detect(void) return jailhouse_cpuid_base(); } -static void jailhouse_get_wallclock(struct timespec *now) +static void jailhouse_get_wallclock(struct timespec64 *now) { memset(now, 0, sizeof(*now)); } diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 8b26c9e01cc4..bf8d1eb7fca3 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -53,7 +53,7 @@ static struct pvclock_wall_clock *wall_clock; * have elapsed since the hypervisor wrote the data. 
So we try to account for * that with system time */ -static void kvm_get_wallclock(struct timespec *now) +static void kvm_get_wallclock(struct timespec64 *now) { struct pvclock_vcpu_time_info *vcpu_time; int low, high; @@ -72,7 +72,7 @@ static void kvm_get_wallclock(struct timespec *now) put_cpu(); } -static int kvm_set_wallclock(const struct timespec *now) +static int kvm_set_wallclock(const struct timespec64 *now) { return -ENODEV; } diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 6010449ca6d2..4c8acdfdc5a7 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -354,7 +354,8 @@ void arch_crash_save_vmcoreinfo(void) { VMCOREINFO_NUMBER(phys_base); VMCOREINFO_SYMBOL(init_top_pgt); - VMCOREINFO_NUMBER(pgtable_l5_enabled); + vmcoreinfo_append_str("NUMBER(pgtable_l5_enabled)=%d\n", + pgtable_l5_enabled()); #ifdef CONFIG_NUMA VMCOREINFO_SYMBOL(node_data); diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 77625b60a510..ab5d9dd668d2 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -15,13 +15,11 @@ #include <asm/x86_init.h> #include <asm/iommu_table.h> -static int forbid_dac __read_mostly; +static bool disable_dac_quirk __read_mostly; const struct dma_map_ops *dma_ops = &dma_direct_ops; EXPORT_SYMBOL(dma_ops); -static int iommu_sac_force __read_mostly; - #ifdef CONFIG_IOMMU_DEBUG int panic_on_overflow __read_mostly = 1; int force_iommu __read_mostly = 1; @@ -55,9 +53,6 @@ struct device x86_dma_fallback_dev = { }; EXPORT_SYMBOL(x86_dma_fallback_dev); -/* Number of entries preallocated for DMA-API debugging */ -#define PREALLOC_DMA_DEBUG_ENTRIES 65536 - void __init pci_iommu_alloc(void) { struct iommu_table_entry *p; @@ -76,7 +71,7 @@ void __init pci_iommu_alloc(void) } } -bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp) +bool arch_dma_alloc_attrs(struct device **dev) { if (!*dev) *dev = &x86_dma_fallback_dev; @@ -125,13 +120,13 @@ static __init int iommu_setup(char *p) if (!strncmp(p, "nomerge", 7)) iommu_merge = 0; if (!strncmp(p, "forcesac", 8)) - iommu_sac_force = 1; + pr_warn("forcesac option ignored.\n"); if (!strncmp(p, "allowdac", 8)) - forbid_dac = 0; + pr_warn("allowdac option ignored.\n"); if (!strncmp(p, "nodac", 5)) - forbid_dac = 1; + pr_warn("nodac option ignored.\n"); if (!strncmp(p, "usedac", 6)) { - forbid_dac = -1; + disable_dac_quirk = true; return 1; } #ifdef CONFIG_SWIOTLB @@ -156,40 +151,9 @@ static __init int iommu_setup(char *p) } early_param("iommu", iommu_setup); -int arch_dma_supported(struct device *dev, u64 mask) -{ -#ifdef CONFIG_PCI - if (mask > 0xffffffff && forbid_dac > 0) { - dev_info(dev, "PCI: Disallowing DAC for device\n"); - return 0; - } -#endif - - /* Tell the device to use SAC when IOMMU force is on. This - allows the driver to use cheaper accesses in some cases. - - Problem with this is that if we overflow the IOMMU area and - return DAC as fallback address the device may not handle it - correctly. - - As a special case some controllers have a 39bit address - mode that is as efficient as 32bit (aic79xx). Don't force - SAC for these. Assume all masks <= 40 bits are of this - type. Normally this doesn't make any difference, but gives - more gentle handling of IOMMU overflow. 
*/ - if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) { - dev_info(dev, "Force SAC with mask %Lx\n", mask); - return 0; - } - - return 1; -} -EXPORT_SYMBOL(arch_dma_supported); - static int __init pci_iommu_init(void) { struct iommu_table_entry *p; - dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); #ifdef CONFIG_PCI dma_debug_add_bus(&pci_bus_type); @@ -209,11 +173,17 @@ rootfs_initcall(pci_iommu_init); #ifdef CONFIG_PCI /* Many VIA bridges seem to corrupt data for DAC. Disable it here */ +static int via_no_dac_cb(struct pci_dev *pdev, void *data) +{ + pdev->dev.dma_32bit_limit = true; + return 0; +} + static void via_no_dac(struct pci_dev *dev) { - if (forbid_dac == 0) { + if (!disable_dac_quirk) { dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n"); - forbid_dac = 1; + pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL); } } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c index e47b2dbbdef3..c06c4c16c6b6 100644 --- a/arch/x86/kernel/perf_regs.c +++ b/arch/x86/kernel/perf_regs.c @@ -151,17 +151,19 @@ void perf_get_regs_user(struct perf_regs *regs_user, regs_user_copy->sp = user_regs->sp; regs_user_copy->cs = user_regs->cs; regs_user_copy->ss = user_regs->ss; - /* - * Most system calls don't save these registers, don't report them. + * Store user space frame-pointer value on sample + * to facilitate stack unwinding for cases when + * user space executable code has such support + * enabled at compile time: */ + regs_user_copy->bp = user_regs->bp; + regs_user_copy->bx = -1; - regs_user_copy->bp = -1; regs_user_copy->r12 = -1; regs_user_copy->r13 = -1; regs_user_copy->r14 = -1; regs_user_copy->r15 = -1; - /* * For this to be at all useful, we need a reasonable guess for * the ABI. Be careful: we're in NMI context, and we're diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 5224c6099184..0ae659de21eb 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -76,16 +76,14 @@ void __show_regs(struct pt_regs *regs, int all) savesegment(gs, gs); } - printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip); - printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags, - raw_smp_processor_id()); + show_ip(regs, KERN_DEFAULT); printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", regs->ax, regs->bx, regs->cx, regs->dx); printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n", regs->si, regs->di, regs->bp, sp); - printk(KERN_DEFAULT " DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n", - (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss); + printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n", + (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags); if (!all) return; diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index ed5c4cdf0a34..e2ee403865eb 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -1377,7 +1377,6 @@ static void fill_sigtrap_info(struct task_struct *tsk, tsk->thread.trap_nr = X86_TRAP_DB; tsk->thread.error_code = error_code; - memset(info, 0, sizeof(*info)); info->si_signo = SIGTRAP; info->si_code = si_code; info->si_addr = user_mode(regs) ? 
(void __user *)regs->ip : NULL; @@ -1395,6 +1394,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, { struct siginfo info; + clear_siginfo(&info); fill_sigtrap_info(tsk, regs, error_code, si_code, &info); /* Send us the fake SIGTRAP */ force_sig_info(SIGTRAP, &info, tsk); diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index 761f6af6efa5..637982efecd8 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -123,28 +123,35 @@ u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock, struct pvclock_vcpu_time_info *vcpu_time, - struct timespec *ts) + struct timespec64 *ts) { u32 version; u64 delta; - struct timespec now; + struct timespec64 now; /* get wallclock at system boot */ do { version = wall_clock->version; rmb(); /* fetch version before time */ + /* + * Note: wall_clock->sec is a u32 value, so it can + * only store dates between 1970 and 2106. To allow + * times beyond that, we need to create a new hypercall + * interface with an extended pvclock_wall_clock structure + * like ARM has. + */ now.tv_sec = wall_clock->sec; now.tv_nsec = wall_clock->nsec; rmb(); /* fetch time before checking version */ } while ((wall_clock->version & 1) || (version != wall_clock->version)); delta = pvclock_clocksource_read(vcpu_time); /* time since system boot */ - delta += now.tv_sec * (u64)NSEC_PER_SEC + now.tv_nsec; + delta += now.tv_sec * NSEC_PER_SEC + now.tv_nsec; now.tv_nsec = do_div(delta, NSEC_PER_SEC); now.tv_sec = delta; - set_normalized_timespec(ts, now.tv_sec, now.tv_nsec); + set_normalized_timespec64(ts, now.tv_sec, now.tv_nsec); } void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti) diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index f7b82ed7b5b5..586f718b8e95 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c @@ -39,7 +39,7 @@ EXPORT_SYMBOL(rtc_lock); * jump to the next second precisely 500 ms later. Check the Motorola * MC146818A or Dallas DS12887 data sheet for details. 
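The 1970..2106 range called out in the pvclock comment above is plain u32 arithmetic: 2^32 seconds is roughly 136 years past the Unix epoch. A quick check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* ~31.5 million seconds per year, ignoring leap days */
	uint64_t years = UINT32_MAX / (365ULL * 24 * 3600);

	printf("u32 seconds overflow around year %llu\n",
	       (unsigned long long)(1970 + years));	/* -> 2106 */
	return 0;
}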
*/ -int mach_set_rtc_mmss(const struct timespec *now) +int mach_set_rtc_mmss(const struct timespec64 *now) { unsigned long long nowtime = now->tv_sec; struct rtc_time tm; @@ -60,7 +60,7 @@ int mach_set_rtc_mmss(const struct timespec *now) return retval; } -void mach_get_cmos_time(struct timespec *now) +void mach_get_cmos_time(struct timespec64 *now) { unsigned int status, year, mon, day, hour, min, sec, century = 0; unsigned long flags; @@ -118,7 +118,7 @@ void mach_get_cmos_time(struct timespec *now) } else year += CMOS_YEARS_OFFS; - now->tv_sec = mktime(year, mon, day, hour, min, sec); + now->tv_sec = mktime64(year, mon, day, hour, min, sec); now->tv_nsec = 0; } @@ -145,13 +145,13 @@ void rtc_cmos_write(unsigned char val, unsigned char addr) } EXPORT_SYMBOL(rtc_cmos_write); -int update_persistent_clock(struct timespec now) +int update_persistent_clock64(struct timespec64 now) { return x86_platform.set_wallclock(&now); } /* not static: needed by APM */ -void read_persistent_clock(struct timespec *ts) +void read_persistent_clock64(struct timespec64 *ts) { x86_platform.get_wallclock(ts); } diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 5c623dfe39d1..2f86d883dd95 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1312,11 +1312,3 @@ static int __init register_kernel_offset_dumper(void) return 0; } __initcall(register_kernel_offset_dumper); - -void arch_show_smap(struct seq_file *m, struct vm_area_struct *vma) -{ - if (!boot_cpu_has(X86_FEATURE_OSPKE)) - return; - - seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma)); -} diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c index 14c057f29979..9ccbf0576cd0 100644 --- a/arch/x86/kernel/signal_compat.c +++ b/arch/x86/kernel/signal_compat.c @@ -29,7 +29,7 @@ static inline void signal_compat_build_tests(void) BUILD_BUG_ON(NSIGFPE != 15); BUILD_BUG_ON(NSIGSEGV != 7); BUILD_BUG_ON(NSIGBUS != 5); - BUILD_BUG_ON(NSIGTRAP != 4); + BUILD_BUG_ON(NSIGTRAP != 5); BUILD_BUG_ON(NSIGCHLD != 6); BUILD_BUG_ON(NSIGSYS != 1); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 9dd324ae4832..c2f7d1d2a5c3 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -81,13 +81,6 @@ #include <asm/cpu_device_id.h> #include <asm/spec-ctrl.h> -/* Number of siblings per CPU package */ -int smp_num_siblings = 1; -EXPORT_SYMBOL(smp_num_siblings); - -/* Last level cache ID of each logical CPU */ -DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID; - /* representing HT siblings of each logical CPU */ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map); EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index a3f15ed545b5..6a78d4b36a79 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/compat.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/sched/mm.h> @@ -19,7 +20,6 @@ #include <linux/elf.h> #include <asm/elf.h> -#include <asm/compat.h> #include <asm/ia32.h> #include <asm/syscalls.h> #include <asm/mpx.h> diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 03f3d7695dac..a535dd64de63 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -299,6 +299,7 @@ static void do_error_trap(struct pt_regs *regs, long error_code, char *str, if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) != NOTIFY_STOP) { cond_local_irq_enable(regs); + 
clear_siginfo(&info); do_trap(trapnr, signr, str, regs, error_code, fill_trap_info(regs, signr, trapnr, &info)); } @@ -854,6 +855,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr) task->thread.trap_nr = trapnr; task->thread.error_code = error_code; + clear_siginfo(&info); info.si_signo = SIGFPE; info.si_errno = 0; info.si_addr = (void __user *)uprobe_get_trap_addr(regs); @@ -929,6 +931,7 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); local_irq_enable(); + clear_siginfo(&info); info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_BADSTK; diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c index f44ce0fb3583..ff20b35e98dd 100644 --- a/arch/x86/kernel/umip.c +++ b/arch/x86/kernel/umip.c @@ -278,6 +278,7 @@ static void force_sig_info_umip_fault(void __user *addr, struct pt_regs *regs) tsk->thread.error_code = X86_PF_USER | X86_PF_WRITE; tsk->thread.trap_nr = X86_TRAP_PF; + clear_siginfo(&info); info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = SEGV_MAPERR; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index c84bb5396958..58d8d800875d 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -1083,8 +1083,8 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs return orig_ret_vaddr; if (nleft != rasize) { - pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, " - "%%ip=%#lx\n", current->pid, regs->sp, regs->ip); + pr_err("return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n", + current->pid, regs->sp, regs->ip); force_sig_info(SIGSEGV, SEND_SIG_FORCED, current); } diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 795f3a80e576..5e1458f609a1 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -117,11 +117,11 @@ SECTIONS #ifdef CONFIG_X86_64 . = ALIGN(PAGE_SIZE); - VMLINUX_SYMBOL(__entry_trampoline_start) = .; + __entry_trampoline_start = .; _entry_trampoline = .; *(.entry_trampoline) . = ALIGN(PAGE_SIZE); - VMLINUX_SYMBOL(__entry_trampoline_end) = .; + __entry_trampoline_end = .; ASSERT(. 
- _entry_trampoline == PAGE_SIZE, "entry trampoline is too big"); #endif diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 8494dbae41b9..d634f0332c0f 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3007,6 +3007,7 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct * { siginfo_t info; + clear_siginfo(&info); info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_MCEERR_AR; diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 7ebc9901dd05..9c5606d88f61 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -23,13 +23,13 @@ unsigned long __clear_user(void __user *addr, unsigned long size) asm volatile( " testq %[size8],%[size8]\n" " jz 4f\n" - "0: movq %[zero],(%[dst])\n" - " addq %[eight],%[dst]\n" + "0: movq $0,(%[dst])\n" + " addq $8,%[dst]\n" " decl %%ecx ; jnz 0b\n" "4: movq %[size1],%%rcx\n" " testl %%ecx,%%ecx\n" " jz 2f\n" - "1: movb %b[zero],(%[dst])\n" + "1: movb $0,(%[dst])\n" " incq %[dst]\n" " decl %%ecx ; jnz 1b\n" "2:\n" @@ -40,8 +40,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size) _ASM_EXTABLE(0b,3b) _ASM_EXTABLE(1b,2b) : [size8] "=&c"(size), [dst] "=&D" (__d0) - : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr), - [zero] "r" (0UL), [eight] "r" (8UL)); + : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr)); clac(); return size; } diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index cc7ff5957194..2f3c9196b834 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -360,7 +360,7 @@ static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st, void *pt) { if (__pa(pt) == __pa(kasan_zero_pmd) || - (pgtable_l5_enabled && __pa(pt) == __pa(kasan_zero_p4d)) || + (pgtable_l5_enabled() && __pa(pt) == __pa(kasan_zero_p4d)) || __pa(pt) == __pa(kasan_zero_pud)) { pgprotval_t prot = pte_flags(kasan_zero_pte[0]); note_page(m, st, __pgprot(prot), 0, 5); @@ -476,8 +476,8 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr, } } -#define pgd_large(a) (pgtable_l5_enabled ? pgd_large(a) : p4d_large(__p4d(pgd_val(a)))) -#define pgd_none(a) (pgtable_l5_enabled ? pgd_none(a) : p4d_none(__p4d(pgd_val(a)))) +#define pgd_large(a) (pgtable_l5_enabled() ? pgd_large(a) : p4d_large(__p4d(pgd_val(a)))) +#define pgd_none(a) (pgtable_l5_enabled() ? 
pgd_none(a) : p4d_none(__p4d(pgd_val(a)))) static inline bool is_hypervisor_range(int idx) { diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 73bd8c95ac71..9a84a0d08727 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -209,6 +209,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address, unsigned lsb = 0; siginfo_t info; + clear_siginfo(&info); info.si_signo = si_signo; info.si_errno = 0; info.si_code = si_code; @@ -439,7 +440,7 @@ static noinline int vmalloc_fault(unsigned long address) if (pgd_none(*pgd_k)) return -1; - if (pgtable_l5_enabled) { + if (pgtable_l5_enabled()) { if (pgd_none(*pgd)) { set_pgd(pgd, *pgd_k); arch_flush_lazy_mmu_mode(); @@ -454,7 +455,7 @@ static noinline int vmalloc_fault(unsigned long address) if (p4d_none(*p4d_k)) return -1; - if (p4d_none(*p4d) && !pgtable_l5_enabled) { + if (p4d_none(*p4d) && !pgtable_l5_enabled()) { set_p4d(p4d, *p4d_k); arch_flush_lazy_mmu_mode(); } else { @@ -828,6 +829,8 @@ static inline void show_signal_msg(struct pt_regs *regs, unsigned long error_code, unsigned long address, struct task_struct *tsk) { + const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG; + if (!unhandled_signal(tsk, SIGSEGV)) return; @@ -835,13 +838,14 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code, return; printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx", - task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, - tsk->comm, task_pid_nr(tsk), address, + loglvl, tsk->comm, task_pid_nr(tsk), address, (void *)regs->ip, (void *)regs->sp, error_code); print_vma_addr(KERN_CONT " in ", regs->ip); printk(KERN_CONT "\n"); + + show_opcodes((u8 *)regs->ip, loglvl); } static void diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c index a2f0c7e20fb0..fe7a12599d8e 100644 --- a/arch/x86/mm/ident_map.c +++ b/arch/x86/mm/ident_map.c @@ -123,7 +123,7 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page, result = ident_p4d_init(info, p4d, addr, next); if (result) return result; - if (pgtable_l5_enabled) { + if (pgtable_l5_enabled()) { set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag)); } else { /* diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 0a400606dea0..17383f9677fa 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -180,7 +180,7 @@ static void sync_global_pgds_l4(unsigned long start, unsigned long end) */ void sync_global_pgds(unsigned long start, unsigned long end) { - if (pgtable_l5_enabled) + if (pgtable_l5_enabled()) sync_global_pgds_l5(start, end); else sync_global_pgds_l4(start, end); @@ -643,7 +643,7 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end, unsigned long vaddr = (unsigned long)__va(paddr); int i = p4d_index(vaddr); - if (!pgtable_l5_enabled) + if (!pgtable_l5_enabled()) return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask); for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) { @@ -723,7 +723,7 @@ kernel_physical_mapping_init(unsigned long paddr_start, page_size_mask); spin_lock(&init_mm.page_table_lock); - if (pgtable_l5_enabled) + if (pgtable_l5_enabled()) pgd_populate(&init_mm, pgd, p4d); else p4d_populate(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d); @@ -1100,7 +1100,7 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end, * 5-level case we should free them. This code will have to change * to adapt for boot-time switching between 4 and 5 level page tables. 
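For reference, the clear_siginfo() calls these hunks add all follow one pattern: siginfo_t is a union with padding, so filling a stack-allocated copy field by field and sending it leaves uninitialized kernel stack bytes in the parts no field touched. A minimal sketch of the idiom, assuming the <linux/signal.h> helper is a plain memset:

	/* Zero the whole structure, padding included, before use. */
	static inline void clear_siginfo(struct siginfo *info)
	{
		memset(info, 0, sizeof(*info));
	}

	siginfo_t info;

	clear_siginfo(&info);		/* the line these hunks add */
	info.si_signo = SIGSEGV;	/* then fill only the used fields */
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;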
*/ - if (pgtable_l5_enabled) + if (pgtable_l5_enabled()) free_pud_table(pud_base, p4d); } diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 980dbebd0ca7..e3e77527f8df 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -2,10 +2,8 @@ #define DISABLE_BRANCH_PROFILING #define pr_fmt(fmt) "kasan: " fmt -#ifdef CONFIG_X86_5LEVEL -/* Too early to use cpu_feature_enabled() */ -#define pgtable_l5_enabled __pgtable_l5_enabled -#endif +/* cpu_feature_enabled() cannot be used this early */ +#define USE_EARLY_PGTABLE_L5 #include <linux/bootmem.h> #include <linux/kasan.h> @@ -182,7 +180,7 @@ static void __init clear_pgds(unsigned long start, * With folded p4d, pgd_clear() is nop, use p4d_clear() * instead. */ - if (pgtable_l5_enabled) + if (pgtable_l5_enabled()) pgd_clear(pgd); else p4d_clear(p4d_offset(pgd, start)); @@ -197,7 +195,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr) { unsigned long p4d; - if (!pgtable_l5_enabled) + if (!pgtable_l5_enabled()) return (p4d_t *)pgd; p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK; @@ -284,7 +282,7 @@ void __init kasan_early_init(void) for (i = 0; i < PTRS_PER_PUD; i++) kasan_zero_pud[i] = __pud(pud_val); - for (i = 0; pgtable_l5_enabled && i < PTRS_PER_P4D; i++) + for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++) kasan_zero_p4d[i] = __p4d(p4d_val); kasan_map_early_shadow(early_top_pgt); @@ -315,7 +313,7 @@ void __init kasan_init(void) * bunch of things like kernel code, modules, EFI mapping, etc. * We need to take extra steps to not overwrite them. */ - if (pgtable_l5_enabled) { + if (pgtable_l5_enabled()) { void *ptr; ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END)); diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 615cc03ced84..61db77b0eda9 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -78,7 +78,7 @@ void __init kernel_randomize_memory(void) struct rnd_state rand_state; unsigned long remain_entropy; - vaddr_start = pgtable_l5_enabled ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4; + vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4; vaddr = vaddr_start; /* @@ -124,7 +124,7 @@ void __init kernel_randomize_memory(void) */ entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i); prandom_bytes_state(&rand_state, &rand, sizeof(rand)); - if (pgtable_l5_enabled) + if (pgtable_l5_enabled()) entropy = (rand % (entropy + 1)) & P4D_MASK; else entropy = (rand % (entropy + 1)) & PUD_MASK; @@ -136,7 +136,7 @@ void __init kernel_randomize_memory(void) * randomization alignment. 
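The pgtable_l5_enabled -> pgtable_l5_enabled() conversions throughout these files swap a direct variable read for a function-style accessor. A plausible shape for that accessor, sketched under the assumption that it folds to a CPU-feature test once feature detection has run, with USE_EARLY_PGTABLE_L5 (defined above for kasan_init_64.c) opting early boot code back into the raw variable:

	/* Illustrative sketch only; the real definition lives in the
	 * x86 pgtable headers. */
	static inline bool pgtable_l5_enabled(void)
	{
	#ifdef USE_EARLY_PGTABLE_L5
		return __pgtable_l5_enabled;	/* cpu_feature_enabled() too early */
	#else
		return cpu_feature_enabled(X86_FEATURE_LA57);
	#endif
	}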
*/ vaddr += get_padding(&kaslr_regions[i]); - if (pgtable_l5_enabled) + if (pgtable_l5_enabled()) vaddr = round_up(vaddr + 1, P4D_SIZE); else vaddr = round_up(vaddr + 1, PUD_SIZE); @@ -212,7 +212,7 @@ void __meminit init_trampoline(void) return; } - if (pgtable_l5_enabled) + if (pgtable_l5_enabled()) init_trampoline_p4d(); else init_trampoline_pud(); diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 25504d5aa816..fa150855647c 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -136,13 +136,13 @@ static int __init numa_add_memblk_to(int nid, u64 start, u64 end, /* whine about and ignore invalid blks */ if (start > end || nid < 0 || nid >= MAX_NUMNODES) { - pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n", - nid, start, end - 1); + pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n", + nid, start, end - 1); return 0; } if (mi->nr_blks >= NR_NODE_MEMBLKS) { - pr_err("NUMA: too many memblk ranges\n"); + pr_err("too many memblk ranges\n"); return -EINVAL; } @@ -267,14 +267,14 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi) */ if (bi->end > bj->start && bi->start < bj->end) { if (bi->nid != bj->nid) { - pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n", + pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n", bi->nid, bi->start, bi->end - 1, bj->nid, bj->start, bj->end - 1); return -EINVAL; } - pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n", - bi->nid, bi->start, bi->end - 1, - bj->start, bj->end - 1); + pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n", + bi->nid, bi->start, bi->end - 1, + bj->start, bj->end - 1); } /* @@ -364,7 +364,7 @@ static int __init numa_alloc_distance(void) phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), size, PAGE_SIZE); if (!phys) { - pr_warning("NUMA: Warning: can't allocate distance table!\n"); + pr_warn("Warning: can't allocate distance table!\n"); /* don't retry until explicitly reset */ numa_distance = (void *)1LU; return -ENOMEM; @@ -410,14 +410,14 @@ void __init numa_set_distance(int from, int to, int distance) if (from >= numa_distance_cnt || to >= numa_distance_cnt || from < 0 || to < 0) { - pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n", - from, to, distance); + pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n", + from, to, distance); return; } if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) { - pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n", + pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n", from, to, distance); return; } diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index ffc8c13c50e4..938dbcd46b97 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -114,13 +114,12 @@ static inline void pgd_list_del(pgd_t *pgd) static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) { - BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm)); - virt_to_page(pgd)->index = (pgoff_t)mm; + virt_to_page(pgd)->pt_mm = mm; } struct mm_struct *pgd_page_get_mm(struct page *page) { - return (struct mm_struct *)page->index; + return page->pt_mm; } static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index e055d1a06699..6eb1f34c3c85 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ 
-157,7 +157,7 @@ static void sync_current_stack_to_mm(struct mm_struct *mm) unsigned long sp = current_stack_pointer; pgd_t *pgd = pgd_offset(mm, sp); - if (pgtable_l5_enabled) { + if (pgtable_l5_enabled()) { if (unlikely(pgd_none(*pgd))) { pgd_t *pgd_ref = pgd_offset_k(sp); diff --git a/arch/x86/net/Makefile b/arch/x86/net/Makefile index fefb4b619598..59e123da580c 100644 --- a/arch/x86/net/Makefile +++ b/arch/x86/net/Makefile @@ -1,6 +1,9 @@ # # Arch-specific network modules # -OBJECT_FILES_NON_STANDARD_bpf_jit.o += y -obj-$(CONFIG_BPF_JIT) += bpf_jit.o bpf_jit_comp.o +ifeq ($(CONFIG_X86_32),y) + obj-$(CONFIG_BPF_JIT) += bpf_jit_comp32.o +else + obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o +endif diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S deleted file mode 100644 index b33093f84528..000000000000 --- a/arch/x86/net/bpf_jit.S +++ /dev/null @@ -1,154 +0,0 @@ -/* bpf_jit.S : BPF JIT helper functions - * - * Copyright (C) 2011 Eric Dumazet (eric.dumazet@gmail.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; version 2 - * of the License. - */ -#include <linux/linkage.h> -#include <asm/frame.h> - -/* - * Calling convention : - * rbx : skb pointer (callee saved) - * esi : offset of byte(s) to fetch in skb (can be scratched) - * r10 : copy of skb->data - * r9d : hlen = skb->len - skb->data_len - */ -#define SKBDATA %r10 -#define SKF_MAX_NEG_OFF $(-0x200000) /* SKF_LL_OFF from filter.h */ - -#define FUNC(name) \ - .globl name; \ - .type name, @function; \ - name: - -FUNC(sk_load_word) - test %esi,%esi - js bpf_slow_path_word_neg - -FUNC(sk_load_word_positive_offset) - mov %r9d,%eax # hlen - sub %esi,%eax # hlen - offset - cmp $3,%eax - jle bpf_slow_path_word - mov (SKBDATA,%rsi),%eax - bswap %eax /* ntohl() */ - ret - -FUNC(sk_load_half) - test %esi,%esi - js bpf_slow_path_half_neg - -FUNC(sk_load_half_positive_offset) - mov %r9d,%eax - sub %esi,%eax # hlen - offset - cmp $1,%eax - jle bpf_slow_path_half - movzwl (SKBDATA,%rsi),%eax - rol $8,%ax # ntohs() - ret - -FUNC(sk_load_byte) - test %esi,%esi - js bpf_slow_path_byte_neg - -FUNC(sk_load_byte_positive_offset) - cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */ - jle bpf_slow_path_byte - movzbl (SKBDATA,%rsi),%eax - ret - -/* rsi contains offset and can be scratched */ -#define bpf_slow_path_common(LEN) \ - lea 32(%rbp), %rdx;\ - FRAME_BEGIN; \ - mov %rbx, %rdi; /* arg1 == skb */ \ - push %r9; \ - push SKBDATA; \ -/* rsi already has offset */ \ - mov $LEN,%ecx; /* len */ \ - call skb_copy_bits; \ - test %eax,%eax; \ - pop SKBDATA; \ - pop %r9; \ - FRAME_END - - -bpf_slow_path_word: - bpf_slow_path_common(4) - js bpf_error - mov 32(%rbp),%eax - bswap %eax - ret - -bpf_slow_path_half: - bpf_slow_path_common(2) - js bpf_error - mov 32(%rbp),%ax - rol $8,%ax - movzwl %ax,%eax - ret - -bpf_slow_path_byte: - bpf_slow_path_common(1) - js bpf_error - movzbl 32(%rbp),%eax - ret - -#define sk_negative_common(SIZE) \ - FRAME_BEGIN; \ - mov %rbx, %rdi; /* arg1 == skb */ \ - push %r9; \ - push SKBDATA; \ -/* rsi already has offset */ \ - mov $SIZE,%edx; /* size */ \ - call bpf_internal_load_pointer_neg_helper; \ - test %rax,%rax; \ - pop SKBDATA; \ - pop %r9; \ - FRAME_END; \ - jz bpf_error - -bpf_slow_path_word_neg: - cmp SKF_MAX_NEG_OFF, %esi /* test range */ - jl bpf_error /* offset lower -> error */ - -FUNC(sk_load_word_negative_offset) - sk_negative_common(4) - mov (%rax), %eax - bswap 
%eax - ret - -bpf_slow_path_half_neg: - cmp SKF_MAX_NEG_OFF, %esi - jl bpf_error - -FUNC(sk_load_half_negative_offset) - sk_negative_common(2) - mov (%rax),%ax - rol $8,%ax - movzwl %ax,%eax - ret - -bpf_slow_path_byte_neg: - cmp SKF_MAX_NEG_OFF, %esi - jl bpf_error - -FUNC(sk_load_byte_negative_offset) - sk_negative_common(1) - movzbl (%rax), %eax - ret - -bpf_error: -# force a return 0 from jit handler - xor %eax,%eax - mov (%rbp),%rbx - mov 8(%rbp),%r13 - mov 16(%rbp),%r14 - mov 24(%rbp),%r15 - add $40, %rbp - leaveq - ret diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 263c8453815e..8fca446aaef6 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1,4 +1,5 @@ -/* bpf_jit_comp.c : BPF JIT compiler +/* + * bpf_jit_comp.c: BPF JIT compiler * * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com) * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com @@ -16,15 +17,6 @@ #include <asm/set_memory.h> #include <asm/nospec-branch.h> -/* - * assembly code in arch/x86/net/bpf_jit.S - */ -extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; -extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[]; -extern u8 sk_load_byte_positive_offset[]; -extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[]; -extern u8 sk_load_byte_negative_offset[]; - static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) { if (len == 1) @@ -45,14 +37,15 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) + #define EMIT1_off32(b1, off) \ - do {EMIT1(b1); EMIT(off, 4); } while (0) + do { EMIT1(b1); EMIT(off, 4); } while (0) #define EMIT2_off32(b1, b2, off) \ - do {EMIT2(b1, b2); EMIT(off, 4); } while (0) + do { EMIT2(b1, b2); EMIT(off, 4); } while (0) #define EMIT3_off32(b1, b2, b3, off) \ - do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0) + do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0) #define EMIT4_off32(b1, b2, b3, b4, off) \ - do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) + do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) static bool is_imm8(int value) { @@ -70,9 +63,10 @@ static bool is_uimm32(u64 value) } /* mov dst, src */ -#define EMIT_mov(DST, SRC) \ - do {if (DST != SRC) \ - EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \ +#define EMIT_mov(DST, SRC) \ + do { \ + if (DST != SRC) \ + EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \ } while (0) static int bpf_size_to_x86_bytes(int bpf_size) @@ -89,7 +83,8 @@ static int bpf_size_to_x86_bytes(int bpf_size) return 0; } -/* list of x86 cond jumps opcodes (. + s8) +/* + * List of x86 cond jumps opcodes (. + s8) * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32) */ #define X86_JB 0x72 @@ -103,38 +98,37 @@ static int bpf_size_to_x86_bytes(int bpf_size) #define X86_JLE 0x7E #define X86_JG 0x7F -#define CHOOSE_LOAD_FUNC(K, func) \ - ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) - -/* pick a register outside of BPF range for JIT internal work */ +/* Pick a register outside of BPF range for JIT internal work */ #define AUX_REG (MAX_BPF_JIT_REG + 1) -/* The following table maps BPF registers to x64 registers. +/* + * The following table maps BPF registers to x86-64 registers. 
* - * x64 register r12 is unused, since if used as base address + * x86-64 register R12 is unused, since if used as base address * register in load/store instructions, it always needs an * extra byte of encoding and is callee saved. * - * r9 caches skb->len - skb->data_len - * r10 caches skb->data, and used for blinding (if enabled) + * Also x86-64 register R9 is unused. x86-64 register R10 is + * used for blinding (if enabled). */ static const int reg2hex[] = { - [BPF_REG_0] = 0, /* rax */ - [BPF_REG_1] = 7, /* rdi */ - [BPF_REG_2] = 6, /* rsi */ - [BPF_REG_3] = 2, /* rdx */ - [BPF_REG_4] = 1, /* rcx */ - [BPF_REG_5] = 0, /* r8 */ - [BPF_REG_6] = 3, /* rbx callee saved */ - [BPF_REG_7] = 5, /* r13 callee saved */ - [BPF_REG_8] = 6, /* r14 callee saved */ - [BPF_REG_9] = 7, /* r15 callee saved */ - [BPF_REG_FP] = 5, /* rbp readonly */ - [BPF_REG_AX] = 2, /* r10 temp register */ - [AUX_REG] = 3, /* r11 temp register */ + [BPF_REG_0] = 0, /* RAX */ + [BPF_REG_1] = 7, /* RDI */ + [BPF_REG_2] = 6, /* RSI */ + [BPF_REG_3] = 2, /* RDX */ + [BPF_REG_4] = 1, /* RCX */ + [BPF_REG_5] = 0, /* R8 */ + [BPF_REG_6] = 3, /* RBX callee saved */ + [BPF_REG_7] = 5, /* R13 callee saved */ + [BPF_REG_8] = 6, /* R14 callee saved */ + [BPF_REG_9] = 7, /* R15 callee saved */ + [BPF_REG_FP] = 5, /* RBP readonly */ + [BPF_REG_AX] = 2, /* R10 temp register */ + [AUX_REG] = 3, /* R11 temp register */ }; -/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15 +/* + * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15 * which need extra byte of encoding. * rax,rcx,...,rbp have simpler encoding */ @@ -153,7 +147,7 @@ static bool is_axreg(u32 reg) return reg == BPF_REG_0; } -/* add modifiers if 'reg' maps to x64 registers r8..r15 */ +/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */ static u8 add_1mod(u8 byte, u32 reg) { if (is_ereg(reg)) @@ -170,13 +164,13 @@ static u8 add_2mod(u8 byte, u32 r1, u32 r2) return byte; } -/* encode 'dst_reg' register into x64 opcode 'byte' */ +/* Encode 'dst_reg' register into x86-64 opcode 'byte' */ static u8 add_1reg(u8 byte, u32 dst_reg) { return byte + reg2hex[dst_reg]; } -/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */ +/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) { return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3); @@ -184,27 +178,24 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) static void jit_fill_hole(void *area, unsigned int size) { - /* fill whole space with int3 instructions */ + /* Fill whole space with INT3 instructions */ memset(area, 0xcc, size); } struct jit_context { - int cleanup_addr; /* epilogue code offset */ - bool seen_ld_abs; - bool seen_ax_reg; + int cleanup_addr; /* Epilogue code offset */ }; -/* maximum number of bytes emitted while JITing one eBPF insn */ +/* Maximum number of bytes emitted while JITing one eBPF insn */ #define BPF_MAX_INSN_SIZE 128 #define BPF_INSN_SAFETY 64 -#define AUX_STACK_SPACE \ - (32 /* space for rbx, r13, r14, r15 */ + \ - 8 /* space for skb_copy_bits() buffer */) +#define AUX_STACK_SPACE 40 /* Space for RBX, R13, R14, R15, tailcnt */ -#define PROLOGUE_SIZE 37 +#define PROLOGUE_SIZE 37 -/* emit x64 prologue code for BPF program and check it's size. +/* + * Emit x86-64 prologue code for BPF program and check its size. 
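To make the reg2hex[] mapping and the add_*mod()/add_*reg() helpers above concrete, here is a worked encoding (an illustration, not code from this patch). EMIT_mov(BPF_REG_6, BPF_REG_1) must produce 'mov rbx,rdi'; reg2hex[BPF_REG_6] is 3 (RBX) and reg2hex[BPF_REG_1] is 7 (RDI), and neither is an extended register:

	add_2mod(0x48, BPF_REG_6, BPF_REG_1);	/* -> 0x48, plain REX.W prefix */
	/* opcode 0x89: mov r/m64, r64 */
	add_2reg(0xC0, BPF_REG_6, BPF_REG_1);	/* -> 0xC0 + 3 + (7 << 3) = 0xFB */

The three bytes 48 89 fb disassemble to 'mov rbx,rdi'. An extended register such as R13 (BPF_REG_7) would additionally set a REX.R/REX.B bit via is_ereg(); for 32-bit operations that REX prefix is the extra encoding byte the is_ereg() comment mentions.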
* bpf_tail_call helper will skip it while jumping into another program */ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf) @@ -212,8 +203,11 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf) u8 *prog = *pprog; int cnt = 0; - EMIT1(0x55); /* push rbp */ - EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */ + /* push rbp */ + EMIT1(0x55); + + /* mov rbp,rsp */ + EMIT3(0x48, 0x89, 0xE5); /* sub rsp, rounded_stack_depth + AUX_STACK_SPACE */ EMIT3_off32(0x48, 0x81, 0xEC, @@ -222,19 +216,8 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf) /* sub rbp, AUX_STACK_SPACE */ EMIT4(0x48, 0x83, 0xED, AUX_STACK_SPACE); - /* all classic BPF filters use R6(rbx) save it */ - /* mov qword ptr [rbp+0],rbx */ EMIT4(0x48, 0x89, 0x5D, 0); - - /* bpf_convert_filter() maps classic BPF register X to R7 and uses R8 - * as temporary, so all tcpdump filters need to spill/fill R7(r13) and - * R8(r14). R9(r15) spill could be made conditional, but there is only - * one 'bpf_error' return path out of helper functions inside bpf_jit.S - * The overhead of extra spill is negligible for any filter other - * than synthetic ones. Therefore not worth adding complexity. - */ - /* mov qword ptr [rbp+8],r13 */ EMIT4(0x4C, 0x89, 0x6D, 8); /* mov qword ptr [rbp+16],r14 */ @@ -243,9 +226,10 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf) EMIT4(0x4C, 0x89, 0x7D, 24); if (!ebpf_from_cbpf) { - /* Clear the tail call counter (tail_call_cnt): for eBPF tail + /* + * Clear the tail call counter (tail_call_cnt): for eBPF tail * calls we need to reset the counter to 0. It's done in two - * instructions, resetting rax register to 0, and moving it + * instructions, resetting RAX register to 0, and moving it * to the counter location. */ @@ -260,7 +244,9 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf) *pprog = prog; } -/* generate the following code: +/* + * Generate the following code: + * * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ... 
* if (index >= array->map.max_entries) * goto out; @@ -278,23 +264,26 @@ static void emit_bpf_tail_call(u8 **pprog) int label1, label2, label3; int cnt = 0; - /* rdi - pointer to ctx + /* + * rdi - pointer to ctx * rsi - pointer to bpf_array * rdx - index in bpf_array */ - /* if (index >= array->map.max_entries) - * goto out; + /* + * if (index >= array->map.max_entries) + * goto out; */ EMIT2(0x89, 0xD2); /* mov edx, edx */ EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ offsetof(struct bpf_array, map.max_entries)); -#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */ +#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* Number of bytes to jump */ EMIT2(X86_JBE, OFFSET1); /* jbe out */ label1 = cnt; - /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) - * goto out; + /* + * if (tail_call_cnt > MAX_TAIL_CALL_CNT) + * goto out; */ EMIT2_off32(0x8B, 0x85, 36); /* mov eax, dword ptr [rbp + 36] */ EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ @@ -308,8 +297,9 @@ static void emit_bpf_tail_call(u8 **pprog) EMIT4_off32(0x48, 0x8B, 0x84, 0xD6, /* mov rax, [rsi + rdx * 8 + offsetof(...)] */ offsetof(struct bpf_array, ptrs)); - /* if (prog == NULL) - * goto out; + /* + * if (prog == NULL) + * goto out; */ EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ #define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE) @@ -321,7 +311,8 @@ static void emit_bpf_tail_call(u8 **pprog) offsetof(struct bpf_prog, bpf_func)); EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE); /* add rax, prologue_size */ - /* now we're ready to jump into next BPF program + /* + * Now we're ready to jump into next BPF program * rdi == ctx (1st arg) * rax == prog->bpf_func + prologue_size */ @@ -334,26 +325,6 @@ static void emit_bpf_tail_call(u8 **pprog) *pprog = prog; } - -static void emit_load_skb_data_hlen(u8 **pprog) -{ - u8 *prog = *pprog; - int cnt = 0; - - /* r9d = skb->len - skb->data_len (headlen) - * r10 = skb->data - */ - /* mov %r9d, off32(%rdi) */ - EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len)); - - /* sub %r9d, off32(%rdi) */ - EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len)); - - /* mov %r10, off32(%rdi) */ - EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data)); - *pprog = prog; -} - static void emit_mov_imm32(u8 **pprog, bool sign_propagate, u32 dst_reg, const u32 imm32) { @@ -361,7 +332,8 @@ static void emit_mov_imm32(u8 **pprog, bool sign_propagate, u8 b1, b2, b3; int cnt = 0; - /* optimization: if imm32 is positive, use 'mov %eax, imm32' + /* + * Optimization: if imm32 is positive, use 'mov %eax, imm32' * (which zero-extends imm32) to save 2 bytes. */ if (sign_propagate && (s32)imm32 < 0) { @@ -373,7 +345,8 @@ static void emit_mov_imm32(u8 **pprog, bool sign_propagate, goto done; } - /* optimization: if imm32 is zero, use 'xor %eax, %eax' + /* + * Optimization: if imm32 is zero, use 'xor %eax, %eax' * to save 3 bytes. */ if (imm32 == 0) { @@ -400,7 +373,8 @@ static void emit_mov_imm64(u8 **pprog, u32 dst_reg, int cnt = 0; if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) { - /* For emitting plain u32, where sign bit must not be + /* + * For emitting plain u32, where sign bit must not be * propagated LLVM tends to load imm64 over mov32 * directly, so save couple of bytes by just doing * 'mov %eax, imm32' instead.
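The two emit_mov_imm32() optimizations above are pure encoding-size tradeoffs. The byte counts, in a small sketch with an illustrative name (not part of the patch):

	/* Bytes needed for 'dst = imm32' on x86-64, ignoring the REX
	 * bytes that extended registers would add. */
	static int mov_imm32_cost(bool sign_propagate, s32 imm32)
	{
		if (sign_propagate && imm32 < 0)
			return 7;	/* REX.W + C7 /0 + imm32, sign-extends */
		if (imm32 == 0)
			return 2;	/* 31 /r: xor reg,reg */
		return 5;		/* B8+rd + imm32: mov r32, zero-extends */
	}

This is where the "save 2 bytes" and "save 3 bytes" remarks come from: a positive immediate uses the 5-byte zero-extending mov instead of the 7-byte sign-extending form, and zero uses the 2-byte xor instead of that 5-byte mov.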
@@ -439,8 +413,6 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, { struct bpf_insn *insn = bpf_prog->insnsi; int insn_cnt = bpf_prog->len; - bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0); - bool seen_ax_reg = ctx->seen_ax_reg | (oldproglen == 0); bool seen_exit = false; u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; int i, cnt = 0; @@ -450,9 +422,6 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, emit_prologue(&prog, bpf_prog->aux->stack_depth, bpf_prog_was_classic(bpf_prog)); - if (seen_ld_abs) - emit_load_skb_data_hlen(&prog); - for (i = 0; i < insn_cnt; i++, insn++) { const s32 imm32 = insn->imm; u32 dst_reg = insn->dst_reg; @@ -460,13 +429,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 b2 = 0, b3 = 0; s64 jmp_offset; u8 jmp_cond; - bool reload_skb_data; int ilen; u8 *func; - if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX) - ctx->seen_ax_reg = seen_ax_reg = true; - switch (insn->code) { /* ALU */ case BPF_ALU | BPF_ADD | BPF_X: @@ -525,7 +490,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, else if (is_ereg(dst_reg)) EMIT1(add_1mod(0x40, dst_reg)); - /* b3 holds 'normal' opcode, b2 short form only valid + /* + * b3 holds 'normal' opcode, b2 short form only valid * in case dst is eax/rax. */ switch (BPF_OP(insn->code)) { @@ -593,7 +559,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, /* mov rax, dst_reg */ EMIT_mov(BPF_REG_0, dst_reg); - /* xor edx, edx + /* + * xor edx, edx * equivalent to 'xor rdx, rdx', but one byte less */ EMIT2(0x31, 0xd2); @@ -655,7 +622,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, } break; } - /* shifts */ + /* Shifts */ case BPF_ALU | BPF_LSH | BPF_K: case BPF_ALU | BPF_RSH | BPF_K: case BPF_ALU | BPF_ARSH | BPF_K: @@ -686,7 +653,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_ALU64 | BPF_RSH | BPF_X: case BPF_ALU64 | BPF_ARSH | BPF_X: - /* check for bad case when dst_reg == rcx */ + /* Check for bad case when dst_reg == rcx */ if (dst_reg == BPF_REG_4) { /* mov r11, dst_reg */ EMIT_mov(AUX_REG, dst_reg); @@ -724,13 +691,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_ALU | BPF_END | BPF_FROM_BE: switch (imm32) { case 16: - /* emit 'ror %ax, 8' to swap lower 2 bytes */ + /* Emit 'ror %ax, 8' to swap lower 2 bytes */ EMIT1(0x66); if (is_ereg(dst_reg)) EMIT1(0x41); EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8); - /* emit 'movzwl eax, ax' */ + /* Emit 'movzwl eax, ax' */ if (is_ereg(dst_reg)) EMIT3(0x45, 0x0F, 0xB7); else @@ -738,7 +705,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); break; case 32: - /* emit 'bswap eax' to swap lower 4 bytes */ + /* Emit 'bswap eax' to swap lower 4 bytes */ if (is_ereg(dst_reg)) EMIT2(0x41, 0x0F); else @@ -746,7 +713,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, EMIT1(add_1reg(0xC8, dst_reg)); break; case 64: - /* emit 'bswap rax' to swap 8 bytes */ + /* Emit 'bswap rax' to swap 8 bytes */ EMIT3(add_1mod(0x48, dst_reg), 0x0F, add_1reg(0xC8, dst_reg)); break; @@ -756,7 +723,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_ALU | BPF_END | BPF_FROM_LE: switch (imm32) { case 16: - /* emit 'movzwl eax, ax' to zero extend 16-bit + /* + * Emit 'movzwl eax, ax' to zero extend 16-bit * into 64 bit */ if (is_ereg(dst_reg)) @@ -766,7 +734,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int 
*addrs, u8 *image, EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); break; case 32: - /* emit 'mov eax, eax' to clear upper 32-bits */ + /* Emit 'mov eax, eax' to clear upper 32-bits */ if (is_ereg(dst_reg)) EMIT1(0x45); EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg)); @@ -809,9 +777,9 @@ st: if (is_imm8(insn->off)) /* STX: *(u8*)(dst_reg + off) = src_reg */ case BPF_STX | BPF_MEM | BPF_B: - /* emit 'mov byte ptr [rax + off], al' */ + /* Emit 'mov byte ptr [rax + off], al' */ if (is_ereg(dst_reg) || is_ereg(src_reg) || - /* have to add extra byte for x86 SIL, DIL regs */ + /* We have to add extra byte for x86 SIL, DIL regs */ src_reg == BPF_REG_1 || src_reg == BPF_REG_2) EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); else @@ -840,25 +808,26 @@ stx: if (is_imm8(insn->off)) /* LDX: dst_reg = *(u8*)(src_reg + off) */ case BPF_LDX | BPF_MEM | BPF_B: - /* emit 'movzx rax, byte ptr [rax + off]' */ + /* Emit 'movzx rax, byte ptr [rax + off]' */ EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6); goto ldx; case BPF_LDX | BPF_MEM | BPF_H: - /* emit 'movzx rax, word ptr [rax + off]' */ + /* Emit 'movzx rax, word ptr [rax + off]' */ EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7); goto ldx; case BPF_LDX | BPF_MEM | BPF_W: - /* emit 'mov eax, dword ptr [rax+0x14]' */ + /* Emit 'mov eax, dword ptr [rax+0x14]' */ if (is_ereg(dst_reg) || is_ereg(src_reg)) EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B); else EMIT1(0x8B); goto ldx; case BPF_LDX | BPF_MEM | BPF_DW: - /* emit 'mov rax, qword ptr [rax+0x14]' */ + /* Emit 'mov rax, qword ptr [rax+0x14]' */ EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B); -ldx: /* if insn->off == 0 we can save one extra byte, but - * special case of x86 r13 which always needs an offset +ldx: /* + * If insn->off == 0 we can save one extra byte, but + * special case of x86 R13 which always needs an offset * is not worth the hassle */ if (is_imm8(insn->off)) @@ -870,7 +839,7 @@ ldx: /* if insn->off == 0 we can save one extra byte, but /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */ case BPF_STX | BPF_XADD | BPF_W: - /* emit 'lock add dword ptr [rax + off], eax' */ + /* Emit 'lock add dword ptr [rax + off], eax' */ if (is_ereg(dst_reg) || is_ereg(src_reg)) EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01); else @@ -889,35 +858,12 @@ xadd: if (is_imm8(insn->off)) case BPF_JMP | BPF_CALL: func = (u8 *) __bpf_call_base + imm32; jmp_offset = func - (image + addrs[i]); - if (seen_ld_abs) { - reload_skb_data = bpf_helper_changes_pkt_data(func); - if (reload_skb_data) { - EMIT1(0x57); /* push %rdi */ - jmp_offset += 22; /* pop, mov, sub, mov */ - } else { - EMIT2(0x41, 0x52); /* push %r10 */ - EMIT2(0x41, 0x51); /* push %r9 */ - /* need to adjust jmp offset, since - * pop %r9, pop %r10 take 4 bytes after call insn - */ - jmp_offset += 4; - } - } if (!imm32 || !is_simm32(jmp_offset)) { - pr_err("unsupported bpf func %d addr %p image %p\n", + pr_err("unsupported BPF func %d addr %p image %p\n", imm32, func, image); return -EINVAL; } EMIT1_off32(0xE8, jmp_offset); - if (seen_ld_abs) { - if (reload_skb_data) { - EMIT1(0x5F); /* pop %rdi */ - emit_load_skb_data_hlen(&prog); - } else { - EMIT2(0x41, 0x59); /* pop %r9 */ - EMIT2(0x41, 0x5A); /* pop %r10 */ - } - } break; case BPF_JMP | BPF_TAIL_CALL: @@ -970,7 +916,7 @@ xadd: if (is_imm8(insn->off)) else EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32); -emit_cond_jmp: /* convert BPF opcode to x86 */ +emit_cond_jmp: /* Convert BPF opcode to x86 */ switch (BPF_OP(insn->code)) { case BPF_JEQ: jmp_cond = X86_JE; @@ -996,22 +942,22 @@ 
emit_cond_jmp: /* convert BPF opcode to x86 */ jmp_cond = X86_JBE; break; case BPF_JSGT: - /* signed '>', GT in x86 */ + /* Signed '>', GT in x86 */ jmp_cond = X86_JG; break; case BPF_JSLT: - /* signed '<', LT in x86 */ + /* Signed '<', LT in x86 */ jmp_cond = X86_JL; break; case BPF_JSGE: - /* signed '>=', GE in x86 */ + /* Signed '>=', GE in x86 */ jmp_cond = X86_JGE; break; case BPF_JSLE: - /* signed '<=', LE in x86 */ + /* Signed '<=', LE in x86 */ jmp_cond = X86_JLE; break; - default: /* to silence gcc warning */ + default: /* to silence GCC warning */ return -EFAULT; } jmp_offset = addrs[i + insn->off] - addrs[i]; @@ -1039,7 +985,7 @@ emit_cond_jmp: /* convert BPF opcode to x86 */ jmp_offset = addrs[i + insn->off] - addrs[i]; if (!jmp_offset) - /* optimize out nop jumps */ + /* Optimize out nop jumps */ break; emit_jmp: if (is_imm8(jmp_offset)) { @@ -1052,66 +998,13 @@ emit_jmp: } break; - case BPF_LD | BPF_IND | BPF_W: - func = sk_load_word; - goto common_load; - case BPF_LD | BPF_ABS | BPF_W: - func = CHOOSE_LOAD_FUNC(imm32, sk_load_word); -common_load: - ctx->seen_ld_abs = seen_ld_abs = true; - jmp_offset = func - (image + addrs[i]); - if (!func || !is_simm32(jmp_offset)) { - pr_err("unsupported bpf func %d addr %p image %p\n", - imm32, func, image); - return -EINVAL; - } - if (BPF_MODE(insn->code) == BPF_ABS) { - /* mov %esi, imm32 */ - EMIT1_off32(0xBE, imm32); - } else { - /* mov %rsi, src_reg */ - EMIT_mov(BPF_REG_2, src_reg); - if (imm32) { - if (is_imm8(imm32)) - /* add %esi, imm8 */ - EMIT3(0x83, 0xC6, imm32); - else - /* add %esi, imm32 */ - EMIT2_off32(0x81, 0xC6, imm32); - } - } - /* skb pointer is in R6 (%rbx), it will be copied into - * %rdi if skb_copy_bits() call is necessary. - * sk_load_* helpers also use %r10 and %r9d. - * See bpf_jit.S - */ - if (seen_ax_reg) - /* r10 = skb->data, mov %r10, off32(%rbx) */ - EMIT3_off32(0x4c, 0x8b, 0x93, - offsetof(struct sk_buff, data)); - EMIT1_off32(0xE8, jmp_offset); /* call */ - break; - - case BPF_LD | BPF_IND | BPF_H: - func = sk_load_half; - goto common_load; - case BPF_LD | BPF_ABS | BPF_H: - func = CHOOSE_LOAD_FUNC(imm32, sk_load_half); - goto common_load; - case BPF_LD | BPF_IND | BPF_B: - func = sk_load_byte; - goto common_load; - case BPF_LD | BPF_ABS | BPF_B: - func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte); - goto common_load; - case BPF_JMP | BPF_EXIT: if (seen_exit) { jmp_offset = ctx->cleanup_addr - addrs[i]; goto emit_jmp; } seen_exit = true; - /* update cleanup_addr */ + /* Update cleanup_addr */ ctx->cleanup_addr = proglen; /* mov rbx, qword ptr [rbp+0] */ EMIT4(0x48, 0x8B, 0x5D, 0); @@ -1129,10 +1022,11 @@ common_load: break; default: - /* By design x64 JIT should support all BPF instructions + /* + * By design x86-64 JIT should support all BPF instructions. * This error will be seen if new instruction was added - * to interpreter, but not to JIT - * or if there is junk in bpf_prog + * to the interpreter, but not to the JIT, or if there is + * junk in bpf_prog. */ pr_err("bpf_jit: unknown opcode %02x\n", insn->code); return -EINVAL; @@ -1184,7 +1078,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) return orig_prog; tmp = bpf_jit_blind_constants(prog); - /* If blinding was requested and we failed during blinding, + /* + * If blinding was requested and we failed during blinding, * we must fall back to the interpreter. 
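Earlier in this hunk, once emit_cond_jmp has picked jmp_cond, the displacement decides between the short and near encodings using the "add 0x10 and an extra 0x0f" rule from the opcode-table comment. Condensed from the surrounding do_jit() context (not code newly introduced by the patch):

	jmp_offset = addrs[i + insn->off] - addrs[i];
	if (is_imm8(jmp_offset)) {
		EMIT2(jmp_cond, jmp_offset);			/* e.g. 7F rel8: jg */
	} else if (is_simm32(jmp_offset)) {
		EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);	/* 0F 8F rel32: jg */
	} else {
		pr_err("cond_jmp gen bug %llx\n", jmp_offset);
		return -EFAULT;
	}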
*/ if (IS_ERR(tmp)) @@ -1218,8 +1113,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) goto out_addrs; } - /* Before first pass, make a rough estimation of addrs[] - * each bpf instruction is translated to less than 64 bytes + /* + * Before first pass, make a rough estimation of addrs[] + * each BPF instruction is translated to less than 64 bytes */ for (proglen = 0, i = 0; i < prog->len; i++) { proglen += 64; @@ -1228,10 +1124,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) ctx.cleanup_addr = proglen; skip_init_addrs: - /* JITed image shrinks with every pass and the loop iterates - * until the image stops shrinking. Very large bpf programs + /* + * JITed image shrinks with every pass and the loop iterates + * until the image stops shrinking. Very large BPF programs * may converge on the last pass. In such case do one more - * pass to emit the final image + * pass to emit the final image. */ for (pass = 0; pass < 20 || image; pass++) { proglen = do_jit(prog, addrs, image, oldproglen, &ctx); diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c new file mode 100644 index 000000000000..0cc04e30adc1 --- /dev/null +++ b/arch/x86/net/bpf_jit_comp32.c @@ -0,0 +1,2419 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Just-In-Time compiler for eBPF filters on IA32 (32bit x86) + * + * Author: Wang YanQing (udknight@gmail.com) + * The code based on code and ideas from: + * Eric Dumazet (eric.dumazet@gmail.com) + * and from: + * Shubham Bansal <illusionist.neo@gmail.com> + */ + +#include <linux/netdevice.h> +#include <linux/filter.h> +#include <linux/if_vlan.h> +#include <asm/cacheflush.h> +#include <asm/set_memory.h> +#include <asm/nospec-branch.h> +#include <linux/bpf.h> + +/* + * eBPF prog stack layout: + * + * high + * original ESP => +-----+ + * | | callee saved registers + * +-----+ + * | ... | eBPF JIT scratch space + * BPF_FP,IA32_EBP => +-----+ + * | ... | eBPF prog stack + * +-----+ + * |RSVD | JIT scratchpad + * current ESP => +-----+ + * | | + * | ... 
| Function call stack + | | + +-----+ + low + * + * The callee saved registers: + * + * high + * original ESP => +------------------+ \ + * | ebp | | + * current EBP => +------------------+ } callee saved registers + * | ebx,esi,edi | | + * +------------------+ / + * low + */ + +static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) +{ + if (len == 1) + *ptr = bytes; + else if (len == 2) + *(u16 *)ptr = bytes; + else { + *(u32 *)ptr = bytes; + barrier(); + } + return ptr + len; +} + +#define EMIT(bytes, len) \ + do { prog = emit_code(prog, bytes, len); cnt += len; } while (0) + +#define EMIT1(b1) EMIT(b1, 1) +#define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) +#define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) +#define EMIT4(b1, b2, b3, b4) \ + EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) + +#define EMIT1_off32(b1, off) \ + do { EMIT1(b1); EMIT(off, 4); } while (0) +#define EMIT2_off32(b1, b2, off) \ + do { EMIT2(b1, b2); EMIT(off, 4); } while (0) +#define EMIT3_off32(b1, b2, b3, off) \ + do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0) +#define EMIT4_off32(b1, b2, b3, b4, off) \ + do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) + +#define jmp_label(label, jmp_insn_len) (label - cnt - jmp_insn_len) + +static bool is_imm8(int value) +{ + return value <= 127 && value >= -128; +} + +static bool is_simm32(s64 value) +{ + return value == (s64) (s32) value; +} + +#define STACK_OFFSET(k) (k) +#define TCALL_CNT (MAX_BPF_JIT_REG + 0) /* Tail Call Count */ + +#define IA32_EAX (0x0) +#define IA32_EBX (0x3) +#define IA32_ECX (0x1) +#define IA32_EDX (0x2) +#define IA32_ESI (0x6) +#define IA32_EDI (0x7) +#define IA32_EBP (0x5) +#define IA32_ESP (0x4) + +/* + * List of x86 cond jumps opcodes (. + s8) + * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32) + */ +#define IA32_JB 0x72 +#define IA32_JAE 0x73 +#define IA32_JE 0x74 +#define IA32_JNE 0x75 +#define IA32_JBE 0x76 +#define IA32_JA 0x77 +#define IA32_JL 0x7C +#define IA32_JGE 0x7D +#define IA32_JLE 0x7E +#define IA32_JG 0x7F + +/* + * Map eBPF registers to IA32 32bit registers or stack scratch space. + * + * 1. All the registers, R0-R10, are mapped to scratch space on stack. + * 2. We need two 64 bit temp registers to do complex operations on eBPF + * registers. + * 3. For performance reasons, BPF_REG_AX, used for blinding constants, is + * mapped to the real hardware register pair IA32_ESI and IA32_EDI. + * + * As the eBPF registers are all 64 bit registers and IA32 has only 32 bit + * registers, we have to map each eBPF register to two IA32 32 bit regs + * or scratch memory space and build each eBPF 64 bit register from those. + * + * We use IA32_EAX, IA32_EDX, IA32_ECX, IA32_EBX as temporary registers.
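In practice that mapping means nearly every eBPF register access becomes a pair of 32-bit stack operations relative to EBP. For example, loading eBPF R1 into the EAX:EDX temporary pair uses the bpf2ia32[] offsets defined below (an illustrative use of the emit helpers that follow, not code from the patch):

	/* mov eax,dword ptr [ebp+8]: low 32 bits of R1 */
	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(8));
	/* mov edx,dword ptr [ebp+12]: high 32 bits of R1 */
	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(12));

Only BPF_REG_AX escapes this round trip, since it is pinned to the ESI:EDI pair.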
+ */ +static const u8 bpf2ia32[][2] = { + /* Return value from in-kernel function, and exit value from eBPF */ + [BPF_REG_0] = {STACK_OFFSET(0), STACK_OFFSET(4)}, + + /* The arguments from eBPF program to in-kernel function */ + /* Stored on stack scratch space */ + [BPF_REG_1] = {STACK_OFFSET(8), STACK_OFFSET(12)}, + [BPF_REG_2] = {STACK_OFFSET(16), STACK_OFFSET(20)}, + [BPF_REG_3] = {STACK_OFFSET(24), STACK_OFFSET(28)}, + [BPF_REG_4] = {STACK_OFFSET(32), STACK_OFFSET(36)}, + [BPF_REG_5] = {STACK_OFFSET(40), STACK_OFFSET(44)}, + + /* Callee saved registers that in-kernel function will preserve */ + /* Stored on stack scratch space */ + [BPF_REG_6] = {STACK_OFFSET(48), STACK_OFFSET(52)}, + [BPF_REG_7] = {STACK_OFFSET(56), STACK_OFFSET(60)}, + [BPF_REG_8] = {STACK_OFFSET(64), STACK_OFFSET(68)}, + [BPF_REG_9] = {STACK_OFFSET(72), STACK_OFFSET(76)}, + + /* Read only Frame Pointer to access Stack */ + [BPF_REG_FP] = {STACK_OFFSET(80), STACK_OFFSET(84)}, + + /* Temporary register for blinding constants. */ + [BPF_REG_AX] = {IA32_ESI, IA32_EDI}, + + /* Tail call count. Stored on stack scratch space. */ + [TCALL_CNT] = {STACK_OFFSET(88), STACK_OFFSET(92)}, +}; + +#define dst_lo dst[0] +#define dst_hi dst[1] +#define src_lo src[0] +#define src_hi src[1] + +#define STACK_ALIGNMENT 8 +/* + * Stack space for BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, + * BPF_REG_5, BPF_REG_6, BPF_REG_7, BPF_REG_8, BPF_REG_9, + * BPF_REG_FP, BPF_REG_AX and Tail call counts. + */ +#define SCRATCH_SIZE 96 + +/* Total stack size used in JITed code */ +#define _STACK_SIZE (stack_depth + SCRATCH_SIZE) + +#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT) + +/* Get the offset of eBPF REGISTERs stored on scratch space. */ +#define STACK_VAR(off) (off) + +/* Encode 'dst_reg' register into IA32 opcode 'byte' */ +static u8 add_1reg(u8 byte, u32 dst_reg) +{ + return byte + dst_reg; +} + +/* Encode 'dst_reg' and 'src_reg' registers into IA32 opcode 'byte' */ +static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) +{ + return byte + dst_reg + (src_reg << 3); +} + +static void jit_fill_hole(void *area, unsigned int size) +{ + /* Fill whole space with int3 instructions */ + memset(area, 0xcc, size); +} + +static inline void emit_ia32_mov_i(const u8 dst, const u32 val, bool dstk, + u8 **pprog) +{ + u8 *prog = *pprog; + int cnt = 0; + + if (dstk) { + if (val == 0) { + /* xor eax,eax */ + EMIT2(0x33, add_2reg(0xC0, IA32_EAX, IA32_EAX)); + /* mov dword ptr [ebp+off],eax */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(dst)); + } else { + EMIT3_off32(0xC7, add_1reg(0x40, IA32_EBP), + STACK_VAR(dst), val); + } + } else { + if (val == 0) + EMIT2(0x33, add_2reg(0xC0, dst, dst)); + else + EMIT2_off32(0xC7, add_1reg(0xC0, dst), + val); + } + *pprog = prog; +} + +/* dst = src (4 bytes) */ +static inline void emit_ia32_mov_r(const u8 dst, const u8 src, bool dstk, + bool sstk, u8 **pprog) +{ + u8 *prog = *pprog; + int cnt = 0; + u8 sreg = sstk ?
IA32_EAX : src; + + if (sstk) + /* mov eax,dword ptr [ebp+off] */ + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(src)); + if (dstk) + /* mov dword ptr [ebp+off],eax */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, sreg), STACK_VAR(dst)); + else + /* mov dst,sreg */ + EMIT2(0x89, add_2reg(0xC0, dst, sreg)); + + *pprog = prog; +} + +/* dst = src */ +static inline void emit_ia32_mov_r64(const bool is64, const u8 dst[], + const u8 src[], bool dstk, + bool sstk, u8 **pprog) +{ + emit_ia32_mov_r(dst_lo, src_lo, dstk, sstk, pprog); + if (is64) + /* complete 8 byte move */ + emit_ia32_mov_r(dst_hi, src_hi, dstk, sstk, pprog); + else + /* zero out high 4 bytes */ + emit_ia32_mov_i(dst_hi, 0, dstk, pprog); +} + +/* Sign extended move */ +static inline void emit_ia32_mov_i64(const bool is64, const u8 dst[], + const u32 val, bool dstk, u8 **pprog) +{ + u32 hi = 0; + + if (is64 && (val & (1<<31))) + hi = (u32)~0; + emit_ia32_mov_i(dst_lo, val, dstk, pprog); + emit_ia32_mov_i(dst_hi, hi, dstk, pprog); +} + +/* + * ALU operation (32 bit) + * dst = dst * src + */ +static inline void emit_ia32_mul_r(const u8 dst, const u8 src, bool dstk, + bool sstk, u8 **pprog) +{ + u8 *prog = *pprog; + int cnt = 0; + u8 sreg = sstk ? IA32_ECX : src; + + if (sstk) + /* mov ecx,dword ptr [ebp+off] */ + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src)); + + if (dstk) + /* mov eax,dword ptr [ebp+off] */ + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst)); + else + /* mov eax,dst */ + EMIT2(0x8B, add_2reg(0xC0, dst, IA32_EAX)); + + + EMIT2(0xF7, add_1reg(0xE0, sreg)); + + if (dstk) + /* mov dword ptr [ebp+off],eax */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(dst)); + else + /* mov dst,eax */ + EMIT2(0x89, add_2reg(0xC0, dst, IA32_EAX)); + + *pprog = prog; +} + +static inline void emit_ia32_to_le_r64(const u8 dst[], s32 val, + bool dstk, u8 **pprog) +{ + u8 *prog = *pprog; + int cnt = 0; + u8 dreg_lo = dstk ? IA32_EAX : dst_lo; + u8 dreg_hi = dstk ? IA32_EDX : dst_hi; + + if (dstk && val != 64) { + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(dst_lo)); + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), + STACK_VAR(dst_hi)); + } + switch (val) { + case 16: + /* + * Emit 'movzwl eax,ax' to zero extend 16-bit + * into 64 bit + */ + EMIT2(0x0F, 0xB7); + EMIT1(add_2reg(0xC0, dreg_lo, dreg_lo)); + /* xor dreg_hi,dreg_hi */ + EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); + break; + case 32: + /* xor dreg_hi,dreg_hi */ + EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); + break; + case 64: + /* nop */ + break; + } + + if (dstk && val != 64) { + /* mov dword ptr [ebp+off],dreg_lo */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo), + STACK_VAR(dst_lo)); + /* mov dword ptr [ebp+off],dreg_hi */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi), + STACK_VAR(dst_hi)); + } + *pprog = prog; +} + +static inline void emit_ia32_to_be_r64(const u8 dst[], s32 val, + bool dstk, u8 **pprog) +{ + u8 *prog = *pprog; + int cnt = 0; + u8 dreg_lo = dstk ? IA32_EAX : dst_lo; + u8 dreg_hi = dstk ? 
IA32_EDX : dst_hi; + + if (dstk) { + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(dst_lo)); + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), + STACK_VAR(dst_hi)); + } + switch (val) { + case 16: + /* Emit 'ror %ax, 8' to swap lower 2 bytes */ + EMIT1(0x66); + EMIT3(0xC1, add_1reg(0xC8, dreg_lo), 8); + + EMIT2(0x0F, 0xB7); + EMIT1(add_2reg(0xC0, dreg_lo, dreg_lo)); + + /* xor dreg_hi,dreg_hi */ + EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); + break; + case 32: + /* Emit 'bswap eax' to swap lower 4 bytes */ + EMIT1(0x0F); + EMIT1(add_1reg(0xC8, dreg_lo)); + + /* xor dreg_hi,dreg_hi */ + EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); + break; + case 64: + /* Emit 'bswap eax' to swap lower 4 bytes */ + EMIT1(0x0F); + EMIT1(add_1reg(0xC8, dreg_lo)); + + /* Emit 'bswap edx' to swap lower 4 bytes */ + EMIT1(0x0F); + EMIT1(add_1reg(0xC8, dreg_hi)); + + /* mov ecx,dreg_hi */ + EMIT2(0x89, add_2reg(0xC0, IA32_ECX, dreg_hi)); + /* mov dreg_hi,dreg_lo */ + EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo)); + /* mov dreg_lo,ecx */ + EMIT2(0x89, add_2reg(0xC0, dreg_lo, IA32_ECX)); + + break; + } + if (dstk) { + /* mov dword ptr [ebp+off],dreg_lo */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo), + STACK_VAR(dst_lo)); + /* mov dword ptr [ebp+off],dreg_hi */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi), + STACK_VAR(dst_hi)); + } + *pprog = prog; +} + +/* + * ALU operation (32 bit) + * dst = dst (div|mod) src + */ +static inline void emit_ia32_div_mod_r(const u8 op, const u8 dst, const u8 src, + bool dstk, bool sstk, u8 **pprog) +{ + u8 *prog = *pprog; + int cnt = 0; + + if (sstk) + /* mov ecx,dword ptr [ebp+off] */ + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), + STACK_VAR(src)); + else if (src != IA32_ECX) + /* mov ecx,src */ + EMIT2(0x8B, add_2reg(0xC0, src, IA32_ECX)); + + if (dstk) + /* mov eax,dword ptr [ebp+off] */ + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(dst)); + else + /* mov eax,dst */ + EMIT2(0x8B, add_2reg(0xC0, dst, IA32_EAX)); + + /* xor edx,edx */ + EMIT2(0x31, add_2reg(0xC0, IA32_EDX, IA32_EDX)); + /* div ecx */ + EMIT2(0xF7, add_1reg(0xF0, IA32_ECX)); + + if (op == BPF_MOD) { + if (dstk) + EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EDX), + STACK_VAR(dst)); + else + EMIT2(0x89, add_2reg(0xC0, dst, IA32_EDX)); + } else { + if (dstk) + EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(dst)); + else + EMIT2(0x89, add_2reg(0xC0, dst, IA32_EAX)); + } + *pprog = prog; +} + +/* + * ALU operation (32 bit) + * dst = dst (shift) src + */ +static inline void emit_ia32_shift_r(const u8 op, const u8 dst, const u8 src, + bool dstk, bool sstk, u8 **pprog) +{ + u8 *prog = *pprog; + int cnt = 0; + u8 dreg = dstk ? 
IA32_EAX : dst; + u8 b2; + + if (dstk) + /* mov eax,dword ptr [ebp+off] */ + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst)); + + if (sstk) + /* mov ecx,dword ptr [ebp+off] */ + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src)); + else if (src != IA32_ECX) + /* mov ecx,src */ + EMIT2(0x8B, add_2reg(0xC0, src, IA32_ECX)); + + switch (op) { + case BPF_LSH: + b2 = 0xE0; break; + case BPF_RSH: + b2 = 0xE8; break; + case BPF_ARSH: + b2 = 0xF8; break; + default: + return; + } + EMIT2(0xD3, add_1reg(b2, dreg)); + + if (dstk) + /* mov dword ptr [ebp+off],dreg */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg), STACK_VAR(dst)); + *pprog = prog; +} + +/* + * ALU operation (32 bit) + * dst = dst (op) src + */ +static inline void emit_ia32_alu_r(const bool is64, const bool hi, const u8 op, + const u8 dst, const u8 src, bool dstk, + bool sstk, u8 **pprog) +{ + u8 *prog = *pprog; + int cnt = 0; + u8 sreg = sstk ? IA32_EAX : src; + u8 dreg = dstk ? IA32_EDX : dst; + + if (sstk) + /* mov eax,dword ptr [ebp+off] */ + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(src)); + + if (dstk) + /* mov eax,dword ptr [ebp+off] */ + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(dst)); + + switch (BPF_OP(op)) { + /* dst = dst + src */ + case BPF_ADD: + if (hi && is64) + EMIT2(0x11, add_2reg(0xC0, dreg, sreg)); + else + EMIT2(0x01, add_2reg(0xC0, dreg, sreg)); + break; + /* dst = dst - src */ + case BPF_SUB: + if (hi && is64) + EMIT2(0x19, add_2reg(0xC0, dreg, sreg)); + else + EMIT2(0x29, add_2reg(0xC0, dreg, sreg)); + break; + /* dst = dst | src */ + case BPF_OR: + EMIT2(0x09, add_2reg(0xC0, dreg, sreg)); + break; + /* dst = dst & src */ + case BPF_AND: + EMIT2(0x21, add_2reg(0xC0, dreg, sreg)); + break; + /* dst = dst ^ src */ + case BPF_XOR: + EMIT2(0x31, add_2reg(0xC0, dreg, sreg)); + break; + } + + if (dstk) + /* mov dword ptr [ebp+off],dreg */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg), + STACK_VAR(dst)); + *pprog = prog; +} + +/* ALU operation (64 bit) */ +static inline void emit_ia32_alu_r64(const bool is64, const u8 op, + const u8 dst[], const u8 src[], + bool dstk, bool sstk, + u8 **pprog) +{ + u8 *prog = *pprog; + + emit_ia32_alu_r(is64, false, op, dst_lo, src_lo, dstk, sstk, &prog); + if (is64) + emit_ia32_alu_r(is64, true, op, dst_hi, src_hi, dstk, sstk, + &prog); + else + emit_ia32_mov_i(dst_hi, 0, dstk, &prog); + *pprog = prog; +} + +/* + * ALU operation (32 bit) + * dst = dst (op) val + */ +static inline void emit_ia32_alu_i(const bool is64, const bool hi, const u8 op, + const u8 dst, const s32 val, bool dstk, + u8 **pprog) +{ + u8 *prog = *pprog; + int cnt = 0; + u8 dreg = dstk ? 
IA32_EAX : dst; + u8 sreg = IA32_EDX; + + if (dstk) + /* mov eax,dword ptr [ebp+off] */ + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst)); + + if (!is_imm8(val)) + /* mov edx,imm32*/ + EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EDX), val); + + switch (op) { + /* dst = dst + val */ + case BPF_ADD: + if (hi && is64) { + if (is_imm8(val)) + EMIT3(0x83, add_1reg(0xD0, dreg), val); + else + EMIT2(0x11, add_2reg(0xC0, dreg, sreg)); + } else { + if (is_imm8(val)) + EMIT3(0x83, add_1reg(0xC0, dreg), val); + else + EMIT2(0x01, add_2reg(0xC0, dreg, sreg)); + } + break; + /* dst = dst - val */ + case BPF_SUB: + if (hi && is64) { + if (is_imm8(val)) + EMIT3(0x83, add_1reg(0xD8, dreg), val); + else + EMIT2(0x19, add_2reg(0xC0, dreg, sreg)); + } else { + if (is_imm8(val)) + EMIT3(0x83, add_1reg(0xE8, dreg), val); + else + EMIT2(0x29, add_2reg(0xC0, dreg, sreg)); + } + break; + /* dst = dst | val */ + case BPF_OR: + if (is_imm8(val)) + EMIT3(0x83, add_1reg(0xC8, dreg), val); + else + EMIT2(0x09, add_2reg(0xC0, dreg, sreg)); + break; + /* dst = dst & val */ + case BPF_AND: + if (is_imm8(val)) + EMIT3(0x83, add_1reg(0xE0, dreg), val); + else + EMIT2(0x21, add_2reg(0xC0, dreg, sreg)); + break; + /* dst = dst ^ val */ + case BPF_XOR: + if (is_imm8(val)) + EMIT3(0x83, add_1reg(0xF0, dreg), val); + else + EMIT2(0x31, add_2reg(0xC0, dreg, sreg)); + break; + case BPF_NEG: + EMIT2(0xF7, add_1reg(0xD8, dreg)); + break; + } + + if (dstk) + /* mov dword ptr [ebp+off],dreg */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg), + STACK_VAR(dst)); + *pprog = prog; +} + +/* ALU operation (64 bit) */ +static inline void emit_ia32_alu_i64(const bool is64, const u8 op, + const u8 dst[], const u32 val, + bool dstk, u8 **pprog) +{ + u8 *prog = *pprog; + u32 hi = 0; + + if (is64 && (val & (1<<31))) + hi = (u32)~0; + + emit_ia32_alu_i(is64, false, op, dst_lo, val, dstk, &prog); + if (is64) + emit_ia32_alu_i(is64, true, op, dst_hi, hi, dstk, &prog); + else + emit_ia32_mov_i(dst_hi, 0, dstk, &prog); + + *pprog = prog; +} + +/* dst = ~dst (64 bit) */ +static inline void emit_ia32_neg64(const u8 dst[], bool dstk, u8 **pprog) +{ + u8 *prog = *pprog; + int cnt = 0; + u8 dreg_lo = dstk ? IA32_EAX : dst_lo; + u8 dreg_hi = dstk ? IA32_EDX : dst_hi; + + if (dstk) { + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(dst_lo)); + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), + STACK_VAR(dst_hi)); + } + + /* xor ecx,ecx */ + EMIT2(0x31, add_2reg(0xC0, IA32_ECX, IA32_ECX)); + /* sub dreg_lo,ecx */ + EMIT2(0x2B, add_2reg(0xC0, dreg_lo, IA32_ECX)); + /* mov dreg_lo,ecx */ + EMIT2(0x89, add_2reg(0xC0, dreg_lo, IA32_ECX)); + + /* xor ecx,ecx */ + EMIT2(0x31, add_2reg(0xC0, IA32_ECX, IA32_ECX)); + /* sbb dreg_hi,ecx */ + EMIT2(0x19, add_2reg(0xC0, dreg_hi, IA32_ECX)); + /* mov dreg_hi,ecx */ + EMIT2(0x89, add_2reg(0xC0, dreg_hi, IA32_ECX)); + + if (dstk) { + /* mov dword ptr [ebp+off],dreg_lo */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo), + STACK_VAR(dst_lo)); + /* mov dword ptr [ebp+off],dreg_hi */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi), + STACK_VAR(dst_hi)); + } + *pprog = prog; +} + +/* dst = dst << src */ +static inline void emit_ia32_lsh_r64(const u8 dst[], const u8 src[], + bool dstk, bool sstk, u8 **pprog) +{ + u8 *prog = *pprog; + int cnt = 0; + static int jmp_label1 = -1; + static int jmp_label2 = -1; + static int jmp_label3 = -1; + u8 dreg_lo = dstk ? IA32_EAX : dst_lo; + u8 dreg_hi = dstk ? 
IA32_EDX : dst_hi; + + if (dstk) { + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(dst_lo)); + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), + STACK_VAR(dst_hi)); + } + + if (sstk) + /* mov ecx,dword ptr [ebp+off] */ + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), + STACK_VAR(src_lo)); + else + /* mov ecx,src_lo */ + EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX)); + + /* cmp ecx,32 */ + EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32); + /* Jumps when >= 32 */ + if (is_imm8(jmp_label(jmp_label1, 2))) + EMIT2(IA32_JAE, jmp_label(jmp_label1, 2)); + else + EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6)); + + /* < 32 */ + /* shl dreg_hi,cl */ + EMIT2(0xD3, add_1reg(0xE0, dreg_hi)); + /* mov ebx,dreg_lo */ + EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX)); + /* shl dreg_lo,cl */ + EMIT2(0xD3, add_1reg(0xE0, dreg_lo)); + + /* IA32_ECX = -IA32_ECX + 32 */ + /* neg ecx */ + EMIT2(0xF7, add_1reg(0xD8, IA32_ECX)); + /* add ecx,32 */ + EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32); + + /* shr ebx,cl */ + EMIT2(0xD3, add_1reg(0xE8, IA32_EBX)); + /* or dreg_hi,ebx */ + EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX)); + + /* goto out; */ + if (is_imm8(jmp_label(jmp_label3, 2))) + EMIT2(0xEB, jmp_label(jmp_label3, 2)); + else + EMIT1_off32(0xE9, jmp_label(jmp_label3, 5)); + + /* >= 32 */ + if (jmp_label1 == -1) + jmp_label1 = cnt; + + /* cmp ecx,64 */ + EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64); + /* Jumps when >= 64 */ + if (is_imm8(jmp_label(jmp_label2, 2))) + EMIT2(IA32_JAE, jmp_label(jmp_label2, 2)); + else + EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6)); + + /* >= 32 && < 64 */ + /* sub ecx,32 */ + EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32); + /* shl dreg_lo,cl */ + EMIT2(0xD3, add_1reg(0xE0, dreg_lo)); + /* mov dreg_hi,dreg_lo */ + EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo)); + + /* xor dreg_lo,dreg_lo */ + EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo)); + + /* goto out; */ + if (is_imm8(jmp_label(jmp_label3, 2))) + EMIT2(0xEB, jmp_label(jmp_label3, 2)); + else + EMIT1_off32(0xE9, jmp_label(jmp_label3, 5)); + + /* >= 64 */ + if (jmp_label2 == -1) + jmp_label2 = cnt; + /* xor dreg_lo,dreg_lo */ + EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo)); + /* xor dreg_hi,dreg_hi */ + EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); + + if (jmp_label3 == -1) + jmp_label3 = cnt; + + if (dstk) { + /* mov dword ptr [ebp+off],dreg_lo */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo), + STACK_VAR(dst_lo)); + /* mov dword ptr [ebp+off],dreg_hi */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi), + STACK_VAR(dst_hi)); + } + /* out: */ + *pprog = prog; +} + +/* dst = dst >> src (signed)*/ +static inline void emit_ia32_arsh_r64(const u8 dst[], const u8 src[], + bool dstk, bool sstk, u8 **pprog) +{ + u8 *prog = *pprog; + int cnt = 0; + static int jmp_label1 = -1; + static int jmp_label2 = -1; + static int jmp_label3 = -1; + u8 dreg_lo = dstk ? IA32_EAX : dst_lo; + u8 dreg_hi = dstk ? 
IA32_EDX : dst_hi; + + if (dstk) { + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(dst_lo)); + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), + STACK_VAR(dst_hi)); + } + + if (sstk) + /* mov ecx,dword ptr [ebp+off] */ + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), + STACK_VAR(src_lo)); + else + /* mov ecx,src_lo */ + EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX)); + + /* cmp ecx,32 */ + EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32); + /* Jumps when >= 32 */ + if (is_imm8(jmp_label(jmp_label1, 2))) + EMIT2(IA32_JAE, jmp_label(jmp_label1, 2)); + else + EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6)); + + /* < 32 */ + /* lshr dreg_lo,cl */ + EMIT2(0xD3, add_1reg(0xE8, dreg_lo)); + /* mov ebx,dreg_hi */ + EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX)); + /* ashr dreg_hi,cl */ + EMIT2(0xD3, add_1reg(0xF8, dreg_hi)); + + /* IA32_ECX = -IA32_ECX + 32 */ + /* neg ecx */ + EMIT2(0xF7, add_1reg(0xD8, IA32_ECX)); + /* add ecx,32 */ + EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32); + + /* shl ebx,cl */ + EMIT2(0xD3, add_1reg(0xE0, IA32_EBX)); + /* or dreg_lo,ebx */ + EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX)); + + /* goto out; */ + if (is_imm8(jmp_label(jmp_label3, 2))) + EMIT2(0xEB, jmp_label(jmp_label3, 2)); + else + EMIT1_off32(0xE9, jmp_label(jmp_label3, 5)); + + /* >= 32 */ + if (jmp_label1 == -1) + jmp_label1 = cnt; + + /* cmp ecx,64 */ + EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64); + /* Jumps when >= 64 */ + if (is_imm8(jmp_label(jmp_label2, 2))) + EMIT2(IA32_JAE, jmp_label(jmp_label2, 2)); + else + EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6)); + + /* >= 32 && < 64 */ + /* sub ecx,32 */ + EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32); + /* ashr dreg_hi,cl */ + EMIT2(0xD3, add_1reg(0xF8, dreg_hi)); + /* mov dreg_lo,dreg_hi */ + EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi)); + + /* ashr dreg_hi,imm8 */ + EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31); + + /* goto out; */ + if (is_imm8(jmp_label(jmp_label3, 2))) + EMIT2(0xEB, jmp_label(jmp_label3, 2)); + else + EMIT1_off32(0xE9, jmp_label(jmp_label3, 5)); + + /* >= 64 */ + if (jmp_label2 == -1) + jmp_label2 = cnt; + /* ashr dreg_hi,imm8 */ + EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31); + /* mov dreg_lo,dreg_hi */ + EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi)); + + if (jmp_label3 == -1) + jmp_label3 = cnt; + + if (dstk) { + /* mov dword ptr [ebp+off],dreg_lo */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo), + STACK_VAR(dst_lo)); + /* mov dword ptr [ebp+off],dreg_hi */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi), + STACK_VAR(dst_hi)); + } + /* out: */ + *pprog = prog; +} + +/* dst = dst >> src */ +static inline void emit_ia32_rsh_r64(const u8 dst[], const u8 src[], bool dstk, + bool sstk, u8 **pprog) +{ + u8 *prog = *pprog; + int cnt = 0; + static int jmp_label1 = -1; + static int jmp_label2 = -1; + static int jmp_label3 = -1; + u8 dreg_lo = dstk ? IA32_EAX : dst_lo; + u8 dreg_hi = dstk ? 
IA32_EDX : dst_hi; + + if (dstk) { + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(dst_lo)); + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), + STACK_VAR(dst_hi)); + } + + if (sstk) + /* mov ecx,dword ptr [ebp+off] */ + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), + STACK_VAR(src_lo)); + else + /* mov ecx,src_lo */ + EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX)); + + /* cmp ecx,32 */ + EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32); + /* Jumps when >= 32 */ + if (is_imm8(jmp_label(jmp_label1, 2))) + EMIT2(IA32_JAE, jmp_label(jmp_label1, 2)); + else + EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6)); + + /* < 32 */ + /* lshr dreg_lo,cl */ + EMIT2(0xD3, add_1reg(0xE8, dreg_lo)); + /* mov ebx,dreg_hi */ + EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX)); + /* shr dreg_hi,cl */ + EMIT2(0xD3, add_1reg(0xE8, dreg_hi)); + + /* IA32_ECX = -IA32_ECX + 32 */ + /* neg ecx */ + EMIT2(0xF7, add_1reg(0xD8, IA32_ECX)); + /* add ecx,32 */ + EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32); + + /* shl ebx,cl */ + EMIT2(0xD3, add_1reg(0xE0, IA32_EBX)); + /* or dreg_lo,ebx */ + EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX)); + + /* goto out; */ + if (is_imm8(jmp_label(jmp_label3, 2))) + EMIT2(0xEB, jmp_label(jmp_label3, 2)); + else + EMIT1_off32(0xE9, jmp_label(jmp_label3, 5)); + + /* >= 32 */ + if (jmp_label1 == -1) + jmp_label1 = cnt; + /* cmp ecx,64 */ + EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64); + /* Jumps when >= 64 */ + if (is_imm8(jmp_label(jmp_label2, 2))) + EMIT2(IA32_JAE, jmp_label(jmp_label2, 2)); + else + EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6)); + + /* >= 32 && < 64 */ + /* sub ecx,32 */ + EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32); + /* shr dreg_hi,cl */ + EMIT2(0xD3, add_1reg(0xE8, dreg_hi)); + /* mov dreg_lo,dreg_hi */ + EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi)); + /* xor dreg_hi,dreg_hi */ + EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); + + /* goto out; */ + if (is_imm8(jmp_label(jmp_label3, 2))) + EMIT2(0xEB, jmp_label(jmp_label3, 2)); + else + EMIT1_off32(0xE9, jmp_label(jmp_label3, 5)); + + /* >= 64 */ + if (jmp_label2 == -1) + jmp_label2 = cnt; + /* xor dreg_lo,dreg_lo */ + EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo)); + /* xor dreg_hi,dreg_hi */ + EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); + + if (jmp_label3 == -1) + jmp_label3 = cnt; + + if (dstk) { + /* mov dword ptr [ebp+off],dreg_lo */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo), + STACK_VAR(dst_lo)); + /* mov dword ptr [ebp+off],dreg_hi */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi), + STACK_VAR(dst_hi)); + } + /* out: */ + *pprog = prog; +} + +/* dst = dst << val */ +static inline void emit_ia32_lsh_i64(const u8 dst[], const u32 val, + bool dstk, u8 **pprog) +{ + u8 *prog = *pprog; + int cnt = 0; + u8 dreg_lo = dstk ? IA32_EAX : dst_lo; + u8 dreg_hi = dstk ? 
IA32_EDX : dst_hi; + + if (dstk) { + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(dst_lo)); + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), + STACK_VAR(dst_hi)); + } + /* Do LSH operation */ + if (val < 32) { + /* shl dreg_hi,imm8 */ + EMIT3(0xC1, add_1reg(0xE0, dreg_hi), val); + /* mov ebx,dreg_lo */ + EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX)); + /* shl dreg_lo,imm8 */ + EMIT3(0xC1, add_1reg(0xE0, dreg_lo), val); + + /* IA32_ECX = 32 - val */ + /* mov ecx,val */ + EMIT2(0xB1, val); + /* movzx ecx,ecx */ + EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX)); + /* neg ecx */ + EMIT2(0xF7, add_1reg(0xD8, IA32_ECX)); + /* add ecx,32 */ + EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32); + + /* shr ebx,cl */ + EMIT2(0xD3, add_1reg(0xE8, IA32_EBX)); + /* or dreg_hi,ebx */ + EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX)); + } else if (val >= 32 && val < 64) { + u32 value = val - 32; + + /* shl dreg_lo,imm8 */ + EMIT3(0xC1, add_1reg(0xE0, dreg_lo), value); + /* mov dreg_hi,dreg_lo */ + EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo)); + /* xor dreg_lo,dreg_lo */ + EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo)); + } else { + /* xor dreg_lo,dreg_lo */ + EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo)); + /* xor dreg_hi,dreg_hi */ + EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); + } + + if (dstk) { + /* mov dword ptr [ebp+off],dreg_lo */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo), + STACK_VAR(dst_lo)); + /* mov dword ptr [ebp+off],dreg_hi */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi), + STACK_VAR(dst_hi)); + } + *pprog = prog; +} + +/* dst = dst >> val */ +static inline void emit_ia32_rsh_i64(const u8 dst[], const u32 val, + bool dstk, u8 **pprog) +{ + u8 *prog = *pprog; + int cnt = 0; + u8 dreg_lo = dstk ? IA32_EAX : dst_lo; + u8 dreg_hi = dstk ? IA32_EDX : dst_hi; + + if (dstk) { + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(dst_lo)); + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), + STACK_VAR(dst_hi)); + } + + /* Do RSH operation */ + if (val < 32) { + /* shr dreg_lo,imm8 */ + EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val); + /* mov ebx,dreg_hi */ + EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX)); + /* shr dreg_hi,imm8 */ + EMIT3(0xC1, add_1reg(0xE8, dreg_hi), val); + + /* IA32_ECX = 32 - val */ + /* mov ecx,val */ + EMIT2(0xB1, val); + /* movzx ecx,ecx */ + EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX)); + /* neg ecx */ + EMIT2(0xF7, add_1reg(0xD8, IA32_ECX)); + /* add ecx,32 */ + EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32); + + /* shl ebx,cl */ + EMIT2(0xD3, add_1reg(0xE0, IA32_EBX)); + /* or dreg_lo,ebx */ + EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX)); + } else if (val >= 32 && val < 64) { + u32 value = val - 32; + + /* shr dreg_hi,imm8 */ + EMIT3(0xC1, add_1reg(0xE8, dreg_hi), value); + /* mov dreg_lo,dreg_hi */ + EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi)); + /* xor dreg_hi,dreg_hi */ + EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); + } else { + /* xor dreg_lo,dreg_lo */ + EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo)); + /* xor dreg_hi,dreg_hi */ + EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); + } + + if (dstk) { + /* mov dword ptr [ebp+off],dreg_lo */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo), + STACK_VAR(dst_lo)); + /* mov dword ptr [ebp+off],dreg_hi */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi), + STACK_VAR(dst_hi)); + } + *pprog = prog; +} + +/* dst = dst >> val (signed) */ +static inline void emit_ia32_arsh_i64(const u8 dst[], const u32 val, + bool dstk, u8 **pprog) +{ + u8 *prog = *pprog; + int cnt = 0; + u8 dreg_lo = dstk ? 
IA32_EAX : dst_lo;
+	u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+
+	if (dstk) {
+		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
+		      STACK_VAR(dst_lo));
+		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
+		      STACK_VAR(dst_hi));
+	}
+	/* Do ARSH operation */
+	if (val < 32) {
+		/* shr dreg_lo,imm8 */
+		EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val);
+		/* mov ebx,dreg_hi */
+		EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
+		/* ashr dreg_hi,imm8 */
+		EMIT3(0xC1, add_1reg(0xF8, dreg_hi), val);
+
+		/* IA32_ECX = 32 - val */
+		/* mov ecx,val */
+		EMIT2(0xB1, val);
+		/* movzx ecx,ecx */
+		EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
+		/* neg ecx */
+		EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
+		/* add ecx,32 */
+		EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
+
+		/* shl ebx,cl */
+		EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
+		/* or dreg_lo,ebx */
+		EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
+	} else if (val >= 32 && val < 64) {
+		u32 value = val - 32;
+
+		/* ashr dreg_hi,imm8 */
+		EMIT3(0xC1, add_1reg(0xF8, dreg_hi), value);
+		/* mov dreg_lo,dreg_hi */
+		EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
+
+		/* ashr dreg_hi,imm8 */
+		EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
+	} else {
+		/* ashr dreg_hi,imm8 */
+		EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
+		/* mov dreg_lo,dreg_hi */
+		EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
+	}
+
+	if (dstk) {
+		/* mov dword ptr [ebp+off],dreg_lo */
+		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
+		      STACK_VAR(dst_lo));
+		/* mov dword ptr [ebp+off],dreg_hi */
+		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
+		      STACK_VAR(dst_hi));
+	}
+	*pprog = prog;
+}
+
+static inline void emit_ia32_mul_r64(const u8 dst[], const u8 src[], bool dstk,
+				     bool sstk, u8 **pprog)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	if (dstk)
+		/* mov eax,dword ptr [ebp+off] */
+		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
+		      STACK_VAR(dst_hi));
+	else
+		/* mov eax,dst_hi */
+		EMIT2(0x8B, add_2reg(0xC0, dst_hi, IA32_EAX));
+
+	if (sstk)
+		/* mul dword ptr [ebp+off] */
+		EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(src_lo));
+	else
+		/* mul src_lo */
+		EMIT2(0xF7, add_1reg(0xE0, src_lo));
+
+	/* mov ecx,eax */
+	EMIT2(0x89, add_2reg(0xC0, IA32_ECX, IA32_EAX));
+
+	if (dstk)
+		/* mov eax,dword ptr [ebp+off] */
+		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
+		      STACK_VAR(dst_lo));
+	else
+		/* mov eax,dst_lo */
+		EMIT2(0x8B, add_2reg(0xC0, dst_lo, IA32_EAX));
+
+	if (sstk)
+		/* mul dword ptr [ebp+off] */
+		EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(src_hi));
+	else
+		/* mul src_hi */
+		EMIT2(0xF7, add_1reg(0xE0, src_hi));
+
+	/* add ecx,eax */
+	EMIT2(0x01, add_2reg(0xC0, IA32_ECX, IA32_EAX));
+
+	if (dstk)
+		/* mov eax,dword ptr [ebp+off] */
+		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
+		      STACK_VAR(dst_lo));
+	else
+		/* mov eax,dst_lo */
+		EMIT2(0x8B, add_2reg(0xC0, dst_lo, IA32_EAX));
+
+	if (sstk)
+		/* mul dword ptr [ebp+off] */
+		EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(src_lo));
+	else
+		/* mul src_lo */
+		EMIT2(0xF7, add_1reg(0xE0, src_lo));
+
+	/* add ecx,edx */
+	EMIT2(0x01, add_2reg(0xC0, IA32_ECX, IA32_EDX));
+
+	if (dstk) {
+		/* mov dword ptr [ebp+off],eax */
+		EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
+		      STACK_VAR(dst_lo));
+		/* mov dword ptr [ebp+off],ecx */
+		EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_ECX),
+		      STACK_VAR(dst_hi));
+	} else {
+		/* mov dst_lo,eax */
+		EMIT2(0x89, add_2reg(0xC0, dst_lo, IA32_EAX));
+		/* mov dst_hi,ecx */
+		EMIT2(0x89, add_2reg(0xC0, dst_hi, IA32_ECX));
+	}
+
+	*pprog = prog;
+}
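+/*
+ * In C terms, the three 32x32 multiplies above compute the low 64 bits
+ * of the product; as a rough sketch (u32/u64 as in <linux/types.h>):
+ *
+ *	u64 mul64(u32 dst_lo, u32 dst_hi, u32 src_lo, u32 src_hi)
+ *	{
+ *		u32 hi = dst_hi * src_lo + dst_lo * src_hi;
+ *
+ *		return (u64)dst_lo * src_lo + ((u64)hi << 32);
+ *	}
+ *
+ * edx:eax of the final mul holds the full dst_lo * src_lo product,
+ * while ecx accumulates the two truncated cross products.
+ */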
+static inline void emit_ia32_mul_i64(const u8 dst[], const u32 val,
+				     bool dstk, u8 **pprog)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+	u32 hi;
+
+	hi = val & (1<<31) ? (u32)~0 : 0;
+	/* mov eax,imm32 */
+	EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EAX), val);
+	if (dstk)
+		/* mul dword ptr [ebp+off] */
+		EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(dst_hi));
+	else
+		/* mul dst_hi */
+		EMIT2(0xF7, add_1reg(0xE0, dst_hi));
+
+	/* mov ecx,eax */
+	EMIT2(0x89, add_2reg(0xC0, IA32_ECX, IA32_EAX));
+
+	/* mov eax,imm32 */
+	EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EAX), hi);
+	if (dstk)
+		/* mul dword ptr [ebp+off] */
+		EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(dst_lo));
+	else
+		/* mul dst_lo */
+		EMIT2(0xF7, add_1reg(0xE0, dst_lo));
+	/* add ecx,eax */
+	EMIT2(0x01, add_2reg(0xC0, IA32_ECX, IA32_EAX));
+
+	/* mov eax,imm32 */
+	EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EAX), val);
+	if (dstk)
+		/* mul dword ptr [ebp+off] */
+		EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(dst_lo));
+	else
+		/* mul dst_lo */
+		EMIT2(0xF7, add_1reg(0xE0, dst_lo));
+
+	/* add ecx,edx */
+	EMIT2(0x01, add_2reg(0xC0, IA32_ECX, IA32_EDX));
+
+	if (dstk) {
+		/* mov dword ptr [ebp+off],eax */
+		EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
+		      STACK_VAR(dst_lo));
+		/* mov dword ptr [ebp+off],ecx */
+		EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_ECX),
+		      STACK_VAR(dst_hi));
+	} else {
+		/* mov dst_lo,eax */
+		EMIT2(0x89, add_2reg(0xC0, dst_lo, IA32_EAX));
+		/* mov dst_hi,ecx */
+		EMIT2(0x89, add_2reg(0xC0, dst_hi, IA32_ECX));
+	}
+
+	*pprog = prog;
+}
+
+static int bpf_size_to_x86_bytes(int bpf_size)
+{
+	if (bpf_size == BPF_W)
+		return 4;
+	else if (bpf_size == BPF_H)
+		return 2;
+	else if (bpf_size == BPF_B)
+		return 1;
+	else if (bpf_size == BPF_DW)
+		return 4; /* imm32 */
+	else
+		return 0;
+}
+
+struct jit_context {
+	int cleanup_addr; /* Epilogue code offset */
+};
+
+/* Maximum number of bytes emitted while JITing one eBPF insn */
+#define BPF_MAX_INSN_SIZE	128
+#define BPF_INSN_SAFETY		64
+
+#define PROLOGUE_SIZE 35
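+/*
+ * PROLOGUE_SIZE is the exact number of bytes emitted by
+ * emit_prologue(): emit_bpf_tail_call() jumps to
+ * prog->bpf_func + PROLOGUE_SIZE to skip the prologue of the next
+ * program, and the BUILD_BUG_ON() in emit_prologue() keeps the
+ * constant in sync with the emitted code.
+ */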
+/*
+ * Emit prologue code for BPF program and check its size.
+ * bpf_tail_call helper will skip it while jumping into another program.
+ */
+static void emit_prologue(u8 **pprog, u32 stack_depth)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+	const u8 *r1 = bpf2ia32[BPF_REG_1];
+	const u8 fplo = bpf2ia32[BPF_REG_FP][0];
+	const u8 fphi = bpf2ia32[BPF_REG_FP][1];
+	const u8 *tcc = bpf2ia32[TCALL_CNT];
+
+	/* push ebp */
+	EMIT1(0x55);
+	/* mov ebp,esp */
+	EMIT2(0x89, 0xE5);
+	/* push edi */
+	EMIT1(0x57);
+	/* push esi */
+	EMIT1(0x56);
+	/* push ebx */
+	EMIT1(0x53);
+
+	/* sub esp,STACK_SIZE */
+	EMIT2_off32(0x81, 0xEC, STACK_SIZE);
+	/* sub ebp,SCRATCH_SIZE+4+12 */
+	EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 16);
+	/* xor ebx,ebx */
+	EMIT2(0x31, add_2reg(0xC0, IA32_EBX, IA32_EBX));
+
+	/* Set up BPF prog stack base register */
+	EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBP), STACK_VAR(fplo));
+	EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(fphi));
+
+	/* Move BPF_CTX (EAX) to BPF_REG_R1 */
+	/* mov dword ptr [ebp+off],eax */
+	EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(r1[0]));
+	EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(r1[1]));
+
+	/* Initialize tail call count */
+	EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(tcc[0]));
+	EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(tcc[1]));
+
+	BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
+	*pprog = prog;
+}
+
+/* Emit epilogue code for BPF program */
+static void emit_epilogue(u8 **pprog, u32 stack_depth)
+{
+	u8 *prog = *pprog;
+	const u8 *r0 = bpf2ia32[BPF_REG_0];
+	int cnt = 0;
+
+	/* mov eax,dword ptr [ebp+off] */
+	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(r0[0]));
+	/* mov edx,dword ptr [ebp+off] */
+	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r0[1]));
+
+	/* add ebp,SCRATCH_SIZE+4+12 */
+	EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 16);
+
+	/* mov ebx,dword ptr [ebp-12] */
+	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), -12);
+	/* mov esi,dword ptr [ebp-8] */
+	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ESI), -8);
+	/* mov edi,dword ptr [ebp-4] */
+	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDI), -4);
+
+	EMIT1(0xC9); /* leave */
+	EMIT1(0xC3); /* ret */
+	*pprog = prog;
+}
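+/*
+ * With only 32-bit registers available, 64-bit compares (the tail call
+ * counter below and the BPF_JMP cases in do_jit()) are done on the two
+ * halves: compare the high words, and fall through to the low-word
+ * compare only when they are equal.  For the unsigned case the pattern
+ * is equivalent to this C sketch:
+ *
+ *	bool gt64(u32 a_lo, u32 a_hi, u32 b_lo, u32 b_hi)
+ *	{
+ *		return (a_hi != b_hi) ? (a_hi > b_hi) : (a_lo > b_lo);
+ *	}
+ */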
+/*
+ * Generate the following code:
+ * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
+ *   if (index >= array->map.max_entries)
+ *     goto out;
+ *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
+ *     goto out;
+ *   prog = array->ptrs[index];
+ *   if (prog == NULL)
+ *     goto out;
+ *   goto *(prog->bpf_func + prologue_size);
+ * out:
+ */
+static void emit_bpf_tail_call(u8 **pprog)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+	const u8 *r1 = bpf2ia32[BPF_REG_1];
+	const u8 *r2 = bpf2ia32[BPF_REG_2];
+	const u8 *r3 = bpf2ia32[BPF_REG_3];
+	const u8 *tcc = bpf2ia32[TCALL_CNT];
+	u32 lo, hi;
+	static int jmp_label1 = -1;
+
+	/*
+	 * if (index >= array->map.max_entries)
+	 *	goto out;
+	 */
+	/* mov eax,dword ptr [ebp+off] */
+	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(r2[0]));
+	/* mov edx,dword ptr [ebp+off] */
+	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r3[0]));
+
+	/* cmp dword ptr [eax+off],edx */
+	EMIT3(0x39, add_2reg(0x40, IA32_EAX, IA32_EDX),
+	      offsetof(struct bpf_array, map.max_entries));
+	/* jbe out */
+	EMIT2(IA32_JBE, jmp_label(jmp_label1, 2));
+
+	/*
+	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
+	 *	goto out;
+	 */
+	lo = (u32)MAX_TAIL_CALL_CNT;
+	hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
+	/* mov ecx,dword ptr [ebp+off] */
+	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(tcc[0]));
+	/* mov ebx,dword ptr [ebp+off] */
+	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(tcc[1]));
+
+	/* cmp ebx,hi */
+	EMIT3(0x83, add_1reg(0xF8, IA32_EBX), hi);
+	EMIT2(IA32_JNE, 3);
+	/* cmp ecx,lo */
+	EMIT3(0x83, add_1reg(0xF8, IA32_ECX), lo);
+
+	/* jae out */
+	EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
+
+	/* add ecx,0x1 */
+	EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 0x01);
+	/* adc ebx,0x0 */
+	EMIT3(0x83, add_1reg(0xD0, IA32_EBX), 0x00);
+
+	/* mov dword ptr [ebp+off],ecx */
+	EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(tcc[0]));
+	/* mov dword ptr [ebp+off],ebx */
+	EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(tcc[1]));
+
+	/* prog = array->ptrs[index]; */
+	/* mov edx, [eax + edx * 4 + offsetof(...)] */
+	EMIT3_off32(0x8B, 0x94, 0x90, offsetof(struct bpf_array, ptrs));
+
+	/*
+	 * if (prog == NULL)
+	 *	goto out;
+	 */
+	/* test edx,edx */
+	EMIT2(0x85, add_2reg(0xC0, IA32_EDX, IA32_EDX));
+	/* je out */
+	EMIT2(IA32_JE, jmp_label(jmp_label1, 2));
+
+	/* goto *(prog->bpf_func + prologue_size); */
+	/* mov edx, dword ptr [edx + offsetof(struct bpf_prog, bpf_func)] */
+	EMIT3(0x8B, add_2reg(0x40, IA32_EDX, IA32_EDX),
+	      offsetof(struct bpf_prog, bpf_func));
+	/* add edx,prologue_size */
+	EMIT3(0x83, add_1reg(0xC0, IA32_EDX), PROLOGUE_SIZE);
+
+	/* mov eax,dword ptr [ebp+off] */
+	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(r1[0]));
+
+	/*
+	 * Now we're ready to jump into next BPF program:
+	 * eax == ctx (1st arg)
+	 * edx == prog->bpf_func + prologue_size
+	 */
+	RETPOLINE_EDX_BPF_JIT();
+
+	if (jmp_label1 == -1)
+		jmp_label1 = cnt;
+
+	/* out: */
+	*pprog = prog;
+}
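+/*
+ * Helper calls (BPF_JMP | BPF_CALL in do_jit() below) follow the
+ * kernel's 32-bit -mregparm=3 convention: the 64-bit R1 travels in
+ * eax:edx, while R2..R5 are pushed as four 64-bit values via
+ * emit_push_r64(), hence the "add esp,32" after the call.  Each push
+ * emits the high word first so the low word lands at the lower
+ * address, forming a little-endian u64 on the stack.
+ */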
+/* Push the 64-bit source register pair onto the stack. */
+static inline void emit_push_r64(const u8 src[], u8 **pprog)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	/* mov ecx,dword ptr [ebp+off] */
+	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src_hi));
+	/* push ecx */
+	EMIT1(0x51);
+
+	/* mov ecx,dword ptr [ebp+off] */
+	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src_lo));
+	/* push ecx */
+	EMIT1(0x51);
+
+	*pprog = prog;
+}
+
+static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+		  int oldproglen, struct jit_context *ctx)
+{
+	struct bpf_insn *insn = bpf_prog->insnsi;
+	int insn_cnt = bpf_prog->len;
+	bool seen_exit = false;
+	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
+	int i, cnt = 0;
+	int proglen = 0;
+	u8 *prog = temp;
+
+	emit_prologue(&prog, bpf_prog->aux->stack_depth);
+
+	for (i = 0; i < insn_cnt; i++, insn++) {
+		const s32 imm32 = insn->imm;
+		const bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
+		/* Operands other than BPF_REG_AX live in stack slots */
+		const bool dstk = insn->dst_reg != BPF_REG_AX;
+		const bool sstk = insn->src_reg != BPF_REG_AX;
+		const u8 code = insn->code;
+		const u8 *dst = bpf2ia32[insn->dst_reg];
+		const u8 *src = bpf2ia32[insn->src_reg];
+		const u8 *r0 = bpf2ia32[BPF_REG_0];
+		s64 jmp_offset;
+		u8 jmp_cond;
+		int ilen;
+		u8 *func;
+
+		switch (code) {
+		/* ALU operations */
+		/* dst = src */
+		case BPF_ALU | BPF_MOV | BPF_K:
+		case BPF_ALU | BPF_MOV | BPF_X:
+		case BPF_ALU64 | BPF_MOV | BPF_K:
+		case BPF_ALU64 | BPF_MOV | BPF_X:
+			switch (BPF_SRC(code)) {
+			case BPF_X:
+				emit_ia32_mov_r64(is64, dst, src, dstk,
+						  sstk, &prog);
+				break;
+			case BPF_K:
+				/* Sign-extend immediate value to dst reg */
+				emit_ia32_mov_i64(is64, dst, imm32,
+						  dstk, &prog);
+				break;
+			}
+			break;
+		/* dst = dst + src/imm */
+		/* dst = dst - src/imm */
+		/* dst = dst | src/imm */
+		/* dst = dst & src/imm */
+		/* dst = dst ^ src/imm */
+		/* dst = dst * src/imm */
+		/* dst = dst << src */
+		/* dst = dst >> src */
+		case BPF_ALU | BPF_ADD | BPF_K:
+		case BPF_ALU | BPF_ADD | BPF_X:
+		case BPF_ALU | BPF_SUB | BPF_K:
+		case BPF_ALU | BPF_SUB | BPF_X:
+		case BPF_ALU | BPF_OR | BPF_K:
+		case BPF_ALU | BPF_OR | BPF_X:
+		case BPF_ALU | BPF_AND | BPF_K:
+		case BPF_ALU | BPF_AND | BPF_X:
+		case BPF_ALU | BPF_XOR | BPF_K:
+		case BPF_ALU | BPF_XOR | BPF_X:
+		case BPF_ALU64 | BPF_ADD | BPF_K:
+		case BPF_ALU64 | BPF_ADD | BPF_X:
+		case BPF_ALU64 | BPF_SUB | BPF_K:
+		case BPF_ALU64 | BPF_SUB | BPF_X:
+		case BPF_ALU64 | BPF_OR | BPF_K:
+		case BPF_ALU64 | BPF_OR | BPF_X:
+		case BPF_ALU64 | BPF_AND | BPF_K:
+		case BPF_ALU64 | BPF_AND | BPF_X:
+		case BPF_ALU64 | BPF_XOR | BPF_K:
+		case BPF_ALU64 | BPF_XOR | BPF_X:
+			switch (BPF_SRC(code)) {
+			case BPF_X:
+				emit_ia32_alu_r64(is64, BPF_OP(code), dst,
+						  src, dstk, sstk, &prog);
+				break;
+			case BPF_K:
+				emit_ia32_alu_i64(is64, BPF_OP(code), dst,
+						  imm32, dstk, &prog);
+				break;
+			}
+			break;
+		case BPF_ALU | BPF_MUL | BPF_K:
+		case BPF_ALU | BPF_MUL | BPF_X:
+			switch (BPF_SRC(code)) {
+			case BPF_X:
+				emit_ia32_mul_r(dst_lo, src_lo, dstk,
+						sstk, &prog);
+				break;
+			case BPF_K:
+				/* mov ecx,imm32 */
+				EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX),
+					    imm32);
+				emit_ia32_mul_r(dst_lo, IA32_ECX, dstk,
+						false, &prog);
+				break;
+			}
+			emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
+			break;
+		case BPF_ALU | BPF_LSH | BPF_X:
+		case BPF_ALU | BPF_RSH | BPF_X:
+		case BPF_ALU | BPF_ARSH | BPF_K:
+		case BPF_ALU | BPF_ARSH | BPF_X:
+			switch (BPF_SRC(code)) {
+			case BPF_X:
+				emit_ia32_shift_r(BPF_OP(code), dst_lo, src_lo,
+						  dstk, sstk, &prog);
+				break;
+			case BPF_K:
+				/* mov ecx,imm32 */
+				EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX),
+					    imm32);
+				emit_ia32_shift_r(BPF_OP(code), dst_lo,
+						  IA32_ECX, dstk, false,
+						  &prog);
+				break;
+			}
+			emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
+			break;
+		/* dst = dst / src(imm) */
+		/* dst = dst % src(imm) */
+		case BPF_ALU | BPF_DIV | BPF_K:
+		case BPF_ALU | BPF_DIV | BPF_X:
+		case BPF_ALU | BPF_MOD | BPF_K:
+		case BPF_ALU | BPF_MOD | BPF_X:
+			switch (BPF_SRC(code)) {
+			case BPF_X:
+				emit_ia32_div_mod_r(BPF_OP(code), dst_lo,
+						    src_lo, dstk, sstk, &prog);
+				break;
+			case BPF_K:
+				/* mov ecx,imm32 */
+				EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX),
+					    imm32);
+				emit_ia32_div_mod_r(BPF_OP(code), dst_lo,
+						    IA32_ECX, dstk, false,
+						    &prog);
+				break;
+			}
+			emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
+			break;
+		case BPF_ALU64 | BPF_DIV | BPF_K:
+		case BPF_ALU64 | BPF_DIV | BPF_X:
+		case BPF_ALU64 | BPF_MOD | BPF_K:
+		case BPF_ALU64 | BPF_MOD | BPF_X:
+			goto notyet;
+		/* dst = dst >> imm */
+		/* dst = dst << imm */
+		case BPF_ALU | BPF_RSH | BPF_K:
+		case BPF_ALU | BPF_LSH | BPF_K:
+			if (unlikely(imm32 > 31))
+				return -EINVAL;
+			/* mov ecx,imm32 */
+			EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
+			emit_ia32_shift_r(BPF_OP(code), dst_lo, IA32_ECX, dstk,
+					  false, &prog);
+			emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
+			break;
+		/* dst = dst << imm */
+		case BPF_ALU64 | BPF_LSH | BPF_K:
+			if (unlikely(imm32 > 63))
+				return -EINVAL;
+			emit_ia32_lsh_i64(dst, imm32, dstk, &prog);
+			break;
+		/* dst = dst >> imm */
+		case BPF_ALU64 | BPF_RSH | BPF_K:
+			if (unlikely(imm32 > 63))
+				return -EINVAL;
+			emit_ia32_rsh_i64(dst, imm32, dstk, &prog);
+			break;
+		/* dst = dst << src */
+		case BPF_ALU64 | BPF_LSH | BPF_X:
+			emit_ia32_lsh_r64(dst, src, dstk, sstk, &prog);
+			break;
+		/* dst = dst >> src */
+		case BPF_ALU64 | BPF_RSH | BPF_X:
+			emit_ia32_rsh_r64(dst, src, dstk, sstk, &prog);
+			break;
+		/* dst = dst >> src (signed) */
+		case BPF_ALU64 | BPF_ARSH | BPF_X:
+			emit_ia32_arsh_r64(dst, src, dstk, sstk, &prog);
+			break;
+		/* dst = dst >> imm (signed) */
+		case BPF_ALU64 | BPF_ARSH | BPF_K:
+			if (unlikely(imm32 > 63))
+				return -EINVAL;
+			emit_ia32_arsh_i64(dst, imm32, dstk, &prog);
+			break;
+		/* dst = -dst */
+		case BPF_ALU | BPF_NEG:
+			emit_ia32_alu_i(is64, false, BPF_OP(code),
+					dst_lo, 0, dstk, &prog);
+			emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
+			break;
+		/* dst = -dst (64 bit) */
+		case BPF_ALU64 | BPF_NEG:
+			emit_ia32_neg64(dst, dstk, &prog);
+			break;
+		/* dst = dst * src/imm */
+		case BPF_ALU64 | BPF_MUL | BPF_X:
+		case BPF_ALU64 | BPF_MUL | BPF_K:
+			switch (BPF_SRC(code)) {
+			case BPF_X:
+				emit_ia32_mul_r64(dst, src, dstk, sstk, &prog);
+				break;
+			case BPF_K:
+				emit_ia32_mul_i64(dst, imm32, dstk, &prog);
+				break;
+			}
+			break;
+		/* dst = htole(dst) */
+		case BPF_ALU | BPF_END | BPF_FROM_LE:
+			emit_ia32_to_le_r64(dst, imm32, dstk, &prog);
+			break;
+		/* dst = htobe(dst) */
+		case BPF_ALU | BPF_END | BPF_FROM_BE:
+			emit_ia32_to_be_r64(dst, imm32, dstk, &prog);
+			break;
+		/* dst = imm64 */
+		case BPF_LD | BPF_IMM | BPF_DW: {
+			s32 hi, lo = imm32;
+
+			hi = insn[1].imm;
+			emit_ia32_mov_i(dst_lo, lo, dstk, &prog);
+			emit_ia32_mov_i(dst_hi, hi, dstk, &prog);
+			insn++;
+			i++;
+			break;
+		}
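+		/*
+		 * The memory cases below split every 64-bit (BPF_DW)
+		 * access into two 32-bit accesses at off and off+4.
+		 * For BPF_ST the stored imm32 is sign-extended, i.e.
+		 * roughly:
+		 *
+		 *	*(u32 *)(dst + off)     = imm32;
+		 *	*(u32 *)(dst + off + 4) = (imm32 & (1U << 31)) ?
+		 *				  ~0U : 0;
+		 */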
+		/* ST: *(u8*)(dst_reg + off) = imm */
+		case BPF_ST | BPF_MEM | BPF_H:
+		case BPF_ST | BPF_MEM | BPF_B:
+		case BPF_ST | BPF_MEM | BPF_W:
+		case BPF_ST | BPF_MEM | BPF_DW:
+			if (dstk)
+				/* mov eax,dword ptr [ebp+off] */
+				EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
+				      STACK_VAR(dst_lo));
+			else
+				/* mov eax,dst_lo */
+				EMIT2(0x8B, add_2reg(0xC0, dst_lo, IA32_EAX));
+
+			switch (BPF_SIZE(code)) {
+			case BPF_B:
+				EMIT(0xC6, 1); break;
+			case BPF_H:
+				EMIT2(0x66, 0xC7); break;
+			case BPF_W:
+			case BPF_DW:
+				EMIT(0xC7, 1); break;
+			}
+
+			if (is_imm8(insn->off))
+				EMIT2(add_1reg(0x40, IA32_EAX), insn->off);
+			else
+				EMIT1_off32(add_1reg(0x80, IA32_EAX),
+					    insn->off);
+			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(code)));
+
+			if (BPF_SIZE(code) == BPF_DW) {
+				u32 hi;
+
+				hi = imm32 & (1<<31) ? (u32)~0 : 0;
+				EMIT2_off32(0xC7, add_1reg(0x80, IA32_EAX),
+					    insn->off + 4);
+				EMIT(hi, 4);
+			}
+			break;
+
+		/* STX: *(u8*)(dst_reg + off) = src_reg */
+		case BPF_STX | BPF_MEM | BPF_B:
+		case BPF_STX | BPF_MEM | BPF_H:
+		case BPF_STX | BPF_MEM | BPF_W:
+		case BPF_STX | BPF_MEM | BPF_DW:
+			if (dstk)
+				/* mov eax,dword ptr [ebp+off] */
+				EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
+				      STACK_VAR(dst_lo));
+			else
+				/* mov eax,dst_lo */
+				EMIT2(0x8B, add_2reg(0xC0, dst_lo, IA32_EAX));
+
+			if (sstk)
+				/* mov edx,dword ptr [ebp+off] */
+				EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
+				      STACK_VAR(src_lo));
+			else
+				/* mov edx,src_lo */
+				EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_EDX));
+
+			switch (BPF_SIZE(code)) {
+			case BPF_B:
+				EMIT(0x88, 1); break;
+			case BPF_H:
+				EMIT2(0x66, 0x89); break;
+			case BPF_W:
+			case BPF_DW:
+				EMIT(0x89, 1); break;
+			}
+
+			if (is_imm8(insn->off))
+				EMIT2(add_2reg(0x40, IA32_EAX, IA32_EDX),
+				      insn->off);
+			else
+				EMIT1_off32(add_2reg(0x80, IA32_EAX, IA32_EDX),
+					    insn->off);
+
+			if (BPF_SIZE(code) == BPF_DW) {
+				if (sstk)
+					/* mov edx,dword ptr [ebp+off] */
+					EMIT3(0x8B, add_2reg(0x40, IA32_EBP,
+							     IA32_EDX),
+					      STACK_VAR(src_hi));
+				else
+					/* mov edx,src_hi */
+					EMIT2(0x8B, add_2reg(0xC0, src_hi,
+							     IA32_EDX));
+				EMIT1(0x89);
+				if (is_imm8(insn->off + 4)) {
+					EMIT2(add_2reg(0x40, IA32_EAX,
+						       IA32_EDX),
+					      insn->off + 4);
+				} else {
+					EMIT1(add_2reg(0x80, IA32_EAX,
+						       IA32_EDX));
+					EMIT(insn->off + 4, 4);
+				}
+			}
+			break;
+
+		/* LDX: dst_reg = *(u8*)(src_reg + off) */
+		case BPF_LDX | BPF_MEM | BPF_B:
+		case BPF_LDX | BPF_MEM | BPF_H:
+		case BPF_LDX | BPF_MEM | BPF_W:
+		case BPF_LDX | BPF_MEM | BPF_DW:
+			if (sstk)
+				/* mov eax,dword ptr [ebp+off] */
+				EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
+				      STACK_VAR(src_lo));
+			else
+				/* mov eax,src_lo */
+				EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_EAX));
+
+			switch (BPF_SIZE(code)) {
+			case BPF_B:
+				EMIT2(0x0F, 0xB6); break;
+			case BPF_H:
+				EMIT2(0x0F, 0xB7); break;
+			case BPF_W:
+			case BPF_DW:
+				EMIT(0x8B, 1); break;
+			}
+
+			if (is_imm8(insn->off))
+				EMIT2(add_2reg(0x40, IA32_EAX, IA32_EDX),
+				      insn->off);
+			else
+				EMIT1_off32(add_2reg(0x80, IA32_EAX, IA32_EDX),
+					    insn->off);
+
+			if (dstk)
+				/* mov dword ptr [ebp+off],edx */
+				EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EDX),
+				      STACK_VAR(dst_lo));
+			else
+				/* mov dst_lo,edx */
+				EMIT2(0x89, add_2reg(0xC0, dst_lo, IA32_EDX));
+			switch (BPF_SIZE(code)) {
+			case BPF_B:
+			case BPF_H:
+			case BPF_W:
+				/* Zero-extend: clear the upper half of dst */
+				if (dstk) {
+					EMIT3(0xC7, add_1reg(0x40, IA32_EBP),
+					      STACK_VAR(dst_hi));
+					EMIT(0x0, 4);
+				} else {
+					/* mov dst_hi,0 */
+					EMIT2_off32(0xC7,
+						    add_1reg(0xC0, dst_hi), 0);
+				}
+				break;
+			case BPF_DW:
+				EMIT2_off32(0x8B,
+					    add_2reg(0x80, IA32_EAX, IA32_EDX),
+					    insn->off + 4);
+				if (dstk)
+					EMIT3(0x89,
+					      add_2reg(0x40, IA32_EBP,
+						       IA32_EDX),
+					      STACK_VAR(dst_hi));
+				else
+					EMIT2(0x89,
+					      add_2reg(0xC0, dst_hi, IA32_EDX));
+				break;
+			default:
+				break;
+			}
+			break;
+		/* call */
+		case BPF_JMP | BPF_CALL:
+		{
+			const u8 *r1 = bpf2ia32[BPF_REG_1];
+			const u8 *r2 = bpf2ia32[BPF_REG_2];
+			const u8 *r3 = bpf2ia32[BPF_REG_3];
+			const u8 *r4 = bpf2ia32[BPF_REG_4];
+			const u8 *r5 = bpf2ia32[BPF_REG_5];
+
+			if (insn->src_reg ==
BPF_PSEUDO_CALL) + goto notyet; + + func = (u8 *) __bpf_call_base + imm32; + jmp_offset = func - (image + addrs[i]); + + if (!imm32 || !is_simm32(jmp_offset)) { + pr_err("unsupported BPF func %d addr %p image %p\n", + imm32, func, image); + return -EINVAL; + } + + /* mov eax,dword ptr [ebp+off] */ + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(r1[0])); + /* mov edx,dword ptr [ebp+off] */ + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), + STACK_VAR(r1[1])); + + emit_push_r64(r5, &prog); + emit_push_r64(r4, &prog); + emit_push_r64(r3, &prog); + emit_push_r64(r2, &prog); + + EMIT1_off32(0xE8, jmp_offset + 9); + + /* mov dword ptr [ebp+off],eax */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(r0[0])); + /* mov dword ptr [ebp+off],edx */ + EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EDX), + STACK_VAR(r0[1])); + + /* add esp,32 */ + EMIT3(0x83, add_1reg(0xC0, IA32_ESP), 32); + break; + } + case BPF_JMP | BPF_TAIL_CALL: + emit_bpf_tail_call(&prog); + break; + + /* cond jump */ + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: { + u8 dreg_lo = dstk ? IA32_EAX : dst_lo; + u8 dreg_hi = dstk ? IA32_EDX : dst_hi; + u8 sreg_lo = sstk ? IA32_ECX : src_lo; + u8 sreg_hi = sstk ? IA32_EBX : src_hi; + + if (dstk) { + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(dst_lo)); + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), + STACK_VAR(dst_hi)); + } + + if (sstk) { + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), + STACK_VAR(src_lo)); + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), + STACK_VAR(src_hi)); + } + + /* cmp dreg_hi,sreg_hi */ + EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi)); + EMIT2(IA32_JNE, 2); + /* cmp dreg_lo,sreg_lo */ + EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo)); + goto emit_cond_jmp; + } + case BPF_JMP | BPF_JSET | BPF_X: { + u8 dreg_lo = dstk ? IA32_EAX : dst_lo; + u8 dreg_hi = dstk ? IA32_EDX : dst_hi; + u8 sreg_lo = sstk ? IA32_ECX : src_lo; + u8 sreg_hi = sstk ? IA32_EBX : src_hi; + + if (dstk) { + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(dst_lo)); + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), + STACK_VAR(dst_hi)); + } + + if (sstk) { + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), + STACK_VAR(src_lo)); + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), + STACK_VAR(src_hi)); + } + /* and dreg_lo,sreg_lo */ + EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo)); + /* and dreg_hi,sreg_hi */ + EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi)); + /* or dreg_lo,dreg_hi */ + EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi)); + goto emit_cond_jmp; + } + case BPF_JMP | BPF_JSET | BPF_K: { + u32 hi; + u8 dreg_lo = dstk ? IA32_EAX : dst_lo; + u8 dreg_hi = dstk ? IA32_EDX : dst_hi; + u8 sreg_lo = IA32_ECX; + u8 sreg_hi = IA32_EBX; + + if (dstk) { + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(dst_lo)); + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), + STACK_VAR(dst_hi)); + } + hi = imm32 & (1<<31) ? 
(u32)~0 : 0; + + /* mov ecx,imm32 */ + EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32); + /* mov ebx,imm32 */ + EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi); + + /* and dreg_lo,sreg_lo */ + EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo)); + /* and dreg_hi,sreg_hi */ + EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi)); + /* or dreg_lo,dreg_hi */ + EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi)); + goto emit_cond_jmp; + } + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: { + u32 hi; + u8 dreg_lo = dstk ? IA32_EAX : dst_lo; + u8 dreg_hi = dstk ? IA32_EDX : dst_hi; + u8 sreg_lo = IA32_ECX; + u8 sreg_hi = IA32_EBX; + + if (dstk) { + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(dst_lo)); + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), + STACK_VAR(dst_hi)); + } + + hi = imm32 & (1<<31) ? (u32)~0 : 0; + /* mov ecx,imm32 */ + EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32); + /* mov ebx,imm32 */ + EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi); + + /* cmp dreg_hi,sreg_hi */ + EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi)); + EMIT2(IA32_JNE, 2); + /* cmp dreg_lo,sreg_lo */ + EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo)); + +emit_cond_jmp: /* Convert BPF opcode to x86 */ + switch (BPF_OP(code)) { + case BPF_JEQ: + jmp_cond = IA32_JE; + break; + case BPF_JSET: + case BPF_JNE: + jmp_cond = IA32_JNE; + break; + case BPF_JGT: + /* GT is unsigned '>', JA in x86 */ + jmp_cond = IA32_JA; + break; + case BPF_JLT: + /* LT is unsigned '<', JB in x86 */ + jmp_cond = IA32_JB; + break; + case BPF_JGE: + /* GE is unsigned '>=', JAE in x86 */ + jmp_cond = IA32_JAE; + break; + case BPF_JLE: + /* LE is unsigned '<=', JBE in x86 */ + jmp_cond = IA32_JBE; + break; + case BPF_JSGT: + /* Signed '>', GT in x86 */ + jmp_cond = IA32_JG; + break; + case BPF_JSLT: + /* Signed '<', LT in x86 */ + jmp_cond = IA32_JL; + break; + case BPF_JSGE: + /* Signed '>=', GE in x86 */ + jmp_cond = IA32_JGE; + break; + case BPF_JSLE: + /* Signed '<=', LE in x86 */ + jmp_cond = IA32_JLE; + break; + default: /* to silence GCC warning */ + return -EFAULT; + } + jmp_offset = addrs[i + insn->off] - addrs[i]; + if (is_imm8(jmp_offset)) { + EMIT2(jmp_cond, jmp_offset); + } else if (is_simm32(jmp_offset)) { + EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); + } else { + pr_err("cond_jmp gen bug %llx\n", jmp_offset); + return -EFAULT; + } + + break; + } + case BPF_JMP | BPF_JA: + if (insn->off == -1) + /* -1 jmp instructions will always jump + * backwards two bytes. Explicitly handling + * this case avoids wasting too many passes + * when there are long sequences of replaced + * dead code. 
+			 */
+			jmp_offset = -2;
+		else
+			jmp_offset = addrs[i + insn->off] - addrs[i];
+
+		if (!jmp_offset)
+			/* Optimize out nop jumps */
+			break;
+emit_jmp:
+		if (is_imm8(jmp_offset)) {
+			EMIT2(0xEB, jmp_offset);
+		} else if (is_simm32(jmp_offset)) {
+			EMIT1_off32(0xE9, jmp_offset);
+		} else {
+			pr_err("jmp gen bug %llx\n", jmp_offset);
+			return -EFAULT;
+		}
+		break;
+	/* STX XADD: lock *(u32 *)(dst + off) += src */
+	case BPF_STX | BPF_XADD | BPF_W:
+	/* STX XADD: lock *(u64 *)(dst + off) += src */
+	case BPF_STX | BPF_XADD | BPF_DW:
+		goto notyet;
+	case BPF_JMP | BPF_EXIT:
+		if (seen_exit) {
+			jmp_offset = ctx->cleanup_addr - addrs[i];
+			goto emit_jmp;
+		}
+		seen_exit = true;
+		/* Update cleanup_addr */
+		ctx->cleanup_addr = proglen;
+		emit_epilogue(&prog, bpf_prog->aux->stack_depth);
+		break;
+notyet:
+		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
+		return -EFAULT;
+	default:
+		/*
+		 * This error will be seen if a new instruction was added
+		 * to the interpreter, but not to the JIT, or if there is
+		 * junk in bpf_prog.
+		 */
+		pr_err("bpf_jit: unknown opcode %02x\n", code);
+		return -EINVAL;
+	}
+
+	ilen = prog - temp;
+	if (ilen > BPF_MAX_INSN_SIZE) {
+		pr_err("bpf_jit: fatal insn size error\n");
+		return -EFAULT;
+	}
+
+	if (image) {
+		if (unlikely(proglen + ilen > oldproglen)) {
+			pr_err("bpf_jit: fatal error\n");
+			return -EFAULT;
+		}
+		memcpy(image + proglen, temp, ilen);
+	}
+	proglen += ilen;
+	addrs[i] = proglen;
+	prog = temp;
+	}
+	return proglen;
+}
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+{
+	struct bpf_binary_header *header = NULL;
+	struct bpf_prog *tmp, *orig_prog = prog;
+	int proglen, oldproglen = 0;
+	struct jit_context ctx = {};
+	bool tmp_blinded = false;
+	u8 *image = NULL;
+	int *addrs;
+	int pass;
+	int i;
+
+	if (!prog->jit_requested)
+		return orig_prog;
+
+	tmp = bpf_jit_blind_constants(prog);
+	/*
+	 * If blinding was requested and we failed during blinding,
+	 * we must fall back to the interpreter.
+	 */
+	if (IS_ERR(tmp))
+		return orig_prog;
+	if (tmp != prog) {
+		tmp_blinded = true;
+		prog = tmp;
+	}
+
+	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
+	if (!addrs) {
+		prog = orig_prog;
+		goto out;
+	}
+
+	/*
+	 * Before the first pass, make a rough estimate of addrs[]:
+	 * each BPF instruction is translated to less than 64 bytes.
+	 */
+	for (proglen = 0, i = 0; i < prog->len; i++) {
+		proglen += 64;
+		addrs[i] = proglen;
+	}
+	ctx.cleanup_addr = proglen;
+
+	/*
+	 * JITed image shrinks with every pass and the loop iterates
+	 * until the image stops shrinking. Very large BPF programs
+	 * may converge on the last pass. In such a case, do one more
+	 * pass to emit the final image.
+ */ + for (pass = 0; pass < 20 || image; pass++) { + proglen = do_jit(prog, addrs, image, oldproglen, &ctx); + if (proglen <= 0) { +out_image: + image = NULL; + if (header) + bpf_jit_binary_free(header); + prog = orig_prog; + goto out_addrs; + } + if (image) { + if (proglen != oldproglen) { + pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", + proglen, oldproglen); + goto out_image; + } + break; + } + if (proglen == oldproglen) { + header = bpf_jit_binary_alloc(proglen, &image, + 1, jit_fill_hole); + if (!header) { + prog = orig_prog; + goto out_addrs; + } + } + oldproglen = proglen; + cond_resched(); + } + + if (bpf_jit_enable > 1) + bpf_jit_dump(prog->len, proglen, pass + 1, image); + + if (image) { + bpf_jit_binary_lock_ro(header); + prog->bpf_func = (void *)image; + prog->jited = 1; + prog->jited_len = proglen; + } else { + prog = orig_prog; + } + +out_addrs: + kfree(addrs); +out: + if (tmp_blinded) + bpf_jit_prog_release_other(prog, prog == orig_prog ? + tmp : orig_prog); + return prog; +} diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c index f0114007e915..e5f753cbb1c3 100644 --- a/arch/x86/pci/early.c +++ b/arch/x86/pci/early.c @@ -59,24 +59,15 @@ int early_pci_allowed(void) void early_dump_pci_device(u8 bus, u8 slot, u8 func) { + u32 value[256 / 4]; int i; - int j; - u32 val; - printk(KERN_INFO "pci 0000:%02x:%02x.%d config space:", - bus, slot, func); + pr_info("pci 0000:%02x:%02x.%d config space:\n", bus, slot, func); - for (i = 0; i < 256; i += 4) { - if (!(i & 0x0f)) - printk("\n %02x:",i); + for (i = 0; i < 256; i += 4) + value[i / 4] = read_pci_config(bus, slot, func, i); - val = read_pci_config(bus, slot, func, i); - for (j = 0; j < 4; j++) { - printk(" %02x", val & 0xff); - val >>= 8; - } - } - printk("\n"); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, value, 256, false); } void early_dump_pci_devices(void) diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 54ef19e90705..13f4485ca388 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -636,6 +636,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334a, quirk_no_aersid); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334b, quirk_no_aersid); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334c, quirk_no_aersid); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334d, quirk_no_aersid); #ifdef CONFIG_PHYS_ADDR_T_64BIT diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index bed7e7f4e44c..e01f7ceb9e7a 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -225,7 +225,7 @@ int __init efi_alloc_page_tables(void) pud = pud_alloc(&init_mm, p4d, EFI_VA_END); if (!pud) { - if (pgtable_l5_enabled) + if (pgtable_l5_enabled()) free_page((unsigned long) pgd_page_vaddr(*pgd)); free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER); return -ENOMEM; diff --git a/arch/x86/platform/intel-mid/intel_mid_vrtc.c b/arch/x86/platform/intel-mid/intel_mid_vrtc.c index 58024862a7eb..a52914aa3b6c 100644 --- a/arch/x86/platform/intel-mid/intel_mid_vrtc.c +++ b/arch/x86/platform/intel-mid/intel_mid_vrtc.c @@ -57,7 +57,7 @@ void vrtc_cmos_write(unsigned char val, unsigned char reg) } EXPORT_SYMBOL_GPL(vrtc_cmos_write); -void vrtc_get_time(struct timespec *now) +void 
vrtc_get_time(struct timespec64 *now) { u8 sec, min, hour, mday, mon; unsigned long flags; @@ -83,18 +83,18 @@ void vrtc_get_time(struct timespec *now) pr_info("vRTC: sec: %d min: %d hour: %d day: %d " "mon: %d year: %d\n", sec, min, hour, mday, mon, year); - now->tv_sec = mktime(year, mon, mday, hour, min, sec); + now->tv_sec = mktime64(year, mon, mday, hour, min, sec); now->tv_nsec = 0; } -int vrtc_set_mmss(const struct timespec *now) +int vrtc_set_mmss(const struct timespec64 *now) { unsigned long flags; struct rtc_time tm; int year; int retval = 0; - rtc_time_to_tm(now->tv_sec, &tm); + rtc_time64_to_tm(now->tv_sec, &tm); if (!rtc_valid_tm(&tm) && tm.tm_year >= 72) { /* * tm.year is the number of years since 1900, and the @@ -110,8 +110,8 @@ int vrtc_set_mmss(const struct timespec *now) vrtc_cmos_write(tm.tm_sec, RTC_SECONDS); spin_unlock_irqrestore(&rtc_lock, flags); } else { - pr_err("%s: Invalid vRTC value: write of %lx to vRTC failed\n", - __func__, now->tv_sec); + pr_err("%s: Invalid vRTC value: write of %llx to vRTC failed\n", + __func__, (s64)now->tv_sec); retval = -EINVAL; } return retval; diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index ccf4a49bb065..67ccf64c8bd8 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c @@ -72,7 +72,7 @@ static int set_up_temporary_text_mapping(pgd_t *pgd) * tables used by the image kernel. */ - if (pgtable_l5_enabled) { + if (pgtable_l5_enabled()) { p4d = (p4d_t *)get_safe_page(GFP_ATOMIC); if (!p4d) return -ENOMEM; diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig index 13ed827c7c66..9d529f22fd9d 100644 --- a/arch/x86/um/Kconfig +++ b/arch/x86/um/Kconfig @@ -1,5 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 -mainmenu "User Mode Linux/$SUBARCH $KERNELVERSION Kernel Configuration" +mainmenu "User Mode Linux/$(SUBARCH) $(KERNELVERSION) Kernel Configuration" + +comment "Compiler: $(CC_VERSION_TEXT)" + +source "scripts/Kconfig.include" source "arch/um/Kconfig.common" @@ -16,8 +20,8 @@ config UML_X86 select GENERIC_FIND_FIRST_BIT config 64BIT - bool "64-bit kernel" if SUBARCH = "x86" - default SUBARCH != "i386" + bool "64-bit kernel" if "$(SUBARCH)" = "x86" + default "$(SUBARCH)" != "i386" config X86_32 def_bool !64BIT diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile index 10003359e633..b2d6967262b2 100644 --- a/arch/x86/um/vdso/Makefile +++ b/arch/x86/um/vdso/Makefile @@ -23,14 +23,14 @@ $(obj)/vdso.o: $(obj)/vdso.so targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y) -export CPPFLAGS_vdso.lds += -P -C +CPPFLAGS_vdso.lds += -P -C VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \ -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so -$(obj)/vdso.so.dbg: $(src)/vdso.lds $(vobjs) FORCE +$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE $(call if_changed,vdso) $(obj)/%.so: OBJCOPYFLAGS := -S diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c index a18703be9ead..1804b27f9632 100644 --- a/arch/x86/xen/efi.c +++ b/arch/x86/xen/efi.c @@ -115,6 +115,61 @@ static efi_system_table_t __init *xen_efi_probe(void) return &efi_systab_xen; } +/* + * Determine whether we're in secure boot mode. + * + * Please keep the logic in sync with + * drivers/firmware/efi/libstub/secureboot.c:efi_get_secureboot(). 
+ */ +static enum efi_secureboot_mode xen_efi_get_secureboot(void) +{ + static efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID; + static efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID; + efi_status_t status; + u8 moksbstate, secboot, setupmode; + unsigned long size; + + size = sizeof(secboot); + status = efi.get_variable(L"SecureBoot", &efi_variable_guid, + NULL, &size, &secboot); + + if (status == EFI_NOT_FOUND) + return efi_secureboot_mode_disabled; + + if (status != EFI_SUCCESS) + goto out_efi_err; + + size = sizeof(setupmode); + status = efi.get_variable(L"SetupMode", &efi_variable_guid, + NULL, &size, &setupmode); + + if (status != EFI_SUCCESS) + goto out_efi_err; + + if (secboot == 0 || setupmode == 1) + return efi_secureboot_mode_disabled; + + /* See if a user has put the shim into insecure mode. */ + size = sizeof(moksbstate); + status = efi.get_variable(L"MokSBStateRT", &shim_guid, + NULL, &size, &moksbstate); + + /* If it fails, we don't care why. Default to secure. */ + if (status != EFI_SUCCESS) + goto secure_boot_enabled; + + if (moksbstate == 1) + return efi_secureboot_mode_disabled; + + secure_boot_enabled: + pr_info("UEFI Secure Boot is enabled.\n"); + return efi_secureboot_mode_enabled; + + out_efi_err: + pr_err("Could not determine UEFI Secure Boot status.\n"); + return efi_secureboot_mode_unknown; +} + void __init xen_efi_init(void) { efi_system_table_t *efi_systab_xen; @@ -129,6 +184,8 @@ void __init xen_efi_init(void) boot_params.efi_info.efi_systab = (__u32)__pa(efi_systab_xen); boot_params.efi_info.efi_systab_hi = (__u32)(__pa(efi_systab_xen) >> 32); + boot_params.secure_boot = xen_efi_get_secureboot(); + set_bit(EFI_BOOT, &efi.flags); set_bit(EFI_PARAVIRT, &efi.flags); set_bit(EFI_64BIT, &efi.flags); diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 2d76106788a3..96fc2f0fdbfe 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -63,37 +63,44 @@ static noinline void xen_flush_tlb_all(void) #define REMAP_BATCH_SIZE 16 struct remap_data { - xen_pfn_t *mfn; + xen_pfn_t *pfn; bool contiguous; + bool no_translate; pgprot_t prot; struct mmu_update *mmu_update; }; -static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token, +static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, void *data) { struct remap_data *rmd = data; - pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot)); + pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot)); - /* If we have a contiguous range, just update the mfn itself, - else update pointer to be "next mfn". */ + /* + * If we have a contiguous range, just update the pfn itself, + * else update pointer to be "next pfn". + */ if (rmd->contiguous) - (*rmd->mfn)++; + (*rmd->pfn)++; else - rmd->mfn++; + rmd->pfn++; - rmd->mmu_update->ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE; + rmd->mmu_update->ptr = virt_to_machine(ptep).maddr; + rmd->mmu_update->ptr |= rmd->no_translate ? 
MMU_PT_UPDATE_NO_TRANSLATE :
+				      MMU_NORMAL_PT_UPDATE;
 	rmd->mmu_update->val = pte_val_ma(pte);
 	rmd->mmu_update++;
 
 	return 0;
 }
 
-static int do_remap_gfn(struct vm_area_struct *vma,
+static int do_remap_pfn(struct vm_area_struct *vma,
 			unsigned long addr,
-			xen_pfn_t *gfn, int nr,
+			xen_pfn_t *pfn, int nr,
 			int *err_ptr, pgprot_t prot,
-			unsigned domid,
+			unsigned int domid,
+			bool no_translate,
 			struct page **pages)
 {
 	int err = 0;
@@ -104,11 +111,14 @@ static int do_remap_gfn(struct vm_area_struct *vma,
 
 	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 
-	rmd.mfn = gfn;
+	rmd.pfn = pfn;
 	rmd.prot = prot;
-	/* We use the err_ptr to indicate if there we are doing a contiguous
-	 * mapping or a discontigious mapping. */
+	/*
+	 * We use the err_ptr to indicate whether we are doing a contiguous
+	 * mapping or a discontiguous mapping.
+	 */
 	rmd.contiguous = !err_ptr;
+	rmd.no_translate = no_translate;
 
 	while (nr) {
 		int index = 0;
@@ -119,7 +129,7 @@ static int do_remap_gfn(struct vm_area_struct *vma,
 		rmd.mmu_update = mmu_update;
 		err = apply_to_page_range(vma->vm_mm, addr, range,
-					  remap_area_mfn_pte_fn, &rmd);
+					  remap_area_pfn_pte_fn, &rmd);
 		if (err)
 			goto out;
 
@@ -173,7 +183,8 @@ int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return -EOPNOTSUPP;
 
-	return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
+	return do_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
+			    pages);
 }
 EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
 
@@ -192,10 +203,25 @@ int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
 	 * cause of "wrong memory was mapped in".
 	 */
 	BUG_ON(err_ptr == NULL);
-	return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
+	return do_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
+			    false, pages);
 }
 EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
 
+int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+			       unsigned long addr,
+			       xen_pfn_t *mfn, int nr,
+			       int *err_ptr, pgprot_t prot,
+			       unsigned int domid, struct page **pages)
+{
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return -EOPNOTSUPP;
+
+	return do_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
+			    true, pages);
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+
 /* Returns: 0 success */
 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
 			       int nr, struct page **pages)
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 29163c43ebbd..e0f1bcf01d63 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -57,7 +57,7 @@ static u64 xen_clocksource_get_cycles(struct clocksource *cs)
 	return xen_clocksource_read();
 }
 
-static void xen_read_wallclock(struct timespec *ts)
+static void xen_read_wallclock(struct timespec64 *ts)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
 	struct pvclock_wall_clock *wall_clock = &(s->wc);
@@ -68,12 +68,12 @@ static void xen_read_wallclock(struct timespec *ts)
 	put_cpu_var(xen_vcpu);
 }
 
-static void xen_get_wallclock(struct timespec *now)
+static void xen_get_wallclock(struct timespec64 *now)
 {
 	xen_read_wallclock(now);
 }
 
-static int xen_set_wallclock(const struct timespec *now)
+static int xen_set_wallclock(const struct timespec64 *now)
 {
 	return -ENODEV;
 }
@@ -461,7 +461,7 @@ static void __init xen_time_init(void)
 {
 	struct pvclock_vcpu_time_info *pvti;
 	int cpu = smp_processor_id();
-	struct timespec tp;
+	struct timespec64 tp;
 
 	/* As Dom0 is never moved, no penalty on using TSC there */
 	if (xen_initial_domain())
@@ -479,7 +479,7 @@ static void __init xen_time_init(void)
 	/* Set initial system time with full resolution */
 	xen_read_wallclock(&tp);
-	do_settimeofday(&tp);
+	do_settimeofday64(&tp);
 
 	setup_force_cpu_cap(X86_FEATURE_TSC);
 
diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S
index e1a5fbeae08d..ca2d3b2bf2af 100644
--- a/arch/x86/xen/xen-pvh.S
+++ b/arch/x86/xen/xen-pvh.S
@@ -54,12 +54,19 @@ * charge of setting up its own stack, GDT and IDT.
  */
 
+#define PVH_GDT_ENTRY_CS	1
+#define PVH_GDT_ENTRY_DS	2
+#define PVH_GDT_ENTRY_CANARY	3
+#define PVH_CS_SEL		(PVH_GDT_ENTRY_CS * 8)
+#define PVH_DS_SEL		(PVH_GDT_ENTRY_DS * 8)
+#define PVH_CANARY_SEL		(PVH_GDT_ENTRY_CANARY * 8)
+
 ENTRY(pvh_start_xen)
 	cld
 
 	lgdt (_pa(gdt))
 
-	mov $(__BOOT_DS),%eax
+	mov $PVH_DS_SEL,%eax
 	mov %eax,%ds
 	mov %eax,%es
 	mov %eax,%ss
@@ -93,11 +100,17 @@ ENTRY(pvh_start_xen)
 	mov %eax, %cr0
 
 	/* Jump to 64-bit mode. */
-	ljmp $__KERNEL_CS, $_pa(1f)
+	ljmp $PVH_CS_SEL, $_pa(1f)
 
 	/* 64-bit entry point. */
 	.code64
 1:
+	/* Set base address in stack canary descriptor. */
+	mov $MSR_GS_BASE,%ecx
+	mov $_pa(canary), %eax
+	xor %edx, %edx
+	wrmsr
+
 	call xen_prepare_pvh
 
 	/* startup_64 expects boot_params in %rsi. */
@@ -107,6 +120,17 @@ ENTRY(pvh_start_xen)
 
 #else /* CONFIG_X86_64 */
 
+	/* Set base address in stack canary descriptor. */
+	movl $_pa(gdt_start),%eax
+	movl $_pa(canary),%ecx
+	movw %cx, (PVH_GDT_ENTRY_CANARY * 8) + 2(%eax)
+	shrl $16, %ecx
+	movb %cl, (PVH_GDT_ENTRY_CANARY * 8) + 4(%eax)
+	movb %ch, (PVH_GDT_ENTRY_CANARY * 8) + 7(%eax)
+
+	mov $PVH_CANARY_SEL,%eax
+	mov %eax,%gs
+
 	call mk_early_pgtbl_32
 
 	mov $_pa(initial_page_table), %eax
@@ -116,13 +140,13 @@ ENTRY(pvh_start_xen)
 	or $(X86_CR0_PG | X86_CR0_PE), %eax
 	mov %eax, %cr0
 
-	ljmp $__BOOT_CS, $1f
+	ljmp $PVH_CS_SEL, $1f
1:
 	call xen_prepare_pvh
 	mov $_pa(pvh_bootparams), %esi
 
 	/* startup_32 doesn't expect paging and PAE to be on.
*/ - ljmp $__BOOT_CS, $_pa(2f) + ljmp $PVH_CS_SEL, $_pa(2f) 2: mov %cr0, %eax and $~X86_CR0_PG, %eax @@ -131,7 +155,7 @@ ENTRY(pvh_start_xen) and $~X86_CR4_PAE, %eax mov %eax, %cr4 - ljmp $__BOOT_CS, $_pa(startup_32) + ljmp $PVH_CS_SEL, $_pa(startup_32) #endif END(pvh_start_xen) @@ -143,16 +167,19 @@ gdt: .word 0 gdt_start: .quad 0x0000000000000000 /* NULL descriptor */ - .quad 0x0000000000000000 /* reserved */ #ifdef CONFIG_X86_64 - .quad GDT_ENTRY(0xa09a, 0, 0xfffff) /* __KERNEL_CS */ + .quad GDT_ENTRY(0xa09a, 0, 0xfffff) /* PVH_CS_SEL */ #else - .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* __KERNEL_CS */ + .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* PVH_CS_SEL */ #endif - .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* __KERNEL_DS */ + .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */ + .quad GDT_ENTRY(0x4090, 0, 0x18) /* PVH_CANARY_SEL */ gdt_end: - .balign 4 + .balign 16 +canary: + .fill 48, 1, 0 + early_stack: .fill 256, 1, 0 early_stack_end: diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index c921e8bccdc8..17df332269b2 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -19,7 +19,6 @@ config XTENSA select HAVE_ARCH_KASAN if MMU select HAVE_CC_STACKPROTECTOR select HAVE_DEBUG_KMEMLEAK - select HAVE_DMA_API_DEBUG select HAVE_DMA_CONTIGUOUS select HAVE_EXIT_THREAD select HAVE_FUNCTION_TRACER diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 436b20337168..e5e1e61c538c 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -1,4 +1,5 @@ generic-y += bug.h +generic-y += compat.h generic-y += device.h generic-y += div64.h generic-y += dma-contiguous.h diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h index d5a82153a7c5..883024054b05 100644 --- a/arch/xtensa/include/asm/pci.h +++ b/arch/xtensa/include/asm/pci.h @@ -20,8 +20,6 @@ #define pcibios_assign_all_busses() 0 -extern struct pci_controller* pcibios_alloc_controller(void); - /* Assume some values. (We should revise them, if necessary) */ #define PCIBIOS_MIN_IO 0x2000 @@ -42,8 +40,6 @@ extern struct pci_controller* pcibios_alloc_controller(void); * decisions. */ -#define PCI_DMA_BUS_IS_PHYS (1) - /* Tell PCI code what kind of PCI resource mappings we support */ #define HAVE_PCI_MMAP 1 #define ARCH_GENERIC_PCI_MMAP_RESOURCE 1 diff --git a/arch/xtensa/include/uapi/asm/msgbuf.h b/arch/xtensa/include/uapi/asm/msgbuf.h index 36e2e103ca38..d6915e9f071c 100644 --- a/arch/xtensa/include/uapi/asm/msgbuf.h +++ b/arch/xtensa/include/uapi/asm/msgbuf.h @@ -7,7 +7,6 @@ * between kernel and user space. 
diff --git a/arch/xtensa/include/uapi/asm/sembuf.h b/arch/xtensa/include/uapi/asm/sembuf.h
index f61b6331a10c..09f348d643f1 100644
--- a/arch/xtensa/include/uapi/asm/sembuf.h
+++ b/arch/xtensa/include/uapi/asm/sembuf.h
@@ -14,7 +14,6 @@
 * between kernel and user space.
 *
 * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
 * - 2 miscellaneous 32-bit values
 *
 */
@@ -27,15 +26,15 @@
 struct semid64_ds {
 	struct ipc64_perm sem_perm;		/* permissions .. see ipc.h */
 #ifdef __XTENSA_EL__
-	__kernel_time_t	sem_otime;	/* last semop time */
-	unsigned long	__unused1;
-	__kernel_time_t	sem_ctime;	/* last change time */
-	unsigned long	__unused2;
+	unsigned long	sem_otime;	/* last semop time */
+	unsigned long	sem_otime_high;
+	unsigned long	sem_ctime;	/* last change time */
+	unsigned long	sem_ctime_high;
 #else
-	unsigned long	__unused1;
-	__kernel_time_t	sem_otime;	/* last semop time */
-	unsigned long	__unused2;
-	__kernel_time_t	sem_ctime;	/* last change time */
+	unsigned long	sem_otime_high;
+	unsigned long	sem_otime;	/* last semop time */
+	unsigned long	sem_ctime_high;
+	unsigned long	sem_ctime;	/* last change time */
 #endif
 	unsigned long	sem_nsems;	/* no. of semaphores in array */
 	unsigned long	__unused3;
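semid64_ds gets the mirror-image treatment. On the kernel side the conversion is the reverse of the user-space read: the full 64-bit time is split into the low word and the _high word when the structure is copied out. A reduced sketch of that framing (the struct here is a stand-in, not the real semid64_ds, and fill_sem_times() is hypothetical):

#include <stdint.h>

struct sem_times {			/* stand-in for the time fields of semid64_ds */
	uint32_t sem_otime, sem_otime_high;
	uint32_t sem_ctime, sem_ctime_high;
};

static void fill_sem_times(struct sem_times *out, int64_t otime, int64_t ctime)
{
	out->sem_otime      = (uint32_t)otime;		/* bits 0-31  */
	out->sem_otime_high = (uint32_t)(otime >> 32);	/* bits 32-63 */
	out->sem_ctime      = (uint32_t)ctime;
	out->sem_ctime_high = (uint32_t)(ctime >> 32);
}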
diff --git a/arch/xtensa/include/uapi/asm/shmbuf.h b/arch/xtensa/include/uapi/asm/shmbuf.h
index 26550bdc8430..554a57a6a90f 100644
--- a/arch/xtensa/include/uapi/asm/shmbuf.h
+++ b/arch/xtensa/include/uapi/asm/shmbuf.h
@@ -4,10 +4,10 @@
 *
 * The shmid64_ds structure for Xtensa architecture.
 * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
+ * between kernel and user space, but the padding is on the wrong
+ * side for big-endian xtensa, for historic reasons.
 *
 * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
 * - 2 miscellaneous 32-bit values
 *
 * This file is subject to the terms and conditions of the GNU General Public
@@ -20,42 +20,21 @@
 #ifndef _XTENSA_SHMBUF_H
 #define _XTENSA_SHMBUF_H
 
-#if defined (__XTENSA_EL__)
 struct shmid64_ds {
 	struct ipc64_perm	shm_perm;	/* operation perms */
 	size_t			shm_segsz;	/* size of segment (bytes) */
-	__kernel_time_t		shm_atime;	/* last attach time */
-	unsigned long		__unused1;
-	__kernel_time_t		shm_dtime;	/* last detach time */
-	unsigned long		__unused2;
-	__kernel_time_t		shm_ctime;	/* last change time */
-	unsigned long		__unused3;
+	unsigned long		shm_atime;	/* last attach time */
+	unsigned long		shm_atime_high;
+	unsigned long		shm_dtime;	/* last detach time */
+	unsigned long		shm_dtime_high;
+	unsigned long		shm_ctime;	/* last change time */
+	unsigned long		shm_ctime_high;
 	__kernel_pid_t		shm_cpid;	/* pid of creator */
 	__kernel_pid_t		shm_lpid;	/* pid of last operator */
 	unsigned long		shm_nattch;	/* no. of current attaches */
 	unsigned long		__unused4;
 	unsigned long		__unused5;
 };
-#elif defined (__XTENSA_EB__)
-struct shmid64_ds {
-	struct ipc64_perm	shm_perm;	/* operation perms */
-	size_t			shm_segsz;	/* size of segment (bytes) */
-	__kernel_time_t		shm_atime;	/* last attach time */
-	unsigned long		__unused1;
-	__kernel_time_t		shm_dtime;	/* last detach time */
-	unsigned long		__unused2;
-	__kernel_time_t		shm_ctime;	/* last change time */
-	unsigned long		__unused3;
-	__kernel_pid_t		shm_cpid;	/* pid of creator */
-	__kernel_pid_t		shm_lpid;	/* pid of last operator */
-	unsigned long		shm_nattch;	/* no. of current attaches */
-	unsigned long		__unused4;
-	unsigned long		__unused5;
-};
-#else
-# error endian order not defined
-#endif
-
 struct shminfo64 {
 	unsigned long	shmmax;
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 732631ce250f..392b4a80ebc2 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -261,12 +261,3 @@ const struct dma_map_ops xtensa_dma_map_ops = {
 	.mapping_error = xtensa_dma_mapping_error,
 };
 EXPORT_SYMBOL(xtensa_dma_map_ops);
-
-#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
-
-static int __init xtensa_dma_init(void)
-{
-	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
-	return 0;
-}
-fs_initcall(xtensa_dma_init);
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index b7c7a60c7000..21f13e9aabe1 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -41,8 +41,8 @@
 * pci_bus_add_device
 */
 
-struct pci_controller* pci_ctrl_head;
-struct pci_controller** pci_ctrl_tail = &pci_ctrl_head;
+static struct pci_controller *pci_ctrl_head;
+static struct pci_controller **pci_ctrl_tail = &pci_ctrl_head;
 
 static int pci_bus_count;
 
@@ -80,50 +80,6 @@ pcibios_align_resource(void *data, const struct resource *res,
 	return start;
 }
 
-int
-pcibios_enable_resources(struct pci_dev *dev, int mask)
-{
-	u16 cmd, old_cmd;
-	int idx;
-	struct resource *r;
-
-	pci_read_config_word(dev, PCI_COMMAND, &cmd);
-	old_cmd = cmd;
-	for(idx=0; idx<6; idx++) {
-		r = &dev->resource[idx];
-		if (!r->start && r->end) {
-			pr_err("PCI: Device %s not available because "
-			       "of resource collisions\n", pci_name(dev));
-			return -EINVAL;
-		}
-		if (r->flags & IORESOURCE_IO)
-			cmd |= PCI_COMMAND_IO;
-		if (r->flags & IORESOURCE_MEM)
-			cmd |= PCI_COMMAND_MEMORY;
-	}
-	if (dev->resource[PCI_ROM_RESOURCE].start)
-		cmd |= PCI_COMMAND_MEMORY;
-	if (cmd != old_cmd) {
-		pr_info("PCI: Enabling device %s (%04x -> %04x)\n",
-			pci_name(dev), old_cmd, cmd);
-		pci_write_config_word(dev, PCI_COMMAND, cmd);
-	}
-	return 0;
-}
-
-struct pci_controller * __init pcibios_alloc_controller(void)
-{
-	struct pci_controller *pci_ctrl;
-
-	pci_ctrl = (struct pci_controller *)alloc_bootmem(sizeof(*pci_ctrl));
-	memset(pci_ctrl, 0, sizeof(struct pci_controller));
-
-	*pci_ctrl_tail = pci_ctrl;
-	pci_ctrl_tail = &pci_ctrl->next;
-
-	return pci_ctrl;
-}
-
 static void __init pci_controller_apertures(struct pci_controller *pci_ctrl,
 					    struct list_head *resources)
 {
@@ -223,8 +179,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 	for (idx=0; idx<6; idx++) {
 		r = &dev->resource[idx];
 		if (!r->start && r->end) {
-			pr_err("PCI: Device %s not available because "
-			       "of resource collisions\n", pci_name(dev));
+			pci_err(dev, "can't enable device: resource collisions\n");
 			return -EINVAL;
 		}
 		if (r->flags & IORESOURCE_IO)
@@ -233,29 +188,13 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 			cmd |= PCI_COMMAND_MEMORY;
 	}
 	if (cmd != old_cmd) {
-		pr_info("PCI: Enabling device %s (%04x -> %04x)\n",
-			pci_name(dev), old_cmd, cmd);
+		pci_info(dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd);
 		pci_write_config_word(dev, PCI_COMMAND, cmd);
 	}
 	return 0;
 }
 
-#ifdef CONFIG_PROC_FS
-
-/*
- * Return the index of the PCI controller for device pdev.
- */
-
-int
-pci_controller_num(struct pci_dev *dev)
-{
-	struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
-	return pci_ctrl->index;
-}
-
-#endif /* CONFIG_PROC_FS */
-
 /*
 * Platform support for /proc/bus/pci/X/Y mmap()s.
 *  -- paulus.
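The logging conversion in pcibios_enable_device() above swaps bare pr_err()/pr_info() calls, which had to interpolate pci_name() by hand, for the pci_err()/pci_info() helpers that prefix the device name automatically. Their effect is roughly this (simplified sketch; the real macros in <linux/pci.h> cover more log levels):

/* Simplified: forward to the device-aware printers, which emit
 * "<driver> <bus id>: <message>" without an explicit pci_name(). */
#define pci_err(pdev, fmt, ...)  dev_err(&(pdev)->dev, fmt, ##__VA_ARGS__)
#define pci_info(pdev, fmt, ...) dev_info(&(pdev)->dev, fmt, ##__VA_ARGS__)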
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 32c5207f1226..86507fa7c2d7 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -323,8 +323,6 @@ do_illegal_instruction(struct pt_regs *regs)
 void
 do_unaligned_user (struct pt_regs *regs)
 {
-	siginfo_t info;
-
 	__die_if_kernel("Unhandled unaligned exception in kernel",
 			regs, SIGKILL);
 
@@ -334,12 +332,7 @@ do_unaligned_user (struct pt_regs *regs)
 			    "(pid = %d, pc = %#010lx)\n",
 			    regs->excvaddr, current->comm,
 			    task_pid_nr(current), regs->pc);
-	info.si_signo = SIGBUS;
-	info.si_errno = 0;
-	info.si_code = BUS_ADRALN;
-	info.si_addr = (void *) regs->excvaddr;
-	force_sig_info(SIGSEGV, &info, current);
-
+	force_sig_fault(SIGBUS, BUS_ADRALN, (void *) regs->excvaddr, current);
 }
 #endif
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 8b9b6f44bb06..c111a833205a 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -39,13 +39,13 @@ void do_page_fault(struct pt_regs *regs)
 	struct mm_struct *mm = current->mm;
 	unsigned int exccause = regs->exccause;
 	unsigned int address = regs->excvaddr;
-	siginfo_t info;
+	int code;
 
 	int is_write, is_exec;
 	int fault;
 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
-	info.si_code = SEGV_MAPERR;
+	code = SEGV_MAPERR;
 
 	/* We fault-in kernel-space virtual memory on-demand. The
 	 * 'reference' page table is init_mm.pgd.
@@ -91,7 +91,7 @@ retry:
 	 */
 
 good_area:
-	info.si_code = SEGV_ACCERR;
+	code = SEGV_ACCERR;
 
 	if (is_write) {
 		if (!(vma->vm_flags & VM_WRITE))
@@ -157,11 +157,7 @@ bad_area:
 	if (user_mode(regs)) {
 		current->thread.bad_vaddr = address;
 		current->thread.error_code = is_write;
-		info.si_signo = SIGSEGV;
-		info.si_errno = 0;
-		/* info.si_code has been set above */
-		info.si_addr = (void *) address;
-		force_sig_info(SIGSEGV, &info, current);
+		force_sig_fault(SIGSEGV, code, (void *) address, current);
 		return;
 	}
 	bad_page_fault(regs, address, SIGSEGV);
@@ -186,11 +182,7 @@ do_sigbus:
 	 * or user mode.
 	 */
 	current->thread.bad_vaddr = address;
-	info.si_code = SIGBUS;
-	info.si_errno = 0;
-	info.si_code = BUS_ADRERR;
-	info.si_addr = (void *) address;
-	force_sig_info(SIGBUS, &info, current);
+	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address, current);
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
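The traps.c and fault.c hunks above follow one pattern: the open-coded siginfo_t fill plus force_sig_info() collapses into a single force_sig_fault() call. That also fixes a latent bug visible in the removed do_unaligned_user() code, which filled the info with SIGBUS/BUS_ADRALN but then passed SIGSEGV to force_sig_info(); with the helper, the signal number and si_signo can no longer disagree. Roughly what the helper does internally (a sketch, not the kernel/signal.c implementation):

int force_sig_fault_sketch(int sig, int code, void __user *addr,
			   struct task_struct *t)
{
	struct siginfo info;

	clear_siginfo(&info);	/* don't leak stack padding to user space */
	info.si_signo = sig;	/* sig and si_signo now always agree */
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	return force_sig_info(sig, &info, t);
}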
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index 92f567f9a21e..af81a62faba6 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -153,19 +153,6 @@ static int rs_proc_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static int rs_proc_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, rs_proc_show, NULL);
-}
-
-static const struct file_operations rs_proc_fops = {
-	.owner = THIS_MODULE,
-	.open = rs_proc_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
 static const struct tty_operations serial_ops = {
 	.open = rs_open,
 	.close = rs_close,
@@ -176,7 +163,7 @@ static const struct tty_operations serial_ops = {
 	.chars_in_buffer = rs_chars_in_buffer,
 	.hangup = rs_hangup,
 	.wait_until_sent = rs_wait_until_sent,
-	.proc_fops = &rs_proc_fops,
+	.proc_show = rs_proc_show,
 };
 
 int __init rs_init(void)
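The console.c change is part of the tree-wide proc_fops cleanup: struct tty_operations grows a .proc_show hook, so the tty core creates the single-shot seq_file itself and each driver can drop the private proc_open/file_operations boilerplate deleted above. All a driver keeps is the show callback; a hypothetical driver (the demo_* names are invented here) now needs only:

static int demo_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "serinfo:1.0 driver:demo\n");
	return 0;
}

static const struct tty_operations demo_ops = {
	.proc_show = demo_proc_show,	/* replaces .proc_fops + single_open() */
};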