Diffstat (limited to 'arch')
 arch/arc/Kconfig                                       |  27
 arch/arc/Makefile                                      |   4
 arch/arc/boot/Makefile                                 |  16
 arch/arc/boot/dts/axc001.dtsi                          |   2
 arch/arc/boot/dts/nsim_700.dts                         |   2
 arch/arc/boot/dts/nsimosci.dts                         |   4
 arch/arc/configs/nsim_700_defconfig                    |   1
 arch/arc/configs/nsim_hs_defconfig                     |   1
 arch/arc/configs/nsim_hs_smp_defconfig                 |   1
 arch/arc/configs/nsimosci_defconfig                    |   1
 arch/arc/configs/nsimosci_hs_defconfig                 |   1
 arch/arc/configs/nsimosci_hs_smp_defconfig             |   3
 arch/arc/include/asm/arcregs.h                         |   5
 arch/arc/include/asm/cache.h                           |   2
 arch/arc/include/asm/elf.h                             |   2
 arch/arc/include/asm/mcip.h                            |  16
 arch/arc/include/asm/module.h                          |   1
 arch/arc/include/asm/setup.h                           |   6
 arch/arc/include/asm/smp.h                             |   4
 arch/arc/include/asm/syscalls.h                        |   1
 arch/arc/include/uapi/asm/unistd.h                     |   9
 arch/arc/kernel/devtree.c                              |   2
 arch/arc/kernel/mcip.c                                 |  63
 arch/arc/kernel/module.c                               |  53
 arch/arc/kernel/process.c                              |  35
 arch/arc/kernel/setup.c                                | 113
 arch/arc/kernel/smp.c                                  |  23
 arch/arc/kernel/time.c                                 |  19
 arch/arc/kernel/troubleshoot.c                         | 110
 arch/arc/mm/cache.c                                    |  19
 arch/arc/mm/dma.c                                      |  30
 arch/arc/mm/tlb.c                                      |   6
 arch/arc/mm/tlbex.S                                    |  21
 arch/arc/plat-eznps/smp.c                              |   6
 arch/arm/boot/dts/ste-snowball.dts                     |  15
 arch/arm/boot/dts/uniphier-pro5.dtsi                   |   4
 arch/arm/boot/dts/uniphier-pxs2.dtsi                   |   4
 arch/arm/boot/dts/vf500.dtsi                           |   2
 arch/arm/configs/multi_v7_defconfig                    |   1
 arch/arm/include/asm/kvm_asm.h                         |   1
 arch/arm/include/asm/kvm_host.h                        |   3
 arch/arm/include/asm/kvm_hyp.h                         |   1
 arch/arm/include/asm/unistd.h                          |   2
 arch/arm/include/uapi/asm/unistd.h                     |   3
 arch/arm/kernel/calls.S                                |   3
 arch/arm/kvm/arm.c                                     |  27
 arch/arm/kvm/hyp/tlb.c                                 |  15
 arch/arm/mach-imx/gpc.c                                |  15
 arch/arm/mach-imx/mach-imx6q.c                         |   2
 arch/arm/mach-mvebu/Kconfig                            |   4
 arch/arm/mach-uniphier/Kconfig                         |   1
 arch/arm/mm/abort-lv4t.S                               |  34
 arch/arm64/Kconfig.platforms                           |   1
 arch/arm64/boot/dts/broadcom/ns2-svk.dts               |   2
 arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi         |   1
 arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi         |   1
 arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi   |   2
 arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts        |   3
 arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts |   4
 arch/arm64/boot/dts/rockchip/rk3399.dtsi               |   7
 arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi       |  12
 arch/arm64/include/asm/alternative.h                   |   2
 arch/arm64/include/asm/cpucaps.h                       |  40
 arch/arm64/include/asm/cpufeature.h                    |  20
 arch/arm64/include/asm/kvm_asm.h                       |   1
 arch/arm64/include/asm/kvm_host.h                      |   3
 arch/arm64/include/asm/kvm_mmu.h                       |   2
 arch/arm64/include/asm/lse.h                           |   1
 arch/arm64/include/asm/memory.h                        |   2
 arch/arm64/kvm/hyp/tlb.c                               |  15
 arch/arm64/mm/numa.c                                   |   9
 arch/cris/arch-v32/drivers/cryptocop.c                 |   2
 arch/h8300/include/asm/thread_info.h                   |   4
 arch/h8300/kernel/signal.c                             |   2
 arch/mips/Makefile                                     |   2
 arch/mips/boot/dts/mti/malta.dts                       |   3
 arch/mips/generic/init.c                               |  16
 arch/mips/include/asm/fpu_emulator.h                   |  13
 arch/mips/include/asm/switch_to.h                      |  18
 arch/mips/kernel/mips-cpc.c                            |  11
 arch/mips/kernel/mips-r2-to-r6-emul.c                  |  10
 arch/mips/kernel/ptrace.c                              |   8
 arch/mips/kernel/r2300_fpu.S                           | 138
 arch/mips/kernel/r6000_fpu.S                           |  89
 arch/mips/kernel/relocate.c                            |   2
 arch/mips/kernel/setup.c                               |  13
 arch/mips/kernel/traps.c                               | 137
 arch/mips/lib/dump_tlb.c                               |  44
 arch/mips/lib/r3k_dump_tlb.c                           |  18
 arch/nios2/kernel/time.c                               |   1
 arch/openrisc/include/asm/cache.h                      |   2
 arch/parisc/include/uapi/asm/unistd.h                  |   4
 arch/parisc/kernel/drivers.c                           |   6
 arch/parisc/kernel/syscall.S                           |  66
 arch/powerpc/include/asm/checksum.h                    |  12
 arch/powerpc/include/asm/cpuidle.h                     |   2
 arch/powerpc/include/asm/exception-64s.h               |  16
 arch/powerpc/include/asm/tlb.h                         |  12
 arch/powerpc/kernel/exceptions-64s.S                   |  50
 arch/powerpc/kernel/hw_breakpoint.c                    |   2
 arch/powerpc/kernel/idle_book3s.S                      |  35
 arch/powerpc/kernel/process.c                          |   2
 arch/powerpc/kvm/book3s_hv_rm_xics.c                   |   1
 arch/powerpc/mm/tlb-radix.c                            |   8
 arch/s390/hypfs/hypfs_diag.c                           |   6
 arch/s390/include/asm/ftrace.h                         |   4
 arch/s390/include/asm/processor.h                      |   2
 arch/s390/include/asm/unistd.h                         |   3
 arch/s390/kernel/dis.c                                 |   4
 arch/s390/kernel/dumpstack.c                           |  63
 arch/s390/kernel/perf_event.c                          |   2
 arch/s390/kernel/stacktrace.c                          |   4
 arch/s390/kernel/vmlinux.lds.S                         |   2
 arch/s390/mm/hugetlbpage.c                             |   1
 arch/s390/mm/init.c                                    |  38
 arch/s390/oprofile/init.c                              |   2
 arch/s390/pci/pci_dma.c                                |   2
 arch/sparc/include/asm/cpudata_64.h                    |   5
 arch/sparc/include/asm/spinlock_32.h                   |   2
 arch/sparc/include/asm/spinlock_64.h                   |  12
 arch/sparc/include/asm/topology_64.h                   |   8
 arch/sparc/include/asm/uaccess_64.h                    |  28
 arch/sparc/kernel/head_64.S                            |  37
 arch/sparc/kernel/jump_label.c                         |  23
 arch/sparc/kernel/mdesc.c                              |  46
 arch/sparc/kernel/smp_64.c                             |   8
 arch/sparc/lib/GENcopy_from_user.S                     |   4
 arch/sparc/lib/GENcopy_to_user.S                       |   4
 arch/sparc/lib/GENmemcpy.S                             |  48
 arch/sparc/lib/Makefile                                |   2
 arch/sparc/lib/NG2copy_from_user.S                     |   8
 arch/sparc/lib/NG2copy_to_user.S                       |   8
 arch/sparc/lib/NG2memcpy.S                             | 228
 arch/sparc/lib/NG4copy_from_user.S                     |   8
 arch/sparc/lib/NG4copy_to_user.S                       |   8
 arch/sparc/lib/NG4memcpy.S                             | 294
 arch/sparc/lib/NGcopy_from_user.S                      |   4
 arch/sparc/lib/NGcopy_to_user.S                        |   4
 arch/sparc/lib/NGmemcpy.S                              | 233
 arch/sparc/lib/U1copy_from_user.S                      |   8
 arch/sparc/lib/U1copy_to_user.S                        |   8
 arch/sparc/lib/U1memcpy.S                              | 345
 arch/sparc/lib/U3copy_from_user.S                      |   8
 arch/sparc/lib/U3copy_to_user.S                        |   8
 arch/sparc/lib/U3memcpy.S                              | 227
 arch/sparc/lib/copy_in_user.S                          |  35
 arch/sparc/lib/user_fixup.c                            |  71
 arch/sparc/mm/tsb.c                                    |  17
 arch/sparc/mm/ultra.S                                  | 374
 arch/x86/crypto/aesni-intel_glue.c                     |   4
 arch/x86/entry/Makefile                                |   4
 arch/x86/events/intel/core.c                           |  10
 arch/x86/events/intel/cstate.c                         |  30
 arch/x86/events/intel/pt.c                             |  45
 arch/x86/include/asm/io.h                              |   6
 arch/x86/include/asm/processor.h                       |  14
 arch/x86/kernel/acpi/boot.c                            |   1
 arch/x86/kernel/apm_32.c                               |   5
 arch/x86/kernel/cpu/microcode/amd.c                    |   2
 arch/x86/kernel/cpu/scattered.c                        |  57
 arch/x86/kernel/cpuid.c                                |   4
 arch/x86/kernel/mcount_64.S                            |   3
 arch/x86/kernel/quirks.c                               |   3
 arch/x86/kernel/setup.c                                |   7
 arch/x86/kernel/unwind_guess.c                         |   9
 arch/x86/mm/kaslr.c                                    |   6
 arch/x86/mm/pat.c                                      |  14
 arch/x86/xen/enlighten.c                               |   2
 168 files changed, 2542 insertions(+), 1563 deletions(-)
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index ecd12379e2cd..bd204bfa29ed 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -41,6 +41,8 @@ config ARC
select PERF_USE_VMALLOC
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_GENERIC_DMA_COHERENT
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZMA
config MIGHT_HAVE_PCI
bool
@@ -186,14 +188,6 @@ if SMP
config ARC_HAS_COH_CACHES
def_bool n
-config ARC_MCIP
- bool "ARConnect Multicore IP (MCIP) Support "
- depends on ISA_ARCV2
- help
- This IP block enables SMP in ARC-HS38 cores.
- It provides for cross-core interrupts, multi-core debug
- hardware semaphores, shared memory,....
-
config NR_CPUS
int "Maximum number of CPUs (2-4096)"
range 2 4096
@@ -211,6 +205,15 @@ config ARC_SMP_HALT_ON_RESET
endif #SMP
+config ARC_MCIP
+ bool "ARConnect Multicore IP (MCIP) Support "
+ depends on ISA_ARCV2
+ default y if SMP
+ help
+ This IP block enables SMP in ARC-HS38 cores.
+ It provides for cross-core interrupts, multi-core debug
+ hardware semaphores, shared memory,....
+
menuconfig ARC_CACHE
bool "Enable Cache Support"
default y
@@ -537,14 +540,6 @@ config ARC_DBG_TLB_PARANOIA
bool "Paranoia Checks in Low Level TLB Handlers"
default n
-config ARC_DBG_TLB_MISS_COUNT
- bool "Profile TLB Misses"
- default n
- select DEBUG_FS
- help
- Counts number of I and D TLB Misses and exports them via Debugfs
- The counters can be cleared via Debugfs as well
-
endif
config ARC_UBOOT_SUPPORT
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index aa82d13d4213..19cce226d1a8 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -71,7 +71,9 @@ cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables $(cfi)
ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
# Generic build system uses -O2, we want -O3
# Note: No need to add to cflags-y as that happens anyways
-ARCH_CFLAGS += -O3
+#
+# Disable the false maybe-uninitialized warnings gcc spits out at -O3
+ARCH_CFLAGS += -O3 $(call cc-disable-warning,maybe-uninitialized,)
endif
# small data is default for elf32 tool-chain. If not usable, disable it
diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile
index e597cb34c16a..f94cf151e06a 100644
--- a/arch/arc/boot/Makefile
+++ b/arch/arc/boot/Makefile
@@ -14,9 +14,15 @@ UIMAGE_ENTRYADDR = $(LINUX_START_TEXT)
suffix-y := bin
suffix-$(CONFIG_KERNEL_GZIP) := gz
+suffix-$(CONFIG_KERNEL_LZMA) := lzma
-targets += uImage uImage.bin uImage.gz
-extra-y += vmlinux.bin vmlinux.bin.gz
+targets += uImage
+targets += uImage.bin
+targets += uImage.gz
+targets += uImage.lzma
+extra-y += vmlinux.bin
+extra-y += vmlinux.bin.gz
+extra-y += vmlinux.bin.lzma
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
@@ -24,12 +30,18 @@ $(obj)/vmlinux.bin: vmlinux FORCE
$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lzma)
+
$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
$(call if_changed,uimage,none)
$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
$(call if_changed,uimage,gzip)
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
+ $(call if_changed,uimage,lzma)
+
$(obj)/uImage: $(obj)/uImage.$(suffix-y)
@ln -sf $(notdir $<) $@
@echo ' Image $@ is ready'
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
index 6ae2c476ad82..53ce226f77a5 100644
--- a/arch/arc/boot/dts/axc001.dtsi
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -71,7 +71,7 @@
reg-io-width = <4>;
};
- arcpmu0: pmu {
+ arcpct0: pct {
compatible = "snps,arc700-pct";
};
};
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts
index ce0ccd20b5bf..5ee96b067c08 100644
--- a/arch/arc/boot/dts/nsim_700.dts
+++ b/arch/arc/boot/dts/nsim_700.dts
@@ -69,7 +69,7 @@
};
};
- arcpmu0: pmu {
+ arcpct0: pct {
compatible = "snps,arc700-pct";
};
};
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index bcf603142a33..3c391ba565ed 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -83,5 +83,9 @@
reg = <0xf0003000 0x44>;
interrupts = <7>;
};
+
+ arcpct0: pct {
+ compatible = "snps,arc700-pct";
+ };
};
};
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 7314f538847b..b0066a749d4c 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index 65ab9fbf83f2..ebe9ebb92933 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index 3b3990cddbe1..4bde43278be6 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -12,6 +12,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 98cf20933bbb..f6fb3d26557e 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index ddf8b96d494e..b9f0fe00044b 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index ceb90745326e..6da71ba253a9 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -10,6 +10,7 @@ CONFIG_IKCONFIG_PROC=y
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
+CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
CONFIG_MODULES=y
@@ -34,7 +35,6 @@ CONFIG_INET=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
@@ -72,7 +72,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_DRM=y
CONFIG_DRM_ARCPGU=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index db25c65155cb..1bd24ec3e350 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -43,12 +43,14 @@
#define STATUS_AE_BIT 5 /* Exception active */
#define STATUS_DE_BIT 6 /* PC is in delay slot */
#define STATUS_U_BIT 7 /* User/Kernel mode */
+#define STATUS_Z_BIT 11
#define STATUS_L_BIT 12 /* Loop inhibit */
/* These masks correspond to the status word(STATUS_32) bits */
#define STATUS_AE_MASK (1<<STATUS_AE_BIT)
#define STATUS_DE_MASK (1<<STATUS_DE_BIT)
#define STATUS_U_MASK (1<<STATUS_U_BIT)
+#define STATUS_Z_MASK (1<<STATUS_Z_BIT)
#define STATUS_L_MASK (1<<STATUS_L_BIT)
/*
@@ -349,10 +351,11 @@ struct cpuinfo_arc {
struct cpuinfo_arc_bpu bpu;
struct bcr_identity core;
struct bcr_isa isa;
+ const char *details, *name;
unsigned int vec_base;
struct cpuinfo_arc_ccm iccm, dccm;
struct {
- unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
+ unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
fpu_sp:1, fpu_dp:1, pad2:6,
debug:1, ap:1, smart:1, rtt:1, pad3:4,
timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index fb781e34f322..b3410ff6a62d 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -53,7 +53,7 @@ extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_cache_bcr(void);
-extern int ioc_exists;
+extern int ioc_enable;
extern unsigned long perip_base, perip_end;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
index 7096f97a1434..aa2d6da9d187 100644
--- a/arch/arc/include/asm/elf.h
+++ b/arch/arc/include/asm/elf.h
@@ -54,7 +54,7 @@ extern int elf_check_arch(const struct elf32_hdr *);
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (2UL * TASK_SIZE / 3)
/*
* When the program starts, a1 contains a pointer to a function to be
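The one-character elf.h change above is an integer-promotion fix: with a plain 2, the multiplication is performed in int before the division. A minimal sketch of the failure mode being avoided, using a hypothetical 32-bit TASK_SIZE value (not the real ARC one):

/* Illustration only; 0x60000000 stands in for TASK_SIZE. */
#define TASK_SIZE_DEMO	0x60000000
#define BAD_BASE	(2 * TASK_SIZE_DEMO / 3)	/* 2*0x60000000 overflows signed int: UB */
#define GOOD_BASE	(2UL * TASK_SIZE_DEMO / 3)	/* unsigned long throughout: 0x40000000UL */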
diff --git a/arch/arc/include/asm/mcip.h b/arch/arc/include/asm/mcip.h
index 847e3bbe387f..c8fbe4114bad 100644
--- a/arch/arc/include/asm/mcip.h
+++ b/arch/arc/include/asm/mcip.h
@@ -55,6 +55,22 @@ struct mcip_cmd {
#define IDU_M_DISTRI_DEST 0x2
};
+struct mcip_bcr {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad3:8,
+ idu:1, llm:1, num_cores:6,
+ iocoh:1, gfrc:1, dbg:1, pad2:1,
+ msg:1, sem:1, ipi:1, pad:1,
+ ver:8;
+#else
+ unsigned int ver:8,
+ pad:1, ipi:1, sem:1, msg:1,
+ pad2:1, dbg:1, gfrc:1, iocoh:1,
+ num_cores:6, llm:1, idu:1,
+ pad3:8;
+#endif
+};
+
/*
* MCIP programming model
*
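Moving this bitfield layout into mcip.h lets any ARC code decode the MCIP build-config register, not just mcip.c. A minimal sketch of the intended use, based on the READ_BCR/ARC_REG_MCIP_BCR pattern visible later in this diff (the helper name and the ver==0 convention are illustrative assumptions):

/* Sketch only; mcip_present() is a hypothetical helper. */
#include <linux/types.h>
#include <asm/arcregs.h>
#include <asm/mcip.h>

static bool mcip_present(void)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);	/* decode the aux register into bitfields */
	return mp.ver != 0;		/* assume a zero version means no MCIP block */
}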
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
index 518222bb3f8e..6e91d8b339c3 100644
--- a/arch/arc/include/asm/module.h
+++ b/arch/arc/include/asm/module.h
@@ -18,6 +18,7 @@
struct mod_arch_specific {
void *unw_info;
int unw_sec_idx;
+ const char *secstr;
};
#endif
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 48b37c693db3..cb954cdab070 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -27,11 +27,6 @@ struct id_to_str {
const char *str;
};
-struct cpuinfo_data {
- struct id_to_str info;
- int up_range;
-};
-
extern int root_mountflags, end_mem;
void setup_processor(void);
@@ -43,5 +38,6 @@ void __init setup_arch_memory(void);
#define IS_USED_RUN(v) ((v) ? "" : "(not used) ")
#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg))
#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
+#define IS_AVAIL3(v, v2, s) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2))
#endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 89fdd1b0a76e..0861007d9ef3 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -37,9 +37,9 @@ extern const char *arc_platform_smp_cpuinfo(void);
* API expected BY platform smp code (FROM arch smp code)
*
* smp_ipi_irq_setup:
- * Takes @cpu and @irq to which the arch-common ISR is hooked up
+ * Takes @cpu and @hwirq to which the arch-common ISR is hooked up
*/
-extern int smp_ipi_irq_setup(int cpu, int irq);
+extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq);
/*
* struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
index e56f9fcc5581..772b67ca56e7 100644
--- a/arch/arc/include/asm/syscalls.h
+++ b/arch/arc/include/asm/syscalls.h
@@ -17,6 +17,7 @@ int sys_clone_wrapper(int, int, int, int, int);
int sys_cacheflush(uint32_t, uint32_t, uint32_t);
int sys_arc_settls(void *);
int sys_arc_gettls(void);
+int sys_arc_usr_cmpxchg(int *, int, int);
#include <asm-generic/syscalls.h>
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 41fa2ec9e02c..9a34136d84b2 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -27,18 +27,19 @@
#define NR_syscalls __NR_syscalls
+/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h) */
+#define __NR_sysfs (__NR_arch_specific_syscall + 3)
+
/* ARC specific syscall */
#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
+#define __NR_arc_usr_cmpxchg (__NR_arch_specific_syscall + 4)
__SYSCALL(__NR_cacheflush, sys_cacheflush)
__SYSCALL(__NR_arc_settls, sys_arc_settls)
__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
-
-
-/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
-#define __NR_sysfs (__NR_arch_specific_syscall + 3)
+__SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg)
__SYSCALL(__NR_sysfs, sys_sysfs)
#undef __SYSCALL
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
index f1e07c2344f8..3b67f538f142 100644
--- a/arch/arc/kernel/devtree.c
+++ b/arch/arc/kernel/devtree.c
@@ -31,6 +31,8 @@ static void __init arc_set_early_base_baud(unsigned long dt_root)
arc_base_baud = 166666666; /* Fixed 166.6MHz clk (TB10x) */
else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp"))
arc_base_baud = 33333333; /* Fixed 33MHz clk (AXS10x) */
+ else if (of_flat_dt_is_compatible(dt_root, "ezchip,arc-nps"))
+ arc_base_baud = 800000000; /* Fixed 800MHz clk (NPS) */
else
arc_base_baud = 50000000; /* Fixed default 50MHz */
}
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 72f9179b1a24..f39142acc89e 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -15,11 +15,12 @@
#include <asm/mcip.h>
#include <asm/setup.h>
-static char smp_cpuinfo_buf[128];
-static int idu_detected;
-
static DEFINE_RAW_SPINLOCK(mcip_lock);
+#ifdef CONFIG_SMP
+
+static char smp_cpuinfo_buf[128];
+
static void mcip_setup_per_cpu(int cpu)
{
smp_ipi_irq_setup(cpu, IPI_IRQ);
@@ -86,21 +87,7 @@ static void mcip_ipi_clear(int irq)
static void mcip_probe_n_setup(void)
{
- struct mcip_bcr {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int pad3:8,
- idu:1, llm:1, num_cores:6,
- iocoh:1, gfrc:1, dbg:1, pad2:1,
- msg:1, sem:1, ipi:1, pad:1,
- ver:8;
-#else
- unsigned int ver:8,
- pad:1, ipi:1, sem:1, msg:1,
- pad2:1, dbg:1, gfrc:1, iocoh:1,
- num_cores:6, llm:1, idu:1,
- pad3:8;
-#endif
- } mp;
+ struct mcip_bcr mp;
READ_BCR(ARC_REG_MCIP_BCR, mp);
@@ -114,7 +101,6 @@ static void mcip_probe_n_setup(void)
IS_AVAIL1(mp.gfrc, "GFRC"));
cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
- idu_detected = mp.idu;
if (mp.dbg) {
__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
@@ -130,6 +116,8 @@ struct plat_smp_ops plat_smp_ops = {
.ipi_clear = mcip_ipi_clear,
};
+#endif
+
/***************************************************************************
* ARCv2 Interrupt Distribution Unit (IDU)
*
@@ -193,6 +181,8 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
{
unsigned long flags;
cpumask_t online;
+ unsigned int destination_bits;
+ unsigned int distribution_mode;
/* errout if no online cpu per @cpumask */
if (!cpumask_and(&online, cpumask, cpu_online_mask))
@@ -200,8 +190,15 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
raw_spin_lock_irqsave(&mcip_lock, flags);
- idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
- idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
+ destination_bits = cpumask_bits(&online)[0];
+ idu_set_dest(data->hwirq, destination_bits);
+
+ if (ffs(destination_bits) == fls(destination_bits))
+ distribution_mode = IDU_M_DISTRI_DEST;
+ else
+ distribution_mode = IDU_M_DISTRI_RR;
+
+ idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
@@ -219,16 +216,15 @@ static struct irq_chip idu_irq_chip = {
};
-static int idu_first_irq;
+static irq_hw_number_t idu_first_hwirq;
static void idu_cascade_isr(struct irq_desc *desc)
{
- struct irq_domain *domain = irq_desc_get_handler_data(desc);
- unsigned int core_irq = irq_desc_get_irq(desc);
- unsigned int idu_irq;
+ struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+ irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
+ irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
- idu_irq = core_irq - idu_first_irq;
- generic_handle_irq(irq_find_mapping(domain, idu_irq));
+ generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
}
static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
@@ -294,9 +290,12 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
struct irq_domain *domain;
/* Read IDU BCR to confirm nr_irqs */
int nr_irqs = of_irq_count(intc);
- int i, irq;
+ int i, virq;
+ struct mcip_bcr mp;
+
+ READ_BCR(ARC_REG_MCIP_BCR, mp);
- if (!idu_detected)
+ if (!mp.idu)
panic("IDU not detected, but DeviceTree using it");
pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);
@@ -312,11 +311,11 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
* however we need it to get the parent virq and set IDU handler
* as first level isr
*/
- irq = irq_of_parse_and_map(intc, i);
+ virq = irq_of_parse_and_map(intc, i);
if (!i)
- idu_first_irq = irq;
+ idu_first_hwirq = irqd_to_hwirq(irq_get_irq_data(virq));
- irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
+ irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
}
__mcip_cmd(CMD_IDU_ENABLE, 0);
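The affinity rework above derives the IDU distribution mode from the effective CPU mask: ffs(x) == fls(x) holds exactly when one bit is set, so a single-CPU mask gets direct delivery (IDU_M_DISTRI_DEST) and anything wider falls back to round-robin. A standalone sketch of that test (the helper is illustrative, not part of the patch; the patch already guarantees a non-empty mask via cpumask_and):

#include <linux/bitops.h>
#include <linux/types.h>

/* True iff exactly one destination CPU bit is set. */
static inline bool idu_single_dest(unsigned int destination_bits)
{
	return destination_bits &&
	       ffs(destination_bits) == fls(destination_bits);
}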
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
index 9a2849756022..42e964db2967 100644
--- a/arch/arc/kernel/module.c
+++ b/arch/arc/kernel/module.c
@@ -30,17 +30,9 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstr, struct module *mod)
{
#ifdef CONFIG_ARC_DW2_UNWIND
- int i;
-
mod->arch.unw_sec_idx = 0;
mod->arch.unw_info = NULL;
-
- for (i = 1; i < hdr->e_shnum; i++) {
- if (strcmp(secstr+sechdrs[i].sh_name, ".eh_frame") == 0) {
- mod->arch.unw_sec_idx = i;
- break;
- }
- }
+ mod->arch.secstr = secstr;
#endif
return 0;
}
@@ -59,29 +51,33 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
unsigned int relsec, /* sec index for relo sec */
struct module *module)
{
- int i, n;
+ int i, n, relo_type;
Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym_entry, *sym_sec;
- Elf32_Addr relocation;
- Elf32_Addr location;
- Elf32_Addr sec_to_patch;
- int relo_type;
-
- sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr;
+ Elf32_Addr relocation, location, tgt_addr;
+ unsigned int tgtsec;
+
+ /*
+ * @relsec has relocations e.g. .rela.init.text
+ * @tgtsec is section to patch e.g. .init.text
+ */
+ tgtsec = sechdrs[relsec].sh_info;
+ tgt_addr = sechdrs[tgtsec].sh_addr;
sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
- pr_debug("\n========== Module Sym reloc ===========================\n");
- pr_debug("Section to fixup %x\n", sec_to_patch);
+ pr_debug("\nSection to fixup %s @%x\n",
+ module->arch.secstr + sechdrs[tgtsec].sh_name, tgt_addr);
pr_debug("=========================================================\n");
- pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n");
+ pr_debug("r_off\tr_add\tst_value ADDRESS VALUE\n");
pr_debug("=========================================================\n");
/* Loop thru entries in relocation section */
for (i = 0; i < n; i++) {
+ const char *s;
/* This is where to make the change */
- location = sec_to_patch + rel_entry[i].r_offset;
+ location = tgt_addr + rel_entry[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
@@ -89,10 +85,15 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
relocation = sym_entry->st_value + rel_entry[i].r_addend;
- pr_debug("\t%x\t\t%x\t\t%x %x %x [%s]\n",
- rel_entry[i].r_offset, rel_entry[i].r_addend,
- sym_entry->st_value, location, relocation,
- strtab + sym_entry->st_name);
+ if (sym_entry->st_name == 0 && ELF_ST_TYPE (sym_entry->st_info) == STT_SECTION) {
+ s = module->arch.secstr + sechdrs[sym_entry->st_shndx].sh_name;
+ } else {
+ s = strtab + sym_entry->st_name;
+ }
+
+ pr_debug(" %x\t%x\t%x %x %x [%s]\n",
+ rel_entry[i].r_offset, rel_entry[i].r_addend,
+ sym_entry->st_value, location, relocation, s);
/* This assumes modules are built with -mlong-calls
* so any branches/jumps are absolute 32 bit jmps
@@ -111,6 +112,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
goto relo_err;
}
+
+ if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
+ module->arch.unw_sec_idx = tgtsec;
+
return 0;
relo_err:
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index be1972bd2729..a41a79a4f4fe 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -41,6 +41,41 @@ SYSCALL_DEFINE0(arc_gettls)
return task_thread_info(current)->thr_ptr;
}
+SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
+{
+ struct pt_regs *regs = current_pt_regs();
+ int uval = -EFAULT;
+
+ /*
+ * This is only for old cores lacking LLOCK/SCOND, which by definition
+ * can't possibly be SMP. Thus doesn't need to be SMP safe.
+ * And this also helps reduce the overhead for serializing in
+ * the UP case
+ */
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
+
+ /* Z indicates to userspace if operation succeeded */
+ regs->status32 &= ~STATUS_Z_MASK;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ preempt_disable();
+
+ if (__get_user(uval, uaddr))
+ goto done;
+
+ if (uval == expected) {
+ if (!__put_user(new, uaddr))
+ regs->status32 |= STATUS_Z_MASK;
+ }
+
+done:
+ preempt_enable();
+
+ return uval;
+}
+
void arch_cpu_idle(void)
{
/* sleep, but enable all interrupts before committing */
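The new arc_usr_cmpxchg syscall gives user space on LLOCK/SCOND-less ARC700 cores a kernel-assisted compare-and-exchange: it returns the old value and reports success through the Z flag in status32. A hedged userspace sketch of a wrapper (not part of the patch; reading the Z flag needs asm glue, so this simplified version infers success from the returned old value; the syscall number comes from the uapi hunk above):

/* Userspace sketch, assumptions noted above. */
#include <unistd.h>
#include <sys/syscall.h>

static int arc_cmpxchg(int *uaddr, int expected, int newval)
{
	/* returns the old value at *uaddr, or -EFAULT on a bad address */
	return syscall(__NR_arc_usr_cmpxchg, uaddr, expected, newval);
}
/* usage: the exchange succeeded iff arc_cmpxchg(p, e, n) == e */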
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 3df7f9c72f42..0385df77a697 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -40,6 +40,29 @@ struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
+static const struct id_to_str arc_cpu_rel[] = {
+#ifdef CONFIG_ISA_ARCOMPACT
+ { 0x34, "R4.10"},
+ { 0x35, "R4.11"},
+#else
+ { 0x51, "R2.0" },
+ { 0x52, "R2.1" },
+ { 0x53, "R3.0" },
+#endif
+ { 0x00, NULL }
+};
+
+static const struct id_to_str arc_cpu_nm[] = {
+#ifdef CONFIG_ISA_ARCOMPACT
+ { 0x20, "ARC 600" },
+ { 0x30, "ARC 770" }, /* 750 identified separately */
+#else
+ { 0x40, "ARC EM" },
+ { 0x50, "ARC HS38" },
+#endif
+ { 0x00, "Unknown" }
+};
+
static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
{
if (is_isa_arcompact()) {
@@ -92,11 +115,26 @@ static void read_arc_build_cfg_regs(void)
struct bcr_timer timer;
struct bcr_generic bcr;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+ const struct id_to_str *tbl;
+
FIX_PTR(cpu);
READ_BCR(AUX_IDENTITY, cpu->core);
READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
+ for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
+ if (cpu->core.family == tbl->id) {
+ cpu->details = tbl->str;
+ break;
+ }
+ }
+
+ for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
+ if ((cpu->core.family & 0xF0) == tbl->id)
+ break;
+ }
+ cpu->name = tbl->str;
+
READ_BCR(ARC_REG_TIMERS_BCR, timer);
cpu->extn.timer0 = timer.t0;
cpu->extn.timer1 = timer.t1;
@@ -111,6 +149,9 @@ static void read_arc_build_cfg_regs(void)
cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */
cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
+ cpu->extn.swape = (cpu->core.family >= 0x34) ? 1 :
+ IS_ENABLED(CONFIG_ARC_HAS_SWAPE);
+
READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
/* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
@@ -160,64 +201,38 @@ static void read_arc_build_cfg_regs(void)
cpu->extn.rtt = bcr.ver ? 1 : 0;
cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
-}
-static const struct cpuinfo_data arc_cpu_tbl[] = {
-#ifdef CONFIG_ISA_ARCOMPACT
- { {0x20, "ARC 600" }, 0x2F},
- { {0x30, "ARC 700" }, 0x33},
- { {0x34, "ARC 700 R4.10"}, 0x34},
- { {0x35, "ARC 700 R4.11"}, 0x35},
-#else
- { {0x50, "ARC HS38 R2.0"}, 0x51},
- { {0x52, "ARC HS38 R2.1"}, 0x52},
- { {0x53, "ARC HS38 R3.0"}, 0x53},
-#endif
- { {0x00, NULL } }
-};
+ /* some hacks for lack of feature BCR info in old ARC700 cores */
+ if (is_isa_arcompact()) {
+ if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
+ cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
+ else
+ cpu->isa.atomic = cpu->isa.atomic1;
+ cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+
+ /* there's no direct way to distinguish 750 vs. 770 */
+ if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
+ cpu->name = "ARC750";
+ }
+}
static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
struct bcr_identity *core = &cpu->core;
- const struct cpuinfo_data *tbl;
- char *isa_nm;
- int i, be, atomic;
- int n = 0;
+ int i, n = 0;
FIX_PTR(cpu);
- if (is_isa_arcompact()) {
- isa_nm = "ARCompact";
- be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
-
- atomic = cpu->isa.atomic1;
- if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
- atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
- } else {
- isa_nm = "ARCv2";
- be = cpu->isa.be;
- atomic = cpu->isa.atomic;
- }
-
n += scnprintf(buf + n, len - n,
"\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
core->family, core->cpu_id, core->chip_id);
- for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
- if ((core->family >= tbl->info.id) &&
- (core->family <= tbl->up_range)) {
- n += scnprintf(buf + n, len - n,
- "processor [%d]\t: %s (%s ISA) %s\n",
- cpu_id, tbl->info.str, isa_nm,
- IS_AVAIL1(be, "[Big-Endian]"));
- break;
- }
- }
-
- if (tbl->info.id == 0)
- n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
+ n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n",
+ cpu_id, cpu->name, cpu->details,
+ is_isa_arcompact() ? "ARCompact" : "ARCv2",
+ IS_AVAIL1(cpu->isa.be, "[Big-Endian]"));
n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
@@ -226,7 +241,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
CONFIG_ARC_HAS_RTC));
n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
- IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
+ IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
IS_AVAIL1(cpu->isa.unalign, "unalign (not used)"));
@@ -253,7 +268,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
IS_AVAIL1(cpu->extn.swap, "swap "),
IS_AVAIL1(cpu->extn.minmax, "minmax "),
IS_AVAIL1(cpu->extn.crc, "crc "),
- IS_AVAIL2(1, "swape", CONFIG_ARC_HAS_SWAPE));
+ IS_AVAIL2(cpu->extn.swape, "swape", CONFIG_ARC_HAS_SWAPE));
if (cpu->bpu.ver)
n += scnprintf(buf + n, len - n,
@@ -272,9 +287,7 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
FIX_PTR(cpu);
- n += scnprintf(buf + n, len - n,
- "Vector Table\t: %#x\nPeripherals\t: %#lx:%#lx\n",
- cpu->vec_base, perip_base, perip_end);
+ n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\n", cpu->vec_base);
if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
@@ -507,7 +520,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
* way to pass it w/o having to kmalloc/free a 2 byte string.
* Encode cpu-id as 0xFFcccc, which is decoded by show routine.
*/
- return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL;
+ return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index f183cc648851..88674d972c9d 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -22,6 +22,7 @@
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
+#include <linux/irqdomain.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>
@@ -67,11 +68,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
int i;
/*
- * Initialise the present map, which describes the set of CPUs
- * actually populated at the present time.
+ * If the platform didn't set the present map already, do it now;
+ * the boot cpu is already marked present by init/main.c.
*/
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
+ if (num_present_cpus() <= 1) {
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
+ }
}
void __init smp_cpus_done(unsigned int max_cpus)
@@ -351,20 +354,24 @@ irqreturn_t do_IPI(int irq, void *dev_id)
*/
static DEFINE_PER_CPU(int, ipi_dev);
-int smp_ipi_irq_setup(int cpu, int irq)
+int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
{
int *dev = per_cpu_ptr(&ipi_dev, cpu);
+ unsigned int virq = irq_find_mapping(NULL, hwirq);
+
+ if (!virq)
+ panic("Cannot find virq for root domain and hwirq=%lu", hwirq);
/* Boot cpu calls request, all call enable */
if (!cpu) {
int rc;
- rc = request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev);
+ rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
if (rc)
- panic("Percpu IRQ request failed for %d\n", irq);
+ panic("Percpu IRQ request failed for %u\n", virq);
}
- enable_percpu_irq(irq, 0);
+ enable_percpu_irq(virq, 0);
return 0;
}
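smp_ipi_irq_setup() now takes a hardware IRQ number and resolves the Linux virq itself through the root IRQ domain (irq_find_mapping(NULL, hwirq)), so platform callers keep passing raw lines. A sketch of the calling convention after this change (IPI_IRQ is the MCIP value used in mcip.c earlier in this diff):

/* Sketch: platform side after this change. */
static void plat_ipi_init(int cpu)
{
	smp_ipi_irq_setup(cpu, IPI_IRQ);	/* pass the hwirq; the core maps it */
}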
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index f927b8dc6edd..c10390d1ddb6 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -152,14 +152,17 @@ static cycle_t arc_read_rtc(struct clocksource *cs)
cycle_t full;
} stamp;
-
- __asm__ __volatile(
- "1: \n"
- " lr %0, [AUX_RTC_LOW] \n"
- " lr %1, [AUX_RTC_HIGH] \n"
- " lr %2, [AUX_RTC_CTRL] \n"
- " bbit0.nt %2, 31, 1b \n"
- : "=r" (stamp.low), "=r" (stamp.high), "=r" (status));
+ /*
+ * hardware has an internal state machine which tracks readout of
+ * low/high and updates the CTRL.status if
+ * - interrupt/exception taken between the two reads
+ * - high increments after low has been read
+ */
+ do {
+ stamp.low = read_aux_reg(AUX_RTC_LOW);
+ stamp.high = read_aux_reg(AUX_RTC_HIGH);
+ status = read_aux_reg(AUX_RTC_CTRL);
+ } while (!(status & _BITUL(31)));
return stamp.full;
}
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 934150e7ac48..82f9bc819f4a 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -237,113 +237,3 @@ void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
if (!user_mode(regs))
show_stacktrace(current, regs);
}
-
-#ifdef CONFIG_DEBUG_FS
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/pagemap.h>
-#include <linux/init.h>
-#include <linux/namei.h>
-#include <linux/debugfs.h>
-
-static struct dentry *test_dentry;
-static struct dentry *test_dir;
-static struct dentry *test_u32_dentry;
-
-static u32 clr_on_read = 1;
-
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
-u32 numitlb, numdtlb, num_pte_not_present;
-
-static int fill_display_data(char *kbuf)
-{
- size_t num = 0;
- num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb);
- num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb);
- num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present);
-
- if (clr_on_read)
- numitlb = numdtlb = num_pte_not_present = 0;
-
- return num;
-}
-
-static int tlb_stats_open(struct inode *inode, struct file *file)
-{
- file->private_data = (void *)__get_free_page(GFP_KERNEL);
- return 0;
-}
-
-/* called on user read(): display the counters */
-static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
- char __user *user_buf, /* user buffer */
- size_t len, /* length of buffer */
- loff_t *offset) /* offset in the file */
-{
- size_t num;
- char *kbuf = (char *)file->private_data;
-
- /* All of the data can he shoved in one iteration */
- if (*offset != 0)
- return 0;
-
- num = fill_display_data(kbuf);
-
- /* simple_read_from_buffer() is helper for copy to user space
- It copies up to @2 (num) bytes from kernel buffer @4 (kbuf) at offset
- @3 (offset) into the user space address starting at @1 (user_buf).
- @5 (len) is max size of user buffer
- */
- return simple_read_from_buffer(user_buf, num, offset, kbuf, len);
-}
-
-/* called on user write : clears the counters */
-static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf,
- size_t length, loff_t *offset)
-{
- numitlb = numdtlb = num_pte_not_present = 0;
- return length;
-}
-
-static int tlb_stats_close(struct inode *inode, struct file *file)
-{
- free_page((unsigned long)(file->private_data));
- return 0;
-}
-
-static const struct file_operations tlb_stats_file_ops = {
- .read = tlb_stats_output,
- .write = tlb_stats_clear,
- .open = tlb_stats_open,
- .release = tlb_stats_close
-};
-#endif
-
-static int __init arc_debugfs_init(void)
-{
- test_dir = debugfs_create_dir("arc", NULL);
-
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
- test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL,
- &tlb_stats_file_ops);
-#endif
-
- test_u32_dentry =
- debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read);
-
- return 0;
-}
-
-module_init(arc_debugfs_init);
-
-static void __exit arc_debugfs_exit(void)
-{
- debugfs_remove(test_u32_dentry);
- debugfs_remove(test_dentry);
- debugfs_remove(test_dir);
-}
-module_exit(arc_debugfs_exit);
-
-#endif
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 97dddbefb86a..2b96cfc3be75 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -22,8 +22,8 @@
#include <asm/setup.h>
static int l2_line_sz;
-int ioc_exists;
-volatile int slc_enable = 1, ioc_enable = 1;
+static int ioc_exists;
+int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
@@ -53,18 +53,15 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
- if (!is_isa_arcv2())
- return buf;
-
p = &cpuinfo_arc700[c].slc;
if (p->ver)
n += scnprintf(buf + n, len - n,
"SLC\t\t: %uK, %uB Line%s\n",
p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
- if (ioc_exists)
- n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n",
- IS_DISABLED_RUN(ioc_enable));
+ n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
+ perip_base,
+ IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
return buf;
}
@@ -113,8 +110,10 @@ static void read_decode_cache_bcr_arcv2(int cpu)
}
READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
- if (cbcr.c && ioc_enable)
+ if (cbcr.c)
ioc_exists = 1;
+ else
+ ioc_enable = 0;
/* HS 2.0 didn't have AUX_VOL */
if (cpuinfo_arc700[cpu].core.family > 0x51) {
@@ -1002,7 +1001,7 @@ void arc_cache_init(void)
read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
}
- if (is_isa_arcv2() && ioc_exists) {
+ if (is_isa_arcv2() && ioc_enable) {
/* IO coherency base - 0x8z */
write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
/* IO coherency aperture size - 512Mb: 0x8z-0xAz */
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 20afc65e22dc..cd8aad8226dd 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -45,7 +45,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
* -For coherent data, Read/Write to buffers terminate early in cache
* (vs. always going to memory - thus are faster)
*/
- if ((is_isa_arcv2() && ioc_exists) ||
+ if ((is_isa_arcv2() && ioc_enable) ||
(attrs & DMA_ATTR_NON_CONSISTENT))
need_coh = 0;
@@ -97,7 +97,7 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
int is_non_coh = 1;
is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
- (is_isa_arcv2() && ioc_exists);
+ (is_isa_arcv2() && ioc_enable);
if (PageHighMem(page) || !is_non_coh)
iounmap((void __force __iomem *)vaddr);
@@ -105,6 +105,31 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
__free_pages(page, get_order(size));
}
+static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ unsigned long user_count = vma_pages(vma);
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
+ unsigned long off = vma->vm_pgoff;
+ int ret = -ENXIO;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+ if (off < count && user_count <= (count - off)) {
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + off,
+ user_count << PAGE_SHIFT,
+ vma->vm_page_prot);
+ }
+
+ return ret;
+}
+
/*
* streaming DMA Mapping API...
* CPU accesses page via normal paddr, thus needs to explicitly made
@@ -193,6 +218,7 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask)
struct dma_map_ops arc_dma_ops = {
.alloc = arc_dma_alloc,
.free = arc_dma_free,
+ .mmap = arc_dma_mmap,
.map_page = arc_dma_map_page,
.map_sg = arc_dma_map_sg,
.sync_single_for_device = arc_dma_sync_single_for_device,
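With a .mmap method wired into arc_dma_ops, drivers can hand coherent buffers to user space through the standard helper, which dispatches to the new arc_dma_mmap(). A driver-side sketch (the context struct and file op are hypothetical; dma_mmap_coherent() is the stock dma-mapping API):

/* Driver-side sketch, not part of the patch. */
#include <linux/dma-mapping.h>
#include <linux/fs.h>

struct my_ctx {			/* hypothetical driver context */
	struct device *dev;
	void *cpu_addr;
	dma_addr_t dma_addr;
	size_t size;
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_ctx *ctx = file->private_data;

	return dma_mmap_coherent(ctx->dev, vma, ctx->cpu_addr,
				 ctx->dma_addr, ctx->size);
}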
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index ec868a9081a1..bdb295e09160 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -793,16 +793,16 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
char super_pg[64] = "";
if (p_mmu->s_pg_sz_m)
- scnprintf(super_pg, 64, "%dM Super Page%s, ",
+ scnprintf(super_pg, 64, "%dM Super Page %s",
p_mmu->s_pg_sz_m,
IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
n += scnprintf(buf + n, len - n,
- "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s%s\n",
+ "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
p_mmu->ver, p_mmu->pg_sz_k, super_pg,
p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
p_mmu->u_dtlb, p_mmu->u_itlb,
- IS_AVAIL2(p_mmu->pae, "PAE40 ", CONFIG_ARC_HAS_PAE40));
+ IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
return buf;
}
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index f1967eeb32e7..b30e4e36bb00 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -237,15 +237,6 @@ ex_saved_reg1:
2:
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
- and.f 0, r0, _PAGE_PRESENT
- bz 1f
- ld r3, [num_pte_not_present]
- add r3, r3, 1
- st r3, [num_pte_not_present]
-1:
-#endif
-
.endm
;-----------------------------------------------------------------
@@ -309,12 +300,6 @@ ENTRY(EV_TLBMissI)
TLBMISS_FREEUP_REGS
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
- ld r0, [@numitlb]
- add r0, r0, 1
- st r0, [@numitlb]
-#endif
-
;----------------------------------------------------------------
; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA
LOAD_FAULT_PTE
@@ -349,12 +334,6 @@ ENTRY(EV_TLBMissD)
TLBMISS_FREEUP_REGS
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
- ld r0, [@numdtlb]
- add r0, r0, 1
- st r0, [@numdtlb]
-#endif
-
;----------------------------------------------------------------
; Get the PTE corresponding to V-addr accessed
; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA
diff --git a/arch/arc/plat-eznps/smp.c b/arch/arc/plat-eznps/smp.c
index 5e901f86e4bd..56a4c8522f11 100644
--- a/arch/arc/plat-eznps/smp.c
+++ b/arch/arc/plat-eznps/smp.c
@@ -140,16 +140,10 @@ static void eznps_init_per_cpu(int cpu)
mtm_enable_core(cpu);
}
-static void eznps_ipi_clear(int irq)
-{
- write_aux_reg(CTOP_AUX_IACK, 1 << irq);
-}
-
struct plat_smp_ops plat_smp_ops = {
.info = smp_cpuinfo_buf,
.init_early_smp = eznps_init_cpumasks,
.cpu_kick = eznps_smp_wakeup_cpu,
.ipi_send = eznps_ipi_send,
.init_per_cpu = eznps_init_per_cpu,
- .ipi_clear = eznps_ipi_clear,
};
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index b3df1c60d465..386eee6de232 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -239,14 +239,25 @@
arm,primecell-periphid = <0x10480180>;
max-frequency = <100000000>;
bus-width = <4>;
+ cap-sd-highspeed;
cap-mmc-highspeed;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ /* All direction control is used */
+ st,sig-dir-cmd;
+ st,sig-dir-dat0;
+ st,sig-dir-dat2;
+ st,sig-dir-dat31;
+ st,sig-pin-fbclk;
+ full-pwr-cycle;
vmmc-supply = <&ab8500_ldo_aux3_reg>;
vqmmc-supply = <&vmmci>;
pinctrl-names = "default", "sleep";
pinctrl-0 = <&sdi0_default_mode>;
pinctrl-1 = <&sdi0_sleep_mode>;
- cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>; // 218
+ /* GPIO218 MMC_CD */
+ cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>;
status = "okay";
};
@@ -549,7 +560,7 @@
/* VMMCI level-shifter enable */
snowball_cfg3 {
pins = "GPIO217_AH12";
- ste,config = <&gpio_out_lo>;
+ ste,config = <&gpio_out_hi>;
};
/* VMMCI level-shifter voltage select */
snowball_cfg4 {
diff --git a/arch/arm/boot/dts/uniphier-pro5.dtsi b/arch/arm/boot/dts/uniphier-pro5.dtsi
index 2c49c3614bda..5357ea9c14b1 100644
--- a/arch/arm/boot/dts/uniphier-pro5.dtsi
+++ b/arch/arm/boot/dts/uniphier-pro5.dtsi
@@ -184,11 +184,11 @@
};
&mio_clk {
- compatible = "socionext,uniphier-pro5-mio-clock";
+ compatible = "socionext,uniphier-pro5-sd-clock";
};
&mio_rst {
- compatible = "socionext,uniphier-pro5-mio-reset";
+ compatible = "socionext,uniphier-pro5-sd-reset";
};
&peri_clk {
diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
index 8789cd518933..950f07ba0337 100644
--- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
+++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
@@ -197,11 +197,11 @@
};
&mio_clk {
- compatible = "socionext,uniphier-pxs2-mio-clock";
+ compatible = "socionext,uniphier-pxs2-sd-clock";
};
&mio_rst {
- compatible = "socionext,uniphier-pxs2-mio-reset";
+ compatible = "socionext,uniphier-pxs2-sd-reset";
};
&peri_clk {
diff --git a/arch/arm/boot/dts/vf500.dtsi b/arch/arm/boot/dts/vf500.dtsi
index a3824e61bd72..d7fdb2a7d97b 100644
--- a/arch/arm/boot/dts/vf500.dtsi
+++ b/arch/arm/boot/dts/vf500.dtsi
@@ -70,7 +70,7 @@
global_timer: timer@40002200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0x40002200 0x20>;
- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
interrupt-parent = <&intc>;
clocks = <&clks VF610_CLK_PLATFORM_BUS>;
};
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 437d0740dec6..11f37ed1dbff 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -850,6 +850,7 @@ CONFIG_PWM_SUN4I=y
CONFIG_PWM_TEGRA=y
CONFIG_PWM_VT8500=y
CONFIG_PHY_HIX5HD2_SATA=y
+CONFIG_E1000E=y
CONFIG_PWM_STI=y
CONFIG_PWM_BCM2835=y
CONFIG_PWM_BRCMSTB=m
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index d7ea6bcb29bf..8ef05381984b 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -66,6 +66,7 @@ extern char __kvm_hyp_vector[];
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 2d19e02d03fd..d5423ab15ed5 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -57,6 +57,9 @@ struct kvm_arch {
/* VTTBR value associated with below pgd and vmid */
u64 vttbr;
+ /* The last vcpu id that ran on each physical CPU */
+ int __percpu *last_vcpu_ran;
+
/* Timer */
struct arch_timer_kvm timer;
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 343135ede5fa..58508900c4bb 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -71,6 +71,7 @@
#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
+#define TLBIALL __ACCESS_CP15(c8, 0, c7, 0)
#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4)
#define PRRR __ACCESS_CP15(c10, 0, c2, 0)
#define NMRR __ACCESS_CP15(c10, 0, c2, 1)
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 194b69923389..ada0d29a660f 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -19,7 +19,7 @@
* This may need to be greater than __NR_last_syscall+1 in order to
* account for the padding in the syscall table
*/
-#define __NR_syscalls (396)
+#define __NR_syscalls (400)
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 2cb9dc770e1d..314100a06ccb 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -420,6 +420,9 @@
#define __NR_copy_file_range (__NR_SYSCALL_BASE+391)
#define __NR_preadv2 (__NR_SYSCALL_BASE+392)
#define __NR_pwritev2 (__NR_SYSCALL_BASE+393)
+#define __NR_pkey_mprotect (__NR_SYSCALL_BASE+394)
+#define __NR_pkey_alloc (__NR_SYSCALL_BASE+395)
+#define __NR_pkey_free (__NR_SYSCALL_BASE+396)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 703fa0f3cd8f..08030b18f10a 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -403,6 +403,9 @@
CALL(sys_copy_file_range)
CALL(sys_preadv2)
CALL(sys_pwritev2)
+ CALL(sys_pkey_mprotect)
+/* 395 */ CALL(sys_pkey_alloc)
+ CALL(sys_pkey_free)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
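The table gains three entries (394-396), so it now holds 397 calls; the syscalls_padding expression rounds the table up to a multiple of four, which is why __NR_syscalls in asm/unistd.h moves to 400 rather than 397. Worked out with the values from these hunks:

/* Illustration of the rounding done by syscalls_padding above. */
#define LAST	396			/* __NR_pkey_free */
#define ENTRIES	(LAST + 1)		/* 397 table slots */
#define PADDED	((ENTRIES + 3) & ~3)	/* = 400, the new __NR_syscalls */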
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 08bb84f2ad58..19b5f5c1c0ff 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -114,11 +114,18 @@ void kvm_arch_check_processor_compat(void *rtn)
*/
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
- int ret = 0;
+ int ret, cpu;
if (type)
return -EINVAL;
+ kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
+ if (!kvm->arch.last_vcpu_ran)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
+
ret = kvm_alloc_stage2_pgd(kvm);
if (ret)
goto out_fail_alloc;
@@ -141,6 +148,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
out_free_stage2_pgd:
kvm_free_stage2_pgd(kvm);
out_fail_alloc:
+ free_percpu(kvm->arch.last_vcpu_ran);
+ kvm->arch.last_vcpu_ran = NULL;
return ret;
}
@@ -168,6 +177,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
{
int i;
+ free_percpu(kvm->arch.last_vcpu_ran);
+ kvm->arch.last_vcpu_ran = NULL;
+
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -312,6 +324,19 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
+ int *last_ran;
+
+ last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+
+ /*
+ * We might get preempted before the vCPU actually runs, but
+ * over-invalidation doesn't affect correctness.
+ */
+ if (*last_ran != vcpu->vcpu_id) {
+ kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+ *last_ran = vcpu->vcpu_id;
+ }
+
vcpu->cpu = cpu;
vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
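The load hook above flushes the local stage-2 TLB whenever a physical CPU switches between vCPUs of the same VM: entries are tagged only by VMID, which all vCPUs of a VM share, so a translation inserted by one vCPU could otherwise be hit by another. A sketch of the invariant being maintained (the helper is illustrative, not in the patch):

/* True when this physical CPU last ran a different vCPU of this VM,
 * i.e. its TLB may hold entries made under the shared VMID by another
 * vCPU. Spurious flushes (e.g. after preemption) are harmless. */
static bool need_local_tlb_flush(int last_vcpu_ran, int vcpu_id)
{
	return last_vcpu_ran != vcpu_id;	/* the -1 initial value always flushes */
}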
diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c
index 729652854f90..6d810af2d9fd 100644
--- a/arch/arm/kvm/hyp/tlb.c
+++ b/arch/arm/kvm/hyp/tlb.c
@@ -55,6 +55,21 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
__kvm_tlb_flush_vmid(kvm);
}
+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+ /* Switch to requested VMID */
+ write_sysreg(kvm->arch.vttbr, VTTBR);
+ isb();
+
+ write_sysreg(0, TLBIALL);
+ dsb(nsh);
+ isb();
+
+ write_sysreg(0, VTTBR);
+}
+
void __hyp_text __kvm_flush_vm_context(void)
{
write_sysreg(0, TLBIALLNSNHIS);
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 0df062d8b2c9..b54db47f6f32 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -408,7 +408,7 @@ static struct genpd_onecell_data imx_gpc_onecell_data = {
static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
{
struct clk *clk;
- int i;
+ int i, ret;
imx6q_pu_domain.reg = pu_reg;
@@ -430,13 +430,22 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
return 0;
- pm_genpd_init(&imx6q_pu_domain.base, NULL, false);
- return of_genpd_add_provider_onecell(dev->of_node,
+ for (i = 0; i < ARRAY_SIZE(imx_gpc_domains); i++)
+ pm_genpd_init(imx_gpc_domains[i], NULL, false);
+
+ ret = of_genpd_add_provider_onecell(dev->of_node,
&imx_gpc_onecell_data);
+ if (ret)
+ goto power_off;
+
+ return 0;
+power_off:
+ imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
clk_err:
while (i--)
clk_put(imx6q_pu_domain.clk[i]);
+ imx6q_pu_domain.reg = NULL;
return -EINVAL;
}
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 97fd25105e2c..45801b27ee5c 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -173,7 +173,7 @@ static void __init imx6q_enet_phy_init(void)
ksz9021rn_phy_fixup);
phy_register_fixup_for_uid(PHY_ID_KSZ9031, MICREL_PHY_ID_MASK,
ksz9031rn_phy_fixup);
- phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffff,
+ phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffef,
ar8031_phy_fixup);
phy_register_fixup_for_uid(PHY_ID_AR8035, 0xffffffef,
ar8035_phy_fixup);
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index f9b6bd306cfe..541647f57192 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -23,6 +23,7 @@ config MACH_MVEBU_V7
select CACHE_L2X0
select ARM_CPU_SUSPEND
select MACH_MVEBU_ANY
+ select MVEBU_CLK_COREDIV
config MACH_ARMADA_370
bool "Marvell Armada 370 boards"
@@ -32,7 +33,6 @@ config MACH_ARMADA_370
select CPU_PJ4B
select MACH_MVEBU_V7
select PINCTRL_ARMADA_370
- select MVEBU_CLK_COREDIV
help
Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 370 SoC with device tree.
@@ -50,7 +50,6 @@ config MACH_ARMADA_375
select HAVE_SMP
select MACH_MVEBU_V7
select PINCTRL_ARMADA_375
- select MVEBU_CLK_COREDIV
help
Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 375 SoC with device tree.
@@ -68,7 +67,6 @@ config MACH_ARMADA_38X
select HAVE_SMP
select MACH_MVEBU_V7
select PINCTRL_ARMADA_38X
- select MVEBU_CLK_COREDIV
help
Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 380/385 SoC with device tree.
diff --git a/arch/arm/mach-uniphier/Kconfig b/arch/arm/mach-uniphier/Kconfig
index 82dddee3a469..3930fbba30b4 100644
--- a/arch/arm/mach-uniphier/Kconfig
+++ b/arch/arm/mach-uniphier/Kconfig
@@ -1,6 +1,7 @@
config ARCH_UNIPHIER
bool "Socionext UniPhier SoCs"
depends on ARCH_MULTI_V7
+ select ARCH_HAS_RESET_CONTROLLER
select ARM_AMBA
select ARM_GLOBAL_TIMER
select ARM_GIC
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index 6d8e8e3365d1..4cdfab31a0b6 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -7,7 +7,7 @@
* : r4 = aborted context pc
* : r5 = aborted context psr
*
- * Returns : r4-r5, r10-r11, r13 preserved
+ * Returns : r4-r5, r9-r11, r13 preserved
*
* Purpose : obtain information about current aborted instruction.
* Note: we read user space. This means we might cause a data
@@ -48,7 +48,10 @@ ENTRY(v4t_late_abort)
/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
/* d */ b do_DataAbort @ ldc rd, [rn, #m]
/* e */ b .data_unknown
-/* f */
+/* f */ b .data_unknown
+
+.data_unknown_r9:
+ ldr r9, [sp], #4
.data_unknown: @ Part of jumptable
mov r0, r4
mov r1, r8
@@ -57,6 +60,7 @@ ENTRY(v4t_late_abort)
.data_arm_ldmstm:
tst r8, #1 << 21 @ check writeback bit
beq do_DataAbort @ no writeback -> no fixup
+ str r9, [sp, #-4]!
mov r7, #0x11
orr r7, r7, #0x1100
and r6, r8, r7
@@ -75,12 +79,14 @@ ENTRY(v4t_late_abort)
subne r7, r7, r6, lsl #2 @ Undo increment
addeq r7, r7, r6, lsl #2 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrhpre:
tst r8, #1 << 21 @ Check writeback bit
beq do_DataAbort @ No writeback -> no fixup
.data_arm_lateldrhpost:
+ str r9, [sp, #-4]!
and r9, r8, #0x00f @ get Rm / low nibble of immediate value
tst r8, #1 << 22 @ if (immediate offset)
andne r6, r8, #0xf00 @ { immediate high nibble
@@ -93,6 +99,7 @@ ENTRY(v4t_late_abort)
subne	r7, r7, r6		@ Undo increment
addeq r7, r7, r6 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrpreconst:
@@ -101,12 +108,14 @@ ENTRY(v4t_late_abort)
.data_arm_lateldrpostconst:
movs r6, r8, lsl #20 @ Get offset
beq do_DataAbort @ zero -> no fixup
+ str r9, [sp, #-4]!
and r9, r8, #15 << 16 @ Extract 'n' from instruction
ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
tst r8, #1 << 23 @ Check U bit
subne r7, r7, r6, lsr #20 @ Undo increment
addeq r7, r7, r6, lsr #20 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrprereg:
@@ -115,6 +124,7 @@ ENTRY(v4t_late_abort)
.data_arm_lateldrpostreg:
and r7, r8, #15 @ Extract 'm' from instruction
ldr r6, [r2, r7, lsl #2] @ Get register 'Rm'
+ str r9, [sp, #-4]!
mov r9, r8, lsr #7 @ get shift count
ands r9, r9, #31
and r7, r8, #0x70 @ get shift type
@@ -126,33 +136,33 @@ ENTRY(v4t_late_abort)
b .data_arm_apply_r6_and_rn
b .data_arm_apply_r6_and_rn @ 1: LSL #0
nop
- b .data_unknown @ 2: MUL?
+ b .data_unknown_r9 @ 2: MUL?
nop
- b .data_unknown @ 3: MUL?
+ b .data_unknown_r9 @ 3: MUL?
nop
mov r6, r6, lsr r9 @ 4: LSR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, lsr #32 @ 5: LSR #32
b .data_arm_apply_r6_and_rn
- b .data_unknown @ 6: MUL?
+ b .data_unknown_r9 @ 6: MUL?
nop
- b .data_unknown @ 7: MUL?
+ b .data_unknown_r9 @ 7: MUL?
nop
mov r6, r6, asr r9 @ 8: ASR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, asr #32 @ 9: ASR #32
b .data_arm_apply_r6_and_rn
- b .data_unknown @ A: MUL?
+ b .data_unknown_r9 @ A: MUL?
nop
- b .data_unknown @ B: MUL?
+ b .data_unknown_r9 @ B: MUL?
nop
mov r6, r6, ror r9 @ C: ROR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, rrx @ D: RRX
b .data_arm_apply_r6_and_rn
- b .data_unknown @ E: MUL?
+ b .data_unknown_r9 @ E: MUL?
nop
- b .data_unknown @ F: MUL?
+ b .data_unknown_r9 @ F: MUL?
.data_thumb_abort:
ldrh r8, [r4] @ read instruction
@@ -190,6 +200,7 @@ ENTRY(v4t_late_abort)
.data_thumb_pushpop:
tst r8, #1 << 10
beq .data_unknown
+ str r9, [sp, #-4]!
and r6, r8, #0x55 @ hweight8(r8) + R bit
and r9, r8, #0xaa
add r6, r6, r9, lsr #1
@@ -204,9 +215,11 @@ ENTRY(v4t_late_abort)
addeq r7, r7, r6, lsl #2 @ increment SP if PUSH
subne r7, r7, r6, lsl #2 @ decrement SP if POP
str r7, [r2, #13 << 2]
+ ldr r9, [sp], #4
b do_DataAbort
.data_thumb_ldmstm:
+ str r9, [sp, #-4]!
and r6, r8, #0x55 @ hweight8(r8)
and r9, r8, #0xaa
add r6, r6, r9, lsr #1
@@ -219,4 +232,5 @@ ENTRY(v4t_late_abort)
and r6, r6, #15 @ number of regs to transfer
sub r7, r7, r6, lsl #2 @ always decrement
str r7, [r2, r9, lsr #6]
+ ldr r9, [sp], #4
b do_DataAbort
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index cfbdf02ef566..101794f5ce10 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -190,6 +190,7 @@ config ARCH_THUNDER
config ARCH_UNIPHIER
bool "Socionext UniPhier SoC Family"
+ select ARCH_HAS_RESET_CONTROLLER
select PINCTRL
help
This enables support for Socionext UniPhier SoC family.
diff --git a/arch/arm64/boot/dts/broadcom/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
index 2d7872a36b91..b09f3bc5c6c1 100644
--- a/arch/arm64/boot/dts/broadcom/ns2-svk.dts
+++ b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
@@ -164,6 +164,8 @@
nand-ecc-mode = "hw";
nand-ecc-strength = <8>;
nand-ecc-step-size = <512>;
+ nand-bus-width = <16>;
+ brcm,nand-oob-sector-size = <16>;
#address-cells = <1>;
#size-cells = <1>;
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 220ac7057d12..97d331ec2500 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -123,6 +123,7 @@
<1 14 0xf08>, /* Physical Non-Secure PPI */
<1 11 0xf08>, /* Virtual PPI */
<1 10 0xf08>; /* Hypervisor PPI */
+ fsl,erratum-a008585;
};
pmu {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index 337da90bd7da..7f0dc13b4087 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -195,6 +195,7 @@
<1 14 4>, /* Physical Non-Secure PPI, active-low */
<1 11 4>, /* Virtual PPI, active-low */
<1 10 4>; /* Hypervisor PPI, active-low */
+ fsl,erratum-a008585;
};
pmu {
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index e5e3ed678b6f..602e2c2e9a4d 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -131,7 +131,7 @@
#address-cells = <0x1>;
#size-cells = <0x0>;
cell-index = <1>;
- clocks = <&cpm_syscon0 0 3>;
+ clocks = <&cpm_syscon0 1 21>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
index 46cdddfcea6c..e5eeca2c2456 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
@@ -116,7 +116,6 @@
cap-mmc-highspeed;
clock-frequency = <150000000>;
disable-wp;
- keep-power-in-suspend;
non-removable;
num-slots = <1>;
vmmc-supply = <&vcc_io>;
@@ -258,8 +257,6 @@
};
vcc_sd: SWITCH_REG1 {
- regulator-always-on;
- regulator-boot-on;
regulator-name = "vcc_sd";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
index 5797933ef80e..ea0a8eceefd4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
@@ -152,8 +152,6 @@
gpio = <&gpio3 11 GPIO_ACTIVE_LOW>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
vin-supply = <&vcc_io>;
};
@@ -201,7 +199,6 @@
bus-width = <8>;
cap-mmc-highspeed;
disable-wp;
- keep-power-in-suspend;
mmc-pwrseq = <&emmc_pwrseq>;
mmc-hs200-1_2v;
mmc-hs200-1_8v;
@@ -350,7 +347,6 @@
clock-freq-min-max = <400000 50000000>;
cap-sd-highspeed;
card-detect-delay = <200>;
- keep-power-in-suspend;
num-slots = <1>;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index b65c193dc64e..7afbfb0f96a3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -300,8 +300,11 @@
ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x600000
0x81000000 0x0 0xfa600000 0x0 0xfa600000 0x0 0x100000>;
resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
- <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
- reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+ <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
+ <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>,
+ <&cru SRST_A_PCIE>;
+ reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+ "pm", "pclk", "aclk";
status = "disabled";
pcie0_intc: interrupt-controller {
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
index 08fd7cf7769c..56a1b2e92cf3 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
@@ -257,18 +257,18 @@
reg = <0x59801000 0x400>;
};
- mioctrl@59810000 {
- compatible = "socionext,uniphier-mioctrl",
+ sdctrl@59810000 {
+ compatible = "socionext,uniphier-ld20-sdctrl",
"simple-mfd", "syscon";
reg = <0x59810000 0x800>;
- mio_clk: clock {
- compatible = "socionext,uniphier-ld20-mio-clock";
+ sd_clk: clock {
+ compatible = "socionext,uniphier-ld20-sd-clock";
#clock-cells = <1>;
};
- mio_rst: reset {
- compatible = "socionext,uniphier-ld20-mio-reset";
+ sd_rst: reset {
+ compatible = "socionext,uniphier-ld20-sd-reset";
#reset-cells = <1>;
};
};
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 39feb85a6931..6e1cb8c5af4d 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -1,7 +1,7 @@
#ifndef __ASM_ALTERNATIVE_H
#define __ASM_ALTERNATIVE_H
-#include <asm/cpufeature.h>
+#include <asm/cpucaps.h>
#include <asm/insn.h>
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
new file mode 100644
index 000000000000..87b446535185
--- /dev/null
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -0,0 +1,40 @@
+/*
+ * arch/arm64/include/asm/cpucaps.h
+ *
+ * Copyright (C) 2016 ARM Ltd.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPUCAPS_H
+#define __ASM_CPUCAPS_H
+
+#define ARM64_WORKAROUND_CLEAN_CACHE 0
+#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
+#define ARM64_WORKAROUND_845719 2
+#define ARM64_HAS_SYSREG_GIC_CPUIF 3
+#define ARM64_HAS_PAN 4
+#define ARM64_HAS_LSE_ATOMICS 5
+#define ARM64_WORKAROUND_CAVIUM_23154 6
+#define ARM64_WORKAROUND_834220 7
+#define ARM64_HAS_NO_HW_PREFETCH 8
+#define ARM64_HAS_UAO 9
+#define ARM64_ALT_PAN_NOT_UAO 10
+#define ARM64_HAS_VIRT_HOST_EXTN 11
+#define ARM64_WORKAROUND_CAVIUM_27456 12
+#define ARM64_HAS_32BIT_EL0 13
+#define ARM64_HYP_OFFSET_LOW 14
+#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
+
+#define ARM64_NCAPS 16
+
+#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index a27c3245ba21..0bc0b1de90c4 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -11,6 +11,7 @@
#include <linux/jump_label.h>
+#include <asm/cpucaps.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>
@@ -24,25 +25,6 @@
#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
#define cpu_feature(x) ilog2(HWCAP_ ## x)
-#define ARM64_WORKAROUND_CLEAN_CACHE 0
-#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
-#define ARM64_WORKAROUND_845719 2
-#define ARM64_HAS_SYSREG_GIC_CPUIF 3
-#define ARM64_HAS_PAN 4
-#define ARM64_HAS_LSE_ATOMICS 5
-#define ARM64_WORKAROUND_CAVIUM_23154 6
-#define ARM64_WORKAROUND_834220 7
-#define ARM64_HAS_NO_HW_PREFETCH 8
-#define ARM64_HAS_UAO 9
-#define ARM64_ALT_PAN_NOT_UAO 10
-#define ARM64_HAS_VIRT_HOST_EXTN 11
-#define ARM64_WORKAROUND_CAVIUM_27456 12
-#define ARM64_HAS_32BIT_EL0 13
-#define ARM64_HYP_OFFSET_LOW 14
-#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
-
-#define ARM64_NCAPS 16
-
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 18f746551bf6..ec3553eb9349 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -54,6 +54,7 @@ extern char __kvm_hyp_vector[];
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index bd94e6766759..e5050388e062 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -62,6 +62,9 @@ struct kvm_arch {
/* VTTBR value associated with above pgd and vmid */
u64 vttbr;
+ /* The last vcpu id that ran on each physical CPU */
+ int __percpu *last_vcpu_ran;
+
/* The maximum number of vCPUs depends on the used GIC model */
int max_vcpus;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index a79b969c26fc..6f72fe8b0e3e 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -128,7 +128,7 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
return v;
}
-#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
+#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
/*
* We currently only support a 40bit IPA.
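The one-character kern_hyp_va() change above is a macro-hygiene fix: a cast
binds less tightly than the postfix -> operator, so the unparenthesised form
breaks as soon as a caller writes kern_hyp_va(v)->kvm, as the new local TLB
flush code does. A standalone demo of the failure mode, with hypothetical
CAST_* names:

	#include <stdio.h>

	struct s { int field; };

	/* Without outer parentheses, CAST_BAD(p)->field parses as
	 * (struct s *)((p)->field), which dereferences a void pointer
	 * and fails to compile. The GOOD form casts first. */
	#define CAST_BAD(p)  (struct s *)(p)
	#define CAST_GOOD(p) ((struct s *)(p))

	int main(void)
	{
		struct s obj = { .field = 42 };
		void *p = &obj;

		printf("%d\n", CAST_GOOD(p)->field);	/* 42 */
		/* CAST_BAD(p)->field  -- does not compile */
		return 0;
	}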
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 23acc00be32d..fc756e22c84c 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -5,7 +5,6 @@
#include <linux/stringify.h>
#include <asm/alternative.h>
-#include <asm/cpufeature.h>
#ifdef __ASSEMBLER__
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index ba62df8c6e35..b71086d25195 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -217,7 +217,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else
#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(kaddr) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
+#define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
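The __page_to_voff() fix is the complementary macro bug: the parameter was
declared kaddr but the body used page, so the macro silently captured
whatever variable named page existed at the call site and only worked by
coincidence. A compilable demo of that accidental-capture class of bug,
with hypothetical names:

	#include <stdio.h>

	/* Parameter is 'x', but the body references 'val': the macro
	 * picks up whatever 'val' happens to mean where it expands. */
	#define TWICE_BROKEN(x) ((val) * 2)
	#define TWICE_FIXED(x)  ((x) * 2)

	int main(void)
	{
		int val = 3;
		int other = 10;

		printf("%d\n", TWICE_BROKEN(other));	/* 6, not 20 */
		printf("%d\n", TWICE_FIXED(other));	/* 20 */
		return 0;
	}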
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 9cc0ea784ae6..88e2f2b938f0 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -64,6 +64,21 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
write_sysreg(0, vttbr_el2);
}
+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+ /* Switch to requested VMID */
+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ isb();
+
+ asm volatile("tlbi vmalle1" : : );
+ dsb(nsh);
+ isb();
+
+ write_sysreg(0, vttbr_el2);
+}
+
void __hyp_text __kvm_flush_vm_context(void)
{
dsb(ishst);
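The barrier choreography in the arm64 __kvm_tlb_flush_local_vmid() hunk above
is worth spelling out. The following is the same code with editorial
annotations added, not new logic; dsb(nsh) is the non-shareable variant
because only the local CPU's TLB has to observe completion:

	void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
	{
		struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);

		/* Install the VMID whose entries we want to drop */
		write_sysreg(kvm->arch.vttbr, vttbr_el2);
		isb();			/* VMID switch visible before TLBI */

		asm volatile("tlbi vmalle1" : : );	/* local EL1 TLB, current VMID */
		dsb(nsh);		/* non-shareable: wait locally only */
		isb();			/* no stale translations past here */

		write_sysreg(0, vttbr_el2);	/* restore a safe VMID */
	}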
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 778a985c8a70..4b32168cf91a 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -147,7 +147,7 @@ static int __init early_cpu_to_node(int cpu)
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
- return node_distance(from, to);
+ return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
}
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
@@ -223,8 +223,11 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
void *nd;
int tnid;
- pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
- nid, start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
+ if (start_pfn < end_pfn)
+ pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
+ start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
+ else
+ pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
nd = __va(nd_pa);
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index 099e170a93ee..0068fd411a84 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -3149,7 +3149,7 @@ static void print_dma_descriptors(struct cryptocop_int_operation *iop)
printk("print_dma_descriptors start\n");
printk("iop:\n");
- printk("\tsid: 0x%lld\n", iop->sid);
+ printk("\tsid: 0x%llx\n", iop->sid);
printk("\tcdesc_out: 0x%p\n", iop->cdesc_out);
printk("\tcdesc_in: 0x%p\n", iop->cdesc_in);
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index b408fe660cf8..3cef06875f5c 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -31,7 +31,6 @@ struct thread_info {
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable, <0 => BUG */
mm_segment_t addr_limit;
- struct restart_block restart_block;
};
/*
@@ -44,9 +43,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index ad1f81f574e5..7138303cbbf2 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -79,7 +79,7 @@ restore_sigcontext(struct sigcontext *usc, int *pd0)
unsigned int er0;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/* restore passed registers */
#define COPY(r) do { err |= get_user(regs->r, &usc->sc_##r); } while (0)
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index fbf40d3c8123..1a6bac7b076f 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -263,7 +263,7 @@ KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \
VMLINUX_ENTRY_ADDRESS=$(entry-y) \
- PLATFORM=$(platform-y)
+ PLATFORM="$(platform-y)"
ifdef CONFIG_32BIT
bootvars-y += ADDR_BITS=32
endif
diff --git a/arch/mips/boot/dts/mti/malta.dts b/arch/mips/boot/dts/mti/malta.dts
index f604a272d91d..ffe3a1508e72 100644
--- a/arch/mips/boot/dts/mti/malta.dts
+++ b/arch/mips/boot/dts/mti/malta.dts
@@ -84,12 +84,13 @@
fpga_regs: system-controller@1f000000 {
compatible = "mti,malta-fpga", "syscon", "simple-mfd";
reg = <0x1f000000 0x1000>;
+ native-endian;
reboot {
compatible = "syscon-reboot";
regmap = <&fpga_regs>;
offset = <0x500>;
- mask = <0x4d>;
+ mask = <0x42>;
};
};
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index 0ea73e845440..d493ccbf274a 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -30,9 +30,19 @@ static __initdata const void *mach_match_data;
void __init prom_init(void)
{
+ plat_get_fdt();
+ BUG_ON(!fdt);
+}
+
+void __init *plat_get_fdt(void)
+{
const struct mips_machine *check_mach;
const struct of_device_id *match;
+ if (fdt)
+ /* Already set up */
+ return (void *)fdt;
+
if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_arg1)) {
/*
* We booted using the UHI boot protocol, so we have been
@@ -75,12 +85,6 @@ void __init prom_init(void)
/* Retrieve the machine's FDT */
fdt = mach->fdt;
}
-
- BUG_ON(!fdt);
-}
-
-void __init *plat_get_fdt(void)
-{
return (void *)fdt;
}
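The init.c refactor turns prom_init() into a thin wrapper and makes
plat_get_fdt() an idempotent getter: the early-return guard lets the boot
path and any later caller share a single discovery pass. The shape of the
pattern as a standalone sketch, with hypothetical names (discover_fdt()
stands in for the real UHI/built-in probing):

	#include <stdio.h>

	static const void *fdt;

	/* Hypothetical stand-in for the platform's FDT discovery. */
	static const void *discover_fdt(void)
	{
		static const char blob[] = "fdt";
		return blob;
	}

	void *get_fdt(void)
	{
		if (fdt)
			return (void *)fdt;	/* cached: later calls are free */
		fdt = discover_fdt();
		return (void *)fdt;
	}

	int main(void)
	{
		printf("%p %p\n", get_fdt(), get_fdt());	/* same pointer */
		return 0;
	}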
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 355dc25172e7..c05369e0b8d6 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -63,6 +63,8 @@ do { \
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu,
void *__user *fault_addr);
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+ struct task_struct *tsk);
int process_fpemu_return(int sig, void __user *fault_addr,
unsigned long fcr31);
int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
@@ -81,4 +83,15 @@ static inline void fpu_emulator_init_fpu(void)
set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
}
+/*
+ * Mask the FCSR Cause bits according to the Enable bits, observing
+ * that Unimplemented is always enabled.
+ */
+static inline unsigned long mask_fcr31_x(unsigned long fcr31)
+{
+ return fcr31 & (FPU_CSR_UNI_X |
+ ((fcr31 & FPU_CSR_ALL_E) <<
+ (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E))));
+}
+
#endif /* _ASM_FPU_EMULATOR_H */
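mask_fcr31_x() relies on the FCSR layout placing the Enable field a fixed
distance below the Cause field, so shifting Enable left by
ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E) bits lines the two up; Unimplemented,
a Cause bit with no Enable counterpart, is OR-ed in unconditionally. A
standalone check of the arithmetic -- the mask values below restate the
architectural FCSR layout rather than being taken from this hunk:

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	#define FPU_CSR_ALL_X 0x0003f000UL	/* Cause, incl. Unimplemented */
	#define FPU_CSR_UNI_X 0x00020000UL	/* Cause: Unimplemented */
	#define FPU_CSR_ALL_E 0x00000f80UL	/* Enable */

	static unsigned long mask_fcr31_x(unsigned long fcr31)
	{
		return fcr31 & (FPU_CSR_UNI_X |
				((fcr31 & FPU_CSR_ALL_E) <<
				 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E))));
	}

	int main(void)
	{
		/* Cause = Inexact|Overflow (bits 12,14), Enable = Overflow
		 * only (bit 9): just the Overflow Cause bit survives. */
		unsigned long fcr31 = 0x00005000UL | 0x00000200UL;

		printf("%#lx\n", mask_fcr31_x(fcr31));	/* prints 0x4000 */
		return 0;
	}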
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index ebb5c0f2f90d..c0ae27971e31 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -76,6 +76,22 @@ do { if (cpu_has_rw_llb) { \
} while (0)
/*
 * Check the FCSR for any unmasked exceptions pending, as set with
 * `ptrace'; clear them and send a signal.
+ */
+#define __sanitize_fcr31(next) \
+do { \
+ unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31); \
+ void __user *pc; \
+ \
+ if (unlikely(fcr31)) { \
+ pc = (void __user *)task_pt_regs(next)->cp0_epc; \
+ next->thread.fpu.fcr31 &= ~fcr31; \
+ force_fcr31_sig(fcr31, pc, next); \
+ } \
+} while (0)
+
+/*
* For newly created kernel threads switch_to() will return to
* ret_from_kernel_thread, newly created user threads to ret_from_fork.
* That is, everything following resume() will be skipped for new threads.
@@ -85,6 +101,8 @@ do { if (cpu_has_rw_llb) { \
do { \
__mips_mt_fpaff_switch_to(prev); \
lose_fpu_inatomic(1, prev); \
+ if (tsk_used_math(next)) \
+ __sanitize_fcr31(next); \
if (cpu_has_dsp) { \
__save_dsp(prev); \
__restore_dsp(next); \
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 2a45867d3b4f..a4964c334cab 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -21,6 +21,11 @@ static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
+phys_addr_t __weak mips_cpc_default_phys_base(void)
+{
+ return 0;
+}
+
/**
* mips_cpc_phys_base - retrieve the physical base address of the CPC
*
@@ -43,8 +48,12 @@ static phys_addr_t mips_cpc_phys_base(void)
if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;
- /* Otherwise, give it the default address & enable it */
+ /* Otherwise, use the default address */
cpc_base = mips_cpc_default_phys_base();
+ if (!cpc_base)
+ return cpc_base;
+
+ /* Enable the CPC, mapped at the default address */
write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
return cpc_base;
}
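The new mips_cpc_default_phys_base() stub uses __weak linkage so a platform
can override it with a strong definition while everyone else inherits the
"no CPC" default of 0, which the caller now treats as absent. A standalone
illustration of the mechanism -- the names are hypothetical, and __weak is
the kernel's wrapper for the compiler attribute shown:

	#include <stdio.h>

	#define __weak __attribute__((weak))

	/* Default definition: used only if no strong definition exists
	 * elsewhere in the link. */
	__weak unsigned long platform_base(void)
	{
		return 0;	/* "no device present" */
	}

	/* A platform file may provide a strong override, e.g.
	 *   unsigned long platform_base(void) { return 0x1bde0000UL; }
	 */

	int main(void)
	{
		unsigned long base = platform_base();

		if (!base)
			printf("device absent, skipping setup\n");
		else
			printf("device at %#lx\n", base);
		return 0;
	}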
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 22dedd62818a..bd09853aecdf 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -899,7 +899,7 @@ static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
* mipsr2_decoder: Decode and emulate a MIPS R2 instruction
* @regs: Process register set
* @inst: Instruction to decode and emulate
- * @fcr31: Floating Point Control and Status Register returned
+ * @fcr31: Floating Point Control and Status Register Cause bits returned
*/
int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
{
@@ -1172,13 +1172,13 @@ fpu_emul:
err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
&fault_addr);
- *fcr31 = current->thread.fpu.fcr31;
/*
- * We can't allow the emulated instruction to leave any of
- * the cause bits set in $fcr31.
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ *fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~res;
/*
* this is a tricky issue - lose_fpu() uses LL/SC atomics
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 6103b24d1bfc..a92994d60e91 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -79,16 +79,15 @@ void ptrace_disable(struct task_struct *child)
}
/*
- * Poke at FCSR according to its mask. Don't set the cause bits as
- * this is currently not handled correctly in FP context restoration
- * and will cause an oops if a corresponding enable bit is set.
+ * Poke at FCSR according to its mask. Set the Cause bits even
+ * if a corresponding Enable bit is set. This will be noticed at
+ * the time the thread is switched to and SIGFPE thrown accordingly.
*/
static void ptrace_setfcr31(struct task_struct *child, u32 value)
{
u32 fcr31;
u32 mask;
- value &= ~FPU_CSR_ALL_X;
fcr31 = child->thread.fpu.fcr31;
mask = boot_cpu_data.fpu_msk31;
child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
@@ -817,6 +816,7 @@ long arch_ptrace(struct task_struct *child, long request,
break;
#endif
case FPC_CSR:
+ init_fp_ctx(child);
ptrace_setfcr31(child, data);
break;
case DSP_BASE ... DSP_BASE + 5: {
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index b4ac6374a38f..918f2f6d3861 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -21,106 +21,84 @@
#define EX(a,b) \
9: a,##b; \
.section __ex_table,"a"; \
+ PTR 9b,fault; \
+ .previous
+
+#define EX2(a,b) \
+9: a,##b; \
+ .section __ex_table,"a"; \
PTR 9b,bad_stack; \
+ PTR 9b+4,bad_stack; \
.previous
.set noreorder
.set mips1
- /* Save floating point context */
+
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
LEAF(_save_fp_context)
.set push
SET_HARDFLOAT
li v0, 0 # assume success
- cfc1 t1,fcr31
- EX(swc1 $f0,(SC_FPREGS+0)(a0))
- EX(swc1 $f1,(SC_FPREGS+8)(a0))
- EX(swc1 $f2,(SC_FPREGS+16)(a0))
- EX(swc1 $f3,(SC_FPREGS+24)(a0))
- EX(swc1 $f4,(SC_FPREGS+32)(a0))
- EX(swc1 $f5,(SC_FPREGS+40)(a0))
- EX(swc1 $f6,(SC_FPREGS+48)(a0))
- EX(swc1 $f7,(SC_FPREGS+56)(a0))
- EX(swc1 $f8,(SC_FPREGS+64)(a0))
- EX(swc1 $f9,(SC_FPREGS+72)(a0))
- EX(swc1 $f10,(SC_FPREGS+80)(a0))
- EX(swc1 $f11,(SC_FPREGS+88)(a0))
- EX(swc1 $f12,(SC_FPREGS+96)(a0))
- EX(swc1 $f13,(SC_FPREGS+104)(a0))
- EX(swc1 $f14,(SC_FPREGS+112)(a0))
- EX(swc1 $f15,(SC_FPREGS+120)(a0))
- EX(swc1 $f16,(SC_FPREGS+128)(a0))
- EX(swc1 $f17,(SC_FPREGS+136)(a0))
- EX(swc1 $f18,(SC_FPREGS+144)(a0))
- EX(swc1 $f19,(SC_FPREGS+152)(a0))
- EX(swc1 $f20,(SC_FPREGS+160)(a0))
- EX(swc1 $f21,(SC_FPREGS+168)(a0))
- EX(swc1 $f22,(SC_FPREGS+176)(a0))
- EX(swc1 $f23,(SC_FPREGS+184)(a0))
- EX(swc1 $f24,(SC_FPREGS+192)(a0))
- EX(swc1 $f25,(SC_FPREGS+200)(a0))
- EX(swc1 $f26,(SC_FPREGS+208)(a0))
- EX(swc1 $f27,(SC_FPREGS+216)(a0))
- EX(swc1 $f28,(SC_FPREGS+224)(a0))
- EX(swc1 $f29,(SC_FPREGS+232)(a0))
- EX(swc1 $f30,(SC_FPREGS+240)(a0))
- EX(swc1 $f31,(SC_FPREGS+248)(a0))
- EX(sw t1,(SC_FPC_CSR)(a0))
- cfc1 t0,$0 # implementation/version
+ cfc1 t1, fcr31
+ EX2(s.d $f0, 0(a0))
+ EX2(s.d $f2, 16(a0))
+ EX2(s.d $f4, 32(a0))
+ EX2(s.d $f6, 48(a0))
+ EX2(s.d $f8, 64(a0))
+ EX2(s.d $f10, 80(a0))
+ EX2(s.d $f12, 96(a0))
+ EX2(s.d $f14, 112(a0))
+ EX2(s.d $f16, 128(a0))
+ EX2(s.d $f18, 144(a0))
+ EX2(s.d $f20, 160(a0))
+ EX2(s.d $f22, 176(a0))
+ EX2(s.d $f24, 192(a0))
+ EX2(s.d $f26, 208(a0))
+ EX2(s.d $f28, 224(a0))
+ EX2(s.d $f30, 240(a0))
jr ra
+ EX(sw t1, (a1))
.set pop
- .set nomacro
- EX(sw t0,(SC_FPC_EIR)(a0))
- .set macro
END(_save_fp_context)
-/*
- * Restore FPU state:
- * - fp gp registers
- * - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
*
- * We base the decision which registers to restore from the signal stack
- * frame on the current content of c0_status, not on the content of the
- * stack frame which might have been changed by the user.
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
*/
LEAF(_restore_fp_context)
.set push
SET_HARDFLOAT
li v0, 0 # assume success
- EX(lw t0,(SC_FPC_CSR)(a0))
- EX(lwc1 $f0,(SC_FPREGS+0)(a0))
- EX(lwc1 $f1,(SC_FPREGS+8)(a0))
- EX(lwc1 $f2,(SC_FPREGS+16)(a0))
- EX(lwc1 $f3,(SC_FPREGS+24)(a0))
- EX(lwc1 $f4,(SC_FPREGS+32)(a0))
- EX(lwc1 $f5,(SC_FPREGS+40)(a0))
- EX(lwc1 $f6,(SC_FPREGS+48)(a0))
- EX(lwc1 $f7,(SC_FPREGS+56)(a0))
- EX(lwc1 $f8,(SC_FPREGS+64)(a0))
- EX(lwc1 $f9,(SC_FPREGS+72)(a0))
- EX(lwc1 $f10,(SC_FPREGS+80)(a0))
- EX(lwc1 $f11,(SC_FPREGS+88)(a0))
- EX(lwc1 $f12,(SC_FPREGS+96)(a0))
- EX(lwc1 $f13,(SC_FPREGS+104)(a0))
- EX(lwc1 $f14,(SC_FPREGS+112)(a0))
- EX(lwc1 $f15,(SC_FPREGS+120)(a0))
- EX(lwc1 $f16,(SC_FPREGS+128)(a0))
- EX(lwc1 $f17,(SC_FPREGS+136)(a0))
- EX(lwc1 $f18,(SC_FPREGS+144)(a0))
- EX(lwc1 $f19,(SC_FPREGS+152)(a0))
- EX(lwc1 $f20,(SC_FPREGS+160)(a0))
- EX(lwc1 $f21,(SC_FPREGS+168)(a0))
- EX(lwc1 $f22,(SC_FPREGS+176)(a0))
- EX(lwc1 $f23,(SC_FPREGS+184)(a0))
- EX(lwc1 $f24,(SC_FPREGS+192)(a0))
- EX(lwc1 $f25,(SC_FPREGS+200)(a0))
- EX(lwc1 $f26,(SC_FPREGS+208)(a0))
- EX(lwc1 $f27,(SC_FPREGS+216)(a0))
- EX(lwc1 $f28,(SC_FPREGS+224)(a0))
- EX(lwc1 $f29,(SC_FPREGS+232)(a0))
- EX(lwc1 $f30,(SC_FPREGS+240)(a0))
- EX(lwc1 $f31,(SC_FPREGS+248)(a0))
+ EX(lw t0, (a1))
+ EX2(l.d $f0, 0(a0))
+ EX2(l.d $f2, 16(a0))
+ EX2(l.d $f4, 32(a0))
+ EX2(l.d $f6, 48(a0))
+ EX2(l.d $f8, 64(a0))
+ EX2(l.d $f10, 80(a0))
+ EX2(l.d $f12, 96(a0))
+ EX2(l.d $f14, 112(a0))
+ EX2(l.d $f16, 128(a0))
+ EX2(l.d $f18, 144(a0))
+ EX2(l.d $f20, 160(a0))
+ EX2(l.d $f22, 176(a0))
+ EX2(l.d $f24, 192(a0))
+ EX2(l.d $f26, 208(a0))
+ EX2(l.d $f28, 224(a0))
+ EX2(l.d $f30, 240(a0))
jr ra
- ctc1 t0,fcr31
+ ctc1 t0, fcr31
.set pop
END(_restore_fp_context)
.set reorder
diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S
index 47077380c15c..9cc7bfab3419 100644
--- a/arch/mips/kernel/r6000_fpu.S
+++ b/arch/mips/kernel/r6000_fpu.S
@@ -21,7 +21,14 @@
.set push
SET_HARDFLOAT
- /* Save floating point context */
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
LEAF(_save_fp_context)
mfc0 t0,CP0_STATUS
sll t0,t0,2
@@ -30,59 +37,59 @@
cfc1 t1,fcr31
/* Store the 16 double precision registers */
- sdc1 $f0,(SC_FPREGS+0)(a0)
- sdc1 $f2,(SC_FPREGS+16)(a0)
- sdc1 $f4,(SC_FPREGS+32)(a0)
- sdc1 $f6,(SC_FPREGS+48)(a0)
- sdc1 $f8,(SC_FPREGS+64)(a0)
- sdc1 $f10,(SC_FPREGS+80)(a0)
- sdc1 $f12,(SC_FPREGS+96)(a0)
- sdc1 $f14,(SC_FPREGS+112)(a0)
- sdc1 $f16,(SC_FPREGS+128)(a0)
- sdc1 $f18,(SC_FPREGS+144)(a0)
- sdc1 $f20,(SC_FPREGS+160)(a0)
- sdc1 $f22,(SC_FPREGS+176)(a0)
- sdc1 $f24,(SC_FPREGS+192)(a0)
- sdc1 $f26,(SC_FPREGS+208)(a0)
- sdc1 $f28,(SC_FPREGS+224)(a0)
- sdc1 $f30,(SC_FPREGS+240)(a0)
+ sdc1 $f0,0(a0)
+ sdc1 $f2,16(a0)
+ sdc1 $f4,32(a0)
+ sdc1 $f6,48(a0)
+ sdc1 $f8,64(a0)
+ sdc1 $f10,80(a0)
+ sdc1 $f12,96(a0)
+ sdc1 $f14,112(a0)
+ sdc1 $f16,128(a0)
+ sdc1 $f18,144(a0)
+ sdc1 $f20,160(a0)
+ sdc1 $f22,176(a0)
+ sdc1 $f24,192(a0)
+ sdc1 $f26,208(a0)
+ sdc1 $f28,224(a0)
+ sdc1 $f30,240(a0)
jr ra
- sw t0,SC_FPC_CSR(a0)
+ sw t0,(a1)
1: jr ra
nop
END(_save_fp_context)
-/* Restore FPU state:
- * - fp gp registers
- * - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
*
- * We base the decision which registers to restore from the signal stack
- * frame on the current content of c0_status, not on the content of the
- * stack frame which might have been changed by the user.
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
*/
LEAF(_restore_fp_context)
mfc0 t0,CP0_STATUS
sll t0,t0,2
bgez t0,1f
- lw t0,SC_FPC_CSR(a0)
+ lw t0,(a1)
/* Restore the 16 double precision registers */
- ldc1 $f0,(SC_FPREGS+0)(a0)
- ldc1 $f2,(SC_FPREGS+16)(a0)
- ldc1 $f4,(SC_FPREGS+32)(a0)
- ldc1 $f6,(SC_FPREGS+48)(a0)
- ldc1 $f8,(SC_FPREGS+64)(a0)
- ldc1 $f10,(SC_FPREGS+80)(a0)
- ldc1 $f12,(SC_FPREGS+96)(a0)
- ldc1 $f14,(SC_FPREGS+112)(a0)
- ldc1 $f16,(SC_FPREGS+128)(a0)
- ldc1 $f18,(SC_FPREGS+144)(a0)
- ldc1 $f20,(SC_FPREGS+160)(a0)
- ldc1 $f22,(SC_FPREGS+176)(a0)
- ldc1 $f24,(SC_FPREGS+192)(a0)
- ldc1 $f26,(SC_FPREGS+208)(a0)
- ldc1 $f28,(SC_FPREGS+224)(a0)
- ldc1 $f30,(SC_FPREGS+240)(a0)
+ ldc1 $f0,0(a0)
+ ldc1 $f2,16(a0)
+ ldc1 $f4,32(a0)
+ ldc1 $f6,48(a0)
+ ldc1 $f8,64(a0)
+ ldc1 $f10,80(a0)
+ ldc1 $f12,96(a0)
+ ldc1 $f14,112(a0)
+ ldc1 $f16,128(a0)
+ ldc1 $f18,144(a0)
+ ldc1 $f20,160(a0)
+ ldc1 $f22,176(a0)
+ ldc1 $f24,192(a0)
+ ldc1 $f26,208(a0)
+ ldc1 $f28,224(a0)
+ ldc1 $f30,240(a0)
jr ra
ctc1 t0,fcr31
1: jr ra
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index ca1cc30c0891..1958910b75c0 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -200,7 +200,7 @@ static inline __init unsigned long get_random_boot(void)
#if defined(CONFIG_USE_OF)
/* Get any additional entropy passed in device tree */
- {
+ if (initial_boot_params) {
int node, len;
u64 *prop;
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 0d57909d9026..f66e5ce505b2 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -368,6 +368,19 @@ static void __init bootmem_init(void)
end = PFN_DOWN(boot_mem_map.map[i].addr
+ boot_mem_map.map[i].size);
+#ifndef CONFIG_HIGHMEM
+ /*
+ * Skip highmem here so we get an accurate max_low_pfn if low
+ * memory stops short of high memory.
+ * If the region overlaps HIGHMEM_START, end is clipped so
+ * max_pfn excludes the highmem portion.
+ */
+ if (start >= PFN_DOWN(HIGHMEM_START))
+ continue;
+ if (end > PFN_DOWN(HIGHMEM_START))
+ end = PFN_DOWN(HIGHMEM_START);
+#endif
+
if (end > max_low_pfn)
max_low_pfn = end;
if (start < min_low_pfn)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 1f5fdee1dfc3..3905003dfe2b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -156,7 +156,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
print_ip_sym(pc);
pc = unwind_stack(task, &sp, pc, &ra);
} while (pc);
- printk("\n");
+ pr_cont("\n");
}
/*
@@ -174,22 +174,24 @@ static void show_stacktrace(struct task_struct *task,
printk("Stack :");
i = 0;
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
- if (i && ((i % (64 / field)) == 0))
- printk("\n ");
+ if (i && ((i % (64 / field)) == 0)) {
+ pr_cont("\n");
+ printk(" ");
+ }
if (i > 39) {
- printk(" ...");
+ pr_cont(" ...");
break;
}
if (__get_user(stackdata, sp++)) {
- printk(" (Bad stack address)");
+ pr_cont(" (Bad stack address)");
break;
}
- printk(" %0*lx", field, stackdata);
+ pr_cont(" %0*lx", field, stackdata);
i++;
}
- printk("\n");
+ pr_cont("\n");
show_backtrace(task, regs);
}
@@ -229,18 +231,19 @@ static void show_code(unsigned int __user *pc)
long i;
unsigned short __user *pc16 = NULL;
- printk("\nCode:");
+ printk("Code:");
if ((unsigned long)pc & 1)
pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
for(i = -3 ; i < 6 ; i++) {
unsigned int insn;
if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
- printk(" (Bad address in epc)\n");
+ pr_cont(" (Bad address in epc)\n");
break;
}
- printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
+ pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
}
+ pr_cont("\n");
}
static void __show_regs(const struct pt_regs *regs)
@@ -259,15 +262,15 @@ static void __show_regs(const struct pt_regs *regs)
if ((i % 4) == 0)
printk("$%2d :", i);
if (i == 0)
- printk(" %0*lx", field, 0UL);
+ pr_cont(" %0*lx", field, 0UL);
else if (i == 26 || i == 27)
- printk(" %*s", field, "");
+ pr_cont(" %*s", field, "");
else
- printk(" %0*lx", field, regs->regs[i]);
+ pr_cont(" %0*lx", field, regs->regs[i]);
i++;
if ((i % 4) == 0)
- printk("\n");
+ pr_cont("\n");
}
#ifdef CONFIG_CPU_HAS_SMARTMIPS
@@ -288,46 +291,46 @@ static void __show_regs(const struct pt_regs *regs)
if (cpu_has_3kex) {
if (regs->cp0_status & ST0_KUO)
- printk("KUo ");
+ pr_cont("KUo ");
if (regs->cp0_status & ST0_IEO)
- printk("IEo ");
+ pr_cont("IEo ");
if (regs->cp0_status & ST0_KUP)
- printk("KUp ");
+ pr_cont("KUp ");
if (regs->cp0_status & ST0_IEP)
- printk("IEp ");
+ pr_cont("IEp ");
if (regs->cp0_status & ST0_KUC)
- printk("KUc ");
+ pr_cont("KUc ");
if (regs->cp0_status & ST0_IEC)
- printk("IEc ");
+ pr_cont("IEc ");
} else if (cpu_has_4kex) {
if (regs->cp0_status & ST0_KX)
- printk("KX ");
+ pr_cont("KX ");
if (regs->cp0_status & ST0_SX)
- printk("SX ");
+ pr_cont("SX ");
if (regs->cp0_status & ST0_UX)
- printk("UX ");
+ pr_cont("UX ");
switch (regs->cp0_status & ST0_KSU) {
case KSU_USER:
- printk("USER ");
+ pr_cont("USER ");
break;
case KSU_SUPERVISOR:
- printk("SUPERVISOR ");
+ pr_cont("SUPERVISOR ");
break;
case KSU_KERNEL:
- printk("KERNEL ");
+ pr_cont("KERNEL ");
break;
default:
- printk("BAD_MODE ");
+ pr_cont("BAD_MODE ");
break;
}
if (regs->cp0_status & ST0_ERL)
- printk("ERL ");
+ pr_cont("ERL ");
if (regs->cp0_status & ST0_EXL)
- printk("EXL ");
+ pr_cont("EXL ");
if (regs->cp0_status & ST0_IE)
- printk("IE ");
+ pr_cont("IE ");
}
- printk("\n");
+ pr_cont("\n");
exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
@@ -705,6 +708,32 @@ asmlinkage void do_ov(struct pt_regs *regs)
exception_exit(prev_state);
}
+/*
+ * Send SIGFPE according to FCSR Cause bits, which must have already
+ * been masked against Enable bits. This is important as Inexact can
+ * happen together with Overflow or Underflow, and `ptrace' can set
+ * any bits.
+ */
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+ struct task_struct *tsk)
+{
+ struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };
+
+ if (fcr31 & FPU_CSR_INV_X)
+ si.si_code = FPE_FLTINV;
+ else if (fcr31 & FPU_CSR_DIV_X)
+ si.si_code = FPE_FLTDIV;
+ else if (fcr31 & FPU_CSR_OVF_X)
+ si.si_code = FPE_FLTOVF;
+ else if (fcr31 & FPU_CSR_UDF_X)
+ si.si_code = FPE_FLTUND;
+ else if (fcr31 & FPU_CSR_INE_X)
+ si.si_code = FPE_FLTRES;
+ else
+ si.si_code = __SI_FAULT;
+ force_sig_info(SIGFPE, &si, tsk);
+}
+
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
struct siginfo si = { 0 };
@@ -715,27 +744,7 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
return 0;
case SIGFPE:
- si.si_addr = fault_addr;
- si.si_signo = sig;
- /*
- * Inexact can happen together with Overflow or Underflow.
- * Respect the mask to deliver the correct exception.
- */
- fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
- (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
- if (fcr31 & FPU_CSR_INV_X)
- si.si_code = FPE_FLTINV;
- else if (fcr31 & FPU_CSR_DIV_X)
- si.si_code = FPE_FLTDIV;
- else if (fcr31 & FPU_CSR_OVF_X)
- si.si_code = FPE_FLTOVF;
- else if (fcr31 & FPU_CSR_UDF_X)
- si.si_code = FPE_FLTUND;
- else if (fcr31 & FPU_CSR_INE_X)
- si.si_code = FPE_FLTRES;
- else
- si.si_code = __SI_FAULT;
- force_sig_info(sig, &si, current);
+ force_fcr31_sig(fcr31, fault_addr, current);
return 1;
case SIGBUS:
@@ -799,13 +808,13 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
- fcr31 = current->thread.fpu.fcr31;
/*
- * We can't allow the emulated instruction to leave any of
- * the cause bits set in $fcr31.
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
/* Restore the hardware register state */
own_fpu(1);
@@ -831,7 +840,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
goto out;
/* Clear FCSR.Cause before enabling interrupts */
- write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
+ write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
local_irq_enable();
die_if_kernel("FP exception in kernel code", regs);
@@ -853,13 +862,13 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
- fcr31 = current->thread.fpu.fcr31;
/*
- * We can't allow the emulated instruction to leave any of
- * the cause bits set in $fcr31.
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
/* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */
@@ -1424,13 +1433,13 @@ asmlinkage void do_cpu(struct pt_regs *regs)
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
&fault_addr);
- fcr31 = current->thread.fpu.fcr31;
/*
* We can't allow the emulated instruction to leave
- * any of the cause bits set in $fcr31.
+ * any enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
/* Send a signal if required. */
if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
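The long run of printk() -> pr_cont() conversions in traps.c (and in the
TLB dumpers and the parisc driver further below) all apply one rule: open a
console line with a printk() carrying a log level, then emit every further
fragment of that same line with pr_cont(); a bare printk() per fragment
starts a new record each time and lets other CPUs' messages interleave
mid-line. A kernel-context sketch -- printk()/pr_cont() are the real APIs,
the function and data are hypothetical:

	#include <linux/printk.h>

	static void show_words(const unsigned long *data, int n)
	{
		int i;

		printk(KERN_INFO "Words:");		/* opens the record */
		for (i = 0; i < n; i++)
			pr_cont(" %08lx", data[i]);	/* stays on the line */
		pr_cont("\n");				/* terminates it */
	}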
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 0f80b936e75e..6eb50a7137db 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -135,42 +135,42 @@ static void dump_tlb(int first, int last)
c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
- printk("va=%0*lx asid=%0*lx",
- vwidth, (entryhi & ~0x1fffUL),
- asidwidth, entryhi & asidmask);
+ pr_cont("va=%0*lx asid=%0*lx",
+ vwidth, (entryhi & ~0x1fffUL),
+ asidwidth, entryhi & asidmask);
if (cpu_has_guestid)
- printk(" gid=%02lx",
- (guestctl1 & MIPS_GCTL1_RID)
+ pr_cont(" gid=%02lx",
+ (guestctl1 & MIPS_GCTL1_RID)
>> MIPS_GCTL1_RID_SHIFT);
/* RI/XI are in awkward places, so mask them off separately */
pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
if (xpa)
pa |= (unsigned long long)readx_c0_entrylo0() << 30;
pa = (pa << 6) & PAGE_MASK;
- printk("\n\t[");
+ pr_cont("\n\t[");
if (cpu_has_rixi)
- printk("ri=%d xi=%d ",
- (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
- (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
- printk("pa=%0*llx c=%d d=%d v=%d g=%d] [",
- pwidth, pa, c0,
- (entrylo0 & ENTRYLO_D) ? 1 : 0,
- (entrylo0 & ENTRYLO_V) ? 1 : 0,
- (entrylo0 & ENTRYLO_G) ? 1 : 0);
+ pr_cont("ri=%d xi=%d ",
+ (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
+ (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
+ pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d] [",
+ pwidth, pa, c0,
+ (entrylo0 & ENTRYLO_D) ? 1 : 0,
+ (entrylo0 & ENTRYLO_V) ? 1 : 0,
+ (entrylo0 & ENTRYLO_G) ? 1 : 0);
/* RI/XI are in awkward places, so mask them off separately */
pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
if (xpa)
pa |= (unsigned long long)readx_c0_entrylo1() << 30;
pa = (pa << 6) & PAGE_MASK;
if (cpu_has_rixi)
- printk("ri=%d xi=%d ",
- (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
- (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
- printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
- pwidth, pa, c1,
- (entrylo1 & ENTRYLO_D) ? 1 : 0,
- (entrylo1 & ENTRYLO_V) ? 1 : 0,
- (entrylo1 & ENTRYLO_G) ? 1 : 0);
+ pr_cont("ri=%d xi=%d ",
+ (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
+ (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
+ pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
+ pwidth, pa, c1,
+ (entrylo1 & ENTRYLO_D) ? 1 : 0,
+ (entrylo1 & ENTRYLO_V) ? 1 : 0,
+ (entrylo1 & ENTRYLO_G) ? 1 : 0);
}
printk("\n");
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 744f4a7bc49d..85b4086e553e 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -53,15 +53,15 @@ static void dump_tlb(int first, int last)
*/
printk("Index: %2d ", i);
- printk("va=%08lx asid=%08lx"
- " [pa=%06lx n=%d d=%d v=%d g=%d]",
- entryhi & PAGE_MASK,
- entryhi & asid_mask,
- entrylo0 & PAGE_MASK,
- (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
- (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
- (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
- (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
+ pr_cont("va=%08lx asid=%08lx"
+ " [pa=%06lx n=%d d=%d v=%d g=%d]",
+ entryhi & PAGE_MASK,
+ entryhi & asid_mask,
+ entrylo0 & PAGE_MASK,
+ (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
+ (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
+ (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
+ (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
}
}
printk("\n");
diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c
index d9563ddb337e..746bf5caaffc 100644
--- a/arch/nios2/kernel/time.c
+++ b/arch/nios2/kernel/time.c
@@ -324,6 +324,7 @@ static int __init nios2_time_init(struct device_node *timer)
ret = nios2_clocksource_init(timer);
break;
default:
+ ret = 0;
break;
}
diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
index 4ce7a01a252d..5f55da9cbfd5 100644
--- a/arch/openrisc/include/asm/cache.h
+++ b/arch/openrisc/include/asm/cache.h
@@ -23,6 +23,8 @@
* they shouldn't be hard-coded!
*/
+#define __ro_after_init __read_mostly
+
#define L1_CACHE_BYTES 16
#define L1_CACHE_SHIFT 4
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index a9b9407f38f7..6b0741e7a7ed 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -368,7 +368,9 @@
#define __IGNORE_select /* newselect */
#define __IGNORE_fadvise64 /* fadvise64_64 */
-
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
#define LINUX_GATEWAY_ADDR 0x100
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index f8150669b8c6..700e2d2da096 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -873,11 +873,11 @@ static void print_parisc_device(struct parisc_device *dev)
if (dev->num_addrs) {
int k;
- printk(", additional addresses: ");
+ pr_cont(", additional addresses: ");
for (k = 0; k < dev->num_addrs; k++)
- printk("0x%lx ", dev->addr[k]);
+ pr_cont("0x%lx ", dev->addr[k]);
}
- printk("\n");
+ pr_cont("\n");
}
/**
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index d03422e5f188..23de307c3052 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -100,14 +100,12 @@ set_thread_pointer:
.endr
/* This address must remain fixed at 0x100 for glibc's syscalls to work */
- .align 256
+ .align LINUX_GATEWAY_ADDR
linux_gateway_entry:
gate .+8, %r0 /* become privileged */
mtsp %r0,%sr4 /* get kernel space into sr4 */
mtsp %r0,%sr5 /* get kernel space into sr5 */
mtsp %r0,%sr6 /* get kernel space into sr6 */
- mfsp %sr7,%r1 /* save user sr7 */
- mtsp %r1,%sr3 /* and store it in sr3 */
#ifdef CONFIG_64BIT
/* for now we can *always* set the W bit on entry to the syscall
@@ -133,6 +131,14 @@ linux_gateway_entry:
depdi 0, 31, 32, %r21
1:
#endif
+
+ /* We use a rsm/ssm pair to prevent sr3 from being clobbered
+ * by external interrupts.
+ */
+ mfsp %sr7,%r1 /* save user sr7 */
+ rsm PSW_SM_I, %r0 /* disable interrupts */
+ mtsp %r1,%sr3 /* and store it in sr3 */
+
mfctl %cr30,%r1
xor %r1,%r30,%r30 /* ye olde xor trick */
xor %r1,%r30,%r1
@@ -147,6 +153,7 @@ linux_gateway_entry:
*/
mtsp %r0,%sr7 /* get kernel space into sr7 */
+ ssm PSW_SM_I, %r0 /* enable interrupts */
STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
mfctl %cr30,%r1 /* get task ptr in %r1 */
LDREG TI_TASK(%r1),%r1
@@ -474,11 +481,6 @@ lws_start:
comiclr,>> __NR_lws_entries, %r20, %r0
b,n lws_exit_nosys
- /* WARNING: Trashing sr2 and sr3 */
- mfsp %sr7,%r1 /* get userspace into sr3 */
- mtsp %r1,%sr3
- mtsp %r0,%sr2 /* get kernel space into sr2 */
-
/* Load table start */
ldil L%lws_table, %r1
ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */
@@ -627,9 +629,9 @@ cas_action:
stw %r1, 4(%sr2,%r20)
#endif
/* The load and store could fail */
-1: ldw,ma 0(%sr3,%r26), %r28
+1: ldw,ma 0(%r26), %r28
sub,<> %r28, %r25, %r0
-2: stw,ma %r24, 0(%sr3,%r26)
+2: stw,ma %r24, 0(%r26)
/* Free lock */
stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
@@ -706,9 +708,9 @@ lws_compare_and_swap_2:
nop
/* 8bit load */
-4: ldb 0(%sr3,%r25), %r25
+4: ldb 0(%r25), %r25
b cas2_lock_start
-5: ldb 0(%sr3,%r24), %r24
+5: ldb 0(%r24), %r24
nop
nop
nop
@@ -716,9 +718,9 @@ lws_compare_and_swap_2:
nop
/* 16bit load */
-6: ldh 0(%sr3,%r25), %r25
+6: ldh 0(%r25), %r25
b cas2_lock_start
-7: ldh 0(%sr3,%r24), %r24
+7: ldh 0(%r24), %r24
nop
nop
nop
@@ -726,9 +728,9 @@ lws_compare_and_swap_2:
nop
/* 32bit load */
-8: ldw 0(%sr3,%r25), %r25
+8: ldw 0(%r25), %r25
b cas2_lock_start
-9: ldw 0(%sr3,%r24), %r24
+9: ldw 0(%r24), %r24
nop
nop
nop
@@ -737,14 +739,14 @@ lws_compare_and_swap_2:
/* 64bit load */
#ifdef CONFIG_64BIT
-10: ldd 0(%sr3,%r25), %r25
-11: ldd 0(%sr3,%r24), %r24
+10: ldd 0(%r25), %r25
+11: ldd 0(%r24), %r24
#else
/* Load new value into r22/r23 - high/low */
-10: ldw 0(%sr3,%r25), %r22
-11: ldw 4(%sr3,%r25), %r23
+10: ldw 0(%r25), %r22
+11: ldw 4(%r25), %r23
/* Load new value into fr4 for atomic store later */
-12: flddx 0(%sr3,%r24), %fr4
+12: flddx 0(%r24), %fr4
#endif
cas2_lock_start:
@@ -794,30 +796,30 @@ cas2_action:
ldo 1(%r0),%r28
/* 8bit CAS */
-13: ldb,ma 0(%sr3,%r26), %r29
+13: ldb,ma 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-14: stb,ma %r24, 0(%sr3,%r26)
+14: stb,ma %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 16bit CAS */
-15: ldh,ma 0(%sr3,%r26), %r29
+15: ldh,ma 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-16: sth,ma %r24, 0(%sr3,%r26)
+16: sth,ma %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 32bit CAS */
-17: ldw,ma 0(%sr3,%r26), %r29
+17: ldw,ma 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-18: stw,ma %r24, 0(%sr3,%r26)
+18: stw,ma %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
@@ -825,22 +827,22 @@ cas2_action:
/* 64bit CAS */
#ifdef CONFIG_64BIT
-19: ldd,ma 0(%sr3,%r26), %r29
+19: ldd,ma 0(%r26), %r29
sub,*= %r29, %r25, %r0
b,n cas2_end
-20: std,ma %r24, 0(%sr3,%r26)
+20: std,ma %r24, 0(%r26)
copy %r0, %r28
#else
/* Compare first word */
-19: ldw,ma 0(%sr3,%r26), %r29
+19: ldw,ma 0(%r26), %r29
sub,= %r29, %r22, %r0
b,n cas2_end
/* Compare second word */
-20: ldw,ma 4(%sr3,%r26), %r29
+20: ldw,ma 4(%r26), %r29
sub,= %r29, %r23, %r0
b,n cas2_end
/* Perform the store */
-21: fstdx %fr4, 0(%sr3,%r26)
+21: fstdx %fr4, 0(%r26)
copy %r0, %r28
#endif
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index ee655ed1ff1b..1e8fceb308a5 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -53,10 +53,8 @@ static inline __sum16 csum_fold(__wsum sum)
return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
}
-static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
{
#ifdef __powerpc64__
unsigned long s = (__force u32)sum;
@@ -83,10 +81,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
-static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index 01b8a13f0224..3919332965af 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -26,7 +26,7 @@ extern u64 pnv_first_deep_stop_state;
std r0,0(r1); \
ptesync; \
ld r0,0(r1); \
-1: cmp cr0,r0,r0; \
+1: cmpd cr0,r0,r0; \
bne 1b; \
IDLE_INST; \
b .
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 2e4e7d878c8e..84d49b197c32 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -93,6 +93,10 @@
ld reg,PACAKBASE(r13); /* get high part of &label */ \
ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;
+#define __LOAD_HANDLER(reg, label) \
+ ld reg,PACAKBASE(r13); \
+ ori reg,reg,(ABS_ADDR(label))@l;
+
/* Exception register prefixes */
#define EXC_HV H
#define EXC_STD
@@ -208,6 +212,18 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define kvmppc_interrupt kvmppc_interrupt_pr
#endif
+#ifdef CONFIG_RELOCATABLE
+#define BRANCH_TO_COMMON(reg, label) \
+ __LOAD_HANDLER(reg, label); \
+ mtctr reg; \
+ bctr
+
+#else
+#define BRANCH_TO_COMMON(reg, label) \
+ b label
+
+#endif
+
#define __KVM_HANDLER_PROLOG(area, n) \
BEGIN_FTR_SECTION_NESTED(947) \
ld r10,area+EX_CFAR(r13); \
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index f6f68f73e858..99e1397b71da 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -52,11 +52,23 @@ static inline int mm_is_core_local(struct mm_struct *mm)
return cpumask_subset(mm_cpumask(mm),
topology_sibling_cpumask(smp_processor_id()));
}
+
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+ return cpumask_equal(mm_cpumask(mm),
+ cpumask_of(smp_processor_id()));
+}
+
#else
static inline int mm_is_core_local(struct mm_struct *mm)
{
return 1;
}
+
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+ return 1;
+}
#endif
#endif /* __KERNEL__ */
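mm_is_thread_local() narrows mm_is_core_local(): cpumask_equal() against
cpumask_of(smp_processor_id()) is true only when the mm has never been
active on any other CPU, which lets TLB code choose a cheap CPU-local
invalidation over a broadcast. A kernel-context sketch of the intended call
site -- local_flush()/broadcast_flush() are hypothetical stand-ins for the
architecture's tlbiel/tlbie paths:

	static void flush_mm_tlb(struct mm_struct *mm)
	{
		if (mm_is_thread_local(mm))
			local_flush(mm);	/* no IPI / fabric traffic */
		else
			broadcast_flush(mm);
	}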
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index f129408c6022..08ba447a4b3d 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -95,19 +95,35 @@ __start_interrupts:
/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x4100)
-EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
- SET_SCRATCH0(r13)
+
#ifdef CONFIG_PPC_P7_NAP
-BEGIN_FTR_SECTION
- /* Running native on arch 2.06 or later, check if we are
- * waking up from nap/sleep/winkle.
+ /*
+ * If running native on arch 2.06 or later, check if we are waking up
+ * from nap/sleep/winkle, and branch to idle handler.
*/
- mfspr r13,SPRN_SRR1
- rlwinm. r13,r13,47-31,30,31
- beq 9f
+#define IDLETEST(n) \
+ BEGIN_FTR_SECTION ; \
+ mfspr r10,SPRN_SRR1 ; \
+ rlwinm. r10,r10,47-31,30,31 ; \
+ beq- 1f ; \
+ cmpwi cr3,r10,2 ; \
+ BRANCH_TO_COMMON(r10, system_reset_idle_common) ; \
+1: \
+ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+#else
+#define IDLETEST NOTEST
+#endif
- cmpwi cr3,r13,2
- GET_PACA(r13)
+EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+ IDLETEST, 0x100)
+
+EXC_REAL_END(system_reset, 0x100, 0x200)
+EXC_VIRT_NONE(0x4100, 0x4200)
+
+#ifdef CONFIG_PPC_P7_NAP
+EXC_COMMON_BEGIN(system_reset_idle_common)
bl pnv_restore_hyp_resource
li r0,PNV_THREAD_RUNNING
@@ -130,14 +146,8 @@ BEGIN_FTR_SECTION
blt cr3,2f
b pnv_wakeup_loss
2: b pnv_wakeup_noloss
+#endif
-9:
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
-#endif /* CONFIG_PPC_P7_NAP */
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
- NOTEST, 0x100)
-EXC_REAL_END(system_reset, 0x100, 0x200)
-EXC_VIRT_NONE(0x4100, 0x4200)
EXC_COMMON(system_reset_common, 0x100, system_reset_exception)
#ifdef CONFIG_PPC_PSERIES
@@ -817,10 +827,8 @@ EXC_VIRT(trap_0b, 0x4b00, 0x4c00, 0xb00)
TRAMP_KVM(PACA_EXGEN, 0xb00)
EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
-
-#define LOAD_SYSCALL_HANDLER(reg) \
- ld reg,PACAKBASE(r13); \
- ori reg,reg,(ABS_ADDR(system_call_common))@l;
+#define LOAD_SYSCALL_HANDLER(reg) \
+ __LOAD_HANDLER(reg, system_call_common)
/* Syscall routine is used twice, in reloc-off and reloc-on paths */
#define SYSCALL_PSERIES_1 \
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 9781c69eae57..03d089b3ed72 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -275,7 +275,7 @@ int hw_breakpoint_handler(struct die_args *args)
if (!stepped) {
WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
"0x%lx will be disabled.", info->address);
- perf_event_disable(bp);
+ perf_event_disable_inatomic(bp);
goto out;
}
/*
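hw_breakpoint_handler() runs from an exception path, where the regular perf_event_disable() must not be called because it can sleep (it takes the event context mutex); perf_event_disable_inatomic() instead marks the event and defers the real work until it is safe. A generic sketch of that defer-to-later pattern (illustrative, not the perf implementation, which uses irq_work):

    #include <stdatomic.h>

    struct deferred_disable {
            atomic_bool pending;
    };

    /* atomic context: only record the request */
    static void disable_inatomic(struct deferred_disable *d)
    {
            atomic_store(&d->pending, true);
            /* the kernel queues an irq_work here */
    }

    /* safe context, later: act on it */
    static void run_pending(struct deferred_disable *d)
    {
            if (atomic_exchange(&d->pending, false))
                    ;       /* do the real, possibly sleeping, disable */
    }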
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index bd739fed26e3..72dac0b58061 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -90,6 +90,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
* Threads will spin in HMT_LOW until the lock bit is cleared.
* r14 - pointer to core_idle_state
* r15 - used to load contents of core_idle_state
+ * r9 - used as a temporary variable
*/
core_idle_lock_held:
@@ -99,6 +100,8 @@ core_idle_lock_held:
bne 3b
HMT_MEDIUM
lwarx r15,0,r14
+ andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
+ bne core_idle_lock_held
blr
/*
@@ -163,12 +166,6 @@ _GLOBAL(pnv_powersave_common)
std r9,_MSR(r1)
std r1,PACAR1(r13)
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- /* Tell KVM we're entering idle */
- li r4,KVM_HWTHREAD_IN_IDLE
- stb r4,HSTATE_HWTHREAD_STATE(r13)
-#endif
-
/*
* Go to real mode to do the nap, as required by the architecture.
* Also, we need to be in real mode before setting hwthread_state,
@@ -185,6 +182,26 @@ _GLOBAL(pnv_powersave_common)
.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* Tell KVM we're entering idle */
+ li r4,KVM_HWTHREAD_IN_IDLE
+ /******************************************************/
+ /* N O T E W E L L ! ! ! N O T E W E L L */
+ /* The following store to HSTATE_HWTHREAD_STATE(r13) */
+ /* MUST occur in real mode, i.e. with the MMU off, */
+ /* and the MMU must stay off until we clear this flag */
+ /* and test HSTATE_HWTHREAD_REQ(r13) in the system */
+ /* reset interrupt vector in exceptions-64s.S. */
+ /* The reason is that another thread can switch the */
+ /* MMU to a guest context whenever this flag is set */
+ /* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on, */
+ /* that would potentially cause this thread to start */
+ /* executing instructions from guest memory in */
+ /* hypervisor mode, leading to a host crash or data */
+ /* corruption, or worse. */
+ /******************************************************/
+ stb r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
stb r3,PACA_THREAD_IDLE_STATE(r13)
cmpwi cr3,r3,PNV_THREAD_SLEEP
bge cr3,2f
@@ -250,6 +267,12 @@ enter_winkle:
* r3 - requested stop state
*/
power_enter_stop:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* Tell KVM we're entering idle */
+ li r4,KVM_HWTHREAD_IN_IDLE
+ /* DO THIS IN REAL MODE! See comment above. */
+ stb r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
/*
* Check if the requested state is a deep idle state.
*/
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9e7c10fe205f..ce6dc61b15b2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1012,7 +1012,7 @@ void restore_tm_state(struct pt_regs *regs)
/* Ensure that restore_math() will restore */
if (msr_diff & MSR_FP)
current->thread.load_fp = 1;
-#ifdef CONFIG_ALIVEC
+#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
current->thread.load_vec = 1;
#endif
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 82ff5de8b1e7..a0ea63ac2b52 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -23,6 +23,7 @@
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
+#include <asm/smp.h>
#include "book3s_xics.h"
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 0e49ec541ab5..bda8c43be78a 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -175,7 +175,7 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context;
- if (!mm_is_core_local(mm)) {
+ if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
@@ -201,7 +201,7 @@ void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context;
- if (!mm_is_core_local(mm)) {
+ if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
@@ -226,7 +226,7 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
pid = mm ? mm->context.id : 0;
if (unlikely(pid == MMU_NO_CONTEXT))
goto bail;
- if (!mm_is_core_local(mm)) {
+ if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
@@ -321,7 +321,7 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
{
unsigned long pid;
unsigned long addr;
- int local = mm_is_core_local(mm);
+ int local = mm_is_thread_local(mm);
unsigned long ap = mmu_get_ap(psize);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 28f03ca60100..794bebb43d23 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -363,11 +363,11 @@ out:
static int diag224_get_name_table(void)
{
/* memory must be below 2GB */
- diag224_cpu_names = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
+ diag224_cpu_names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
if (!diag224_cpu_names)
return -ENOMEM;
if (diag224(diag224_cpu_names)) {
- kfree(diag224_cpu_names);
+ free_page((unsigned long) diag224_cpu_names);
return -EOPNOTSUPP;
}
EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
@@ -376,7 +376,7 @@ static int diag224_get_name_table(void)
static void diag224_delete_name_table(void)
{
- kfree(diag224_cpu_names);
+ free_page((unsigned long) diag224_cpu_names);
}
static int diag224_idx2name(int index, char *name)
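The switch from kmalloc() to __get_free_page() above is about alignment as much as placement: the diagnose needs a real, page-aligned buffer below 2GB, and while kmalloc(PAGE_SIZE) is usually page aligned, with slab debugging enabled the returned object can be offset by a redzone. A sketch of the resulting helper pair (kernel-style, condensed from the hunk):

    /* page-aligned by construction; GFP_DMA keeps it below 2GB on s390 */
    static void *alloc_diag224_buffer(void)
    {
            return (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
    }

    static void free_diag224_buffer(void *buf)
    {
            free_page((unsigned long)buf);
    }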
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 64053d9ac3f2..836c56290499 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -12,9 +12,7 @@
#ifndef __ASSEMBLY__
-unsigned long return_address(int depth);
-
-#define ftrace_return_address(n) return_address(n)
+#define ftrace_return_address(n) __builtin_return_address(n)
void _mcount(void);
void ftrace_caller(void);
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 03323175de30..602af692efdc 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -192,7 +192,7 @@ struct task_struct;
struct mm_struct;
struct seq_file;
-typedef int (*dump_trace_func_t)(void *data, unsigned long address);
+typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
void dump_trace(dump_trace_func_t func, void *data,
struct task_struct *task, unsigned long sp);
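Every dump_trace() consumer in the hunks below has to grow the extra `reliable` argument; the flag tells the callback whether the address came from a verified stack frame or was recovered heuristically. A minimal sketch of a conforming callback under the new signature (illustrative):

    /* count only the addresses the unwinder vouches for */
    static int count_reliable(void *data, unsigned long address, int reliable)
    {
            if (reliable)
                    (*(unsigned int *)data)++;
            return 0;       /* non-zero would stop the walk */
    }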
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 02613bad8bbb..3066031a73fe 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -9,6 +9,9 @@
#include <uapi/asm/unistd.h>
#define __IGNORE_time
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 43446fa2a4e5..c74c59236f44 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -2014,12 +2014,12 @@ void show_code(struct pt_regs *regs)
*ptr++ = '\t';
ptr += print_insn(ptr, code + start, addr);
start += opsize;
- printk("%s", buffer);
+ pr_cont("%s", buffer);
ptr = buffer;
ptr += sprintf(ptr, "\n ");
hops++;
}
- printk("\n");
+ pr_cont("\n");
}
void print_fn_code(unsigned char *code, unsigned long len)
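The printk()-to-pr_cont() conversions in this and the following s390 files share one cause: printk() now starts a new output line for every call unless KERN_CONT is given, so code that assembles a line from several calls must use pr_cont() for every fragment after the first. A minimal sketch (not from the patch):

    static void show_two_gprs(struct pt_regs *regs)
    {
            printk(KERN_DEFAULT "GPRS: %016lx", regs->gprs[0]);
            pr_cont(" %016lx\n", regs->gprs[1]);    /* same output line */
    }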
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 6693383bc01b..55d4fe174fd9 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -38,10 +38,10 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
if (sp < low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
+ if (func(data, sf->gprs[8], 0))
+ return sp;
/* Follow the backchain. */
while (1) {
- if (func(data, sf->gprs[8]))
- return sp;
low = sp;
sp = sf->back_chain;
if (!sp)
@@ -49,6 +49,8 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
if (sp <= low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
+ if (func(data, sf->gprs[8], 1))
+ return sp;
}
/* Zero backchain detected, check for interrupt frame. */
sp = (unsigned long) (sf + 1);
@@ -56,7 +58,7 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
return sp;
regs = (struct pt_regs *) sp;
if (!user_mode(regs)) {
- if (func(data, regs->psw.addr))
+ if (func(data, regs->psw.addr, 1))
return sp;
}
low = sp;
@@ -85,33 +87,12 @@ void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
}
EXPORT_SYMBOL_GPL(dump_trace);
-struct return_address_data {
- unsigned long address;
- int depth;
-};
-
-static int __return_address(void *data, unsigned long address)
-{
- struct return_address_data *rd = data;
-
- if (rd->depth--)
- return 0;
- rd->address = address;
- return 1;
-}
-
-unsigned long return_address(int depth)
-{
- struct return_address_data rd = { .depth = depth + 2 };
-
- dump_trace(__return_address, &rd, NULL, current_stack_pointer());
- return rd.address;
-}
-EXPORT_SYMBOL_GPL(return_address);
-
-static int show_address(void *data, unsigned long address)
+static int show_address(void *data, unsigned long address, int reliable)
{
- printk("([<%016lx>] %pSR)\n", address, (void *)address);
+ if (reliable)
+ printk(" [<%016lx>] %pSR \n", address, (void *)address);
+ else
+ printk("([<%016lx>] %pSR)\n", address, (void *)address);
return 0;
}
@@ -138,14 +119,14 @@ void show_stack(struct task_struct *task, unsigned long *sp)
else
stack = (unsigned long *)task->thread.ksp;
}
+ printk(KERN_DEFAULT "Stack:\n");
for (i = 0; i < 20; i++) {
if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
break;
- if ((i * sizeof(long) % 32) == 0)
- printk("%s ", i == 0 ? "" : "\n");
- printk("%016lx ", *stack++);
+ if (i % 4 == 0)
+ printk(KERN_DEFAULT " ");
+ pr_cont("%016lx%c", *stack++, i % 4 == 3 ? '\n' : ' ');
}
- printk("\n");
show_trace(task, (unsigned long)sp);
}
@@ -163,13 +144,13 @@ void show_registers(struct pt_regs *regs)
mode = user_mode(regs) ? "User" : "Krnl";
printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
if (!user_mode(regs))
- printk(" (%pSR)", (void *)regs->psw.addr);
- printk("\n");
+ pr_cont(" (%pSR)", (void *)regs->psw.addr);
+ pr_cont("\n");
printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
"P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e,
psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm);
- printk(" RI:%x EA:%x", psw->ri, psw->eaba);
- printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
+ pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
+ printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
printk(" %016lx %016lx %016lx %016lx\n",
regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
@@ -205,14 +186,14 @@ void die(struct pt_regs *regs, const char *str)
printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
regs->int_code >> 17, ++die_counter);
#ifdef CONFIG_PREEMPT
- printk("PREEMPT ");
+ pr_cont("PREEMPT ");
#endif
#ifdef CONFIG_SMP
- printk("SMP ");
+ pr_cont("SMP ");
#endif
if (debug_pagealloc_enabled())
- printk("DEBUG_PAGEALLOC");
- printk("\n");
+ pr_cont("DEBUG_PAGEALLOC");
+ pr_cont("\n");
notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
print_modules();
show_regs(regs);
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 17431f63de00..955a7b6fa0a4 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -222,7 +222,7 @@ static int __init service_level_perf_register(void)
}
arch_initcall(service_level_perf_register);
-static int __perf_callchain_kernel(void *data, unsigned long address)
+static int __perf_callchain_kernel(void *data, unsigned long address, int reliable)
{
struct perf_callchain_entry_ctx *entry = data;
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 44f84b23d4e5..355db9db8210 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -27,12 +27,12 @@ static int __save_address(void *data, unsigned long address, int nosched)
return 1;
}
-static int save_address(void *data, unsigned long address)
+static int save_address(void *data, unsigned long address, int reliable)
{
return __save_address(data, address, 0);
}
-static int save_address_nosched(void *data, unsigned long address)
+static int save_address_nosched(void *data, unsigned long address, int reliable)
{
return __save_address(data, address, 1);
}
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 000e6e91f6a0..3667d20e997f 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -62,9 +62,11 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__start_ro_after_init = .;
+ __start_data_ro_after_init = .;
.data..ro_after_init : {
*(.data..ro_after_init)
}
+ __end_data_ro_after_init = .;
EXCEPTION_TABLE(16)
. = ALIGN(PAGE_SIZE);
__end_ro_after_init = .;
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index cd404aa3931c..4a0c5bce3552 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -217,6 +217,7 @@ static __init int setup_hugepagesz(char *opt)
} else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
} else {
+ hugetlb_bad_size();
pr_err("hugepagesz= specifies an unsupported page size %s\n",
string);
return 0;
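hugetlb_bad_size() records that the size just parsed was invalid, so that a later `hugepages=` count on the command line is discarded rather than silently applied to the default huge page size. For instance, on a machine without EDAT2 (an illustrative command line):

    hugepagesz=2G hugepages=4    # size rejected; without the
                                 # hugetlb_bad_size() call, hugepages=4
                                 # would be applied to the default size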
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index f56a39bd8ba6..b3e9d18f2ec6 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -151,36 +151,40 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
- unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
- unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
+ unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
unsigned long start_pfn = PFN_DOWN(start);
unsigned long size_pages = PFN_DOWN(size);
- unsigned long nr_pages;
- int rc, zone_enum;
+ pg_data_t *pgdat = NODE_DATA(nid);
+ struct zone *zone;
+ int rc, i;
rc = vmem_add_mapping(start, size);
if (rc)
return rc;
- while (size_pages > 0) {
- if (start_pfn < dma_end_pfn) {
- nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
- dma_end_pfn - start_pfn : size_pages;
- zone_enum = ZONE_DMA;
- } else if (start_pfn < normal_end_pfn) {
- nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
- normal_end_pfn - start_pfn : size_pages;
- zone_enum = ZONE_NORMAL;
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ zone = pgdat->node_zones + i;
+ if (zone_idx(zone) != ZONE_MOVABLE) {
+ /* Add range within existing zone limits, if possible */
+ zone_start_pfn = zone->zone_start_pfn;
+ zone_end_pfn = zone->zone_start_pfn +
+ zone->spanned_pages;
} else {
- nr_pages = size_pages;
- zone_enum = ZONE_MOVABLE;
+ /* Add remaining range to ZONE_MOVABLE */
+ zone_start_pfn = start_pfn;
+ zone_end_pfn = start_pfn + size_pages;
}
- rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum,
- start_pfn, size_pages);
+ if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
+ continue;
+ nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
+ zone_end_pfn - start_pfn : size_pages;
+ rc = __add_pages(nid, zone, start_pfn, nr_pages);
if (rc)
break;
start_pfn += nr_pages;
size_pages -= nr_pages;
+ if (!size_pages)
+ break;
}
if (rc)
vmem_remove_mapping(start, size);
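The rewritten arch_add_memory() above no longer carves the hot-added range into fixed DMA/normal/movable pieces; it clamps the range against each existing zone and sends only the leftover to ZONE_MOVABLE. The clamping arithmetic, extracted into a stand-alone sketch (illustrative types):

    #include <stdint.h>

    struct zone_span { uint64_t start, end; };  /* [start, end) in pfns */

    /* how many of the pages starting at start fall into this zone? */
    static uint64_t pages_in_zone(struct zone_span z,
                                  uint64_t start, uint64_t pages)
    {
            if (start < z.start || start >= z.end)
                    return 0;
            return start + pages > z.end ? z.end - start : pages;
    }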
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 16f4c3960b87..9a4de4599c7b 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <asm/processor.h>
-static int __s390_backtrace(void *data, unsigned long address)
+static int __s390_backtrace(void *data, unsigned long address, int reliable)
{
unsigned int *depth = data;
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 7350c8bc13a2..6b2f72f523b9 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -423,7 +423,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
dma_addr_t dma_addr_base, dma_addr;
int flags = ZPCI_PTE_VALID;
struct scatterlist *s;
- unsigned long pa;
+ unsigned long pa = 0;
int ret;
size = PAGE_ALIGN(size);
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index a6cfdabb6054..5b0ed48e5b0c 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -24,9 +24,10 @@ typedef struct {
unsigned int icache_line_size;
unsigned int ecache_size;
unsigned int ecache_line_size;
- unsigned short sock_id;
+ unsigned short sock_id; /* physical package */
unsigned short core_id;
- int proc_id;
+ unsigned short max_cache_id; /* groupings of highest shared cache */
+ unsigned short proc_id; /* strand (aka HW thread) id */
} cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index d9c5876c6121..8011e79f59c9 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -134,7 +134,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
*(volatile __u32 *)&lp->lock = ~0U;
}
-static void inline arch_write_unlock(arch_rwlock_t *lock)
+static inline void arch_write_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
" st %%g0, [%0]"
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 87990b7c6b0d..07c9f2e9bf57 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -96,7 +96,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-static void inline arch_read_lock(arch_rwlock_t *lock)
+static inline void arch_read_lock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -119,7 +119,7 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
: "memory");
}
-static int inline arch_read_trylock(arch_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
{
int tmp1, tmp2;
@@ -140,7 +140,7 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
return tmp1;
}
-static void inline arch_read_unlock(arch_rwlock_t *lock)
+static inline void arch_read_unlock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -156,7 +156,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
: "memory");
}
-static void inline arch_write_lock(arch_rwlock_t *lock)
+static inline void arch_write_lock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2;
@@ -181,7 +181,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
: "memory");
}
-static void inline arch_write_unlock(arch_rwlock_t *lock)
+static inline void arch_write_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
" stw %%g0, [%0]"
@@ -190,7 +190,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
: "memory");
}
-static int inline arch_write_trylock(arch_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2, result;
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index bec481aaca16..7b4898a36eee 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -44,14 +44,20 @@ int __node_distance(int, int);
#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).core_id)
#define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu])
+#define topology_core_cache_cpumask(cpu) (&cpu_core_sib_cache_map[cpu])
#define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#endif /* CONFIG_SMP */
extern cpumask_t cpu_core_map[NR_CPUS];
extern cpumask_t cpu_core_sib_map[NR_CPUS];
+extern cpumask_t cpu_core_sib_cache_map[NR_CPUS];
+
+/**
+ * Return the cores that share the last level cache.
+ */
static inline const struct cpumask *cpu_coregroup_mask(int cpu)
{
- return &cpu_core_map[cpu];
+ return &cpu_core_sib_cache_map[cpu];
}
#endif /* _ASM_SPARC64_TOPOLOGY_H */
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index b68acc563235..5373136c412b 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -82,7 +82,6 @@ static inline int access_ok(int type, const void __user * addr, unsigned long si
return 1;
}
-void __ret_efault(void);
void __retl_efault(void);
/* Uh, these should become the main single-value transfer routines..
@@ -189,55 +188,34 @@ int __get_user_bad(void);
unsigned long __must_check ___copy_from_user(void *to,
const void __user *from,
unsigned long size);
-unsigned long copy_from_user_fixup(void *to, const void __user *from,
- unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
- unsigned long ret;
-
check_object_size(to, size, false);
- ret = ___copy_from_user(to, from, size);
- if (unlikely(ret))
- ret = copy_from_user_fixup(to, from, size);
-
- return ret;
+ return ___copy_from_user(to, from, size);
}
#define __copy_from_user copy_from_user
unsigned long __must_check ___copy_to_user(void __user *to,
const void *from,
unsigned long size);
-unsigned long copy_to_user_fixup(void __user *to, const void *from,
- unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
- unsigned long ret;
-
check_object_size(from, size, true);
- ret = ___copy_to_user(to, from, size);
- if (unlikely(ret))
- ret = copy_to_user_fixup(to, from, size);
- return ret;
+ return ___copy_to_user(to, from, size);
}
#define __copy_to_user copy_to_user
unsigned long __must_check ___copy_in_user(void __user *to,
const void __user *from,
unsigned long size);
-unsigned long copy_in_user_fixup(void __user *to, void __user *from,
- unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
- unsigned long ret = ___copy_in_user(to, from, size);
-
- if (unlikely(ret))
- ret = copy_in_user_fixup(to, from, size);
- return ret;
+ return ___copy_in_user(to, from, size);
}
#define __copy_in_user copy_in_user
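The copy_*_user_fixup() second pass can go away because, with the reworked exception tables later in this series of hunks, the low-level copy routines themselves return the precise number of bytes left uncopied. The contract, modelled in plain C (illustrative; the fault is simulated by an offset):

    #include <stddef.h>
    #include <string.h>

    /* returns bytes NOT copied, like copy_from_user() */
    static size_t copy_until_fault(void *dst, const void *src,
                                   size_t len, size_t fault_off)
    {
            size_t done = fault_off < len ? fault_off : len;

            memcpy(dst, src, done);
            return len - done;
    }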
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index beba6c11554c..6aa3da152c20 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -926,48 +926,11 @@ tlb_type: .word 0 /* Must NOT end up in BSS */
EXPORT_SYMBOL(tlb_type)
.section ".fixup",#alloc,#execinstr
- .globl __ret_efault, __retl_efault, __ret_one, __retl_one
-ENTRY(__ret_efault)
- ret
- restore %g0, -EFAULT, %o0
-ENDPROC(__ret_efault)
-EXPORT_SYMBOL(__ret_efault)
-
ENTRY(__retl_efault)
retl
mov -EFAULT, %o0
ENDPROC(__retl_efault)
-ENTRY(__retl_one)
- retl
- mov 1, %o0
-ENDPROC(__retl_one)
-
-ENTRY(__retl_one_fp)
- VISExitHalf
- retl
- mov 1, %o0
-ENDPROC(__retl_one_fp)
-
-ENTRY(__ret_one_asi)
- wr %g0, ASI_AIUS, %asi
- ret
- restore %g0, 1, %o0
-ENDPROC(__ret_one_asi)
-
-ENTRY(__retl_one_asi)
- wr %g0, ASI_AIUS, %asi
- retl
- mov 1, %o0
-ENDPROC(__retl_one_asi)
-
-ENTRY(__retl_one_asi_fp)
- wr %g0, ASI_AIUS, %asi
- VISExitHalf
- retl
- mov 1, %o0
-ENDPROC(__retl_one_asi_fp)
-
ENTRY(__retl_o1)
retl
mov %o1, %o0
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 59bbeff55024..07933b9e9ce0 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -13,19 +13,30 @@
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
- u32 val;
u32 *insn = (u32 *) (unsigned long) entry->code;
+ u32 val;
if (type == JUMP_LABEL_JMP) {
s32 off = (s32)entry->target - (s32)entry->code;
+ bool use_v9_branch = false;
+
+ BUG_ON(off & 3);
#ifdef CONFIG_SPARC64
- /* ba,pt %xcc, . + (off << 2) */
- val = 0x10680000 | ((u32) off >> 2);
-#else
- /* ba . + (off << 2) */
- val = 0x10800000 | ((u32) off >> 2);
+ if (off <= 0xfffff && off >= -0x100000)
+ use_v9_branch = true;
#endif
+ if (use_v9_branch) {
+ /* WDISP19 - target is . + immed << 2 */
+ /* ba,pt %xcc, . + off */
+ val = 0x10680000 | (((u32) off >> 2) & 0x7ffff);
+ } else {
+ /* WDISP22 - target is . + immed << 2 */
+ BUG_ON(off > 0x7fffff);
+ BUG_ON(off < -0x800000);
+ /* ba . + off */
+ val = 0x10800000 | (((u32) off >> 2) & 0x3fffff);
+ }
} else {
val = 0x01000000;
}
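The two encodings used above differ only in displacement width: `ba,pt %xcc` is a v9 WDISP19 branch, a signed 19-bit word displacement covering byte offsets in [-0x100000, 0xfffff], while plain `ba` is a WDISP22 branch covering [-0x800000, 0x7fffff]; both shift the byte offset right by two because targets are word aligned. A compact encoder for both forms (a sketch mirroring the constants in the hunk):

    #include <stdint.h>

    static uint32_t encode_ba_pt_xcc(int32_t off)   /* v9, WDISP19 */
    {
            return 0x10680000u | (((uint32_t)off >> 2) & 0x7ffff);
    }

    static uint32_t encode_ba(int32_t off)          /* v8, WDISP22 */
    {
            return 0x10800000u | (((uint32_t)off >> 2) & 0x3fffff);
    }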
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 11228861d9b4..8a6982dfd733 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -645,13 +645,20 @@ static void __mark_core_id(struct mdesc_handle *hp, u64 node,
cpu_data(*id).core_id = core_id;
}
-static void __mark_sock_id(struct mdesc_handle *hp, u64 node,
- int sock_id)
+static void __mark_max_cache_id(struct mdesc_handle *hp, u64 node,
+ int max_cache_id)
{
const u64 *id = mdesc_get_property(hp, node, "id", NULL);
- if (*id < num_possible_cpus())
- cpu_data(*id).sock_id = sock_id;
+ if (*id < num_possible_cpus()) {
+ cpu_data(*id).max_cache_id = max_cache_id;
+
+ /**
+ * On systems without explicit socket descriptions, the socket
+ * is max_cache_id.
+ */
+ cpu_data(*id).sock_id = max_cache_id;
+ }
}
static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
@@ -660,10 +667,11 @@ static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
}
-static void mark_sock_ids(struct mdesc_handle *hp, u64 mp,
- int sock_id)
+static void mark_max_cache_ids(struct mdesc_handle *hp, u64 mp,
+ int max_cache_id)
{
- find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10);
+ find_back_node_value(hp, mp, "cpu", __mark_max_cache_id,
+ max_cache_id, 10);
}
static void set_core_ids(struct mdesc_handle *hp)
@@ -694,14 +702,15 @@ static void set_core_ids(struct mdesc_handle *hp)
}
}
-static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level)
+static int set_max_cache_ids_by_cache(struct mdesc_handle *hp, int level)
{
u64 mp;
int idx = 1;
int fnd = 0;
- /* Identify unique sockets by looking for cpus backpointed to by
- * shared level n caches.
+ /**
+ * Identify the unique highest level of shared cache by looking for
+ * cpus backpointed to by shared level N caches.
*/
mdesc_for_each_node_by_name(hp, mp, "cache") {
const u64 *cur_lvl;
@@ -709,8 +718,7 @@ static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level)
cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
if (*cur_lvl != level)
continue;
-
- mark_sock_ids(hp, mp, idx);
+ mark_max_cache_ids(hp, mp, idx);
idx++;
fnd = 1;
}
@@ -745,15 +753,17 @@ static void set_sock_ids(struct mdesc_handle *hp)
{
u64 mp;
- /* If machine description exposes sockets data use it.
- * Otherwise fallback to use shared L3 or L2 caches.
+ /**
+ * Find the highest level of shared cache, which on pre-T7 systems
+ * is also the socket.
*/
+ if (!set_max_cache_ids_by_cache(hp, 3))
+ set_max_cache_ids_by_cache(hp, 2);
+
+ /* If the machine description exposes socket data, use it. */
mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
if (mp != MDESC_NODE_NULL)
- return set_sock_ids_by_socket(hp, mp);
-
- if (!set_sock_ids_by_cache(hp, 3))
- set_sock_ids_by_cache(hp, 2);
+ set_sock_ids_by_socket(hp, mp);
}
static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index d3035ba6cd31..8182f7caf5b1 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -63,9 +63,13 @@ cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
[0 ... NR_CPUS-1] = CPU_MASK_NONE };
+cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
+ [0 ... NR_CPUS - 1] = CPU_MASK_NONE };
+
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);
EXPORT_SYMBOL(cpu_core_sib_map);
+EXPORT_SYMBOL(cpu_core_sib_cache_map);
static cpumask_t smp_commenced_mask;
@@ -1265,6 +1269,10 @@ void smp_fill_in_sib_core_maps(void)
unsigned int j;
for_each_present_cpu(j) {
+ if (cpu_data(i).max_cache_id ==
+ cpu_data(j).max_cache_id)
+ cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);
+
if (cpu_data(i).sock_id == cpu_data(j).sock_id)
cpumask_set_cpu(j, &cpu_core_sib_map[i]);
}
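With max_cache_id in place, smp_fill_in_sib_core_maps() above builds the new cpu_core_sib_cache_map exactly like the socket map: two CPUs land in the same coregroup iff their max_cache_id values match. The same construction in stand-alone form (illustrative, masks as plain bit sets):

    #include <stdint.h>

    #define NCPUS 64

    static void fill_cache_masks(const unsigned short cache_id[NCPUS],
                                 uint64_t mask[NCPUS])
    {
            for (int i = 0; i < NCPUS; i++)
                    for (int j = 0; j < NCPUS; j++)
                            if (cache_id[i] == cache_id[j])
                                    mask[i] |= 1ULL << j;
    }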
diff --git a/arch/sparc/lib/GENcopy_from_user.S b/arch/sparc/lib/GENcopy_from_user.S
index b7d0bd6b1406..69a439fa2fc1 100644
--- a/arch/sparc/lib/GENcopy_from_user.S
+++ b/arch/sparc/lib/GENcopy_from_user.S
@@ -3,11 +3,11 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/GENcopy_to_user.S b/arch/sparc/lib/GENcopy_to_user.S
index 780550e1afc7..9947427ce354 100644
--- a/arch/sparc/lib/GENcopy_to_user.S
+++ b/arch/sparc/lib/GENcopy_to_user.S
@@ -3,11 +3,11 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/GENmemcpy.S b/arch/sparc/lib/GENmemcpy.S
index 89358ee94851..059ea24ad73d 100644
--- a/arch/sparc/lib/GENmemcpy.S
+++ b/arch/sparc/lib/GENmemcpy.S
@@ -4,21 +4,18 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#define GLOBAL_SPARE %g7
#else
#define GLOBAL_SPARE %g5
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST(x,y) x
#endif
#ifndef LOAD
@@ -45,6 +42,29 @@
.register %g3,#scratch
.text
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+ENTRY(GEN_retl_o4_1)
+ add %o4, %o2, %o4
+ retl
+ add %o4, 1, %o0
+ENDPROC(GEN_retl_o4_1)
+ENTRY(GEN_retl_g1_8)
+ add %g1, %o2, %g1
+ retl
+ add %g1, 8, %o0
+ENDPROC(GEN_retl_g1_8)
+ENTRY(GEN_retl_o2_4)
+ retl
+ add %o2, 4, %o0
+ENDPROC(GEN_retl_o2_4)
+ENTRY(GEN_retl_o2_1)
+ retl
+ add %o2, 1, %o0
+ENDPROC(GEN_retl_o2_1)
+#endif
+
.align 64
.globl FUNC_NAME
@@ -73,8 +93,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %g0, %o4, %o4
sub %o2, %o4, %o2
1: subcc %o4, 1, %o4
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o0))
+ EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o4_1)
+ EX_ST(STORE(stb, %g1, %o0),GEN_retl_o4_1)
add %o1, 1, %o1
bne,pt %XCC, 1b
add %o0, 1, %o0
@@ -82,8 +102,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0x7, %g1
sub %o2, %g1, %o2
1: subcc %g1, 0x8, %g1
- EX_LD(LOAD(ldx, %o1, %g2))
- EX_ST(STORE(stx, %g2, %o0))
+ EX_LD(LOAD(ldx, %o1, %g2),GEN_retl_g1_8)
+ EX_ST(STORE(stx, %g2, %o0),GEN_retl_g1_8)
add %o1, 0x8, %o1
bne,pt %XCC, 1b
add %o0, 0x8, %o0
@@ -100,8 +120,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
1:
subcc %o2, 4, %o2
- EX_LD(LOAD(lduw, %o1, %g1))
- EX_ST(STORE(stw, %g1, %o1 + %o3))
+ EX_LD(LOAD(lduw, %o1, %g1),GEN_retl_o2_4)
+ EX_ST(STORE(stw, %g1, %o1 + %o3),GEN_retl_o2_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
@@ -111,8 +131,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 32
90:
subcc %o2, 1, %o2
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o1 + %o3))
+ EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o2_1)
+ EX_ST(STORE(stb, %g1, %o1 + %o3),GEN_retl_o2_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
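The pattern running through the remaining sparc hunks: each EX_LD/EX_ST now takes a second argument naming a per-call-site fixup, and the exception table pairs the faulting instruction (98b) with that fixup instead of a generic __retl_one. Each fixup reconstructs, from whatever registers are live at that point in the copy loop, exactly how many bytes were still to be copied. For GEN_retl_o4_1 above, the arithmetic is (a model, not kernel code):

    /* GEN_retl_o4_1: add %o4, %o2, %o4; retl; add %o4, 1, %o0 */
    static unsigned long gen_retl_o4_1(unsigned long o4, unsigned long o2)
    {
            return o4 + o2 + 1;     /* bytes not copied */
    }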
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 885f00e81d1a..69912d2f8b54 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -38,7 +38,7 @@ lib-$(CONFIG_SPARC64) += NG4patch.o NG4copy_page.o NG4clear_page.o NG4memset.o
lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o
lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o
-lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o
+lib-$(CONFIG_SPARC64) += copy_in_user.o memmove.o
lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
obj-$(CONFIG_SPARC64) += iomap.o
diff --git a/arch/sparc/lib/NG2copy_from_user.S b/arch/sparc/lib/NG2copy_from_user.S
index d5242b8c4f94..b79a6998d87c 100644
--- a/arch/sparc/lib/NG2copy_from_user.S
+++ b/arch/sparc/lib/NG2copy_from_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_LD_FP(x) \
+#define EX_LD_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NG2copy_to_user.S b/arch/sparc/lib/NG2copy_to_user.S
index 4e962d993b10..dcec55f254ab 100644
--- a/arch/sparc/lib/NG2copy_to_user.S
+++ b/arch/sparc/lib/NG2copy_to_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_ST_FP(x) \
+#define EX_ST_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index d5f585df2f3f..c629dbd121b6 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -4,6 +4,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE %g7
@@ -32,21 +33,17 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
-#define EX_LD_FP(x) x
+#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
+#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
-#define EX_ST_FP(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST_FP(x,y) x
#endif
#ifndef LOAD
@@ -140,45 +137,110 @@
fsrc2 %x6, %f12; \
fsrc2 %x7, %f14;
#define FREG_LOAD_1(base, x0) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0))
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1)
#define FREG_LOAD_2(base, x0, x1) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1);
#define FREG_LOAD_3(base, x0, x1, x2) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1);
#define FREG_LOAD_4(base, x0, x1, x2, x3) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
- EX_LD_FP(LOAD(ldd, base + 0x18, %x3));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1);
#define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
- EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
- EX_LD_FP(LOAD(ldd, base + 0x20, %x4));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1);
#define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
- EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
- EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \
- EX_LD_FP(LOAD(ldd, base + 0x28, %x5));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1);
#define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
- EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
- EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \
- EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); \
- EX_LD_FP(LOAD(ldd, base + 0x30, %x6));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x30, %x6), NG2_retl_o2_plus_g1);
.register %g2,#scratch
.register %g3,#scratch
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+__restore_fp:
+ VISExitHalf
+__restore_asi:
+ retl
+ wr %g0, ASI_AIUS, %asi
+ENTRY(NG2_retl_o2)
+ ba,pt %xcc, __restore_asi
+ mov %o2, %o0
+ENDPROC(NG2_retl_o2)
+ENTRY(NG2_retl_o2_plus_1)
+ ba,pt %xcc, __restore_asi
+ add %o2, 1, %o0
+ENDPROC(NG2_retl_o2_plus_1)
+ENTRY(NG2_retl_o2_plus_4)
+ ba,pt %xcc, __restore_asi
+ add %o2, 4, %o0
+ENDPROC(NG2_retl_o2_plus_4)
+ENTRY(NG2_retl_o2_plus_8)
+ ba,pt %xcc, __restore_asi
+ add %o2, 8, %o0
+ENDPROC(NG2_retl_o2_plus_8)
+ENTRY(NG2_retl_o2_plus_o4_plus_1)
+ add %o4, 1, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_plus_o4_plus_1)
+ENTRY(NG2_retl_o2_plus_o4_plus_8)
+ add %o4, 8, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_plus_o4_plus_8)
+ENTRY(NG2_retl_o2_plus_o4_plus_16)
+ add %o4, 16, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_plus_o4_plus_16)
+ENTRY(NG2_retl_o2_plus_g1_fp)
+ ba,pt %xcc, __restore_fp
+ add %o2, %g1, %o0
+ENDPROC(NG2_retl_o2_plus_g1_fp)
+ENTRY(NG2_retl_o2_plus_g1_plus_64_fp)
+ add %g1, 64, %g1
+ ba,pt %xcc, __restore_fp
+ add %o2, %g1, %o0
+ENDPROC(NG2_retl_o2_plus_g1_plus_64_fp)
+ENTRY(NG2_retl_o2_plus_g1_plus_1)
+ add %g1, 1, %g1
+ ba,pt %xcc, __restore_asi
+ add %o2, %g1, %o0
+ENDPROC(NG2_retl_o2_plus_g1_plus_1)
+ENTRY(NG2_retl_o2_and_7_plus_o4)
+ and %o2, 7, %o2
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_and_7_plus_o4)
+ENTRY(NG2_retl_o2_and_7_plus_o4_plus_8)
+ and %o2, 7, %o2
+ add %o4, 8, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_and_7_plus_o4_plus_8)
+#endif
+
.align 64
.globl FUNC_NAME
@@ -230,8 +292,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %g0, %o4, %o4 ! bytes to align dst
sub %o2, %o4, %o2
1: subcc %o4, 1, %o4
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o0))
+ EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_o4_plus_1)
+ EX_ST(STORE(stb, %g1, %o0), NG2_retl_o2_plus_o4_plus_1)
add %o1, 1, %o1
bne,pt %XCC, 1b
add %o0, 1, %o0
@@ -281,11 +343,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
nop
/* fall through for 0 < low bits < 8 */
110: sub %o4, 64, %g2
- EX_LD_FP(LOAD_BLK(%g2, %f0))
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+ EX_LD_FP(LOAD_BLK(%g2, %f0), NG2_retl_o2_plus_g1)
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -296,10 +358,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
120: sub %o4, 56, %g2
FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -310,10 +372,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
130: sub %o4, 48, %g2
FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_6(f20, f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -324,10 +386,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
140: sub %o4, 40, %g2
FREG_LOAD_5(%g2, f0, f2, f4, f6, f8)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_5(f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -338,10 +400,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
150: sub %o4, 32, %g2
FREG_LOAD_4(%g2, f0, f2, f4, f6)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_4(f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -352,10 +414,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
160: sub %o4, 24, %g2
FREG_LOAD_3(%g2, f0, f2, f4)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_3(f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -366,10 +428,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
170: sub %o4, 16, %g2
FREG_LOAD_2(%g2, f0, f2)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_2(f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -380,10 +442,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
180: sub %o4, 8, %g2
FREG_LOAD_1(%g2, f0)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_1(f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -393,10 +455,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
nop
190:
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
subcc %g1, 64, %g1
- EX_LD_FP(LOAD_BLK(%o4, %f0))
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_LD_FP(LOAD_BLK(%o4, %f0), NG2_retl_o2_plus_g1_plus_64)
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1_plus_64)
add %o4, 64, %o4
bne,pt %xcc, 1b
LOAD(prefetch, %o4 + 64, #one_read)
@@ -423,28 +485,28 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0xf, %o4
and %o2, 0xf, %o2
1: subcc %o4, 0x10, %o4
- EX_LD(LOAD(ldx, %o1, %o5))
+ EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_o4_plus_16)
add %o1, 0x08, %o1
- EX_LD(LOAD(ldx, %o1, %g1))
+ EX_LD(LOAD(ldx, %o1, %g1), NG2_retl_o2_plus_o4_plus_16)
sub %o1, 0x08, %o1
- EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE))
+ EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_16)
add %o1, 0x8, %o1
- EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE))
+ EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_8)
bgu,pt %XCC, 1b
add %o1, 0x8, %o1
73: andcc %o2, 0x8, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x8, %o2
- EX_LD(LOAD(ldx, %o1, %o5))
- EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_8)
+ EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_8)
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x4, %o2
- EX_LD(LOAD(lduw, %o1, %o5))
- EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(lduw, %o1, %o5), NG2_retl_o2_plus_4)
+ EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4)
add %o1, 0x4, %o1
1: cmp %o2, 0
be,pt %XCC, 85f
@@ -460,8 +522,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %g1, %o2
1: subcc %g1, 1, %g1
- EX_LD(LOAD(ldub, %o1, %o5))
- EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(ldub, %o1, %o5), NG2_retl_o2_plus_g1_plus_1)
+ EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_g1_plus_1)
bgu,pt %icc, 1b
add %o1, 1, %o1
@@ -477,16 +539,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
8: mov 64, GLOBAL_SPARE
andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1, %g2))
+ EX_LD(LOAD(ldx, %o1, %g2), NG2_retl_o2)
sub GLOBAL_SPARE, %g1, GLOBAL_SPARE
andn %o2, 0x7, %o4
sllx %g2, %g1, %g2
1: add %o1, 0x8, %o1
- EX_LD(LOAD(ldx, %o1, %g3))
+ EX_LD(LOAD(ldx, %o1, %g3), NG2_retl_o2_and_7_plus_o4)
subcc %o4, 0x8, %o4
srlx %g3, GLOBAL_SPARE, %o5
or %o5, %g2, %o5
- EX_ST(STORE(stx, %o5, %o0))
+ EX_ST(STORE(stx, %o5, %o0), NG2_retl_o2_and_7_plus_o4_plus_8)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
@@ -506,8 +568,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
1:
subcc %o2, 4, %o2
- EX_LD(LOAD(lduw, %o1, %g1))
- EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(lduw, %o1, %g1), NG2_retl_o2_plus_4)
+ EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
@@ -517,8 +579,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 32
90:
subcc %o2, 1, %o2
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_1)
+ EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
diff --git a/arch/sparc/lib/NG4copy_from_user.S b/arch/sparc/lib/NG4copy_from_user.S
index 2e8ee7ad07a9..16a286c1a528 100644
--- a/arch/sparc/lib/NG4copy_from_user.S
+++ b/arch/sparc/lib/NG4copy_from_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 2012 David S. Miller (davem@davemloft.net)
*/
-#define EX_LD(x) \
+#define EX_LD(x, y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_LD_FP(x) \
+#define EX_LD_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NG4copy_to_user.S b/arch/sparc/lib/NG4copy_to_user.S
index be0bf4590df8..6b0276ffc858 100644
--- a/arch/sparc/lib/NG4copy_to_user.S
+++ b/arch/sparc/lib/NG4copy_to_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 2012 David S. Miller (davem@davemloft.net)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_ST_FP(x) \
+#define EX_ST_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 8e13ee1f4454..75bb93b1437f 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -4,6 +4,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE %g7
@@ -46,22 +47,19 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
-#define EX_LD_FP(x) x
+#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
+#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
-#define EX_ST_FP(x) x
+#define EX_ST_FP(x,y) x
#endif
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
-#endif
#ifndef LOAD
#define LOAD(type,addr,dest) type [addr], dest
@@ -94,6 +92,158 @@
.register %g3,#scratch
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+__restore_asi_fp:
+ VISExitHalf
+__restore_asi:
+ retl
+ wr %g0, ASI_AIUS, %asi
+
+ENTRY(NG4_retl_o2)
+ ba,pt %xcc, __restore_asi
+ mov %o2, %o0
+ENDPROC(NG4_retl_o2)
+ENTRY(NG4_retl_o2_plus_1)
+ ba,pt %xcc, __restore_asi
+ add %o2, 1, %o0
+ENDPROC(NG4_retl_o2_plus_1)
+ENTRY(NG4_retl_o2_plus_4)
+ ba,pt %xcc, __restore_asi
+ add %o2, 4, %o0
+ENDPROC(NG4_retl_o2_plus_4)
+ENTRY(NG4_retl_o2_plus_o5)
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5)
+ENTRY(NG4_retl_o2_plus_o5_plus_4)
+ add %o5, 4, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_4)
+ENTRY(NG4_retl_o2_plus_o5_plus_8)
+ add %o5, 8, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_8)
+ENTRY(NG4_retl_o2_plus_o5_plus_16)
+ add %o5, 16, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_16)
+ENTRY(NG4_retl_o2_plus_o5_plus_24)
+ add %o5, 24, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_24)
+ENTRY(NG4_retl_o2_plus_o5_plus_32)
+ add %o5, 32, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_32)
+ENTRY(NG4_retl_o2_plus_g1)
+ ba,pt %xcc, __restore_asi
+ add %o2, %g1, %o0
+ENDPROC(NG4_retl_o2_plus_g1)
+ENTRY(NG4_retl_o2_plus_g1_plus_1)
+ add %g1, 1, %g1
+ ba,pt %xcc, __restore_asi
+ add %o2, %g1, %o0
+ENDPROC(NG4_retl_o2_plus_g1_plus_1)
+ENTRY(NG4_retl_o2_plus_g1_plus_8)
+ add %g1, 8, %g1
+ ba,pt %xcc, __restore_asi
+ add %o2, %g1, %o0
+ENDPROC(NG4_retl_o2_plus_g1_plus_8)
+ENTRY(NG4_retl_o2_plus_o4)
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4)
+ENTRY(NG4_retl_o2_plus_o4_plus_8)
+ add %o4, 8, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_8)
+ENTRY(NG4_retl_o2_plus_o4_plus_16)
+ add %o4, 16, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_16)
+ENTRY(NG4_retl_o2_plus_o4_plus_24)
+ add %o4, 24, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_24)
+ENTRY(NG4_retl_o2_plus_o4_plus_32)
+ add %o4, 32, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_32)
+ENTRY(NG4_retl_o2_plus_o4_plus_40)
+ add %o4, 40, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_40)
+ENTRY(NG4_retl_o2_plus_o4_plus_48)
+ add %o4, 48, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_48)
+ENTRY(NG4_retl_o2_plus_o4_plus_56)
+ add %o4, 56, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_56)
+ENTRY(NG4_retl_o2_plus_o4_plus_64)
+ add %o4, 64, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_64)
+ENTRY(NG4_retl_o2_plus_o4_fp)
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_8_fp)
+ add %o4, 8, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_8_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_16_fp)
+ add %o4, 16, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_16_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_24_fp)
+ add %o4, 24, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_24_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_32_fp)
+ add %o4, 32, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_32_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_40_fp)
+ add %o4, 40, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_40_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_48_fp)
+ add %o4, 48, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_48_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_56_fp)
+ add %o4, 56, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_56_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_64_fp)
+ add %o4, 64, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_64_fp)
+#endif
.align 64
.globl FUNC_NAME
@@ -124,12 +274,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
brz,pt %g1, 51f
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2))
+
+1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
add %o1, 1, %o1
subcc %g1, 1, %g1
add %o0, 1, %o0
bne,pt %icc, 1b
- EX_ST(STORE(stb, %g2, %o0 - 0x01))
+ EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1)
51: LOAD(prefetch, %o1 + 0x040, #n_reads_strong)
LOAD(prefetch, %o1 + 0x080, #n_reads_strong)
@@ -154,43 +305,43 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
brz,pt %g1, .Llarge_aligned
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2))
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
add %o1, 8, %o1
subcc %g1, 8, %g1
add %o0, 8, %o0
bne,pt %icc, 1b
- EX_ST(STORE(stx, %g2, %o0 - 0x08))
+ EX_ST(STORE(stx, %g2, %o0 - 0x08), NG4_retl_o2_plus_g1_plus_8)
.Llarge_aligned:
/* len >= 0x80 && src 8-byte aligned && dest 8-byte aligned */
andn %o2, 0x3f, %o4
sub %o2, %o4, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1))
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o4)
add %o1, 0x40, %o1
- EX_LD(LOAD(ldx, %o1 - 0x38, %g2))
+ EX_LD(LOAD(ldx, %o1 - 0x38, %g2), NG4_retl_o2_plus_o4)
subcc %o4, 0x40, %o4
- EX_LD(LOAD(ldx, %o1 - 0x30, %g3))
- EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE))
- EX_LD(LOAD(ldx, %o1 - 0x20, %o5))
- EX_ST(STORE_INIT(%g1, %o0))
+ EX_LD(LOAD(ldx, %o1 - 0x30, %g3), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD(LOAD(ldx, %o1 - 0x20, %o5), NG4_retl_o2_plus_o4_plus_64)
+ EX_ST(STORE_INIT(%g1, %o0), NG4_retl_o2_plus_o4_plus_64)
add %o0, 0x08, %o0
- EX_ST(STORE_INIT(%g2, %o0))
+ EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_56)
add %o0, 0x08, %o0
- EX_LD(LOAD(ldx, %o1 - 0x18, %g2))
- EX_ST(STORE_INIT(%g3, %o0))
+ EX_LD(LOAD(ldx, %o1 - 0x18, %g2), NG4_retl_o2_plus_o4_plus_48)
+ EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_48)
add %o0, 0x08, %o0
- EX_LD(LOAD(ldx, %o1 - 0x10, %g3))
- EX_ST(STORE_INIT(GLOBAL_SPARE, %o0))
+ EX_LD(LOAD(ldx, %o1 - 0x10, %g3), NG4_retl_o2_plus_o4_plus_40)
+ EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_40)
add %o0, 0x08, %o0
- EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE))
- EX_ST(STORE_INIT(%o5, %o0))
+ EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_32)
+ EX_ST(STORE_INIT(%o5, %o0), NG4_retl_o2_plus_o4_plus_32)
add %o0, 0x08, %o0
- EX_ST(STORE_INIT(%g2, %o0))
+ EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_24)
add %o0, 0x08, %o0
- EX_ST(STORE_INIT(%g3, %o0))
+ EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_16)
add %o0, 0x08, %o0
- EX_ST(STORE_INIT(GLOBAL_SPARE, %o0))
+ EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_8)
add %o0, 0x08, %o0
bne,pt %icc, 1b
LOAD(prefetch, %o1 + 0x200, #n_reads_strong)
@@ -216,17 +367,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %o4, %o2
alignaddr %o1, %g0, %g1
add %o1, %o4, %o1
- EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0))
-1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2))
+ EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0), NG4_retl_o2_plus_o4)
+1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2), NG4_retl_o2_plus_o4)
subcc %o4, 0x40, %o4
- EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4))
- EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6))
- EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8))
- EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10))
- EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12))
- EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14))
+ EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14), NG4_retl_o2_plus_o4_plus_64)
faligndata %f0, %f2, %f16
- EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0))
+ EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0), NG4_retl_o2_plus_o4_plus_64)
faligndata %f2, %f4, %f18
add %g1, 0x40, %g1
faligndata %f4, %f6, %f20
@@ -235,14 +386,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
faligndata %f10, %f12, %f26
faligndata %f12, %f14, %f28
faligndata %f14, %f0, %f30
- EX_ST_FP(STORE(std, %f16, %o0 + 0x00))
- EX_ST_FP(STORE(std, %f18, %o0 + 0x08))
- EX_ST_FP(STORE(std, %f20, %o0 + 0x10))
- EX_ST_FP(STORE(std, %f22, %o0 + 0x18))
- EX_ST_FP(STORE(std, %f24, %o0 + 0x20))
- EX_ST_FP(STORE(std, %f26, %o0 + 0x28))
- EX_ST_FP(STORE(std, %f28, %o0 + 0x30))
- EX_ST_FP(STORE(std, %f30, %o0 + 0x38))
+ EX_ST_FP(STORE(std, %f16, %o0 + 0x00), NG4_retl_o2_plus_o4_plus_64)
+ EX_ST_FP(STORE(std, %f18, %o0 + 0x08), NG4_retl_o2_plus_o4_plus_56)
+ EX_ST_FP(STORE(std, %f20, %o0 + 0x10), NG4_retl_o2_plus_o4_plus_48)
+ EX_ST_FP(STORE(std, %f22, %o0 + 0x18), NG4_retl_o2_plus_o4_plus_40)
+ EX_ST_FP(STORE(std, %f24, %o0 + 0x20), NG4_retl_o2_plus_o4_plus_32)
+ EX_ST_FP(STORE(std, %f26, %o0 + 0x28), NG4_retl_o2_plus_o4_plus_24)
+ EX_ST_FP(STORE(std, %f28, %o0 + 0x30), NG4_retl_o2_plus_o4_plus_16)
+ EX_ST_FP(STORE(std, %f30, %o0 + 0x38), NG4_retl_o2_plus_o4_plus_8)
add %o0, 0x40, %o0
bne,pt %icc, 1b
LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
@@ -270,37 +421,38 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andncc %o2, 0x20 - 1, %o5
be,pn %icc, 2f
sub %o2, %o5, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1))
- EX_LD(LOAD(ldx, %o1 + 0x08, %g2))
- EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE))
- EX_LD(LOAD(ldx, %o1 + 0x18, %o4))
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
+ EX_LD(LOAD(ldx, %o1 + 0x08, %g2), NG4_retl_o2_plus_o5)
+ EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE), NG4_retl_o2_plus_o5)
+ EX_LD(LOAD(ldx, %o1 + 0x18, %o4), NG4_retl_o2_plus_o5)
add %o1, 0x20, %o1
subcc %o5, 0x20, %o5
- EX_ST(STORE(stx, %g1, %o0 + 0x00))
- EX_ST(STORE(stx, %g2, %o0 + 0x08))
- EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10))
- EX_ST(STORE(stx, %o4, %o0 + 0x18))
+ EX_ST(STORE(stx, %g1, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_32)
+ EX_ST(STORE(stx, %g2, %o0 + 0x08), NG4_retl_o2_plus_o5_plus_24)
+ EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), NG4_retl_o2_plus_o5_plus_24)
+ EX_ST(STORE(stx, %o4, %o0 + 0x18), NG4_retl_o2_plus_o5_plus_8)
bne,pt %icc, 1b
add %o0, 0x20, %o0
2: andcc %o2, 0x18, %o5
be,pt %icc, 3f
sub %o2, %o5, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1))
+
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
add %o1, 0x08, %o1
add %o0, 0x08, %o0
subcc %o5, 0x08, %o5
bne,pt %icc, 1b
- EX_ST(STORE(stx, %g1, %o0 - 0x08))
+ EX_ST(STORE(stx, %g1, %o0 - 0x08), NG4_retl_o2_plus_o5_plus_8)
3: brz,pt %o2, .Lexit
cmp %o2, 0x04
bl,pn %icc, .Ltiny
nop
- EX_LD(LOAD(lduw, %o1 + 0x00, %g1))
+ EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2)
add %o1, 0x04, %o1
add %o0, 0x04, %o0
subcc %o2, 0x04, %o2
bne,pn %icc, .Ltiny
- EX_ST(STORE(stw, %g1, %o0 - 0x04))
+ EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_4)
ba,a,pt %icc, .Lexit
.Lmedium_unaligned:
/* First get dest 8 byte aligned. */
@@ -309,12 +461,12 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
brz,pt %g1, 2f
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2))
+1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
add %o1, 1, %o1
subcc %g1, 1, %g1
add %o0, 1, %o0
bne,pt %icc, 1b
- EX_ST(STORE(stb, %g2, %o0 - 0x01))
+ EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1)
2:
and %o1, 0x7, %g1
brz,pn %g1, .Lmedium_noprefetch
@@ -322,16 +474,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
mov 64, %g2
sub %g2, %g1, %g2
andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1 + 0x00, %o4))
+ EX_LD(LOAD(ldx, %o1 + 0x00, %o4), NG4_retl_o2)
sllx %o4, %g1, %o4
andn %o2, 0x08 - 1, %o5
sub %o2, %o5, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3))
+1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), NG4_retl_o2_plus_o5)
add %o1, 0x08, %o1
subcc %o5, 0x08, %o5
srlx %g3, %g2, GLOBAL_SPARE
or GLOBAL_SPARE, %o4, GLOBAL_SPARE
- EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00))
+ EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_8)
add %o0, 0x08, %o0
bne,pt %icc, 1b
sllx %g3, %g1, %o4
@@ -342,17 +494,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
ba,pt %icc, .Lsmall_unaligned
.Ltiny:
- EX_LD(LOAD(ldub, %o1 + 0x00, %g1))
+ EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2)
subcc %o2, 1, %o2
be,pn %icc, .Lexit
- EX_ST(STORE(stb, %g1, %o0 + 0x00))
- EX_LD(LOAD(ldub, %o1 + 0x01, %g1))
+ EX_ST(STORE(stb, %g1, %o0 + 0x00), NG4_retl_o2_plus_1)
+ EX_LD(LOAD(ldub, %o1 + 0x01, %g1), NG4_retl_o2)
subcc %o2, 1, %o2
be,pn %icc, .Lexit
- EX_ST(STORE(stb, %g1, %o0 + 0x01))
- EX_LD(LOAD(ldub, %o1 + 0x02, %g1))
+ EX_ST(STORE(stb, %g1, %o0 + 0x01), NG4_retl_o2_plus_1)
+ EX_LD(LOAD(ldub, %o1 + 0x02, %g1), NG4_retl_o2)
ba,pt %icc, .Lexit
- EX_ST(STORE(stb, %g1, %o0 + 0x02))
+ EX_ST(STORE(stb, %g1, %o0 + 0x02), NG4_retl_o2)
.Lsmall:
andcc %g2, 0x3, %g0
@@ -360,22 +512,22 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0x4 - 1, %o5
sub %o2, %o5, %o2
1:
- EX_LD(LOAD(lduw, %o1 + 0x00, %g1))
+ EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
add %o1, 0x04, %o1
subcc %o5, 0x04, %o5
add %o0, 0x04, %o0
bne,pt %icc, 1b
- EX_ST(STORE(stw, %g1, %o0 - 0x04))
+ EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_o5_plus_4)
brz,pt %o2, .Lexit
nop
ba,a,pt %icc, .Ltiny
.Lsmall_unaligned:
-1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1))
+1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2)
add %o1, 1, %o1
add %o0, 1, %o0
subcc %o2, 1, %o2
bne,pt %icc, 1b
- EX_ST(STORE(stb, %g1, %o0 - 0x01))
+ EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
ba,a,pt %icc, .Lexit
.size FUNC_NAME, .-FUNC_NAME
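
The NG4 stubs above all follow one naming convention: NG4_retl_o2_plus_<reg>_plus_<NN> returns %o2 + <reg> + NN, the exact number of bytes the copy had not yet committed when the access faulted, replacing the old shared one-size-fits-all handler. For orientation, a rough C model of that arithmetic, not taken from the patch (register names are stand-ins for the SPARC registers used above):

    /* Model of the NG4_retl_* convention: the fixup returns the exact
     * residue.  o2 holds the tail not yet entered into a loop, o4 the
     * bytes still owed by the current loop, nn the constant encoded in
     * the stub name. */
    static unsigned long ng4_residue(unsigned long o2, unsigned long o4,
                                     unsigned long nn)
    {
            return o2 + o4 + nn;    /* bytes not copied */
    }

So a fault on the STORE_INIT at chunk offset 0x18 lands in NG4_retl_o2_plus_o4_plus_40: three of the eight 8-byte stores had retired, leaving 0x40 - 0x18 = 40 bytes of the chunk outstanding.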
diff --git a/arch/sparc/lib/NGcopy_from_user.S b/arch/sparc/lib/NGcopy_from_user.S
index 5d1e4d1ac21e..9cd42fcbc781 100644
--- a/arch/sparc/lib/NGcopy_from_user.S
+++ b/arch/sparc/lib/NGcopy_from_user.S
@@ -3,11 +3,11 @@
* Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __ret_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
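
The new second argument to EX_LD/EX_ST becomes the fixup word of the two-word __ex_table entry, pairing each faulting instruction with a per-call-site handler instead of the old shared __ret_one_asi. A minimal C sketch of what that table encodes; the struct and linear search here are hypothetical stand-ins (the kernel keeps the table sorted and does a binary search in its real extable code):

    #include <stddef.h>

    struct exception_entry {
            unsigned int insn;      /* .word 98b - the faulting load/store */
            unsigned int fixup;     /* .word y   - handler returning bytes left */
    };

    static const struct exception_entry *
    search_fixup(const struct exception_entry *tbl, size_t n, unsigned int pc)
    {
            for (size_t i = 0; i < n; i++)
                    if (tbl[i].insn == pc)
                            return &tbl[i]; /* trap handler resumes at ->fixup */
            return NULL;
    }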
diff --git a/arch/sparc/lib/NGcopy_to_user.S b/arch/sparc/lib/NGcopy_to_user.S
index ff630dcb273c..5c358afd464e 100644
--- a/arch/sparc/lib/NGcopy_to_user.S
+++ b/arch/sparc/lib/NGcopy_to_user.S
@@ -3,11 +3,11 @@
* Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __ret_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S
index 96a14caf6966..d88c4ed50a00 100644
--- a/arch/sparc/lib/NGmemcpy.S
+++ b/arch/sparc/lib/NGmemcpy.S
@@ -4,6 +4,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/thread_info.h>
#define GLOBAL_SPARE %g7
@@ -27,15 +28,11 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST(x,y) x
#endif
#ifndef LOAD
@@ -79,6 +76,92 @@
.register %g3,#scratch
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+__restore_asi:
+ ret
+ wr %g0, ASI_AIUS, %asi
+ restore
+ENTRY(NG_ret_i2_plus_i4_plus_1)
+ ba,pt %xcc, __restore_asi
+ add %i2, %i5, %i0
+ENDPROC(NG_ret_i2_plus_i4_plus_1)
+ENTRY(NG_ret_i2_plus_g1)
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1)
+ENTRY(NG_ret_i2_plus_g1_minus_8)
+ sub %g1, 8, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_8)
+ENTRY(NG_ret_i2_plus_g1_minus_16)
+ sub %g1, 16, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_16)
+ENTRY(NG_ret_i2_plus_g1_minus_24)
+ sub %g1, 24, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_24)
+ENTRY(NG_ret_i2_plus_g1_minus_32)
+ sub %g1, 32, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_32)
+ENTRY(NG_ret_i2_plus_g1_minus_40)
+ sub %g1, 40, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_40)
+ENTRY(NG_ret_i2_plus_g1_minus_48)
+ sub %g1, 48, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_48)
+ENTRY(NG_ret_i2_plus_g1_minus_56)
+ sub %g1, 56, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_56)
+ENTRY(NG_ret_i2_plus_i4)
+ ba,pt %xcc, __restore_asi
+ add %i2, %i4, %i0
+ENDPROC(NG_ret_i2_plus_i4)
+ENTRY(NG_ret_i2_plus_i4_minus_8)
+ sub %i4, 8, %i4
+ ba,pt %xcc, __restore_asi
+ add %i2, %i4, %i0
+ENDPROC(NG_ret_i2_plus_i4_minus_8)
+ENTRY(NG_ret_i2_plus_8)
+ ba,pt %xcc, __restore_asi
+ add %i2, 8, %i0
+ENDPROC(NG_ret_i2_plus_8)
+ENTRY(NG_ret_i2_plus_4)
+ ba,pt %xcc, __restore_asi
+ add %i2, 4, %i0
+ENDPROC(NG_ret_i2_plus_4)
+ENTRY(NG_ret_i2_plus_1)
+ ba,pt %xcc, __restore_asi
+ add %i2, 1, %i0
+ENDPROC(NG_ret_i2_plus_1)
+ENTRY(NG_ret_i2_plus_g1_plus_1)
+ add %g1, 1, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_plus_1)
+ENTRY(NG_ret_i2)
+ ba,pt %xcc, __restore_asi
+ mov %i2, %i0
+ENDPROC(NG_ret_i2)
+ENTRY(NG_ret_i2_and_7_plus_i4)
+ and %i2, 7, %i2
+ ba,pt %xcc, __restore_asi
+ add %i2, %i4, %i0
+ENDPROC(NG_ret_i2_and_7_plus_i4)
+#endif
+
.align 64
.globl FUNC_NAME
@@ -126,8 +209,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
sub %g0, %i4, %i4 ! bytes to align dst
sub %i2, %i4, %i2
1: subcc %i4, 1, %i4
- EX_LD(LOAD(ldub, %i1, %g1))
- EX_ST(STORE(stb, %g1, %o0))
+ EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_i4_plus_1)
+ EX_ST(STORE(stb, %g1, %o0), NG_ret_i2_plus_i4_plus_1)
add %i1, 1, %i1
bne,pt %XCC, 1b
add %o0, 1, %o0
@@ -160,7 +243,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
and %i4, 0x7, GLOBAL_SPARE
sll GLOBAL_SPARE, 3, GLOBAL_SPARE
mov 64, %i5
- EX_LD(LOAD_TWIN(%i1, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1, %g2, %g3), NG_ret_i2_plus_g1)
sub %i5, GLOBAL_SPARE, %i5
mov 16, %o4
mov 32, %o5
@@ -178,31 +261,31 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
srlx WORD3, PRE_SHIFT, TMP; \
or WORD2, TMP, WORD2;
-8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
+8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1)
MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
LOAD(prefetch, %i1 + %i3, #one_read)
- EX_ST(STORE_INIT(%g2, %o0 + 0x00))
- EX_ST(STORE_INIT(%g3, %o0 + 0x08))
+ EX_ST(STORE_INIT(%g2, %o0 + 0x00), NG_ret_i2_plus_g1)
+ EX_ST(STORE_INIT(%g3, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
- EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16)
MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%o2, %o0 + 0x10))
- EX_ST(STORE_INIT(%o3, %o0 + 0x18))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
- EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+ EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%g2, %o0 + 0x20))
- EX_ST(STORE_INIT(%g3, %o0 + 0x28))
+ EX_ST(STORE_INIT(%g2, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%g3, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
- EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48)
add %i1, 64, %i1
MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%o2, %o0 + 0x30))
- EX_ST(STORE_INIT(%o3, %o0 + 0x38))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 8b
@@ -211,31 +294,31 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
ba,pt %XCC, 60f
add %i1, %i4, %i1
-9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
+9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1)
MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
LOAD(prefetch, %i1 + %i3, #one_read)
- EX_ST(STORE_INIT(%g3, %o0 + 0x00))
- EX_ST(STORE_INIT(%o2, %o0 + 0x08))
+ EX_ST(STORE_INIT(%g3, %o0 + 0x00), NG_ret_i2_plus_g1)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
- EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16)
MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%o3, %o0 + 0x10))
- EX_ST(STORE_INIT(%g2, %o0 + 0x18))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%g2, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
- EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+ EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%g3, %o0 + 0x20))
- EX_ST(STORE_INIT(%o2, %o0 + 0x28))
+ EX_ST(STORE_INIT(%g3, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
- EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48)
add %i1, 64, %i1
MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%o3, %o0 + 0x30))
- EX_ST(STORE_INIT(%g2, %o0 + 0x38))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
+ EX_ST(STORE_INIT(%g2, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 9b
@@ -249,25 +332,25 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
* one twin load ahead, then add 8 back into source when
* we finish the loop.
*/
- EX_LD(LOAD_TWIN(%i1, %o4, %o5))
+ EX_LD(LOAD_TWIN(%i1, %o4, %o5), NG_ret_i2_plus_g1)
mov 16, %o7
mov 32, %g2
mov 48, %g3
mov 64, %o1
-1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1)
LOAD(prefetch, %i1 + %o1, #one_read)
- EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line
- EX_ST(STORE_INIT(%o2, %o0 + 0x08))
- EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
- EX_ST(STORE_INIT(%o3, %o0 + 0x10))
- EX_ST(STORE_INIT(%o4, %o0 + 0x18))
- EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
- EX_ST(STORE_INIT(%o5, %o0 + 0x20))
- EX_ST(STORE_INIT(%o2, %o0 + 0x28))
- EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5))
+ EX_ST(STORE_INIT(%o5, %o0 + 0x00), NG_ret_i2_plus_g1) ! initializes cache line
+ EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
+ EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o4, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
+ EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%o5, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
+ EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5), NG_ret_i2_plus_g1_minus_48)
add %i1, 64, %i1
- EX_ST(STORE_INIT(%o3, %o0 + 0x30))
- EX_ST(STORE_INIT(%o4, %o0 + 0x38))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
+ EX_ST(STORE_INIT(%o4, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 1b
add %o0, 64, %o0
@@ -282,20 +365,20 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
mov 32, %g2
mov 48, %g3
mov 64, %o1
-1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5))
- EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5), NG_ret_i2_plus_g1)
+ EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1)
LOAD(prefetch, %i1 + %o1, #one_read)
- EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! initializes cache line
- EX_ST(STORE_INIT(%o5, %o0 + 0x08))
- EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
- EX_ST(STORE_INIT(%o2, %o0 + 0x10))
- EX_ST(STORE_INIT(%o3, %o0 + 0x18))
- EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
+ EX_ST(STORE_INIT(%o4, %o0 + 0x00), NG_ret_i2_plus_g1) ! initializes cache line
+ EX_ST(STORE_INIT(%o5, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
+ EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
+ EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
add %i1, 64, %i1
- EX_ST(STORE_INIT(%o4, %o0 + 0x20))
- EX_ST(STORE_INIT(%o5, %o0 + 0x28))
- EX_ST(STORE_INIT(%o2, %o0 + 0x30))
- EX_ST(STORE_INIT(%o3, %o0 + 0x38))
+ EX_ST(STORE_INIT(%o4, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%o5, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 1b
add %o0, 64, %o0
@@ -321,28 +404,28 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
andn %i2, 0xf, %i4
and %i2, 0xf, %i2
1: subcc %i4, 0x10, %i4
- EX_LD(LOAD(ldx, %i1, %o4))
+ EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_i4)
add %i1, 0x08, %i1
- EX_LD(LOAD(ldx, %i1, %g1))
+ EX_LD(LOAD(ldx, %i1, %g1), NG_ret_i2_plus_i4)
sub %i1, 0x08, %i1
- EX_ST(STORE(stx, %o4, %i1 + %i3))
+ EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_i4)
add %i1, 0x8, %i1
- EX_ST(STORE(stx, %g1, %i1 + %i3))
+ EX_ST(STORE(stx, %g1, %i1 + %i3), NG_ret_i2_plus_i4_minus_8)
bgu,pt %XCC, 1b
add %i1, 0x8, %i1
73: andcc %i2, 0x8, %g0
be,pt %XCC, 1f
nop
sub %i2, 0x8, %i2
- EX_LD(LOAD(ldx, %i1, %o4))
- EX_ST(STORE(stx, %o4, %i1 + %i3))
+ EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_8)
+ EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_8)
add %i1, 0x8, %i1
1: andcc %i2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %i2, 0x4, %i2
- EX_LD(LOAD(lduw, %i1, %i5))
- EX_ST(STORE(stw, %i5, %i1 + %i3))
+ EX_LD(LOAD(lduw, %i1, %i5), NG_ret_i2_plus_4)
+ EX_ST(STORE(stw, %i5, %i1 + %i3), NG_ret_i2_plus_4)
add %i1, 0x4, %i1
1: cmp %i2, 0
be,pt %XCC, 85f
@@ -358,8 +441,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
sub %i2, %g1, %i2
1: subcc %g1, 1, %g1
- EX_LD(LOAD(ldub, %i1, %i5))
- EX_ST(STORE(stb, %i5, %i1 + %i3))
+ EX_LD(LOAD(ldub, %i1, %i5), NG_ret_i2_plus_g1_plus_1)
+ EX_ST(STORE(stb, %i5, %i1 + %i3), NG_ret_i2_plus_g1_plus_1)
bgu,pt %icc, 1b
add %i1, 1, %i1
@@ -375,16 +458,16 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
8: mov 64, %i3
andn %i1, 0x7, %i1
- EX_LD(LOAD(ldx, %i1, %g2))
+ EX_LD(LOAD(ldx, %i1, %g2), NG_ret_i2)
sub %i3, %g1, %i3
andn %i2, 0x7, %i4
sllx %g2, %g1, %g2
1: add %i1, 0x8, %i1
- EX_LD(LOAD(ldx, %i1, %g3))
+ EX_LD(LOAD(ldx, %i1, %g3), NG_ret_i2_and_7_plus_i4)
subcc %i4, 0x8, %i4
srlx %g3, %i3, %i5
or %i5, %g2, %i5
- EX_ST(STORE(stx, %i5, %o0))
+ EX_ST(STORE(stx, %i5, %o0), NG_ret_i2_and_7_plus_i4)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
@@ -404,8 +487,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
1:
subcc %i2, 4, %i2
- EX_LD(LOAD(lduw, %i1, %g1))
- EX_ST(STORE(stw, %g1, %i1 + %i3))
+ EX_LD(LOAD(lduw, %i1, %g1), NG_ret_i2_plus_4)
+ EX_ST(STORE(stw, %g1, %i1 + %i3), NG_ret_i2_plus_4)
bgu,pt %XCC, 1b
add %i1, 4, %i1
@@ -415,8 +498,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
.align 32
90:
subcc %i2, 1, %i2
- EX_LD(LOAD(ldub, %i1, %g1))
- EX_ST(STORE(stb, %g1, %i1 + %i3))
+ EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_1)
+ EX_ST(STORE(stb, %g1, %i1 + %i3), NG_ret_i2_plus_1)
bgu,pt %XCC, 90b
add %i1, 1, %i1
ret
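
Unlike NG4memcpy, NGmemcpy runs inside a save/restore register window, so the length lives in %i2 and the stubs hand the residue back through %i0 across the restore in __restore_asi. The "minus" names come from the 64-byte loop only decrementing %g1 with the bottom-of-loop subcc: a fault after k stores of the current chunk have retired still sees the whole chunk in %g1. A small model of that, same caveats as the sketch above:

    /* Model of NG_ret_i2_plus_g1_minus_<NN>: %g1 still counts the whole
     * 64-byte chunk until the bottom-of-loop subcc, so NN = 8 * stores
     * already retired must come off before adding the outer count. */
    static unsigned long ng_residue(unsigned long i2, unsigned long g1,
                                    unsigned int stores_done)
    {
            return i2 + g1 - 8UL * stores_done;
    }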
diff --git a/arch/sparc/lib/U1copy_from_user.S b/arch/sparc/lib/U1copy_from_user.S
index ecc5692fa2b4..bb6ff73229e3 100644
--- a/arch/sparc/lib/U1copy_from_user.S
+++ b/arch/sparc/lib/U1copy_from_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_LD_FP(x) \
+#define EX_LD_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_fp;\
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/U1copy_to_user.S b/arch/sparc/lib/U1copy_to_user.S
index 9eea392e44d4..ed92ce739558 100644
--- a/arch/sparc/lib/U1copy_to_user.S
+++ b/arch/sparc/lib/U1copy_to_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_ST_FP(x) \
+#define EX_ST_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_fp;\
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S
index 97e1b211090c..4f0d50b33a72 100644
--- a/arch/sparc/lib/U1memcpy.S
+++ b/arch/sparc/lib/U1memcpy.S
@@ -5,6 +5,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/export.h>
@@ -24,21 +25,17 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
-#define EX_LD_FP(x) x
+#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
+#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
-#define EX_ST_FP(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST_FP(x,y) x
#endif
#ifndef LOAD
@@ -79,53 +76,169 @@
faligndata %f7, %f8, %f60; \
faligndata %f8, %f9, %f62;
-#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \
- EX_LD_FP(LOAD_BLK(%src, %fdest)); \
- EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
- add %src, 0x40, %src; \
- subcc %len, 0x40, %len; \
- be,pn %xcc, jmptgt; \
- add %dest, 0x40, %dest; \
-
-#define LOOP_CHUNK1(src, dest, len, branch_dest) \
- MAIN_LOOP_CHUNK(src, dest, f0, f48, len, branch_dest)
-#define LOOP_CHUNK2(src, dest, len, branch_dest) \
- MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest)
-#define LOOP_CHUNK3(src, dest, len, branch_dest) \
- MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest)
+#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, jmptgt) \
+ EX_LD_FP(LOAD_BLK(%src, %fdest), U1_gs_80_fp); \
+ EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \
+ add %src, 0x40, %src; \
+ subcc %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE; \
+ be,pn %xcc, jmptgt; \
+ add %dest, 0x40, %dest; \
+
+#define LOOP_CHUNK1(src, dest, branch_dest) \
+ MAIN_LOOP_CHUNK(src, dest, f0, f48, branch_dest)
+#define LOOP_CHUNK2(src, dest, branch_dest) \
+ MAIN_LOOP_CHUNK(src, dest, f16, f48, branch_dest)
+#define LOOP_CHUNK3(src, dest, branch_dest) \
+ MAIN_LOOP_CHUNK(src, dest, f32, f48, branch_dest)
#define DO_SYNC membar #Sync;
#define STORE_SYNC(dest, fsrc) \
- EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
+ EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \
add %dest, 0x40, %dest; \
DO_SYNC
#define STORE_JUMP(dest, fsrc, target) \
- EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
+ EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_40_fp); \
add %dest, 0x40, %dest; \
ba,pt %xcc, target; \
nop;
-#define FINISH_VISCHUNK(dest, f0, f1, left) \
- subcc %left, 8, %left;\
- bl,pn %xcc, 95f; \
- faligndata %f0, %f1, %f48; \
- EX_ST_FP(STORE(std, %f48, %dest)); \
+#define FINISH_VISCHUNK(dest, f0, f1) \
+ subcc %g3, 8, %g3; \
+ bl,pn %xcc, 95f; \
+ faligndata %f0, %f1, %f48; \
+ EX_ST_FP(STORE(std, %f48, %dest), U1_g3_8_fp); \
add %dest, 8, %dest;
-#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
- subcc %left, 8, %left; \
- bl,pn %xcc, 95f; \
+#define UNEVEN_VISCHUNK_LAST(dest, f0, f1) \
+ subcc %g3, 8, %g3; \
+ bl,pn %xcc, 95f; \
fsrc2 %f0, %f1;
-#define UNEVEN_VISCHUNK(dest, f0, f1, left) \
- UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
+#define UNEVEN_VISCHUNK(dest, f0, f1) \
+ UNEVEN_VISCHUNK_LAST(dest, f0, f1) \
ba,a,pt %xcc, 93f;
.register %g2,#scratch
.register %g3,#scratch
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+ENTRY(U1_g1_1_fp)
+ VISExitHalf
+ add %g1, 1, %g1
+ add %g1, %g2, %g1
+ retl
+ add %g1, %o2, %o0
+ENDPROC(U1_g1_1_fp)
+ENTRY(U1_g2_0_fp)
+ VISExitHalf
+ retl
+ add %g2, %o2, %o0
+ENDPROC(U1_g2_0_fp)
+ENTRY(U1_g2_8_fp)
+ VISExitHalf
+ add %g2, 8, %g2
+ retl
+ add %g2, %o2, %o0
+ENDPROC(U1_g2_8_fp)
+ENTRY(U1_gs_0_fp)
+ VISExitHalf
+ add %GLOBAL_SPARE, %g3, %o0
+ retl
+ add %o0, %o2, %o0
+ENDPROC(U1_gs_0_fp)
+ENTRY(U1_gs_80_fp)
+ VISExitHalf
+ add %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
+ add %GLOBAL_SPARE, %g3, %o0
+ retl
+ add %o0, %o2, %o0
+ENDPROC(U1_gs_80_fp)
+ENTRY(U1_gs_40_fp)
+ VISExitHalf
+ add %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE
+ add %GLOBAL_SPARE, %g3, %o0
+ retl
+ add %o0, %o2, %o0
+ENDPROC(U1_gs_40_fp)
+ENTRY(U1_g3_0_fp)
+ VISExitHalf
+ retl
+ add %g3, %o2, %o0
+ENDPROC(U1_g3_0_fp)
+ENTRY(U1_g3_8_fp)
+ VISExitHalf
+ add %g3, 8, %g3
+ retl
+ add %g3, %o2, %o0
+ENDPROC(U1_g3_8_fp)
+ENTRY(U1_o2_0_fp)
+ VISExitHalf
+ retl
+ mov %o2, %o0
+ENDPROC(U1_o2_0_fp)
+ENTRY(U1_o2_1_fp)
+ VISExitHalf
+ retl
+ add %o2, 1, %o0
+ENDPROC(U1_o2_1_fp)
+ENTRY(U1_gs_0)
+ VISExitHalf
+ retl
+ add %GLOBAL_SPARE, %o2, %o0
+ENDPROC(U1_gs_0)
+ENTRY(U1_gs_8)
+ VISExitHalf
+ add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE
+ retl
+ add %GLOBAL_SPARE, 0x8, %o0
+ENDPROC(U1_gs_8)
+ENTRY(U1_gs_10)
+ VISExitHalf
+ add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE
+ retl
+ add %GLOBAL_SPARE, 0x10, %o0
+ENDPROC(U1_gs_10)
+ENTRY(U1_o2_0)
+ retl
+ mov %o2, %o0
+ENDPROC(U1_o2_0)
+ENTRY(U1_o2_8)
+ retl
+ add %o2, 8, %o0
+ENDPROC(U1_o2_8)
+ENTRY(U1_o2_4)
+ retl
+ add %o2, 4, %o0
+ENDPROC(U1_o2_4)
+ENTRY(U1_o2_1)
+ retl
+ add %o2, 1, %o0
+ENDPROC(U1_o2_1)
+ENTRY(U1_g1_0)
+ retl
+ add %g1, %o2, %o0
+ENDPROC(U1_g1_0)
+ENTRY(U1_g1_1)
+ add %g1, 1, %g1
+ retl
+ add %g1, %o2, %o0
+ENDPROC(U1_g1_1)
+ENTRY(U1_gs_0_o2_adj)
+ and %o2, 7, %o2
+ retl
+ add %GLOBAL_SPARE, %o2, %o0
+ENDPROC(U1_gs_0_o2_adj)
+ENTRY(U1_gs_8_o2_adj)
+ and %o2, 7, %o2
+ add %GLOBAL_SPARE, 8, %GLOBAL_SPARE
+ retl
+ add %GLOBAL_SPARE, %o2, %o0
+ENDPROC(U1_gs_8_o2_adj)
+#endif
+
.align 64
.globl FUNC_NAME
@@ -167,8 +280,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
and %g2, 0x38, %g2
1: subcc %g1, 0x1, %g1
- EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3))
- EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE))
+ EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U1_g1_1_fp)
+ EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE), U1_g1_1_fp)
bgu,pt %XCC, 1b
add %o1, 0x1, %o1
@@ -179,20 +292,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
be,pt %icc, 3f
alignaddr %o1, %g0, %o1
- EX_LD_FP(LOAD(ldd, %o1, %f4))
-1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6))
+ EX_LD_FP(LOAD(ldd, %o1, %f4), U1_g2_0_fp)
+1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U1_g2_0_fp)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f4, %f6, %f0
- EX_ST_FP(STORE(std, %f0, %o0))
+ EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp)
be,pn %icc, 3f
add %o0, 0x8, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U1_g2_0_fp)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f6, %f4, %f0
- EX_ST_FP(STORE(std, %f0, %o0))
+ EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp)
bne,pt %icc, 1b
add %o0, 0x8, %o0
@@ -215,13 +328,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
add %g1, %GLOBAL_SPARE, %g1
subcc %o2, %g3, %o2
- EX_LD_FP(LOAD_BLK(%o1, %f0))
+ EX_LD_FP(LOAD_BLK(%o1, %f0), U1_gs_0_fp)
add %o1, 0x40, %o1
add %g1, %g3, %g1
- EX_LD_FP(LOAD_BLK(%o1, %f16))
+ EX_LD_FP(LOAD_BLK(%o1, %f16), U1_gs_0_fp)
add %o1, 0x40, %o1
sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
- EX_LD_FP(LOAD_BLK(%o1, %f32))
+ EX_LD_FP(LOAD_BLK(%o1, %f32), U1_gs_80_fp)
add %o1, 0x40, %o1
/* There are 8 instances of the unrolled loop,
@@ -241,11 +354,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 64
1: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f0, %f2, %f48
1: FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
@@ -262,11 +375,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 56f)
1: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f2, %f4, %f48
1: FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
@@ -283,11 +396,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 57f)
1: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f4, %f6, %f48
1: FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
@@ -304,11 +417,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 58f)
1: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f6, %f8, %f48
1: FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
@@ -325,11 +438,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 59f)
1: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f8, %f10, %f48
1: FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
@@ -346,11 +459,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 60f)
1: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f10, %f12, %f48
1: FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
@@ -367,11 +480,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 61f)
1: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f12, %f14, %f48
1: FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
@@ -388,11 +501,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 62f)
1: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f14, %f16, %f48
1: FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
@@ -408,53 +521,53 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
STORE_JUMP(o0, f48, 63f)
-40: FINISH_VISCHUNK(o0, f0, f2, g3)
-41: FINISH_VISCHUNK(o0, f2, f4, g3)
-42: FINISH_VISCHUNK(o0, f4, f6, g3)
-43: FINISH_VISCHUNK(o0, f6, f8, g3)
-44: FINISH_VISCHUNK(o0, f8, f10, g3)
-45: FINISH_VISCHUNK(o0, f10, f12, g3)
-46: FINISH_VISCHUNK(o0, f12, f14, g3)
-47: UNEVEN_VISCHUNK(o0, f14, f0, g3)
-48: FINISH_VISCHUNK(o0, f16, f18, g3)
-49: FINISH_VISCHUNK(o0, f18, f20, g3)
-50: FINISH_VISCHUNK(o0, f20, f22, g3)
-51: FINISH_VISCHUNK(o0, f22, f24, g3)
-52: FINISH_VISCHUNK(o0, f24, f26, g3)
-53: FINISH_VISCHUNK(o0, f26, f28, g3)
-54: FINISH_VISCHUNK(o0, f28, f30, g3)
-55: UNEVEN_VISCHUNK(o0, f30, f0, g3)
-56: FINISH_VISCHUNK(o0, f32, f34, g3)
-57: FINISH_VISCHUNK(o0, f34, f36, g3)
-58: FINISH_VISCHUNK(o0, f36, f38, g3)
-59: FINISH_VISCHUNK(o0, f38, f40, g3)
-60: FINISH_VISCHUNK(o0, f40, f42, g3)
-61: FINISH_VISCHUNK(o0, f42, f44, g3)
-62: FINISH_VISCHUNK(o0, f44, f46, g3)
-63: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3)
-
-93: EX_LD_FP(LOAD(ldd, %o1, %f2))
+40: FINISH_VISCHUNK(o0, f0, f2)
+41: FINISH_VISCHUNK(o0, f2, f4)
+42: FINISH_VISCHUNK(o0, f4, f6)
+43: FINISH_VISCHUNK(o0, f6, f8)
+44: FINISH_VISCHUNK(o0, f8, f10)
+45: FINISH_VISCHUNK(o0, f10, f12)
+46: FINISH_VISCHUNK(o0, f12, f14)
+47: UNEVEN_VISCHUNK(o0, f14, f0)
+48: FINISH_VISCHUNK(o0, f16, f18)
+49: FINISH_VISCHUNK(o0, f18, f20)
+50: FINISH_VISCHUNK(o0, f20, f22)
+51: FINISH_VISCHUNK(o0, f22, f24)
+52: FINISH_VISCHUNK(o0, f24, f26)
+53: FINISH_VISCHUNK(o0, f26, f28)
+54: FINISH_VISCHUNK(o0, f28, f30)
+55: UNEVEN_VISCHUNK(o0, f30, f0)
+56: FINISH_VISCHUNK(o0, f32, f34)
+57: FINISH_VISCHUNK(o0, f34, f36)
+58: FINISH_VISCHUNK(o0, f36, f38)
+59: FINISH_VISCHUNK(o0, f38, f40)
+60: FINISH_VISCHUNK(o0, f40, f42)
+61: FINISH_VISCHUNK(o0, f42, f44)
+62: FINISH_VISCHUNK(o0, f44, f46)
+63: UNEVEN_VISCHUNK_LAST(o0, f46, f0)
+
+93: EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_0_fp)
add %o1, 8, %o1
subcc %g3, 8, %g3
faligndata %f0, %f2, %f8
- EX_ST_FP(STORE(std, %f8, %o0))
+ EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
bl,pn %xcc, 95f
add %o0, 8, %o0
- EX_LD_FP(LOAD(ldd, %o1, %f0))
+ EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_0_fp)
add %o1, 8, %o1
subcc %g3, 8, %g3
faligndata %f2, %f0, %f8
- EX_ST_FP(STORE(std, %f8, %o0))
+ EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
bge,pt %xcc, 93b
add %o0, 8, %o0
95: brz,pt %o2, 2f
mov %g1, %o1
-1: EX_LD_FP(LOAD(ldub, %o1, %o3))
+1: EX_LD_FP(LOAD(ldub, %o1, %o3), U1_o2_0_fp)
add %o1, 1, %o1
subcc %o2, 1, %o2
- EX_ST_FP(STORE(stb, %o3, %o0))
+ EX_ST_FP(STORE(stb, %o3, %o0), U1_o2_1_fp)
bne,pt %xcc, 1b
add %o0, 1, %o0
@@ -470,27 +583,27 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
72: andn %o2, 0xf, %GLOBAL_SPARE
and %o2, 0xf, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
- EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U1_gs_0)
+ EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U1_gs_0)
subcc %GLOBAL_SPARE, 0x10, %GLOBAL_SPARE
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U1_gs_10)
add %o1, 0x8, %o1
- EX_ST(STORE(stx, %g1, %o1 + %o3))
+ EX_ST(STORE(stx, %g1, %o1 + %o3), U1_gs_8)
bgu,pt %XCC, 1b
add %o1, 0x8, %o1
73: andcc %o2, 0x8, %g0
be,pt %XCC, 1f
nop
- EX_LD(LOAD(ldx, %o1, %o5))
+ EX_LD(LOAD(ldx, %o1, %o5), U1_o2_0)
sub %o2, 0x8, %o2
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U1_o2_8)
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
- EX_LD(LOAD(lduw, %o1, %o5))
+ EX_LD(LOAD(lduw, %o1, %o5), U1_o2_0)
sub %o2, 0x4, %o2
- EX_ST(STORE(stw, %o5, %o1 + %o3))
+ EX_ST(STORE(stw, %o5, %o1 + %o3), U1_o2_4)
add %o1, 0x4, %o1
1: cmp %o2, 0
be,pt %XCC, 85f
@@ -504,9 +617,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %g0, %g1, %g1
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldub, %o1, %o5))
+1: EX_LD(LOAD(ldub, %o1, %o5), U1_g1_0)
subcc %g1, 1, %g1
- EX_ST(STORE(stb, %o5, %o1 + %o3))
+ EX_ST(STORE(stb, %o5, %o1 + %o3), U1_g1_1)
bgu,pt %icc, 1b
add %o1, 1, %o1
@@ -522,16 +635,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
8: mov 64, %o3
andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1, %g2))
+ EX_LD(LOAD(ldx, %o1, %g2), U1_o2_0)
sub %o3, %g1, %o3
andn %o2, 0x7, %GLOBAL_SPARE
sllx %g2, %g1, %g2
-1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
+1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U1_gs_0_o2_adj)
subcc %GLOBAL_SPARE, 0x8, %GLOBAL_SPARE
add %o1, 0x8, %o1
srlx %g3, %o3, %o5
or %o5, %g2, %o5
- EX_ST(STORE(stx, %o5, %o0))
+ EX_ST(STORE(stx, %o5, %o0), U1_gs_8_o2_adj)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
@@ -549,9 +662,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
bne,pn %XCC, 90f
sub %o0, %o1, %o3
-1: EX_LD(LOAD(lduw, %o1, %g1))
+1: EX_LD(LOAD(lduw, %o1, %g1), U1_o2_0)
subcc %o2, 4, %o2
- EX_ST(STORE(stw, %g1, %o1 + %o3))
+ EX_ST(STORE(stw, %g1, %o1 + %o3), U1_o2_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
@@ -559,9 +672,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
mov EX_RETVAL(%o4), %o0
.align 32
-90: EX_LD(LOAD(ldub, %o1, %g1))
+90: EX_LD(LOAD(ldub, %o1, %g1), U1_o2_0)
subcc %o2, 1, %o2
- EX_ST(STORE(stb, %g1, %o1 + %o3))
+ EX_ST(STORE(stb, %g1, %o1 + %o3), U1_o2_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
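
Two things distinguish the U1 stubs: the _fp variants must drop the half-saved FPU state via VISExitHalf before returning, and the VIS main loop is software-pipelined, keeping two 64-byte block loads in flight, which is why GLOBAL_SPARE is pre-decremented by 0x80 before the loop is entered. A rough model of U1_gs_80_fp under that reading, not taken from the patch:

    /* Model of U1_gs_80_fp: the VIS loop runs two 64-byte blocks ahead,
     * so GLOBAL_SPARE was pre-decremented by 0x80; the stub gives those
     * bytes back, then folds in the g3 and o2 tails.  The real stub
     * also runs VISExitHalf first to restore FPU state. */
    static unsigned long u1_gs_80_residue(unsigned long gs, unsigned long g3,
                                          unsigned long o2)
    {
            return gs + 0x80 + g3 + o2;
    }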
diff --git a/arch/sparc/lib/U3copy_from_user.S b/arch/sparc/lib/U3copy_from_user.S
index 88ad73d86fe4..db73010a1af8 100644
--- a/arch/sparc/lib/U3copy_from_user.S
+++ b/arch/sparc/lib/U3copy_from_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_LD_FP(x) \
+#define EX_LD_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/U3copy_to_user.S b/arch/sparc/lib/U3copy_to_user.S
index 845139d75537..c4ee858e352a 100644
--- a/arch/sparc/lib/U3copy_to_user.S
+++ b/arch/sparc/lib/U3copy_to_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_ST_FP(x) \
+#define EX_ST_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
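
Note the y##_fp token pasting in the two U3 user-copy wrappers: the _FP macros append "_fp" to whatever fixup name U3memcpy.S passes, so one base name selects the FPU-restoring variant on the VIS paths and the plain variant elsewhere. A self-contained C illustration of the preprocessor trick; the two functions are hypothetical stand-ins for the assembly labels:

    #include <stdio.h>

    #define EX(handler)    handler()
    #define EX_FP(handler) handler##_fp()

    static int U3_retl_o2(void)    { return 0; }  /* integer path */
    static int U3_retl_o2_fp(void) { return 1; }  /* restores FPU first */

    int main(void)
    {
            printf("%d %d\n", EX(U3_retl_o2), EX_FP(U3_retl_o2));
            return 0;
    }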
diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
index 491ee69e4995..54f98706b03b 100644
--- a/arch/sparc/lib/U3memcpy.S
+++ b/arch/sparc/lib/U3memcpy.S
@@ -4,6 +4,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE %g7
@@ -22,21 +23,17 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
-#define EX_LD_FP(x) x
+#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
+#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
-#define EX_ST_FP(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST_FP(x,y) x
#endif
#ifndef LOAD
@@ -77,6 +74,87 @@
*/
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+__restore_fp:
+ VISExitHalf
+ retl
+ nop
+ENTRY(U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
+ add %g1, 1, %g1
+ add %g2, %g1, %g2
+ ba,pt %xcc, __restore_fp
+ add %o2, %g2, %o0
+ENDPROC(U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
+ENTRY(U3_retl_o2_plus_g2_fp)
+ ba,pt %xcc, __restore_fp
+ add %o2, %g2, %o0
+ENDPROC(U3_retl_o2_plus_g2_fp)
+ENTRY(U3_retl_o2_plus_g2_plus_8_fp)
+ add %g2, 8, %g2
+ ba,pt %xcc, __restore_fp
+ add %o2, %g2, %o0
+ENDPROC(U3_retl_o2_plus_g2_plus_8_fp)
+ENTRY(U3_retl_o2)
+ retl
+ mov %o2, %o0
+ENDPROC(U3_retl_o2)
+ENTRY(U3_retl_o2_plus_1)
+ retl
+ add %o2, 1, %o0
+ENDPROC(U3_retl_o2_plus_1)
+ENTRY(U3_retl_o2_plus_4)
+ retl
+ add %o2, 4, %o0
+ENDPROC(U3_retl_o2_plus_4)
+ENTRY(U3_retl_o2_plus_8)
+ retl
+ add %o2, 8, %o0
+ENDPROC(U3_retl_o2_plus_8)
+ENTRY(U3_retl_o2_plus_g1_plus_1)
+ add %g1, 1, %g1
+ retl
+ add %o2, %g1, %o0
+ENDPROC(U3_retl_o2_plus_g1_plus_1)
+ENTRY(U3_retl_o2_fp)
+ ba,pt %xcc, __restore_fp
+ mov %o2, %o0
+ENDPROC(U3_retl_o2_fp)
+ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
+ sll %o3, 6, %o3
+ add %o3, 0x80, %o3
+ ba,pt %xcc, __restore_fp
+ add %o2, %o3, %o0
+ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
+ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
+ sll %o3, 6, %o3
+ add %o3, 0x40, %o3
+ ba,pt %xcc, __restore_fp
+ add %o2, %o3, %o0
+ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
+ENTRY(U3_retl_o2_plus_GS_plus_0x10)
+ add GLOBAL_SPARE, 0x10, GLOBAL_SPARE
+ retl
+ add %o2, GLOBAL_SPARE, %o0
+ENDPROC(U3_retl_o2_plus_GS_plus_0x10)
+ENTRY(U3_retl_o2_plus_GS_plus_0x08)
+ add GLOBAL_SPARE, 0x08, GLOBAL_SPARE
+ retl
+ add %o2, GLOBAL_SPARE, %o0
+ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
+ENTRY(U3_retl_o2_and_7_plus_GS)
+ and %o2, 7, %o2
+ retl
+ add %o2, GLOBAL_SPARE, %o2
+ENDPROC(U3_retl_o2_and_7_plus_GS)
+ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
+ add GLOBAL_SPARE, 8, GLOBAL_SPARE
+ and %o2, 7, %o2
+ retl
+ add %o2, GLOBAL_SPARE, %o2
+ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
+#endif
+
.align 64
/* The cheetah's flexible spine, oversized liver, enlarged heart,
@@ -126,8 +204,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
and %g2, 0x38, %g2
1: subcc %g1, 0x1, %g1
- EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3))
- EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE))
+ EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U3_retl_o2_plus_g2_plus_g1_plus_1)
+ EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE), U3_retl_o2_plus_g2_plus_g1_plus_1)
bgu,pt %XCC, 1b
add %o1, 0x1, %o1
@@ -138,20 +216,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
be,pt %icc, 3f
alignaddr %o1, %g0, %o1
- EX_LD_FP(LOAD(ldd, %o1, %f4))
-1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6))
+ EX_LD_FP(LOAD(ldd, %o1, %f4), U3_retl_o2_plus_g2)
+1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f4, %f6, %f0
- EX_ST_FP(STORE(std, %f0, %o0))
+ EX_ST_FP(STORE(std, %f0, %o0), U3_retl_o2_plus_g2_plus_8)
be,pn %icc, 3f
add %o0, 0x8, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f6, %f4, %f2
- EX_ST_FP(STORE(std, %f2, %o0))
+ EX_ST_FP(STORE(std, %f2, %o0), U3_retl_o2_plus_g2_plus_8)
bne,pt %icc, 1b
add %o0, 0x8, %o0
@@ -161,25 +239,25 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
LOAD(prefetch, %o1 + 0x080, #one_read)
LOAD(prefetch, %o1 + 0x0c0, #one_read)
LOAD(prefetch, %o1 + 0x100, #one_read)
- EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0), U3_retl_o2)
LOAD(prefetch, %o1 + 0x140, #one_read)
- EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2)
LOAD(prefetch, %o1 + 0x180, #one_read)
- EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2)
LOAD(prefetch, %o1 + 0x1c0, #one_read)
faligndata %f0, %f2, %f16
- EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2)
faligndata %f2, %f4, %f18
- EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2)
faligndata %f4, %f6, %f20
- EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2)
faligndata %f6, %f8, %f22
- EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2)
faligndata %f8, %f10, %f24
- EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2)
faligndata %f10, %f12, %f26
- EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2)
subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE
add %o1, 0x40, %o1
@@ -190,26 +268,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 64
1:
- EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f12, %f14, %f28
- EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f14, %f0, %f30
- EX_ST_FP(STORE_BLK(%f16, %o0))
- EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
+ EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
+ EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f0, %f2, %f16
add %o0, 0x40, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f2, %f4, %f18
- EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f4, %f6, %f20
- EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40)
subcc %o3, 0x01, %o3
faligndata %f6, %f8, %f22
- EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f8, %f10, %f24
- EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
LOAD(prefetch, %o1 + 0x1c0, #one_read)
faligndata %f10, %f12, %f26
bg,pt %XCC, 1b
@@ -217,29 +295,29 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
/* Finally we copy the last full 64-byte block. */
2:
- EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f12, %f14, %f28
- EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f14, %f0, %f30
- EX_ST_FP(STORE_BLK(%f16, %o0))
- EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
+ EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
+ EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f0, %f2, %f16
- EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f2, %f4, %f18
- EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f4, %f6, %f20
- EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f6, %f8, %f22
- EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f8, %f10, %f24
cmp %g1, 0
be,pt %XCC, 1f
add %o0, 0x40, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x40)
1: faligndata %f10, %f12, %f26
faligndata %f12, %f14, %f28
faligndata %f14, %f0, %f30
- EX_ST_FP(STORE_BLK(%f16, %o0))
+ EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x40)
add %o0, 0x40, %o0
add %o1, 0x40, %o1
membar #Sync
@@ -259,20 +337,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %g2, %o2
be,a,pt %XCC, 1f
- EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0), U3_retl_o2_plus_g2)
-1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2))
+1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f0, %f2, %f8
- EX_ST_FP(STORE(std, %f8, %o0))
+ EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8)
be,pn %XCC, 2f
add %o0, 0x8, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f2, %f0, %f8
- EX_ST_FP(STORE(std, %f8, %o0))
+ EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8)
bne,pn %XCC, 1b
add %o0, 0x8, %o0
@@ -292,30 +370,33 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andcc %o2, 0x8, %g0
be,pt %icc, 1f
nop
- EX_LD(LOAD(ldx, %o1, %o5))
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2)
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2)
add %o1, 0x8, %o1
+ sub %o2, 8, %o2
1: andcc %o2, 0x4, %g0
be,pt %icc, 1f
nop
- EX_LD(LOAD(lduw, %o1, %o5))
- EX_ST(STORE(stw, %o5, %o1 + %o3))
+ EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2)
+ EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2)
add %o1, 0x4, %o1
+ sub %o2, 4, %o2
1: andcc %o2, 0x2, %g0
be,pt %icc, 1f
nop
- EX_LD(LOAD(lduh, %o1, %o5))
- EX_ST(STORE(sth, %o5, %o1 + %o3))
+ EX_LD(LOAD(lduh, %o1, %o5), U3_retl_o2)
+ EX_ST(STORE(sth, %o5, %o1 + %o3), U3_retl_o2)
add %o1, 0x2, %o1
+ sub %o2, 2, %o2
1: andcc %o2, 0x1, %g0
be,pt %icc, 85f
nop
- EX_LD(LOAD(ldub, %o1, %o5))
+ EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2)
ba,pt %xcc, 85f
- EX_ST(STORE(stb, %o5, %o1 + %o3))
+ EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2)
.align 64
70: /* 16 < len <= 64 */
@@ -326,26 +407,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0xf, GLOBAL_SPARE
and %o2, 0xf, %o2
1: subcc GLOBAL_SPARE, 0x10, GLOBAL_SPARE
- EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
- EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U3_retl_o2_plus_GS_plus_0x10)
+ EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U3_retl_o2_plus_GS_plus_0x10)
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x10)
add %o1, 0x8, %o1
- EX_ST(STORE(stx, %g1, %o1 + %o3))
+ EX_ST(STORE(stx, %g1, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x08)
bgu,pt %XCC, 1b
add %o1, 0x8, %o1
73: andcc %o2, 0x8, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x8, %o2
- EX_LD(LOAD(ldx, %o1, %o5))
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2_plus_8)
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_8)
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x4, %o2
- EX_LD(LOAD(lduw, %o1, %o5))
- EX_ST(STORE(stw, %o5, %o1 + %o3))
+ EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2_plus_4)
+ EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2_plus_4)
add %o1, 0x4, %o1
1: cmp %o2, 0
be,pt %XCC, 85f
@@ -361,8 +442,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %g1, %o2
1: subcc %g1, 1, %g1
- EX_LD(LOAD(ldub, %o1, %o5))
- EX_ST(STORE(stb, %o5, %o1 + %o3))
+ EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2_plus_g1_plus_1)
+ EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2_plus_g1_plus_1)
bgu,pt %icc, 1b
add %o1, 1, %o1
@@ -378,16 +459,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
8: mov 64, %o3
andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1, %g2))
+ EX_LD(LOAD(ldx, %o1, %g2), U3_retl_o2)
sub %o3, %g1, %o3
andn %o2, 0x7, GLOBAL_SPARE
sllx %g2, %g1, %g2
-1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
+1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U3_retl_o2_and_7_plus_GS)
subcc GLOBAL_SPARE, 0x8, GLOBAL_SPARE
add %o1, 0x8, %o1
srlx %g3, %o3, %o5
or %o5, %g2, %o5
- EX_ST(STORE(stx, %o5, %o0))
+ EX_ST(STORE(stx, %o5, %o0), U3_retl_o2_and_7_plus_GS_plus_8)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
@@ -407,8 +488,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
1:
subcc %o2, 4, %o2
- EX_LD(LOAD(lduw, %o1, %g1))
- EX_ST(STORE(stw, %g1, %o1 + %o3))
+ EX_LD(LOAD(lduw, %o1, %g1), U3_retl_o2_plus_4)
+ EX_ST(STORE(stw, %g1, %o1 + %o3), U3_retl_o2_plus_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
@@ -418,8 +499,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 32
90:
subcc %o2, 1, %o2
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o1 + %o3))
+ EX_LD(LOAD(ldub, %o1, %g1), U3_retl_o2_plus_1)
+ EX_ST(STORE(stb, %g1, %o1 + %o3), U3_retl_o2_plus_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
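
The U3_retl_o2_plus_o3_sll_6_* stubs reflect that %o3 counts remaining 64-byte iterations of the main loop rather than bytes, with either 0x40 or 0x80 bytes in flight depending on where in the pipelined body the fault hit. A rough model of the reconstruction, not taken from the patch:

    /* Model of U3_retl_o2_plus_o3_sll_6_plus_0x{40,80}: %o3 counts
     * 64-byte loop iterations still to run, and the software pipeline
     * keeps 0x40 or 0x80 bytes in flight at the faulting point. */
    static unsigned long u3_sll6_residue(unsigned long o2, unsigned long o3,
                                         unsigned long inflight /* 0x40 or 0x80 */)
    {
            return o2 + (o3 << 6) + inflight;
    }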
diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S
index 482de093bdae..0252b218de45 100644
--- a/arch/sparc/lib/copy_in_user.S
+++ b/arch/sparc/lib/copy_in_user.S
@@ -9,18 +9,33 @@
#define XCC xcc
-#define EX(x,y) \
+#define EX(x,y,z) \
98: x,y; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, z; \
.text; \
.align 4;
+#define EX_O4(x,y) EX(x,y,__retl_o4_plus_8)
+#define EX_O2_4(x,y) EX(x,y,__retl_o2_plus_4)
+#define EX_O2_1(x,y) EX(x,y,__retl_o2_plus_1)
+
.register %g2,#scratch
.register %g3,#scratch
.text
+__retl_o4_plus_8:
+ add %o4, %o2, %o4
+ retl
+ add %o4, 8, %o0
+__retl_o2_plus_4:
+ retl
+ add %o2, 4, %o0
+__retl_o2_plus_1:
+ retl
+ add %o2, 1, %o0
+
.align 32
/* Don't try to get too fancy here, just nice and
@@ -45,8 +60,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0x7, %o4
and %o2, 0x7, %o2
1: subcc %o4, 0x8, %o4
- EX(ldxa [%o1] %asi, %o5)
- EX(stxa %o5, [%o0] %asi)
+ EX_O4(ldxa [%o1] %asi, %o5)
+ EX_O4(stxa %o5, [%o0] %asi)
add %o1, 0x8, %o1
bgu,pt %XCC, 1b
add %o0, 0x8, %o0
@@ -54,8 +69,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
be,pt %XCC, 1f
nop
sub %o2, 0x4, %o2
- EX(lduwa [%o1] %asi, %o5)
- EX(stwa %o5, [%o0] %asi)
+ EX_O2_4(lduwa [%o1] %asi, %o5)
+ EX_O2_4(stwa %o5, [%o0] %asi)
add %o1, 0x4, %o1
add %o0, 0x4, %o0
1: cmp %o2, 0
@@ -71,8 +86,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
82:
subcc %o2, 4, %o2
- EX(lduwa [%o1] %asi, %g1)
- EX(stwa %g1, [%o0] %asi)
+ EX_O2_4(lduwa [%o1] %asi, %g1)
+ EX_O2_4(stwa %g1, [%o0] %asi)
add %o1, 4, %o1
bgu,pt %XCC, 82b
add %o0, 4, %o0
@@ -83,8 +98,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
.align 32
90:
subcc %o2, 1, %o2
- EX(lduba [%o1] %asi, %g1)
- EX(stba %g1, [%o0] %asi)
+ EX_O2_1(lduba [%o1] %asi, %g1)
+ EX_O2_1(stba %g1, [%o0] %asi)
add %o1, 1, %o1
bgu,pt %XCC, 90b
add %o0, 1, %o0
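
copy_in_user.S needs only three fixup shapes, so it keeps a three-argument EX() and wraps it in EX_O4/EX_O2_4/EX_O2_1 instead of minting a label per call site. A C model of the three return values, matching the __retl_* stubs added above (names as in the patch, bodies paraphrased):

    /* bytes not copied for each fixup shape */
    static unsigned long retl_o4_plus_8(unsigned long o4, unsigned long o2)
    {
            return o4 + o2 + 8;     /* 8-byte loop: pending words + tail + current */
    }
    static unsigned long retl_o2_plus_4(unsigned long o2) { return o2 + 4; }
    static unsigned long retl_o2_plus_1(unsigned long o2) { return o2 + 1; }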
diff --git a/arch/sparc/lib/user_fixup.c b/arch/sparc/lib/user_fixup.c
deleted file mode 100644
index ac96ae236709..000000000000
--- a/arch/sparc/lib/user_fixup.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/* user_fixup.c: Fix up user copy faults.
- *
- * Copyright (C) 2004 David S. Miller <davem@redhat.com>
- */
-
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-
-#include <asm/uaccess.h>
-
-/* Calculating the exact fault address when using
- * block loads and stores can be very complicated.
- *
- * Instead of trying to be clever and handling all
- * of the cases, just fix things up simply here.
- */
-
-static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
-{
- unsigned long fault_addr = current_thread_info()->fault_address;
- unsigned long end = start + size;
-
- if (fault_addr < start || fault_addr >= end) {
- *offset = 0;
- } else {
- *offset = fault_addr - start;
- size = end - fault_addr;
- }
- return size;
-}
-
-unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
-{
- unsigned long offset;
-
- size = compute_size((unsigned long) from, size, &offset);
- if (likely(size))
- memset(to + offset, 0, size);
-
- return size;
-}
-EXPORT_SYMBOL(copy_from_user_fixup);
-
-unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
-{
- unsigned long offset;
-
- return compute_size((unsigned long) to, size, &offset);
-}
-EXPORT_SYMBOL(copy_to_user_fixup);
-
-unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
-{
- unsigned long fault_addr = current_thread_info()->fault_address;
- unsigned long start = (unsigned long) to;
- unsigned long end = start + size;
-
- if (fault_addr >= start && fault_addr < end)
- return end - fault_addr;
-
- start = (unsigned long) from;
- end = start + size;
- if (fault_addr >= start && fault_addr < end)
- return end - fault_addr;
-
- return size;
-}
-EXPORT_SYMBOL(copy_in_user_fixup);
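
user_fixup.c can go because it embodied the old, approximate scheme: after a fault it reconstructed how much had been copied from current_thread_info()->fault_address, which its own comment concedes was a simplification for block loads and stores. The per-site stubs above return the exact residue directly, with no dependence on the recorded fault address. A hedged C model of the scheme being deleted, mirroring compute_size() for a copy of size bytes starting at start:

    /* old approximation: derive the residue from the fault address,
     * assuming a strictly linear copy (untrue for block operations) */
    static unsigned long old_residue(unsigned long start, unsigned long size,
                                     unsigned long fault)
    {
            if (fault < start || fault >= start + size)
                    return size;            /* fault outside the buffer */
            return start + size - fault;    /* bytes past the fault point */
    }
    /* new scheme: the fixup label itself encodes the exact count,
     * e.g. residue = o2 + o4 + 8, computed from live registers. */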
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index f2b77112e9d8..e20fbbafb0b0 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -27,6 +27,20 @@ static inline int tag_compare(unsigned long tag, unsigned long vaddr)
return (tag == (vaddr >> 22));
}
+static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end)
+{
+ unsigned long idx;
+
+ for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) {
+ struct tsb *ent = &swapper_tsb[idx];
+ unsigned long match = idx << 13;
+
+ match |= (ent->tag << 22);
+ if (match >= start && match < end)
+ ent->tag = (1UL << TSB_TAG_INVALID_BIT);
+ }
+}
+
/* TSB flushes need only occur on the processor initiating the address
* space modification, not on each cpu the address space has run on.
* Only the TLB flush needs that treatment.
@@ -36,6 +50,9 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long v;
+ if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES)
+ return flush_tsb_kernel_range_scan(start, end);
+
for (v = start; v < end; v += PAGE_SIZE) {
unsigned long hash = tsb_hash(v, PAGE_SHIFT,
KERNEL_TSB_NENTRIES);
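
The new early-out makes flush_tsb_kernel_range() switch strategy by cost: per-page hashing is O(pages) while a full scan of the kernel TSB is O(KERNEL_TSB_NENTRIES), so once the range spans at least twice as many pages as the TSB has entries, scanning every slot and invalidating matches wins. An illustrative check of the crossover; PAGE_SHIFT is the sparc64 8K-page value and the entry count here is a placeholder, not the kernel's real constant:

    #include <stdbool.h>

    #define PAGE_SHIFT              13              /* sparc64 8K pages */
    #define KERNEL_TSB_NENTRIES     (1 << 12)       /* assumption for this sketch */

    static bool use_full_scan(unsigned long start, unsigned long end)
    {
            return ((end - start) >> PAGE_SHIFT) >= 2 * KERNEL_TSB_NENTRIES;
    }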
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index b4f4733abc6e..5d2fd6cd3189 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -30,7 +30,7 @@
.text
.align 32
.globl __flush_tlb_mm
-__flush_tlb_mm: /* 18 insns */
+__flush_tlb_mm: /* 19 insns */
/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
ldxa [%o1] ASI_DMMU, %g2
cmp %g2, %o0
@@ -81,7 +81,7 @@ __flush_tlb_page: /* 22 insns */
.align 32
.globl __flush_tlb_pending
-__flush_tlb_pending: /* 26 insns */
+__flush_tlb_pending: /* 27 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
rdpr %pstate, %g7
sllx %o1, 3, %o1
@@ -113,12 +113,14 @@ __flush_tlb_pending: /* 26 insns */
.align 32
.globl __flush_tlb_kernel_range
-__flush_tlb_kernel_range: /* 16 insns */
+__flush_tlb_kernel_range: /* 31 insns */
/* %o0=start, %o1=end */
cmp %o0, %o1
be,pn %xcc, 2f
+ sub %o1, %o0, %o3
+ srlx %o3, 18, %o4
+ brnz,pn %o4, __spitfire_flush_tlb_kernel_range_slow
sethi %hi(PAGE_SIZE), %o4
- sub %o1, %o0, %o3
sub %o3, %o4, %o3
or %o0, 0x20, %o0 ! Nucleus
1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP
@@ -131,6 +133,41 @@ __flush_tlb_kernel_range: /* 16 insns */
retl
nop
nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+__spitfire_flush_tlb_kernel_range_slow:
+ mov 63 * 8, %o4
+1: ldxa [%o4] ASI_ITLB_DATA_ACCESS, %o3
+ andcc %o3, 0x40, %g0 /* _PAGE_L_4U */
+ bne,pn %xcc, 2f
+ mov TLB_TAG_ACCESS, %o3
+ stxa %g0, [%o3] ASI_IMMU
+ stxa %g0, [%o4] ASI_ITLB_DATA_ACCESS
+ membar #Sync
+2: ldxa [%o4] ASI_DTLB_DATA_ACCESS, %o3
+ andcc %o3, 0x40, %g0
+ bne,pn %xcc, 2f
+ mov TLB_TAG_ACCESS, %o3
+ stxa %g0, [%o3] ASI_DMMU
+ stxa %g0, [%o4] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+2: sub %o4, 8, %o4
+ brgez,pt %o4, 1b
+ nop
+ retl
+ nop
__spitfire_flush_tlb_mm_slow:
rdpr %pstate, %g1
@@ -285,6 +322,40 @@ __cheetah_flush_tlb_pending: /* 27 insns */
retl
wrpr %g7, 0x0, %pstate
+__cheetah_flush_tlb_kernel_range: /* 31 insns */
+ /* %o0=start, %o1=end */
+ cmp %o0, %o1
+ be,pn %xcc, 2f
+ sub %o1, %o0, %o3
+ srlx %o3, 18, %o4
+ brnz,pn %o4, 3f
+ sethi %hi(PAGE_SIZE), %o4
+ sub %o3, %o4, %o3
+ or %o0, 0x20, %o0 ! Nucleus
+1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP
+ stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP
+ membar #Sync
+ brnz,pt %o3, 1b
+ sub %o3, %o4, %o3
+2: sethi %hi(KERNBASE), %o3
+ flush %o3
+ retl
+ nop
+3: mov 0x80, %o4
+ stxa %g0, [%o4] ASI_DMMU_DEMAP
+ membar #Sync
+ stxa %g0, [%o4] ASI_IMMU_DEMAP
+ membar #Sync
+ retl
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page: /* 11 insns */
sethi %hi(PAGE_OFFSET), %g1
@@ -309,19 +380,28 @@ __hypervisor_tlb_tl0_error:
ret
restore
-__hypervisor_flush_tlb_mm: /* 10 insns */
+__hypervisor_flush_tlb_mm: /* 19 insns */
mov %o0, %o2 /* ARG2: mmu context */
mov 0, %o0 /* ARG0: CPU lists unimplemented */
mov 0, %o1 /* ARG1: CPU lists unimplemented */
mov HV_MMU_ALL, %o3 /* ARG3: flags */
mov HV_FAST_MMU_DEMAP_CTX, %o5
ta HV_FAST_TRAP
- brnz,pn %o0, __hypervisor_tlb_tl0_error
+ brnz,pn %o0, 1f
mov HV_FAST_MMU_DEMAP_CTX, %o1
retl
nop
+1: sethi %hi(__hypervisor_tlb_tl0_error), %o5
+ jmpl %o5 + %lo(__hypervisor_tlb_tl0_error), %g0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
-__hypervisor_flush_tlb_page: /* 11 insns */
+__hypervisor_flush_tlb_page: /* 22 insns */
/* %o0 = context, %o1 = vaddr */
mov %o0, %g2
mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
@@ -330,12 +410,23 @@ __hypervisor_flush_tlb_page: /* 11 insns */
srlx %o0, PAGE_SHIFT, %o0
sllx %o0, PAGE_SHIFT, %o0
ta HV_MMU_UNMAP_ADDR_TRAP
- brnz,pn %o0, __hypervisor_tlb_tl0_error
+ brnz,pn %o0, 1f
mov HV_MMU_UNMAP_ADDR_TRAP, %o1
retl
nop
+1: sethi %hi(__hypervisor_tlb_tl0_error), %o2
+ jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
-__hypervisor_flush_tlb_pending: /* 16 insns */
+__hypervisor_flush_tlb_pending: /* 27 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
sllx %o1, 3, %g1
mov %o2, %g2
@@ -347,31 +438,57 @@ __hypervisor_flush_tlb_pending: /* 16 insns */
srlx %o0, PAGE_SHIFT, %o0
sllx %o0, PAGE_SHIFT, %o0
ta HV_MMU_UNMAP_ADDR_TRAP
- brnz,pn %o0, __hypervisor_tlb_tl0_error
+ brnz,pn %o0, 1f
mov HV_MMU_UNMAP_ADDR_TRAP, %o1
brnz,pt %g1, 1b
nop
retl
nop
+1: sethi %hi(__hypervisor_tlb_tl0_error), %o2
+ jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
-__hypervisor_flush_tlb_kernel_range: /* 16 insns */
+__hypervisor_flush_tlb_kernel_range: /* 31 insns */
/* %o0=start, %o1=end */
cmp %o0, %o1
be,pn %xcc, 2f
- sethi %hi(PAGE_SIZE), %g3
- mov %o0, %g1
- sub %o1, %g1, %g2
+ sub %o1, %o0, %g2
+ srlx %g2, 18, %g3
+ brnz,pn %g3, 4f
+ mov %o0, %g1
+ sethi %hi(PAGE_SIZE), %g3
sub %g2, %g3, %g2
1: add %g1, %g2, %o0 /* ARG0: virtual address */
mov 0, %o1 /* ARG1: mmu context */
mov HV_MMU_ALL, %o2 /* ARG2: flags */
ta HV_MMU_UNMAP_ADDR_TRAP
- brnz,pn %o0, __hypervisor_tlb_tl0_error
+ brnz,pn %o0, 3f
mov HV_MMU_UNMAP_ADDR_TRAP, %o1
brnz,pt %g2, 1b
sub %g2, %g3, %g2
2: retl
nop
+3: sethi %hi(__hypervisor_tlb_tl0_error), %o2
+ jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
+ nop
+4: mov 0, %o0 /* ARG0: CPU lists unimplemented */
+ mov 0, %o1 /* ARG1: CPU lists unimplemented */
+ mov 0, %o2 /* ARG2: mmu context == nucleus */
+ mov HV_MMU_ALL, %o3 /* ARG3: flags */
+ mov HV_FAST_MMU_DEMAP_CTX, %o5
+ ta HV_FAST_TRAP
+ brnz,pn %o0, 3b
+ mov HV_FAST_MMU_DEMAP_CTX, %o1
+ retl
+ nop
#ifdef DCACHE_ALIASING_POSSIBLE
/* XXX Niagara and friends have an 8K cache, so no aliasing is
@@ -394,43 +511,6 @@ tlb_patch_one:
retl
nop
- .globl cheetah_patch_cachetlbops
-cheetah_patch_cachetlbops:
- save %sp, -128, %sp
-
- sethi %hi(__flush_tlb_mm), %o0
- or %o0, %lo(__flush_tlb_mm), %o0
- sethi %hi(__cheetah_flush_tlb_mm), %o1
- or %o1, %lo(__cheetah_flush_tlb_mm), %o1
- call tlb_patch_one
- mov 19, %o2
-
- sethi %hi(__flush_tlb_page), %o0
- or %o0, %lo(__flush_tlb_page), %o0
- sethi %hi(__cheetah_flush_tlb_page), %o1
- or %o1, %lo(__cheetah_flush_tlb_page), %o1
- call tlb_patch_one
- mov 22, %o2
-
- sethi %hi(__flush_tlb_pending), %o0
- or %o0, %lo(__flush_tlb_pending), %o0
- sethi %hi(__cheetah_flush_tlb_pending), %o1
- or %o1, %lo(__cheetah_flush_tlb_pending), %o1
- call tlb_patch_one
- mov 27, %o2
-
-#ifdef DCACHE_ALIASING_POSSIBLE
- sethi %hi(__flush_dcache_page), %o0
- or %o0, %lo(__flush_dcache_page), %o0
- sethi %hi(__cheetah_flush_dcache_page), %o1
- or %o1, %lo(__cheetah_flush_dcache_page), %o1
- call tlb_patch_one
- mov 11, %o2
-#endif /* DCACHE_ALIASING_POSSIBLE */
-
- ret
- restore
-
#ifdef CONFIG_SMP
/* These are all called by the slaves of a cross call, at
* trap level 1, with interrupts fully disabled.
@@ -447,7 +527,7 @@ cheetah_patch_cachetlbops:
*/
.align 32
.globl xcall_flush_tlb_mm
-xcall_flush_tlb_mm: /* 21 insns */
+xcall_flush_tlb_mm: /* 24 insns */
mov PRIMARY_CONTEXT, %g2
ldxa [%g2] ASI_DMMU, %g3
srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -469,9 +549,12 @@ xcall_flush_tlb_mm: /* 21 insns */
nop
nop
nop
+ nop
+ nop
+ nop
.globl xcall_flush_tlb_page
-xcall_flush_tlb_page: /* 17 insns */
+xcall_flush_tlb_page: /* 20 insns */
/* %g5=context, %g1=vaddr */
mov PRIMARY_CONTEXT, %g4
ldxa [%g4] ASI_DMMU, %g2
@@ -490,15 +573,20 @@ xcall_flush_tlb_page: /* 17 insns */
retry
nop
nop
+ nop
+ nop
+ nop
.globl xcall_flush_tlb_kernel_range
-xcall_flush_tlb_kernel_range: /* 25 insns */
+xcall_flush_tlb_kernel_range: /* 44 insns */
sethi %hi(PAGE_SIZE - 1), %g2
or %g2, %lo(PAGE_SIZE - 1), %g2
andn %g1, %g2, %g1
andn %g7, %g2, %g7
sub %g7, %g1, %g3
- add %g2, 1, %g2
+ srlx %g3, 18, %g2
+ brnz,pn %g2, 2f
+ sethi %hi(PAGE_SIZE), %g2
sub %g3, %g2, %g3
or %g1, 0x20, %g1 ! Nucleus
1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
@@ -507,8 +595,25 @@ xcall_flush_tlb_kernel_range: /* 25 insns */
brnz,pt %g3, 1b
sub %g3, %g2, %g3
retry
- nop
- nop
+2: mov 63 * 8, %g1
+1: ldxa [%g1] ASI_ITLB_DATA_ACCESS, %g2
+ andcc %g2, 0x40, %g0 /* _PAGE_L_4U */
+ bne,pn %xcc, 2f
+ mov TLB_TAG_ACCESS, %g2
+ stxa %g0, [%g2] ASI_IMMU
+ stxa %g0, [%g1] ASI_ITLB_DATA_ACCESS
+ membar #Sync
+2: ldxa [%g1] ASI_DTLB_DATA_ACCESS, %g2
+ andcc %g2, 0x40, %g0
+ bne,pn %xcc, 2f
+ mov TLB_TAG_ACCESS, %g2
+ stxa %g0, [%g2] ASI_DMMU
+ stxa %g0, [%g1] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+2: sub %g1, 8, %g1
+ brgez,pt %g1, 1b
+ nop
+ retry
nop
nop
nop
@@ -637,6 +742,52 @@ xcall_fetch_glob_pmu_n4:
retry
+__cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */
+ sethi %hi(PAGE_SIZE - 1), %g2
+ or %g2, %lo(PAGE_SIZE - 1), %g2
+ andn %g1, %g2, %g1
+ andn %g7, %g2, %g7
+ sub %g7, %g1, %g3
+ srlx %g3, 18, %g2
+ brnz,pn %g2, 2f
+ sethi %hi(PAGE_SIZE), %g2
+ sub %g3, %g2, %g3
+ or %g1, 0x20, %g1 ! Nucleus
+1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
+ stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP
+ membar #Sync
+ brnz,pt %g3, 1b
+ sub %g3, %g2, %g3
+ retry
+2: mov 0x80, %g2
+ stxa %g0, [%g2] ASI_DMMU_DEMAP
+ membar #Sync
+ stxa %g0, [%g2] ASI_IMMU_DEMAP
+ membar #Sync
+ retry
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
#ifdef DCACHE_ALIASING_POSSIBLE
.align 32
.globl xcall_flush_dcache_page_cheetah
@@ -700,7 +851,7 @@ __hypervisor_tlb_xcall_error:
ba,a,pt %xcc, rtrap
.globl __hypervisor_xcall_flush_tlb_mm
-__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
+__hypervisor_xcall_flush_tlb_mm: /* 24 insns */
/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
mov %o0, %g2
mov %o1, %g3
@@ -714,7 +865,7 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
mov HV_FAST_MMU_DEMAP_CTX, %o5
ta HV_FAST_TRAP
mov HV_FAST_MMU_DEMAP_CTX, %g6
- brnz,pn %o0, __hypervisor_tlb_xcall_error
+ brnz,pn %o0, 1f
mov %o0, %g5
mov %g2, %o0
mov %g3, %o1
@@ -723,9 +874,12 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
mov %g7, %o5
membar #Sync
retry
+1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
+ jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
+ nop
.globl __hypervisor_xcall_flush_tlb_page
-__hypervisor_xcall_flush_tlb_page: /* 17 insns */
+__hypervisor_xcall_flush_tlb_page: /* 20 insns */
/* %g5=ctx, %g1=vaddr */
mov %o0, %g2
mov %o1, %g3
@@ -737,42 +891,64 @@ __hypervisor_xcall_flush_tlb_page: /* 17 insns */
sllx %o0, PAGE_SHIFT, %o0
ta HV_MMU_UNMAP_ADDR_TRAP
mov HV_MMU_UNMAP_ADDR_TRAP, %g6
- brnz,a,pn %o0, __hypervisor_tlb_xcall_error
+ brnz,a,pn %o0, 1f
mov %o0, %g5
mov %g2, %o0
mov %g3, %o1
mov %g4, %o2
membar #Sync
retry
+1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
+ jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
+ nop
.globl __hypervisor_xcall_flush_tlb_kernel_range
-__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
+__hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */
/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
sethi %hi(PAGE_SIZE - 1), %g2
or %g2, %lo(PAGE_SIZE - 1), %g2
andn %g1, %g2, %g1
andn %g7, %g2, %g7
sub %g7, %g1, %g3
+ srlx %g3, 18, %g7
add %g2, 1, %g2
sub %g3, %g2, %g3
mov %o0, %g2
mov %o1, %g4
- mov %o2, %g7
+ brnz,pn %g7, 2f
+ mov %o2, %g7
1: add %g1, %g3, %o0 /* ARG0: virtual address */
mov 0, %o1 /* ARG1: mmu context */
mov HV_MMU_ALL, %o2 /* ARG2: flags */
ta HV_MMU_UNMAP_ADDR_TRAP
mov HV_MMU_UNMAP_ADDR_TRAP, %g6
- brnz,pn %o0, __hypervisor_tlb_xcall_error
+ brnz,pn %o0, 1f
mov %o0, %g5
sethi %hi(PAGE_SIZE), %o2
brnz,pt %g3, 1b
sub %g3, %o2, %g3
- mov %g2, %o0
+5: mov %g2, %o0
mov %g4, %o1
mov %g7, %o2
membar #Sync
retry
+1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
+ jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
+ nop
+2: mov %o3, %g1
+ mov %o5, %g3
+ mov 0, %o0 /* ARG0: CPU lists unimplemented */
+ mov 0, %o1 /* ARG1: CPU lists unimplemented */
+ mov 0, %o2 /* ARG2: mmu context == nucleus */
+ mov HV_MMU_ALL, %o3 /* ARG3: flags */
+ mov HV_FAST_MMU_DEMAP_CTX, %o5
+ ta HV_FAST_TRAP
+ mov %g1, %o3
+ brz,pt %o0, 5b
+ mov %g3, %o5
+ mov HV_FAST_MMU_DEMAP_CTX, %g6
+ ba,pt %xcc, 1b
+ clr %g5
/* These just get rescheduled to PIL vectors. */
.globl xcall_call_function
@@ -809,6 +985,58 @@ xcall_kgdb_capture:
#endif /* CONFIG_SMP */
+ .globl cheetah_patch_cachetlbops
+cheetah_patch_cachetlbops:
+ save %sp, -128, %sp
+
+ sethi %hi(__flush_tlb_mm), %o0
+ or %o0, %lo(__flush_tlb_mm), %o0
+ sethi %hi(__cheetah_flush_tlb_mm), %o1
+ or %o1, %lo(__cheetah_flush_tlb_mm), %o1
+ call tlb_patch_one
+ mov 19, %o2
+
+ sethi %hi(__flush_tlb_page), %o0
+ or %o0, %lo(__flush_tlb_page), %o0
+ sethi %hi(__cheetah_flush_tlb_page), %o1
+ or %o1, %lo(__cheetah_flush_tlb_page), %o1
+ call tlb_patch_one
+ mov 22, %o2
+
+ sethi %hi(__flush_tlb_pending), %o0
+ or %o0, %lo(__flush_tlb_pending), %o0
+ sethi %hi(__cheetah_flush_tlb_pending), %o1
+ or %o1, %lo(__cheetah_flush_tlb_pending), %o1
+ call tlb_patch_one
+ mov 27, %o2
+
+ sethi %hi(__flush_tlb_kernel_range), %o0
+ or %o0, %lo(__flush_tlb_kernel_range), %o0
+ sethi %hi(__cheetah_flush_tlb_kernel_range), %o1
+ or %o1, %lo(__cheetah_flush_tlb_kernel_range), %o1
+ call tlb_patch_one
+ mov 31, %o2
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+ sethi %hi(__flush_dcache_page), %o0
+ or %o0, %lo(__flush_dcache_page), %o0
+ sethi %hi(__cheetah_flush_dcache_page), %o1
+ or %o1, %lo(__cheetah_flush_dcache_page), %o1
+ call tlb_patch_one
+ mov 11, %o2
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+#ifdef CONFIG_SMP
+ sethi %hi(xcall_flush_tlb_kernel_range), %o0
+ or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
+ sethi %hi(__cheetah_xcall_flush_tlb_kernel_range), %o1
+ or %o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1
+ call tlb_patch_one
+ mov 44, %o2
+#endif /* CONFIG_SMP */
+
+ ret
+ restore
.globl hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
@@ -819,28 +1047,28 @@ hypervisor_patch_cachetlbops:
sethi %hi(__hypervisor_flush_tlb_mm), %o1
or %o1, %lo(__hypervisor_flush_tlb_mm), %o1
call tlb_patch_one
- mov 10, %o2
+ mov 19, %o2
sethi %hi(__flush_tlb_page), %o0
or %o0, %lo(__flush_tlb_page), %o0
sethi %hi(__hypervisor_flush_tlb_page), %o1
or %o1, %lo(__hypervisor_flush_tlb_page), %o1
call tlb_patch_one
- mov 11, %o2
+ mov 22, %o2
sethi %hi(__flush_tlb_pending), %o0
or %o0, %lo(__flush_tlb_pending), %o0
sethi %hi(__hypervisor_flush_tlb_pending), %o1
or %o1, %lo(__hypervisor_flush_tlb_pending), %o1
call tlb_patch_one
- mov 16, %o2
+ mov 27, %o2
sethi %hi(__flush_tlb_kernel_range), %o0
or %o0, %lo(__flush_tlb_kernel_range), %o0
sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1
or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
call tlb_patch_one
- mov 16, %o2
+ mov 31, %o2
#ifdef DCACHE_ALIASING_POSSIBLE
sethi %hi(__flush_dcache_page), %o0
@@ -857,21 +1085,21 @@ hypervisor_patch_cachetlbops:
sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1
or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
call tlb_patch_one
- mov 21, %o2
+ mov 24, %o2
sethi %hi(xcall_flush_tlb_page), %o0
or %o0, %lo(xcall_flush_tlb_page), %o0
sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
call tlb_patch_one
- mov 17, %o2
+ mov 20, %o2
sethi %hi(xcall_flush_tlb_kernel_range), %o0
or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
call tlb_patch_one
- mov 25, %o2
+ mov 44, %o2
#endif /* CONFIG_SMP */
ret
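
Every patched stub above shares the same guard: shifting the byte length right by 18 tests for spans of 256 KiB and up, and anything that large abandons the per-page demap loop for a flush-everything path (a scan of all 64 TLB entries that skips locked ones on spitfire, a single demap-all operation on cheetah, HV_FAST_MMU_DEMAP_CTX on the nucleus context under the hypervisor). A C rendering of the control flow, with the demap sequences as hypothetical stubs:

#define SPARC64_PAGE_SIZE	8192UL

/* Hypothetical stand-ins for the stxa/membar demap sequences. */
static void demap_page(unsigned long va) { (void)va; }
static void flush_whole_nucleus(void) { }

static void flush_tlb_kernel_range_sketch(unsigned long start,
					  unsigned long end)
{
	if ((end - start) >> 18) {	/* >= 256 KiB: give up on pages */
		flush_whole_nucleus();
		return;
	}
	for (unsigned long va = start; va < end; va += SPARC64_PAGE_SIZE)
		demap_page(va);		/* per-page D- and I-TLB demap */
}
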
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 0ab5ee1c26af..aa8b0672f87a 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -888,7 +888,7 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
struct scatter_walk src_sg_walk;
- struct scatter_walk dst_sg_walk;
+ struct scatter_walk dst_sg_walk = {};
unsigned int i;
/* Assuming we are supporting rfc4106 64-bit extended */
@@ -968,7 +968,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
u8 authTag[16];
struct scatter_walk src_sg_walk;
- struct scatter_walk dst_sg_walk;
+ struct scatter_walk dst_sg_walk = {};
unsigned int i;
if (unlikely(req->assoclen != 16 && req->assoclen != 20))
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index 77f28ce9c646..9976fcecd17e 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -5,8 +5,8 @@
OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
-CFLAGS_syscall_64.o += -Wno-override-init
-CFLAGS_syscall_32.o += -Wno-override-init
+CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,)
obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
obj-y += common.o
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index eab0915f5995..a74a2dbc0180 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3607,10 +3607,14 @@ __init int intel_pmu_init(void)
/*
* Quirk: v2 perfmon does not report fixed-purpose events, so
- * assume at least 3 events:
+ * assume at least 3 events when not running in a hypervisor:
*/
- if (version > 1)
- x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
+ if (version > 1) {
+ int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
+
+ x86_pmu.num_counters_fixed =
+ max((int)edx.split.num_counters_fixed, assume);
+ }
if (boot_cpu_has(X86_FEATURE_PDCM)) {
u64 capabilities;
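
The rewritten quirk keeps the old floor of three fixed counters on bare metal, where early v2 perfmon under-reports them, but trusts CPUID as-is under a hypervisor, which may legitimately offer fewer. The arithmetic in isolation:

/* assume is 3 on bare metal and 0 under a hypervisor; the reported
 * CPUID.0xA count is only ever raised, never lowered. */
static int fixed_counter_floor(int reported, int on_hypervisor)
{
	int assume = 3 * !on_hypervisor;

	return reported > assume ? reported : assume;
}
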
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 3ca87b5a8677..4f5ac726335f 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -48,7 +48,8 @@
* Scope: Core
* MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
* perf code: 0x02
- * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
+ * SKL,KNL
* Scope: Core
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
* perf code: 0x03
@@ -56,15 +57,16 @@
* Scope: Core
* MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
* perf code: 0x00
- * Available model: SNB,IVB,HSW,BDW,SKL
+ * Available model: SNB,IVB,HSW,BDW,SKL,KNL
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
- * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL
* Scope: Package (physical package)
* MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
* perf code: 0x02
- * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
+ * SKL,KNL
* Scope: Package (physical package)
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
* perf code: 0x03
@@ -118,6 +120,7 @@ struct cstate_model {
/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR (1UL << 0)
+#define KNL_CORE_C6_MSR (1UL << 1)
struct perf_cstate_msr {
u64 msr;
@@ -488,6 +491,18 @@ static const struct cstate_model slm_cstates __initconst = {
.quirks = SLM_PKG_C6_USE_C7_MSR,
};
+
+static const struct cstate_model knl_cstates __initconst = {
+ .core_events = BIT(PERF_CSTATE_CORE_C6_RES),
+
+ .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
+ BIT(PERF_CSTATE_PKG_C3_RES) |
+ BIT(PERF_CSTATE_PKG_C6_RES),
+ .quirks = KNL_CORE_C6_MSR,
+};
+
#define X86_CSTATES_MODEL(model, states) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
@@ -523,6 +538,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
+
+ X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
@@ -558,6 +575,11 @@ static int __init cstate_probe(const struct cstate_model *cm)
if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;
+ /* KNL has a different MSR for CORE C6 */
+ if (cm->quirks & KNL_CORE_C6_MSR)
+ pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;
+
has_cstate_core = cstate_probe_msr(cm->core_events,
PERF_CSTATE_CORE_EVENT_MAX,
core_msr, core_events_attrs);
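
Knights Landing reports Core C6 residency through MSR_KNL_CORE_C6_RESIDENCY rather than the common MSR, so the probe patches the MSR table up front, the same trick the SLM quirk uses a few lines earlier; the hot read path then needs no model checks. The pattern in general form (names hypothetical):

#include <stdbool.h>
#include <stdint.h>

struct msr_slot {
	uint32_t msr;		/* register to read for this event */
	bool	 present;	/* filled in by probing */
};

#define QUIRK_ALT_CORE_C6	(1UL << 1)

/* Patch the event table once, before probing, so the read path stays
 * free of model-specific branches. */
static void apply_model_quirks(struct msr_slot *core_slots, int c6_idx,
			       unsigned long quirks, uint32_t alt_msr)
{
	if (quirks & QUIRK_ALT_CORE_C6)
		core_slots[c6_idx].msr = alt_msr;
}
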
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index c5047b8f777b..1c1b9fe705c8 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -36,13 +36,6 @@ static DEFINE_PER_CPU(struct pt, pt_ctx);
static struct pt_pmu pt_pmu;
-enum cpuid_regs {
- CR_EAX = 0,
- CR_ECX,
- CR_EDX,
- CR_EBX
-};
-
/*
* Capabilities of Intel PT hardware, such as number of address bits or
* supported output schemes, are cached and exported to userspace as "caps"
@@ -64,21 +57,21 @@ static struct pt_cap_desc {
u8 reg;
u32 mask;
} pt_caps[] = {
- PT_CAP(max_subleaf, 0, CR_EAX, 0xffffffff),
- PT_CAP(cr3_filtering, 0, CR_EBX, BIT(0)),
- PT_CAP(psb_cyc, 0, CR_EBX, BIT(1)),
- PT_CAP(ip_filtering, 0, CR_EBX, BIT(2)),
- PT_CAP(mtc, 0, CR_EBX, BIT(3)),
- PT_CAP(ptwrite, 0, CR_EBX, BIT(4)),
- PT_CAP(power_event_trace, 0, CR_EBX, BIT(5)),
- PT_CAP(topa_output, 0, CR_ECX, BIT(0)),
- PT_CAP(topa_multiple_entries, 0, CR_ECX, BIT(1)),
- PT_CAP(single_range_output, 0, CR_ECX, BIT(2)),
- PT_CAP(payloads_lip, 0, CR_ECX, BIT(31)),
- PT_CAP(num_address_ranges, 1, CR_EAX, 0x3),
- PT_CAP(mtc_periods, 1, CR_EAX, 0xffff0000),
- PT_CAP(cycle_thresholds, 1, CR_EBX, 0xffff),
- PT_CAP(psb_periods, 1, CR_EBX, 0xffff0000),
+ PT_CAP(max_subleaf, 0, CPUID_EAX, 0xffffffff),
+ PT_CAP(cr3_filtering, 0, CPUID_EBX, BIT(0)),
+ PT_CAP(psb_cyc, 0, CPUID_EBX, BIT(1)),
+ PT_CAP(ip_filtering, 0, CPUID_EBX, BIT(2)),
+ PT_CAP(mtc, 0, CPUID_EBX, BIT(3)),
+ PT_CAP(ptwrite, 0, CPUID_EBX, BIT(4)),
+ PT_CAP(power_event_trace, 0, CPUID_EBX, BIT(5)),
+ PT_CAP(topa_output, 0, CPUID_ECX, BIT(0)),
+ PT_CAP(topa_multiple_entries, 0, CPUID_ECX, BIT(1)),
+ PT_CAP(single_range_output, 0, CPUID_ECX, BIT(2)),
+ PT_CAP(payloads_lip, 0, CPUID_ECX, BIT(31)),
+ PT_CAP(num_address_ranges, 1, CPUID_EAX, 0x3),
+ PT_CAP(mtc_periods, 1, CPUID_EAX, 0xffff0000),
+ PT_CAP(cycle_thresholds, 1, CPUID_EBX, 0xffff),
+ PT_CAP(psb_periods, 1, CPUID_EBX, 0xffff0000),
};
static u32 pt_cap_get(enum pt_capabilities cap)
@@ -213,10 +206,10 @@ static int __init pt_pmu_hw_init(void)
for (i = 0; i < PT_CPUID_LEAVES; i++) {
cpuid_count(20, i,
- &pt_pmu.caps[CR_EAX + i*PT_CPUID_REGS_NUM],
- &pt_pmu.caps[CR_EBX + i*PT_CPUID_REGS_NUM],
- &pt_pmu.caps[CR_ECX + i*PT_CPUID_REGS_NUM],
- &pt_pmu.caps[CR_EDX + i*PT_CPUID_REGS_NUM]);
+ &pt_pmu.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM],
+ &pt_pmu.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM],
+ &pt_pmu.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM],
+ &pt_pmu.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM]);
}
ret = -ENOMEM;
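
Both the capability table and the cached-CPUID array now index registers through the shared enum cpuid_regs_idx from <asm/processor.h> rather than pt.c's private copy, whose members were even declared in a different order (EAX, ECX, EDX, EBX). The lookup that consumes the table, pt_cap_get(), is unchanged by the patch; in outline it is (a kernel-context sketch, using the gcc builtin in place of __ffs()):

static u32 pt_cap_lookup(const u32 *caps, unsigned int subleaf,
			 enum cpuid_regs_idx reg, u32 mask)
{
	/* caps[] holds EAX..EDX for each CPUID(20) subleaf, in enum
	 * order, exactly as pt_pmu_hw_init() cached them above. */
	u32 v = caps[subleaf * PT_CPUID_REGS_NUM + reg] & mask;

	return v >> __builtin_ctz(mask);	/* mask must be non-zero */
}
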
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index de25aad07853..d34bd370074b 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -351,4 +351,10 @@ extern void arch_phys_wc_del(int handle);
#define arch_phys_wc_add arch_phys_wc_add
#endif
+#ifdef CONFIG_X86_PAT
+extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
+extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
+#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
+#endif
+
#endif /* _ASM_X86_IO_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 984a7bf17f6a..e7f8c62701d4 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -137,6 +137,17 @@ struct cpuinfo_x86 {
u32 microcode;
};
+struct cpuid_regs {
+ u32 eax, ebx, ecx, edx;
+};
+
+enum cpuid_regs_idx {
+ CPUID_EAX = 0,
+ CPUID_EBX,
+ CPUID_ECX,
+ CPUID_EDX,
+};
+
#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
@@ -178,6 +189,9 @@ extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
+extern u32 get_scattered_cpuid_leaf(unsigned int level,
+ unsigned int sub_leaf,
+ enum cpuid_regs_idx reg);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 8a5abaa7d453..931ced8ca345 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -454,6 +454,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+ acpi_penalize_sci_irq(bus_irq, trigger, polarity);
/*
* stash over-ride to indicate we've been here
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index c7364bd633e1..51287cd90bf6 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1042,8 +1042,11 @@ static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
if (apm_info.get_power_status_broken)
return APM_32_UNSUPPORTED;
- if (apm_bios_call(&call))
+ if (apm_bios_call(&call)) {
+ if (!call.err)
+ return APM_NO_ERROR;
return call.err;
+ }
*status = call.ebx;
*bat = call.ecx;
if (apm_info.get_power_status_swabinminutes) {
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 620ab06bcf45..017bda12caae 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -429,7 +429,7 @@ int __init save_microcode_in_initrd_amd(void)
* We need the physical address of the container for both bitness since
* boot_params.hdr.ramdisk_image is a physical address.
*/
- cont = __pa(container);
+ cont = __pa_nodebug(container);
cont_va = container;
#endif
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 1db8dc490b66..d1316f9c8329 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -17,11 +17,17 @@ struct cpuid_bit {
u32 sub_leaf;
};
-enum cpuid_regs {
- CR_EAX = 0,
- CR_ECX,
- CR_EDX,
- CR_EBX
+/* Please keep the table sorted by cpuid_bit.level for faster searching. */
+static const struct cpuid_bit cpuid_bits[] = {
+ { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
+ { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
+ { X86_FEATURE_INTEL_PT, CPUID_EBX, 25, 0x00000007, 0 },
+ { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 },
+ { X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 },
+ { X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
+ { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
+ { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
+ { 0, 0, 0, 0, 0 }
};
void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
@@ -30,18 +36,6 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
u32 regs[4];
const struct cpuid_bit *cb;
- static const struct cpuid_bit cpuid_bits[] = {
- { X86_FEATURE_INTEL_PT, CR_EBX,25, 0x00000007, 0 },
- { X86_FEATURE_AVX512_4VNNIW, CR_EDX, 2, 0x00000007, 0 },
- { X86_FEATURE_AVX512_4FMAPS, CR_EDX, 3, 0x00000007, 0 },
- { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
- { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
- { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
- { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 },
- { X86_FEATURE_PROC_FEEDBACK, CR_EDX,11, 0x80000007, 0 },
- { 0, 0, 0, 0, 0 }
- };
-
for (cb = cpuid_bits; cb->feature; cb++) {
/* Verify that the level is valid */
@@ -50,10 +44,35 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
max_level > (cb->level | 0xffff))
continue;
- cpuid_count(cb->level, cb->sub_leaf, &regs[CR_EAX],
- &regs[CR_EBX], &regs[CR_ECX], &regs[CR_EDX]);
+ cpuid_count(cb->level, cb->sub_leaf, &regs[CPUID_EAX],
+ &regs[CPUID_EBX], &regs[CPUID_ECX],
+ &regs[CPUID_EDX]);
if (regs[cb->reg] & (1 << cb->bit))
set_cpu_cap(c, cb->feature);
}
}
+
+u32 get_scattered_cpuid_leaf(unsigned int level, unsigned int sub_leaf,
+ enum cpuid_regs_idx reg)
+{
+ const struct cpuid_bit *cb;
+ u32 cpuid_val = 0;
+
+ for (cb = cpuid_bits; cb->feature; cb++) {
+
+ if (level > cb->level)
+ continue;
+
+ if (level < cb->level)
+ break;
+
+ if (reg == cb->reg && sub_leaf == cb->sub_leaf) {
+ if (cpu_has(&boot_cpu_data, cb->feature))
+ cpuid_val |= BIT(cb->bit);
+ }
+ }
+
+ return cpuid_val;
+}
+EXPORT_SYMBOL_GPL(get_scattered_cpuid_leaf);
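
get_scattered_cpuid_leaf() walks the same (now file-scope) table and rebuilds a synthetic register image from the boot CPU's cached feature bits, so callers only ever see scattered bits the kernel tracks. A hedged usage sketch (the caller is hypothetical; the exported function and CPUID_EDX come from this patch):

/* Hypothetical consumer: the CPUID.7:0 EDX view to hand to a guest,
 * where only tracked scattered bits (the AVX512_4VNNIW/4FMAPS entries
 * above) can possibly be set. */
static u32 guest_visible_cpuid7_edx(void)
{
	return get_scattered_cpuid_leaf(0x00000007, 0, CPUID_EDX);
}
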
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 2836de390f95..9095c80723d6 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -46,10 +46,6 @@
static struct class *cpuid_class;
-struct cpuid_regs {
- u32 eax, ebx, ecx, edx;
-};
-
static void cpuid_smp_cpuid(void *cmd_block)
{
struct cpuid_regs *cmd = (struct cpuid_regs *)cmd_block;
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index efe73aacf966..7b0d3da52fb4 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -18,8 +18,10 @@
#ifdef CC_USING_FENTRY
# define function_hook __fentry__
+EXPORT_SYMBOL(__fentry__)
#else
# define function_hook mcount
+EXPORT_SYMBOL(mcount)
#endif
/* All cases save the original rbp (8 bytes) */
@@ -295,7 +297,6 @@ trace:
jmp fgraph_trace
END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
-EXPORT_SYMBOL(function_hook)
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 51402a7e4ca6..0bee04d41bed 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -625,8 +625,6 @@ static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
amd_disable_seq_and_redirect_scrub);
-#endif
-
#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
#include <linux/jump_label.h>
#include <asm/string_64.h>
@@ -657,3 +655,4 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
#endif
+#endif
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index bbfbca5fea0c..9c337b0e8ba7 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1221,11 +1221,16 @@ void __init setup_arch(char **cmdline_p)
*/
get_smp_config();
+ /*
+ * Systems w/o ACPI and mptables might not have the local APIC
+ * mapped yet, but prefill_possible_map() might need to access it.
+ */
+ init_apic_mappings();
+
prefill_possible_map();
init_cpu_to_node();
- init_apic_mappings();
io_apic_init_mappings();
kvm_guest_init();
diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c
index 9298993dc8b7..2d721e533cf4 100644
--- a/arch/x86/kernel/unwind_guess.c
+++ b/arch/x86/kernel/unwind_guess.c
@@ -47,7 +47,14 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
get_stack_info(first_frame, state->task, &state->stack_info,
&state->stack_mask);
- if (!__kernel_text_address(*first_frame))
+ /*
+ * The caller can provide the address of the first frame directly
+ * (first_frame) or indirectly (regs->sp) to indicate which stack frame
+ * to start unwinding at. Skip ahead until we reach it.
+ */
+ if (!unwind_done(state) &&
+ (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
+ !__kernel_text_address(*first_frame)))
unwind_next_frame(state);
}
EXPORT_SYMBOL_GPL(__unwind_start);
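
One detail worth calling out in the new condition: the || short-circuits, so *first_frame is only dereferenced once on_stack() has confirmed the pointer actually lies within the stack being unwound, which is exactly the stray read the extra check prevents. The same guard, pulled out as a helper (a sketch, not code from the patch):

static bool start_frame_is_untrusted(struct unwind_state *state,
				     unsigned long *first_frame)
{
	/* Bounds check first; only then is the read of *first_frame safe. */
	return !unwind_done(state) &&
	       (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
		!__kernel_text_address(*first_frame));
}
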
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index ddd2661c4502..887e57182716 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -104,10 +104,10 @@ void __init kernel_randomize_memory(void)
* consistent with the vaddr_start/vaddr_end variables.
*/
BUILD_BUG_ON(vaddr_start >= vaddr_end);
- BUILD_BUG_ON(config_enabled(CONFIG_X86_ESPFIX64) &&
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
vaddr_end >= EFI_VA_START);
- BUILD_BUG_ON((config_enabled(CONFIG_X86_ESPFIX64) ||
- config_enabled(CONFIG_EFI)) &&
+ BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
+ IS_ENABLED(CONFIG_EFI)) &&
vaddr_end >= __START_KERNEL_map);
BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 170cc4ff057b..83e701f160a9 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -730,6 +730,20 @@ void io_free_memtype(resource_size_t start, resource_size_t end)
free_memtype(start, end);
}
+int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
+{
+ enum page_cache_mode type = _PAGE_CACHE_MODE_WC;
+
+ return io_reserve_memtype(start, start + size, &type);
+}
+EXPORT_SYMBOL(arch_io_reserve_memtype_wc);
+
+void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
+{
+ io_free_memtype(start, start + size);
+}
+EXPORT_SYMBOL(arch_io_free_memtype_wc);
+
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
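
The pair gives drivers a PAT-native way to claim an I/O range as write-combining without programming MTRRs; the intended users were graphics drivers mapping VRAM apertures. A hedged usage sketch (the driver shape is hypothetical; ioremap_wc()/iounmap() are the usual kernel interfaces):

/* Hypothetical driver fragment: reserve WC for the aperture before
 * mapping it, and always pair the reserve with a free on teardown. */
static void __iomem *aper_map;

static int aper_init(resource_size_t base, resource_size_t size)
{
	int ret = arch_io_reserve_memtype_wc(base, size);

	if (ret)
		return ret;
	aper_map = ioremap_wc(base, size);
	if (!aper_map) {
		arch_io_free_memtype_wc(base, size);
		return -ENOMEM;
	}
	return 0;
}

static void aper_fini(resource_size_t base, resource_size_t size)
{
	iounmap(aper_map);
	arch_io_free_memtype_wc(base, size);
}
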
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c0fdd57da7aa..bdd855685403 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1837,6 +1837,7 @@ static void __init init_hvm_pv_info(void)
xen_domain_type = XEN_HVM_DOMAIN;
}
+#endif
static int xen_cpu_up_prepare(unsigned int cpu)
{
@@ -1887,6 +1888,7 @@ static int xen_cpu_up_online(unsigned int cpu)
return 0;
}
+#ifdef CONFIG_XEN_PVHVM
#ifdef CONFIG_KEXEC_CORE
static void xen_hvm_shutdown(void)
{