author     Linus Torvalds <torvalds@linux-foundation.org>   2019-01-05 11:23:17 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-01-05 11:23:17 -0800
commit     1205b62390eed4e747232d183fbf412a5aecacd9 (patch)
tree       7fde1a5aaee01a23e6ea4137329f332d5e179adf /arch/arm/mm
parent     9ee3b3f4a5eb523ef27675ac2fcd2269b9d68767 (diff)
parent     6de92920a717ea2b7b45bb3d651b8bb951eab185 (diff)
download   linux-1205b62390eed4e747232d183fbf412a5aecacd9.tar.bz2
Merge tag 'for-4.21' of git://git.armlinux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:
"Included in this update:
- Florian Fainelli noticed that userspace segfaults caused by the
lack of kernel-userspace helpers were hard to diagnose; we now issue
a warning when userspace tries to use the helpers but the kernel
has them disabled (a condensed sketch of the check follows the
shortlog below).
- Ben Dooks wants the old ATAG-provided serial number to remain
available on DT systems.
- Some cleanup of assembly by Nicolas Pitre.
- User accessors optimisation from Vincent Whitchurch.
- More robust kdump on SMP systems from Yufen Wang.
- Sebastian Andrzej Siewior noticed problems with the SMP "boot_lock"
on RT kernels, and so we convert the Versatile series of platforms
to use a raw spinlock instead, consolidating the Versatile
implementation. We entirely remove the boot_lock on OMAP systems,
where it's unnecessary. Further patches for other systems will be
submitted for the following merge window.
- Start switching old StrongARM-11x0 systems to use gpiolib rather
than their private GPIO implementation - mostly PCMCIA bits.
- ARM Kconfig cleanups.
- Clean up a mostly harmless mistake in the recent Spectre patch in
4.20 (which had the effect that data that could be placed into the
init sections was incorrectly always placed in the rodata section)"
* tag 'for-4.21' of git://git.armlinux.org.uk/~rmk/linux-arm: (25 commits)
ARM: omap2: remove unnecessary boot_lock
ARM: versatile: rename and comment SMP implementation
ARM: versatile: convert boot_lock to raw
ARM: vexpress/realview: consolidate immitation CPU hotplug
ARM: fix the cockup in the previous patch
ARM: sa1100/cerf: switch to using gpio_led_register_device()
ARM: sa1100/assabet: switch to using gpio leds
ARM: sa1100/assabet: add gpio keys support for right-hand two buttons
ARM: sa1111: remove legacy GPIO interfaces
pcmcia: sa1100*: remove redundant bvd1/bvd2 setting
ARM: pxa/lubbock: switch PCMCIA to MAX1600 library
ARM: pxa/mainstone: switch PCMCIA to MAX1600 library and gpiod APIs
ARM: sa1100/neponset: switch PCMCIA to MAX1600 library and gpiod APIs
ARM: sa1100/jornada720: switch PCMCIA to gpiod APIs
pcmcia: add MAX1600 library
ARM: sa1100: explicitly register sa11x0-pcmcia devices
ARM: 8813/1: Make aligned 2-byte getuser()/putuser() atomic on ARMv6+
ARM: 8812/1: Optimise copy_{from/to}_user for !CPU_USE_DOMAINS
ARM: 8811/1: always list both ldrd/strd registers explicitly
ARM: 8808/1: kexec:offline panic_smp_self_stop CPU
...
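The kuser-helpers warning described in the first bullet of the merge message lands in __do_user_fault(); the actual change is the arch/arm/mm/fault.c hunk in the diff below. Condensed to its essentials, and keeping the names used by that hunk (sig, addr, tsk and PAGE_MASK all come from the surrounding function, so this is not a standalone build), the check amounts to:

	#ifndef CONFIG_KUSER_HELPERS
		/*
		 * A SIGSEGV whose faulting address lies in the (absent) kuser
		 * helper page at 0xffff0000 almost certainly means userspace
		 * tried to call one of the helpers while the kernel was built
		 * without them, so log a rate-limited hint instead of leaving
		 * only a silent segfault behind.
		 */
		if ((sig == SIGSEGV) && ((addr & PAGE_MASK) == 0xffff0000))
			printk_ratelimited(KERN_DEBUG
				"%s: CONFIG_KUSER_HELPERS disabled at 0x%08lx\n",
				tsk->comm, addr);
	#endif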
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/copypage-fa.c        |  35
-rw-r--r--  arch/arm/mm/copypage-feroceon.c  |  98
-rw-r--r--  arch/arm/mm/copypage-v4mc.c      |  19
-rw-r--r--  arch/arm/mm/copypage-v4wb.c      |  41
-rw-r--r--  arch/arm/mm/copypage-v4wt.c      |  37
-rw-r--r--  arch/arm/mm/copypage-xsc3.c      |  79
-rw-r--r--  arch/arm/mm/copypage-xscale.c    |  79
-rw-r--r--  arch/arm/mm/fault.c              |   6
-rw-r--r--  arch/arm/mm/proc-macros.S        |   4
-rw-r--r--  arch/arm/mm/pv-fixup-asm.S       |  16
10 files changed, 202 insertions, 212 deletions
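The copypage-*.c hunks that follow all apply the same conversion: the old __naked functions assumed their arguments arrived in r0/r1 and saved scratch registers with a hand-written stmfd/ldmfd pair, whereas the new versions are ordinary C functions whose inline asm takes the two pointers as read-write operands ("+&r"), ties the loop counter to a scratch output operand via a matching constraint ("2" (PAGE_SIZE / ...)), and lists the remaining fixed registers as clobbers so the compiler handles saving them. A stripped-down illustration of the idiom, not taken from the patch itself (ARM32 only; the function and parameter names are made up, and the "memory"/"cc" clobbers are added here so the example is self-contained):

	/* Copy 'nblocks' 16-byte blocks from 'from' to 'to' using the same
	 * operand/clobber idiom as the copypage conversions below. */
	static void copy_blocks(void *to, const void *from, int nblocks)
	{
		int tmp;

		asm volatile ("\
	1:	ldmia	%1!, {r3, r4, ip, lr}	\n\
		stmia	%0!, {r3, r4, ip, lr}	\n\
		subs	%2, %2, #1		\n\
		bne	1b"
		: "+&r" (to), "+&r" (from), "=&r" (tmp)	/* pointers advance in place, tmp is scratch */
		: "2" (nblocks)				/* counter starts in whatever register %2 gets */
		: "r3", "r4", "ip", "lr", "memory", "cc");
	}

Because the operands are ordinary register variables rather than fixed r0/r1, the compiler is free to inline the function and to generate the prologue/epilogue itself, which is exactly what dropping __naked buys in the hunks below.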
diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c
index d130a5ece5d5..bf24690ec83a 100644
--- a/arch/arm/mm/copypage-fa.c
+++ b/arch/arm/mm/copypage-fa.c
@@ -17,26 +17,25 @@
 /*
  * Faraday optimised copy_user_page
  */
-static void __naked
-fa_copy_user_page(void *kto, const void *kfrom)
+static void fa_copy_user_page(void *kto, const void *kfrom)
 {
-	asm("\
-	stmfd	sp!, {r4, lr}			@ 2\n\
-	mov	r2, %0				@ 1\n\
-1:	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
-	stmia	r0, {r3, r4, ip, lr}		@ 4\n\
-	mcr	p15, 0, r0, c7, c14, 1		@ 1 clean and invalidate D line\n\
-	add	r0, r0, #16			@ 1\n\
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
-	stmia	r0, {r3, r4, ip, lr}		@ 4\n\
-	mcr	p15, 0, r0, c7, c14, 1		@ 1 clean and invalidate D line\n\
-	add	r0, r0, #16			@ 1\n\
-	subs	r2, r2, #1			@ 1\n\
+	int tmp;
+
+	asm volatile ("\
+1:	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
+	stmia	%0, {r3, r4, ip, lr}		@ 4\n\
+	mcr	p15, 0, %0, c7, c14, 1		@ 1 clean and invalidate D line\n\
+	add	%0, %0, #16			@ 1\n\
+	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
+	stmia	%0, {r3, r4, ip, lr}		@ 4\n\
+	mcr	p15, 0, %0, c7, c14, 1		@ 1 clean and invalidate D line\n\
+	add	%0, %0, #16			@ 1\n\
+	subs	%2, %2, #1			@ 1\n\
 	bne	1b				@ 1\n\
-	mcr	p15, 0, r2, c7, c10, 4		@ 1 drain WB\n\
-	ldmfd	sp!, {r4, pc}			@ 3"
-	:
-	: "I" (PAGE_SIZE / 32));
+	mcr	p15, 0, %2, c7, c10, 4		@ 1 drain WB"
+	: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
+	: "2" (PAGE_SIZE / 32)
+	: "r3", "r4", "ip", "lr");
 }
 
 void fa_copy_user_highpage(struct page *to, struct page *from,
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index 49ee0c1a7209..cc819732d9b8 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -13,58 +13,56 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 
-static void __naked
-feroceon_copy_user_page(void *kto, const void *kfrom)
+static void feroceon_copy_user_page(void *kto, const void *kfrom)
 {
-	asm("\
-	stmfd	sp!, {r4-r9, lr}		\n\
-	mov	ip, %2				\n\
-1:	mov	lr, r1				\n\
-	ldmia	r1!, {r2 - r9}			\n\
-	pld	[lr, #32]			\n\
-	pld	[lr, #64]			\n\
-	pld	[lr, #96]			\n\
-	pld	[lr, #128]			\n\
-	pld	[lr, #160]			\n\
-	pld	[lr, #192]			\n\
-	pld	[lr, #224]			\n\
-	stmia	r0, {r2 - r9}			\n\
-	ldmia	r1!, {r2 - r9}			\n\
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
-	add	r0, r0, #32			\n\
-	stmia	r0, {r2 - r9}			\n\
-	ldmia	r1!, {r2 - r9}			\n\
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
-	add	r0, r0, #32			\n\
-	stmia	r0, {r2 - r9}			\n\
-	ldmia	r1!, {r2 - r9}			\n\
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
-	add	r0, r0, #32			\n\
-	stmia	r0, {r2 - r9}			\n\
-	ldmia	r1!, {r2 - r9}			\n\
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
-	add	r0, r0, #32			\n\
-	stmia	r0, {r2 - r9}			\n\
-	ldmia	r1!, {r2 - r9}			\n\
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
-	add	r0, r0, #32			\n\
-	stmia	r0, {r2 - r9}			\n\
-	ldmia	r1!, {r2 - r9}			\n\
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
-	add	r0, r0, #32			\n\
-	stmia	r0, {r2 - r9}			\n\
-	ldmia	r1!, {r2 - r9}			\n\
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
-	add	r0, r0, #32			\n\
-	stmia	r0, {r2 - r9}			\n\
-	subs	ip, ip, #(32 * 8)		\n\
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
-	add	r0, r0, #32			\n\
+	int tmp;
+
+	asm volatile ("\
+1:	ldmia	%1!, {r2 - r7, ip, lr}		\n\
+	pld	[%1, #0]			\n\
+	pld	[%1, #32]			\n\
+	pld	[%1, #64]			\n\
+	pld	[%1, #96]			\n\
+	pld	[%1, #128]			\n\
+	pld	[%1, #160]			\n\
+	pld	[%1, #192]			\n\
+	stmia	%0, {r2 - r7, ip, lr}		\n\
+	ldmia	%1!, {r2 - r7, ip, lr}		\n\
+	mcr	p15, 0, %0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	%0, %0, #32			\n\
+	stmia	%0, {r2 - r7, ip, lr}		\n\
+	ldmia	%1!, {r2 - r7, ip, lr}		\n\
+	mcr	p15, 0, %0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	%0, %0, #32			\n\
+	stmia	%0, {r2 - r7, ip, lr}		\n\
+	ldmia	%1!, {r2 - r7, ip, lr}		\n\
+	mcr	p15, 0, %0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	%0, %0, #32			\n\
+	stmia	%0, {r2 - r7, ip, lr}		\n\
+	ldmia	%1!, {r2 - r7, ip, lr}		\n\
+	mcr	p15, 0, %0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	%0, %0, #32			\n\
+	stmia	%0, {r2 - r7, ip, lr}		\n\
+	ldmia	%1!, {r2 - r7, ip, lr}		\n\
+	mcr	p15, 0, %0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	%0, %0, #32			\n\
+	stmia	%0, {r2 - r7, ip, lr}		\n\
+	ldmia	%1!, {r2 - r7, ip, lr}		\n\
+	mcr	p15, 0, %0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	%0, %0, #32			\n\
+	stmia	%0, {r2 - r7, ip, lr}		\n\
+	ldmia	%1!, {r2 - r7, ip, lr}		\n\
+	mcr	p15, 0, %0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	%0, %0, #32			\n\
+	stmia	%0, {r2 - r7, ip, lr}		\n\
+	subs	%2, %2, #(32 * 8)		\n\
+	mcr	p15, 0, %0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	%0, %0, #32			\n\
 	bne	1b				\n\
-	mcr	p15, 0, ip, c7, c10, 4		@ drain WB\n\
-	ldmfd	sp!, {r4-r9, pc}"
-	:
-	: "r" (kto), "r" (kfrom), "I" (PAGE_SIZE));
+	mcr	p15, 0, %2, c7, c10, 4		@ drain WB"
+	: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
+	: "2" (PAGE_SIZE)
+	: "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
 }
 
 void feroceon_copy_user_highpage(struct page *to, struct page *from,
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 0224416cba3c..b03202cddddb 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -40,12 +40,11 @@ static DEFINE_RAW_SPINLOCK(minicache_lock);
  * instruction. If your processor does not supply this, you have to write your
  * own copy_user_highpage that does the right thing.
  */
-static void __naked
-mc_copy_user_page(void *from, void *to)
+static void mc_copy_user_page(void *from, void *to)
 {
-	asm volatile(
-	"stmfd	sp!, {r4, lr}			@ 2\n\
-	mov	r4, %2				@ 1\n\
+	int tmp;
+
+	asm volatile ("\
 	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
 1:	mcr	p15, 0, %1, c7, c6, 1		@ 1 invalidate D line\n\
 	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
@@ -55,13 +54,13 @@ mc_copy_user_page(void *from, void *to)
 	mcr	p15, 0, %1, c7, c6, 1		@ 1 invalidate D line\n\
 	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
 	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
-	subs	r4, r4, #1			@ 1\n\
+	subs	%2, %2, #1			@ 1\n\
 	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
 	ldmneia	%0!, {r2, r3, ip, lr}		@ 4\n\
-	bne	1b				@ 1\n\
-	ldmfd	sp!, {r4, pc}			@ 3"
-	:
-	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
+	bne	1b				@ "
+	: "+&r" (from), "+&r" (to), "=&r" (tmp)
+	: "2" (PAGE_SIZE / 64)
+	: "r2", "r3", "ip", "lr");
 }
 
 void v4_mc_copy_user_highpage(struct page *to, struct page *from,
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 067d0fdd630c..cd3e165afeed 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -22,29 +22,28 @@
  * instruction. If your processor does not supply this, you have to write your
  * own copy_user_highpage that does the right thing.
  */
-static void __naked
-v4wb_copy_user_page(void *kto, const void *kfrom)
+static void v4wb_copy_user_page(void *kto, const void *kfrom)
 {
-	asm("\
-	stmfd	sp!, {r4, lr}			@ 2\n\
-	mov	r2, %2				@ 1\n\
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
-1:	mcr	p15, 0, r0, c7, c6, 1		@ 1 invalidate D line\n\
-	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1\n\
-	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
-	mcr	p15, 0, r0, c7, c6, 1		@ 1 invalidate D line\n\
-	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
-	subs	r2, r2, #1			@ 1\n\
-	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
-	ldmneia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	int tmp;
+
+	asm volatile ("\
+	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
+1:	mcr	p15, 0, %0, c7, c6, 1		@ 1 invalidate D line\n\
+	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	%1!, {r3, r4, ip, lr}		@ 4+1\n\
+	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
+	mcr	p15, 0, %0, c7, c6, 1		@ 1 invalidate D line\n\
+	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
+	subs	%2, %2, #1			@ 1\n\
+	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmneia	%1!, {r3, r4, ip, lr}		@ 4\n\
 	bne	1b				@ 1\n\
-	mcr	p15, 0, r1, c7, c10, 4		@ 1 drain WB\n\
-	ldmfd	sp!, {r4, pc}			@ 3"
-	:
-	: "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
+	mcr	p15, 0, %1, c7, c10, 4		@ 1 drain WB"
+	: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
+	: "2" (PAGE_SIZE / 64)
+	: "r3", "r4", "ip", "lr");
 }
 
 void v4wb_copy_user_highpage(struct page *to, struct page *from,
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index b85c5da2e510..8614572e1296 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -20,27 +20,26 @@
  * dirty data in the cache. However, we do have to ensure that
  * subsequent reads are up to date.
  */
-static void __naked
-v4wt_copy_user_page(void *kto, const void *kfrom)
+static void v4wt_copy_user_page(void *kto, const void *kfrom)
 {
-	asm("\
-	stmfd	sp!, {r4, lr}			@ 2\n\
-	mov	r2, %2				@ 1\n\
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
-1:	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1\n\
-	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
-	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
-	subs	r2, r2, #1			@ 1\n\
-	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
-	ldmneia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	int tmp;
+
+	asm volatile ("\
+	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
+1:	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	%1!, {r3, r4, ip, lr}		@ 4+1\n\
+	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
+	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
+	subs	%2, %2, #1			@ 1\n\
+	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmneia	%1!, {r3, r4, ip, lr}		@ 4\n\
 	bne	1b				@ 1\n\
-	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache\n\
-	ldmfd	sp!, {r4, pc}			@ 3"
-	:
-	: "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
+	mcr	p15, 0, %2, c7, c7, 0		@ flush ID cache"
+	: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
+	: "2" (PAGE_SIZE / 64)
+	: "r3", "r4", "ip", "lr");
 }
 
 void v4wt_copy_user_highpage(struct page *to, struct page *from,
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index 03a2042aced5..a08158241ad1 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -21,53 +21,46 @@
 
 /*
  * XSC3 optimised copy_user_highpage
- *  r0 = destination
- *  r1 = source
 *
 * The source page may have some clean entries in the cache already, but we
 * can safely ignore them - break_cow() will flush them out of the cache
 * if we eventually end up using our copied page.
 *
 */
-static void __naked
-xsc3_mc_copy_user_page(void *kto, const void *kfrom)
+static void xsc3_mc_copy_user_page(void *kto, const void *kfrom)
 {
-	asm("\
-	stmfd	sp!, {r4, r5, lr}		\n\
-	mov	lr, %2				\n\
-						\n\
-	pld	[r1, #0]			\n\
-	pld	[r1, #32]			\n\
-1:	pld	[r1, #64]			\n\
-	pld	[r1, #96]			\n\
+	int tmp;
+
+	asm volatile ("\
+	pld	[%1, #0]			\n\
+	pld	[%1, #32]			\n\
+1:	pld	[%1, #64]			\n\
+	pld	[%1, #96]			\n\
 						\n\
-2:	ldrd	r2, [r1], #8			\n\
-	mov	ip, r0				\n\
-	ldrd	r4, [r1], #8			\n\
-	mcr	p15, 0, ip, c7, c6, 1		@ invalidate\n\
-	strd	r2, [r0], #8			\n\
-	ldrd	r2, [r1], #8			\n\
-	strd	r4, [r0], #8			\n\
-	ldrd	r4, [r1], #8			\n\
-	strd	r2, [r0], #8			\n\
-	strd	r4, [r0], #8			\n\
-	ldrd	r2, [r1], #8			\n\
-	mov	ip, r0				\n\
-	ldrd	r4, [r1], #8			\n\
-	mcr	p15, 0, ip, c7, c6, 1		@ invalidate\n\
-	strd	r2, [r0], #8			\n\
-	ldrd	r2, [r1], #8			\n\
-	subs	lr, lr, #1			\n\
-	strd	r4, [r0], #8			\n\
-	ldrd	r4, [r1], #8			\n\
-	strd	r2, [r0], #8			\n\
-	strd	r4, [r0], #8			\n\
+2:	ldrd	r2, r3, [%1], #8		\n\
+	ldrd	r4, r5, [%1], #8		\n\
+	mcr	p15, 0, %0, c7, c6, 1		@ invalidate\n\
+	strd	r2, r3, [%0], #8		\n\
+	ldrd	r2, r3, [%1], #8		\n\
+	strd	r4, r5, [%0], #8		\n\
+	ldrd	r4, r5, [%1], #8		\n\
+	strd	r2, r3, [%0], #8		\n\
+	strd	r4, r5, [%0], #8		\n\
+	ldrd	r2, r3, [%1], #8		\n\
+	ldrd	r4, r5, [%1], #8		\n\
+	mcr	p15, 0, %0, c7, c6, 1		@ invalidate\n\
+	strd	r2, r3, [%0], #8		\n\
+	ldrd	r2, r3, [%1], #8		\n\
+	subs	%2, %2, #1			\n\
+	strd	r4, r5, [%0], #8		\n\
+	ldrd	r4, r5, [%1], #8		\n\
+	strd	r2, r3, [%0], #8		\n\
+	strd	r4, r5, [%0], #8		\n\
 	bgt	1b				\n\
-	beq	2b				\n\
-						\n\
-	ldmfd	sp!, {r4, r5, pc}"
-	:
-	: "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1));
+	beq	2b				"
+	: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
+	: "2" (PAGE_SIZE / 64 - 1)
+	: "r2", "r3", "r4", "r5");
 }
 
 void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
@@ -85,8 +78,6 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
 
 /*
  * XScale optimised clear_user_page
- *  r0 = destination
- *  r1 = virtual user address of ultimate destination page
  */
 void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
@@ -96,10 +87,10 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	mov	r2, #0				\n\
 	mov	r3, #0				\n\
 1:	mcr	p15, 0, %0, c7, c6, 1		@ invalidate line\n\
-	strd	r2, [%0], #8			\n\
-	strd	r2, [%0], #8			\n\
-	strd	r2, [%0], #8			\n\
-	strd	r2, [%0], #8			\n\
+	strd	r2, r3, [%0], #8		\n\
+	strd	r2, r3, [%0], #8		\n\
+	strd	r2, r3, [%0], #8		\n\
+	strd	r2, r3, [%0], #8		\n\
 	subs	r1, r1, #1			\n\
 	bne	1b"
 	: "=r" (ptr)
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 97972379f4d6..63b921936754 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -36,52 +36,51 @@ static DEFINE_RAW_SPINLOCK(minicache_lock);
 * Dcache aliasing issue. The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 */
-static void __naked
-mc_copy_user_page(void *from, void *to)
+static void mc_copy_user_page(void *from, void *to)
 {
+	int tmp;
+
	/*
	 * Strangely enough, best performance is achieved
	 * when prefetching destination as well. (NP)
	 */
-	asm volatile(
-	"stmfd	sp!, {r4, r5, lr}		\n\
-	mov	lr, %2				\n\
-	pld	[r0, #0]			\n\
-	pld	[r0, #32]			\n\
-	pld	[r1, #0]			\n\
-	pld	[r1, #32]			\n\
-1:	pld	[r0, #64]			\n\
-	pld	[r0, #96]			\n\
-	pld	[r1, #64]			\n\
-	pld	[r1, #96]			\n\
-2:	ldrd	r2, [r0], #8			\n\
-	ldrd	r4, [r0], #8			\n\
-	mov	ip, r1				\n\
-	strd	r2, [r1], #8			\n\
-	ldrd	r2, [r0], #8			\n\
-	strd	r4, [r1], #8			\n\
-	ldrd	r4, [r0], #8			\n\
-	strd	r2, [r1], #8			\n\
-	strd	r4, [r1], #8			\n\
+	asm volatile ("\
+	pld	[%0, #0]			\n\
+	pld	[%0, #32]			\n\
+	pld	[%1, #0]			\n\
+	pld	[%1, #32]			\n\
+1:	pld	[%0, #64]			\n\
+	pld	[%0, #96]			\n\
+	pld	[%1, #64]			\n\
+	pld	[%1, #96]			\n\
+2:	ldrd	r2, r3, [%0], #8		\n\
+	ldrd	r4, r5, [%0], #8		\n\
+	mov	ip, %1				\n\
+	strd	r2, r3, [%1], #8		\n\
+	ldrd	r2, r3, [%0], #8		\n\
+	strd	r4, r5, [%1], #8		\n\
+	ldrd	r4, r5, [%0], #8		\n\
+	strd	r2, r3, [%1], #8		\n\
+	strd	r4, r5, [%1], #8		\n\
 	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
-	ldrd	r2, [r0], #8			\n\
+	ldrd	r2, r3, [%0], #8		\n\
 	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
-	ldrd	r4, [r0], #8			\n\
-	mov	ip, r1				\n\
-	strd	r2, [r1], #8			\n\
-	ldrd	r2, [r0], #8			\n\
-	strd	r4, [r1], #8			\n\
-	ldrd	r4, [r0], #8			\n\
-	strd	r2, [r1], #8			\n\
-	strd	r4, [r1], #8			\n\
+	ldrd	r4, r5, [%0], #8		\n\
+	mov	ip, %1				\n\
+	strd	r2, r3, [%1], #8		\n\
+	ldrd	r2, r3, [%0], #8		\n\
+	strd	r4, r5, [%1], #8		\n\
+	ldrd	r4, r5, [%0], #8		\n\
+	strd	r2, r3, [%1], #8		\n\
+	strd	r4, r5, [%1], #8		\n\
 	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
-	subs	lr, lr, #1			\n\
+	subs	%2, %2, #1			\n\
 	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
 	bgt	1b				\n\
-	beq	2b				\n\
-	ldmfd	sp!, {r4, r5, pc} "
-	:
-	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
+	beq	2b				"
+	: "+&r" (from), "+&r" (to), "=&r" (tmp)
+	: "2" (PAGE_SIZE / 64 - 1)
+	: "r2", "r3", "r4", "r5", "ip");
 }
 
 void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
@@ -115,10 +114,10 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	mov	r2, #0				\n\
 	mov	r3, #0				\n\
 1:	mov	ip, %0				\n\
-	strd	r2, [%0], #8			\n\
-	strd	r2, [%0], #8			\n\
-	strd	r2, [%0], #8			\n\
-	strd	r2, [%0], #8			\n\
+	strd	r2, r3, [%0], #8		\n\
+	strd	r2, r3, [%0], #8		\n\
+	strd	r2, r3, [%0], #8		\n\
+	strd	r2, r3, [%0], #8		\n\
 	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
 	subs	r1, r1, #1			\n\
 	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index f4ea4c62c613..58f69fa07df9 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -173,6 +173,12 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
 		show_regs(regs);
 	}
 #endif
+#ifndef CONFIG_KUSER_HELPERS
+	if ((sig == SIGSEGV) && ((addr & PAGE_MASK) == 0xffff0000))
+		printk_ratelimited(KERN_DEBUG
+			"%s: CONFIG_KUSER_HELPERS disabled at 0x%08lx\n",
+			tsk->comm, addr);
+#endif
 
 	tsk->thread.address = addr;
 	tsk->thread.error_code = fsr;
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 19516fbc2c55..5461d589a1e2 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -278,7 +278,7 @@
 * If we are building for big.Little with branch predictor hardening,
 * we need the processor function tables to remain available after boot.
 */
-#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
 	.section ".rodata"
 #endif
 	.type	\name\()_processor_functions, #object
@@ -316,7 +316,7 @@ ENTRY(\name\()_processor_functions)
 	.endif
 
 	.size	\name\()_processor_functions, . - \name\()_processor_functions
-#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
 	.previous
 #endif
 .endm
diff --git a/arch/arm/mm/pv-fixup-asm.S b/arch/arm/mm/pv-fixup-asm.S
index 1867f3e43016..fd2ff9034d17 100644
--- a/arch/arm/mm/pv-fixup-asm.S
+++ b/arch/arm/mm/pv-fixup-asm.S
@@ -33,10 +33,10 @@ ENTRY(lpae_pgtables_remap_asm)
 	add	r7, r2, #0x1000
 	add	r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER
 	add	r7, r7, #PAGE_OFFSET >> (SECTION_SHIFT - L2_ORDER)
-1:	ldrd	r4, [r7]
+1:	ldrd	r4, r5, [r7]
 	adds	r4, r4, r0
 	adc	r5, r5, r1
-	strd	r4, [r7], #1 << L2_ORDER
+	strd	r4, r5, [r7], #1 << L2_ORDER
 	cmp	r7, r6
 	bls	1b
 
@@ -44,22 +44,22 @@ ENTRY(lpae_pgtables_remap_asm)
 	add	r7, r2, #0x1000
 	add	r7, r7, r3, lsr #SECTION_SHIFT - L2_ORDER
 	bic	r7, r7, #(1 << L2_ORDER) - 1
-	ldrd	r4, [r7]
+	ldrd	r4, r5, [r7]
 	adds	r4, r4, r0
 	adc	r5, r5, r1
-	strd	r4, [r7], #1 << L2_ORDER
-	ldrd	r4, [r7]
+	strd	r4, r5, [r7], #1 << L2_ORDER
+	ldrd	r4, r5, [r7]
 	adds	r4, r4, r0
 	adc	r5, r5, r1
-	strd	r4, [r7]
+	strd	r4, r5, [r7]
 
 	/* Update level 1 entries */
 	mov	r6, #4
 	mov	r7, r2
-2:	ldrd	r4, [r7]
+2:	ldrd	r4, r5, [r7]
 	adds	r4, r4, r0
 	adc	r5, r5, r1
-	strd	r4, [r7], #1 << L1_ORDER
+	strd	r4, r5, [r7], #1 << L1_ORDER
 	subs	r6, r6, #1
 	bne	2b
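The ldrd/strd changes in the xsc3, xscale and pv-fixup-asm hunks above correspond to "ARM: 8811/1: always list both ldrd/strd registers explicitly" in the shortlog: ldrd r4, [r7] implicitly transfers the r4/r5 pair, and spelling out the second register keeps the pair visible to the reader and avoids the single-register shorthand, which not every assembler accepts. A tiny stand-alone illustration of the two equivalent forms (ARM32 only; the function name is made up):

	/* Both asm statements perform the same 64-bit load into the r4:r5
	 * pair; only the second form names the upper register explicitly. */
	static void ldrd_forms(const void *p)
	{
		asm volatile ("ldrd r4, [%0]"     : : "r" (p) : "r4", "r5", "memory");
		asm volatile ("ldrd r4, r5, [%0]" : : "r" (p) : "r4", "r5", "memory");
	}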