| field | value |
|---|---|
| author | James Morse <james.morse@arm.com>, 2015-07-22 19:05:54 +0100 |
| committer | Will Deacon <will.deacon@arm.com>, 2015-07-27 11:08:41 +0100 |
| commit | 338d4f49d6f7114a017d294ccf7374df4f998edc |
| tree | af0b5cedc6e042623346d4e82be24eda190626a1 /arch/arm64/lib |
| parent | 9ded63aaf83eba76e1a54ac02581c2badc497f1a |
| download | linux-338d4f49d6f7114a017d294ccf7374df4f998edc.tar.bz2 |
arm64: kernel: Add support for Privileged Access Never
'Privileged Access Never' is a new ARMv8.1 feature which prevents
privileged code from accessing any virtual address where read or write
access is also permitted at EL0.
This patch enables the PAN feature on all CPUs, and modifies the
{get,put}_user helpers to temporarily permit access.
This will catch kernel bugs where user memory is accessed directly.
'Unprivileged loads and stores' using ldtrb et al. are unaffected by PAN.
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
[will: use ALTERNATIVE in asm and tidy up pan_enable check]
Signed-off-by: Will Deacon <will.deacon@arm.com>
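
As background for the change below, here is a minimal C sketch of the "bracket each user access with PSTATE.PAN toggles" idea the commit message describes. It is an illustration only, not the kernel's implementation: `pan_clear`, `pan_set` and `read_user_byte` are hypothetical names, and it assumes a toolchain that accepts the ARMv8.1 `msr pan, #imm` instruction (the kernel instead patches the toggles in with ALTERNATIVE, as the diff shows).

```c
/*
 * Sketch only: bracket a privileged access to user memory with
 * PSTATE.PAN writes. Assumes an ARMv8.1 assembler ("msr pan, #imm").
 * All function names here are hypothetical.
 */
static inline void pan_clear(void)
{
	/* PSTATE.PAN = 0: privileged code may access user mappings */
	asm volatile("msr pan, #0" ::: "memory");
}

static inline void pan_set(void)
{
	/* PSTATE.PAN = 1: privileged access to user mappings faults */
	asm volatile("msr pan, #1" ::: "memory");
}

static inline unsigned char read_user_byte(const unsigned char *uaddr)
{
	unsigned char val;

	pan_clear();	/* open the access window ...              */
	val = *uaddr;	/* ... do the one intended user access ... */
	pan_set();	/* ... and close the window again          */
	return val;
}
```

An 'unprivileged load' such as `ldtrb` needs no such bracketing: it always executes with EL0 permissions, which is why the message notes it is unaffected by PAN.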
Diffstat (limited to 'arch/arm64/lib')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/arm64/lib/clear_user.S | 8 |
| -rw-r--r-- | arch/arm64/lib/copy_from_user.S | 8 |
| -rw-r--r-- | arch/arm64/lib/copy_in_user.S | 8 |
| -rw-r--r-- | arch/arm64/lib/copy_to_user.S | 8 |
4 files changed, 32 insertions, 0 deletions
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index c17967fdf5f6..a9723c71c52b 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -16,7 +16,11 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 	.text
 
@@ -29,6 +33,8 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__clear_user)
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
 	b.mi	2f
@@ -48,6 +54,8 @@ USER(9f, strh	wzr, [x0], #2	)
 	b.mi	5f
 USER(9f, strb	wzr, [x0]	)
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__clear_user)
 
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 47c3fa5ae4ae..1be9ef27be97 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -15,7 +15,11 @@
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy from user space to a kernel buffer (alignment handled by the hardware)
@@ -28,6 +32,8 @@
  *	x0 - bytes not copied
  */
 ENTRY(__copy_from_user)
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	add	x5, x1, x2			// upper user buffer boundary
 	subs	x2, x2, #16
 	b.mi	1f
@@ -56,6 +62,8 @@ USER(9f, ldrh	w3, [x1], #2	)
 USER(9f, ldrb	w3, [x1]	)
 	strb	w3, [x0]
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__copy_from_user)
 
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 436bcc5d77b5..1b94661e22b3 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -17,7 +17,11 @@
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy from user space to user space (alignment handled by the hardware)
@@ -30,6 +34,8 @@
  *	x0 - bytes not copied
  */
 ENTRY(__copy_in_user)
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	add	x5, x0, x2			// upper user buffer boundary
 	subs	x2, x2, #16
 	b.mi	1f
@@ -58,6 +64,8 @@ USER(9f, strh	w3, [x0], #2	)
 USER(9f, ldrb	w3, [x1]	)
 USER(9f, strb	w3, [x0]	)
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__copy_in_user)
 
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index f5e1f526f408..a257b47e2dc4 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -15,7 +15,11 @@
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy to user space from a kernel buffer (alignment handled by the hardware)
@@ -28,6 +32,8 @@
  *	x0 - bytes not copied
  */
 ENTRY(__copy_to_user)
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	add	x5, x0, x2			// upper user buffer boundary
 	subs	x2, x2, #16
 	b.mi	1f
@@ -56,6 +62,8 @@ USER(9f, strh	w3, [x0], #2	)
 	ldrb	w3, [x1]
 USER(9f, strb	w3, [x0]	)
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__copy_to_user)
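
Each routine in the diff is bracketed by ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(n)), ARM64_HAS_PAN, CONFIG_ARM64_PAN): when the CPU reports PAN and CONFIG_ARM64_PAN is enabled, the alternatives framework rewrites the nop into the PSTATE.PAN write at boot; otherwise the nop remains. Below is a hedged C model of the net effect, a sketch only: `cpu_has_pan` and `set_pstate_pan` are hypothetical stand-ins, and the real kernel patches the instruction stream once at boot, so no runtime branch exists.

```c
#include <stdbool.h>

/*
 * Hypothetical stand-in for the ARM64_HAS_PAN capability, which the
 * kernel detects from the ID_AA64MMFR1_EL1.PAN field at boot.
 */
static bool cpu_has_pan;

/*
 * Models ALTERNATIVE("nop", SET_PSTATE_PAN(on), ARM64_HAS_PAN, ...):
 * a no-op without the feature, a PSTATE.PAN write with it.
 * Assumes an ARMv8.1 assembler for "msr pan, #imm".
 */
static inline void set_pstate_pan(int on)
{
	if (!cpu_has_pan)
		return;			/* the "nop" alternative */
	if (on)
		asm volatile("msr pan, #1" ::: "memory");
	else
		asm volatile("msr pan, #0" ::: "memory");
}
```

In this model, __copy_to_user's prologue behaves like set_pstate_pan(0) and its epilogue like set_pstate_pan(1), matching the SET_PSTATE_PAN(0)/SET_PSTATE_PAN(1) pair added at the entry and exit of each routine above.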