author     Christophe Leroy <christophe.leroy@csgroup.eu>   2022-06-28 16:48:54 +0200
committer  Michael Ellerman <mpe@ellerman.id.au>            2022-06-29 17:04:14 +1000
commit     09317643117ade87c03158341e87466413fa8f1a (patch)
tree       0548875fbb113351744d9ed86bf8037408f42b62 /arch/powerpc/mm/nohash
parent     dd8de84b57b02ba9c1fe530a6d916c0853f136bd (diff)
download   linux-09317643117ade87c03158341e87466413fa8f1a.tar.bz2
powerpc/64e: Fix early TLB miss with KUAP
With KUAP, the TLB miss handler bails out when an access to user memory is performed with a null TID.

But the normal TLB miss routine, which is only used early during boot, performs the check for all memory areas, not only user memory.

By chance there is no early IO or vmalloc access, but when KASAN comes in we will start having early TLB misses.

Fix it by creating a special branch for user accesses, similar to the one in the 'bolted' TLB miss handlers. Unfortunately SPRN_MAS1 is now read too early and there are no registers available to preserve it, so it will be read a second time.

Fixes: 57bc963837f5 ("powerpc/kuap: Wire-up KUAP on book3e/64")
Cc: stable@vger.kernel.org
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/8d6c5859a45935d6e1a336da4dc20be421e8cea7.1656427701.git.christophe.leroy@csgroup.eu
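The core of the fix is the TID test that moves into the new normal_tlb_miss_user entry point: KUAP on 64-bit Book3E keeps the TID at zero while user access is locked, so a user-region miss whose SPRN_MAS1 TID field is still zero must be treated as a KUAP fault. A minimal C sketch of that predicate, assuming only the Book3E MAS1 layout (the mask matches the rlwinm. mask in the diff below; the helper name is illustrative, not kernel API):

    #include <stdbool.h>
    #include <stdint.h>

    #define MAS1_TID_MASK 0x3fff0000u  /* TID field of SPRN_MAS1 (Book3E) */

    /* Hypothetical helper: a user-region TLB miss is a KUAP fault when
     * the TID is still zero, i.e. user access was never unlocked. */
    static bool kuap_tlb_miss_fault(uint32_t mas1)
    {
            return (mas1 & MAS1_TID_MASK) == 0;
    }

This is exactly what the added rlwinm./beq- pair computes: rlwinm. with mask 0x3fff0000 ANDs MAS1 with the TID mask and sets CR0, and beq- takes the fault path when the result is zero.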
Diffstat (limited to 'arch/powerpc/mm/nohash')
-rw-r--r--  arch/powerpc/mm/nohash/tlb_low_64e.S | 17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S
index 8b97c4acfebf..9e9ab3803fb2 100644
--- a/arch/powerpc/mm/nohash/tlb_low_64e.S
+++ b/arch/powerpc/mm/nohash/tlb_low_64e.S
@@ -583,7 +583,7 @@ itlb_miss_fault_e6500:
 	 */
 	rlwimi	r11,r14,32-19,27,27
 	rlwimi	r11,r14,32-16,19,19
-	beq	normal_tlb_miss
+	beq	normal_tlb_miss_user
 	/* XXX replace the RMW cycles with immediate loads + writes */
 1:	mfspr	r10,SPRN_MAS1
 	cmpldi	cr0,r15,8		/* Check for vmalloc region */
@@ -626,7 +626,7 @@ itlb_miss_fault_e6500:
 
 	cmpldi	cr0,r15,0		/* Check for user region */
 	std	r14,EX_TLB_ESR(r12)	/* write crazy -1 to frame */
-	beq	normal_tlb_miss
+	beq	normal_tlb_miss_user
 
 	li	r11,_PAGE_PRESENT|_PAGE_BAP_SX	/* Base perm */
 	oris	r11,r11,_PAGE_ACCESSED@h
@@ -653,6 +653,12 @@ itlb_miss_fault_e6500:
  * r11 = PTE permission mask
  * r10 = crap (free to use)
  */
+normal_tlb_miss_user:
+#ifdef CONFIG_PPC_KUAP
+	mfspr	r14,SPRN_MAS1
+	rlwinm.	r14,r14,0,0x3fff0000
+	beq-	normal_tlb_miss_access_fault /* KUAP fault */
+#endif
 normal_tlb_miss:
 	/* So we first construct the page table address. We do that by
 	 * shifting the bottom of the address (not the region ID) by
@@ -683,11 +689,6 @@ finish_normal_tlb_miss:
 	/* Check if required permissions are met */
 	andc.	r15,r11,r14
 	bne-	normal_tlb_miss_access_fault
-#ifdef CONFIG_PPC_KUAP
-	mfspr	r11,SPRN_MAS1
-	rlwinm.	r10,r11,0,0x3fff0000
-	beq-	normal_tlb_miss_access_fault /* KUAP fault */
-#endif
 
 	/* Now we build the MAS:
 	 *
@@ -709,9 +710,7 @@ finish_normal_tlb_miss:
 	rldicl	r10,r14,64-8,64-8
 	cmpldi	cr0,r10,BOOK3E_PAGESZ_4K
 	beq-	1f
-#ifndef CONFIG_PPC_KUAP
 	mfspr	r11,SPRN_MAS1
-#endif
 	rlwimi	r11,r14,31,21,24
 	rlwinm	r11,r11,0,21,19
 	mtspr	SPRN_MAS1,r11
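To trace the new control flow end to end, here is a hedged C sketch; the function names simply mirror the assembly labels above and stand in for assembly code, they are not kernel API:

    #include <stdint.h>

    #define MAS1_TID_MASK 0x3fff0000u  /* TID field of SPRN_MAS1 */

    /* Stubs standing in for the handler paths (illustrative only). */
    extern uint32_t read_mas1(void);   /* mfspr rX,SPRN_MAS1 */
    extern void normal_tlb_miss_access_fault(void);
    extern void normal_tlb_miss(void); /* common path; re-reads MAS1 */

    /* New entry point for user accesses: do the KUAP check first,
     * then fall through to the common handler. */
    static void normal_tlb_miss_user(void)
    {
    #ifdef CONFIG_PPC_KUAP
            /* First MAS1 read: no spare register survives to carry it,
             * so the common path must read SPRN_MAS1 a second time. */
            if ((read_mas1() & MAS1_TID_MASK) == 0) {
                    normal_tlb_miss_access_fault();  /* KUAP fault */
                    return;
            }
    #endif
            normal_tlb_miss();
    }

This double read is also why the last hunk drops the #ifndef CONFIG_PPC_KUAP guard around the later mfspr r11,SPRN_MAS1: with the KUAP check done up front, MAS1 must now be re-read unconditionally on that path.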