author     Dave Airlie <airlied@redhat.com>  2020-08-11 11:58:31 +1000
committer  Dave Airlie <airlied@redhat.com>  2020-08-11 11:58:31 +1000
commit     c44264f9f729fd63bd6a81a6ac5cd6cd49af09e5 (patch)
tree       ad77b18ffeafb50b3eb9ba6472670dc1d96f5558 /arch/powerpc/mm/book3s64/hash_utils.c
parent     ca457ab5908603b36be903e73977afde1ba03c84 (diff)
parent     bcf876870b95592b52519ed4aafcf9d95999bc9c (diff)
download   linux-c44264f9f729fd63bd6a81a6ac5cd6cd49af09e5.tar.bz2
Merge tag 'v5.8' into drm-next
I need to backmerge 5.8 as I've got a bunch of fixes sitting on an rc7 base that I want to land.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'arch/powerpc/mm/book3s64/hash_utils.c')
-rw-r--r--  arch/powerpc/mm/book3s64/hash_utils.c | 25
1 file changed, 25 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 468169e33c86..9b9f92ad0e7a 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1559,6 +1559,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
 	pgd_t *pgdir;
 	int rc, ssize, update_flags = 0;
 	unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);
+	unsigned long flags;
 
 	BUG_ON(get_region_id(ea) != USER_REGION_ID);
@@ -1592,6 +1593,28 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
 		return;
 #endif /* CONFIG_PPC_64K_PAGES */
 
+	/*
+	 * __hash_page_* must run with interrupts off, as it sets the
+	 * H_PAGE_BUSY bit. It's possible for perf interrupts to hit at any
+	 * time, and they may take a hash fault reading the user stack; see
+	 * read_user_stack_slow() in the powerpc/perf code.
+	 *
+	 * If that takes a hash fault on the same page as we lock here, it
+	 * will bail out when seeing H_PAGE_BUSY set, and retry the access
+	 * leading to an infinite loop.
+	 *
+	 * Disabling interrupts here does not prevent perf interrupts, but it
+	 * will prevent them taking hash faults (see the NMI test in
+	 * do_hash_page), then read_user_stack's copy_from_user_nofault will
+	 * fail and perf will fall back to read_user_stack_slow(), which
+	 * walks the Linux page tables.
+	 *
+	 * Interrupts must also be off for the duration of the
+	 * mm_is_thread_local test and update, to prevent preempt running the
+	 * mm on another CPU (XXX: this may be racy vs kthread_use_mm).
+	 */
+	local_irq_save(flags);
+
 	/* Is that local to this CPU ? */
 	if (mm_is_thread_local(mm))
 		update_flags |= HPTE_LOCAL_UPDATE;
@@ -1614,6 +1637,8 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
 				   mm_ctx_user_psize(&mm->context),
 				   mm_ctx_user_psize(&mm->context),
 				   pte_val(*ptep));
+
+	local_irq_restore(flags);
 }
 
 /*
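
The livelock that the new comment describes can be modeled in ordinary userspace C. In the sketch below, a flag stands in for H_PAGE_BUSY, SIGALRM stands in for the perf interrupt, and sigprocmask() plays the role of local_irq_save()/local_irq_restore(). This is an illustrative sketch of the pattern only, not kernel code; every demo_* name is invented for the example.

#include <signal.h>
#include <stdio.h>

#define DEMO_BUSY 0x1

static volatile sig_atomic_t demo_pte;	/* stands in for the PTE word */

/*
 * Stands in for the perf handler's hash-fault path: bail out and
 * retry while the busy bit is set. If it is delivered between the
 * set and the clear below, it spins forever, because the interrupted
 * updater can never run again to clear the bit.
 */
static void demo_perf_handler(int sig)
{
	(void)sig;
	while (demo_pte & DEMO_BUSY)
		;	/* the infinite retry loop from the comment */
}

int main(void)
{
	sigset_t mask, old;

	signal(SIGALRM, demo_perf_handler);
	sigemptyset(&mask);
	sigaddset(&mask, SIGALRM);

	/* Analogue of local_irq_save(flags): hold off the "interrupt". */
	sigprocmask(SIG_BLOCK, &mask, &old);

	demo_pte |= DEMO_BUSY;	/* __hash_page_* sets H_PAGE_BUSY...         */
	raise(SIGALRM);		/* ..."perf" fires mid-update, stays pending */
	demo_pte &= ~DEMO_BUSY;	/* ...and the busy bit is cleared first      */

	/* Analogue of local_irq_restore(flags): the pending signal is
	 * delivered here, after DEMO_BUSY is gone, so the handler
	 * returns immediately instead of spinning. */
	sigprocmask(SIG_SETMASK, &old, NULL);

	puts("no livelock: handler ran after the busy bit was cleared");
	return 0;
}

Remove the two sigprocmask() calls and raise() delivers the signal while DEMO_BUSY is still set: the handler spins forever, which is exactly the retry loop the patch closes off.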