author		Alexander Potapenko <glider@google.com>	2020-03-12 16:59:20 +0100
committer	Catalin Marinas <catalin.marinas@arm.com>	2020-03-17 18:36:57 +0000
commit		c17a290f7e7e59d24b4507736b7b40b0eb5f8f1f (patch)
tree		f140da9800e59eaca827dfd79efba4c1dc65b712 /arch/arm64/include/asm/page.h
parent		d22b115cbfbb7e4a938f9eb6ea77da9ecac3df5a (diff)
arm64: define __alloc_zeroed_user_highpage
When running the kernel with init_on_alloc=1, calling the default
implementation of __alloc_zeroed_user_highpage() from
include/linux/highmem.h leads to double initialization of the allocated
page (first by the page allocator, then by clear_user_page()). Calling
alloc_page_vma() with __GFP_ZERO, as e.g. x86 does, is enough to ensure
the user page is zeroed only once.

Signed-off-by: Alexander Potapenko <glider@google.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
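For context, a minimal sketch of the generic fallback in include/linux/highmem.h
around that time, reconstructed from the description above (exact wording and
formatting are approximate): the page is allocated without __GFP_ZERO and then
cleared explicitly, which is the second initialization once init_on_alloc=1 has
already zeroed it in the page allocator.

/* Sketch of the generic fallback, used only when the architecture does not
 * define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE; details approximate. */
#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			     struct vm_area_struct *vma,
			     unsigned long vaddr)
{
	/* First pass: with init_on_alloc=1 the page allocator already
	 * zeroes the page here. */
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
					   vma, vaddr);

	/* Second pass: the freshly allocated page is cleared again. */
	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif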
Diffstat (limited to 'arch/arm64/include/asm/page.h')
-rw-r--r--	arch/arm64/include/asm/page.h	4
1 file changed, 4 insertions, 0 deletions
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index d39ddb258a04..75d6cd23a679 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -21,6 +21,10 @@ extern void __cpu_copy_user_page(void *to, const void *from,
 extern void copy_page(void *to, const void *from);
 extern void clear_page(void *to);
 
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
 #define clear_user_page(addr,vaddr,pg)	__cpu_clear_user_page(addr, vaddr)
 #define copy_user_page(to,from,vaddr,pg)	__cpu_copy_user_page(to, from, vaddr)
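A hedged sketch of how the new arch macro is reached: in the generic headers of
that era, anonymous-fault handling goes through alloc_zeroed_user_highpage_movable(),
so the call path below is an approximation based on that (names outside this
patch are quoted from memory, not from this diff).

/* In include/linux/highmem.h (approximate): anonymous page faults ask for a
 * zeroed, movable user page through this wrapper ... */
#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
	__alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr)

/* ... so on arm64, after this patch, do_anonymous_page() in mm/memory.c ends
 * up performing a single
 *	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | __GFP_MOVABLE, vma, vaddr)
 * instead of alloc_page_vma() followed by clear_user_highpage(). */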