author	Andrei Vagin <avagin@gmail.com>	2020-06-24 01:33:16 -0700
committer	Catalin Marinas <catalin.marinas@arm.com>	2020-07-24 13:15:20 +0100
commit	d53b5c013e1e7c3b43a7487171a84ee2acdd9597 (patch)
tree	c24e33b952d838bad3ae301104d5fce0e6c5b3d8 /arch
parent	9ebcfadb0610322ac537dd7aa5d9cbc2b2894c68 (diff)
arm64/vdso: use the fault callback to map vvar pages
Currently the vdso has no awareness of time namespaces, which may apply
distinct offsets to processes in different namespaces. To handle this
within the vdso, we'll need to expose a per-namespace data page.

As a preparatory step, this patch separates the vdso data page from the
code pages, and has it faulted in via its own fault callback. Subsequent
patches will extend this to support distinct pages per time namespace.

The vvar vma has to be installed with the VM_PFNMAP flag to handle faults
via its vma fault callback.

Signed-off-by: Andrei Vagin <avagin@gmail.com>
Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Dmitry Safonov <dima@arista.com>
Link: https://lore.kernel.org/r/20200624083321.144975-2-avagin@gmail.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
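[Editor's note] For readers unfamiliar with the mechanism: a struct
vm_special_mapping can back its vma either with a static pages array or
with a .fault callback that resolves pages lazily when userspace first
touches them; this patch moves the vvar page from the former model to
the latter. Below is a minimal sketch of the pattern, assuming arm64
kernel context; the demo_* names are illustrative stand-ins, not
identifiers from the patch.

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <asm/memory.h>		/* sym_to_pfn() */

/* Stand-in for the real vdso_data page (illustrative only). */
static char demo_page[PAGE_SIZE] __page_aligned_data;

static vm_fault_t demo_vvar_fault(const struct vm_special_mapping *sm,
				  struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	/* Only page 0 of the mapping is backed; any other offset faults. */
	if (vmf->pgoff == 0)
		return vmf_insert_pfn(vma, vmf->address,
				      sym_to_pfn(demo_page));
	return VM_FAULT_SIGBUS;
}

static struct vm_special_mapping demo_vvar_map = {
	.name  = "[vvar]",
	.fault = demo_vvar_fault,
};

/*
 * The vma must carry VM_PFNMAP (as the patch adds below) so the core MM
 * treats it as having no struct-page backing and delivers faults to the
 * callback instead of a pagelist.
 */
static int demo_setup(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_PFNMAP,
				       &demo_vvar_map);
	return PTR_ERR_OR_ZERO(vma);
}

Because the data page is now resolved at fault time, __vdso_init() no
longer needs the extra slot in its pagelist, which is exactly what the
first hunk below removes.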
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/kernel/vdso.c	25
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index e546df0efefb..eb7798e5eb00 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -107,29 +107,32 @@ static int __vdso_init(enum vdso_abi abi)
 			     vdso_info[abi].vdso_code_start) >>
 			    PAGE_SHIFT;
 
-	/* Allocate the vDSO pagelist, plus a page for the data. */
-	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages + 1,
+	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
 				sizeof(struct page *),
 				GFP_KERNEL);
 	if (vdso_pagelist == NULL)
 		return -ENOMEM;
 
-	/* Grab the vDSO data page. */
-	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
-
-
 	/* Grab the vDSO code pages. */
 	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);
 
 	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
-		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
+		vdso_pagelist[i] = pfn_to_page(pfn + i);
 
-	vdso_info[abi].dm->pages = &vdso_pagelist[0];
-	vdso_info[abi].cm->pages = &vdso_pagelist[1];
+	vdso_info[abi].cm->pages = vdso_pagelist;
 
 	return 0;
 }
 
+static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
+			     struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	if (vmf->pgoff == 0)
+		return vmf_insert_pfn(vma, vmf->address,
+				      sym_to_pfn(vdso_data));
+	return VM_FAULT_SIGBUS;
+}
+
 static int __setup_additional_pages(enum vdso_abi abi,
 				    struct mm_struct *mm,
 				    struct linux_binprm *bprm,
@@ -150,7 +153,7 @@ static int __setup_additional_pages(enum vdso_abi abi,
 	}
 
 	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
-				       VM_READ|VM_MAYREAD,
+				       VM_READ|VM_MAYREAD|VM_PFNMAP,
 				       vdso_info[abi].dm);
 	if (IS_ERR(ret))
 		goto up_fail;
@@ -206,6 +209,7 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
 #ifdef CONFIG_COMPAT_VDSO
 	[AA32_MAP_VVAR] = {
 		.name = "[vvar]",
+		.fault = vvar_fault,
 	},
 	[AA32_MAP_VDSO] = {
 		.name = "[vdso]",
@@ -371,6 +375,7 @@ enum aarch64_map {
 static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
 	[AA64_MAP_VVAR] = {
 		.name = "[vvar]",
+		.fault = vvar_fault,
 	},
 	[AA64_MAP_VDSO] = {
 		.name = "[vdso]",