author | Heiko Carstens <hca@linux.ibm.com> | 2021-01-24 22:01:16 +0100 |
---|---|---|
committer | Vasily Gorbik <gor@linux.ibm.com> | 2021-02-09 15:57:05 +0100 |
commit | 5056c2c53a22a61facb1a551bf736df9b06e513a (patch) | |
tree | bb69328400d260e9f34c2e624cdf71d782566ad4 /arch/s390/kernel | |
parent | dfc11c98763aed6b2fa17d5d23f28a429ab9877b (diff) | |
s390/vdso: put vdso datapage in a separate vma
Add a separate "[vvar]" mapping for the vdso datapage, since it
doesn't need to be executable or COW-able.
This is actually the s390 implementation of commit 871549385278
("arm64: vdso: put vdso datapage in a separate vma")
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
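For illustration only (not part of this commit): with the patch applied, a 64-bit s390 process gets two separate special mappings, an executable "[vdso]" text mapping and a read-only "[vvar]" data mapping; before the patch the data page was simply the last page of "[vdso]". A minimal userspace sketch that lists both entries from /proc/self/maps:

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/self/maps", "r");
	char line[512];

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* print only the vdso text mapping and the vvar data mapping */
	while (fgets(line, sizeof(line), f)) {
		if (strstr(line, "[vdso]") || strstr(line, "[vvar]"))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}
```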
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r-- | arch/s390/kernel/vdso.c | 55 |
1 file changed, 35 insertions(+), 20 deletions(-)
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 86e7a3921348..968b263f64b4 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -40,6 +40,14 @@ static int __init vdso_setup(char *str)
 }
 __setup("vdso=", vdso_setup);
 
+static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
+			     struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	if (vmf->pgoff == 0)
+		return vmf_insert_pfn(vma, vmf->address, virt_to_pfn(vdso_data));
+	return VM_FAULT_SIGBUS;
+}
+
 static int vdso_mremap(const struct vm_special_mapping *sm,
 		       struct vm_area_struct *vma)
 {
@@ -47,6 +55,11 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
 	return 0;
 }
 
+static struct vm_special_mapping vvar_mapping = {
+	.name = "[vvar]",
+	.fault = vvar_fault,
+};
+
 static struct vm_special_mapping vdso_mapping = {
 	.name = "[vdso]",
 	.mremap = vdso_mremap,
@@ -61,38 +74,41 @@ early_initcall(vdso_getcpu_init); /* Must be called before SMP init */
 
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
+	unsigned long addr, vdso_text_start, vdso_text_len, vdso_mapping_len;
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long vdso_base;
 	int rc;
 
 	if (!vdso_enabled || is_compat_task())
 		return 0;
 	if (mmap_write_lock_killable(mm))
 		return -EINTR;
-	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
-	rc = vdso_base;
-	if (IS_ERR_VALUE(vdso_base))
+	vdso_text_len = vdso_pages << PAGE_SHIFT;
+	vdso_mapping_len = vdso_text_len + PAGE_SIZE;
+	addr = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
+	rc = addr;
+	if (IS_ERR_VALUE(addr))
 		goto out;
-	/*
-	 * our vma flags don't have VM_WRITE so by default, the process
-	 * isn't allowed to write those pages.
-	 * gdb can break that with ptrace interface, and thus trigger COW
-	 * on those pages but it's then your responsibility to never do that
-	 * on the "data" page of the vDSO or you'll stop getting kernel
-	 * updates and your nice userland gettimeofday will be totally dead.
-	 * It's fine to use that for setting breakpoints in the vDSO code
-	 * pages though.
-	 */
-	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+	/* VM_MAYWRITE for COW so gdb can set breakpoints */
+	vdso_text_start = addr;
+	vma = _install_special_mapping(mm, addr, vdso_text_len,
 				       VM_READ|VM_EXEC|
 				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 				       &vdso_mapping);
 	rc = PTR_ERR(vma);
 	if (IS_ERR(vma))
 		goto out;
-	current->mm->context.vdso_base = vdso_base;
-	rc = 0;
+	addr += vdso_text_len;
+	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
+				       VM_READ|VM_MAYREAD|VM_PFNMAP,
+				       &vvar_mapping);
+	if (IS_ERR(vma)) {
+		do_munmap(mm, vdso_text_start, vdso_text_len, NULL);
+		rc = PTR_ERR(vma);
+	} else {
+		current->mm->context.vdso_base = vdso_text_start;
+		rc = 0;
+	}
 out:
 	mmap_write_unlock(mm);
 	return rc;
@@ -103,15 +119,14 @@ static int __init vdso_init(void)
 	struct page **pages;
 	int i;
 
-	vdso_pages = ((vdso64_end - vdso64_start) >> PAGE_SHIFT) + 1;
+	vdso_pages = (vdso64_end - vdso64_start) >> PAGE_SHIFT;
 	pages = kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
 	if (!pages) {
 		vdso_enabled = 0;
 		return -ENOMEM;
 	}
-	for (i = 0; i < vdso_pages - 1; i++)
+	for (i = 0; i < vdso_pages; i++)
 		pages[i] = virt_to_page(vdso64_start + i * PAGE_SIZE);
-	pages[vdso_pages - 1] = virt_to_page(vdso_data);
 	pages[vdso_pages] = NULL;
 	vdso_mapping.pages = pages;
 	return 0;
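A further illustration (again not part of the patch): the new "[vvar]" vma is installed with VM_READ|VM_MAYREAD|VM_PFNMAP but without VM_MAYWRITE, so userspace cannot even ask for write permission on it, and the data page can never be COW-ed away from the page the kernel keeps updating. A minimal sketch of that behaviour, assuming the standard /proc/self/maps format:

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	FILE *f = fopen("/proc/self/maps", "r");
	unsigned long start, end;
	char line[512];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (!strstr(line, "[vvar]"))
			continue;
		if (sscanf(line, "%lx-%lx", &start, &end) != 2)
			continue;
		/* the vma lacks VM_MAYWRITE, so asking for PROT_WRITE must be refused */
		if (mprotect((void *)start, end - start, PROT_READ | PROT_WRITE) < 0)
			printf("mprotect([vvar], PROT_WRITE) failed as expected: %s\n",
			       strerror(errno));
		else
			printf("mprotect([vvar], PROT_WRITE) unexpectedly succeeded\n");
	}
	fclose(f);
	return 0;
}
```

In contrast, the "[vdso]" text mapping keeps VM_MAYWRITE, so gdb can still place breakpoints there via ptrace-triggered COW, which is what the shortened comment in the patch refers to.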