From 84b2789d61156db0224724806b20110c0d34b07c Mon Sep 17 00:00:00 2001
From: Johannes Berg
Date: Wed, 13 Jan 2021 22:09:42 +0100
Subject: um: separate child and parent errors in clone stub

If the two are mixed up, then it looks as though the parent returned an
error if the child failed (before the mmap()), and then the resulting
process never gets killed. Fix this by splitting the child and parent
errors, reporting and using them appropriately.

Signed-off-by: Johannes Berg
Signed-off-by: Richard Weinberger
---
 arch/x86/um/shared/sysdep/stub_32.h | 2 +-
 arch/x86/um/shared/sysdep/stub_64.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/um/shared/sysdep/stub_32.h b/arch/x86/um/shared/sysdep/stub_32.h
index 51fd256c75f0..8ea69211e53c 100644
--- a/arch/x86/um/shared/sysdep/stub_32.h
+++ b/arch/x86/um/shared/sysdep/stub_32.h
@@ -86,7 +86,7 @@ static inline void remap_stack(int fd, unsigned long offset)
 			  "d" (PROT_READ | PROT_WRITE),
 			  "S" (MAP_FIXED | MAP_SHARED), "D" (fd),
 			  "a" (offset),
-			  "i" (&((struct stub_data *) STUB_DATA)->err)
+			  "i" (&((struct stub_data *) STUB_DATA)->child_err)
 			  : "memory");
 }
 
diff --git a/arch/x86/um/shared/sysdep/stub_64.h b/arch/x86/um/shared/sysdep/stub_64.h
index 994df93c5ed3..b7b8b8e4359d 100644
--- a/arch/x86/um/shared/sysdep/stub_64.h
+++ b/arch/x86/um/shared/sysdep/stub_64.h
@@ -92,7 +92,7 @@ static inline void remap_stack(long fd, unsigned long offset)
 		"d" (PROT_READ | PROT_WRITE),
 		"g" (MAP_FIXED | MAP_SHARED), "g" (fd),
 		"g" (offset),
-		"i" (&((struct stub_data *) STUB_DATA)->err)
+		"i" (&((struct stub_data *) STUB_DATA)->child_err)
 		: __syscall_clobber, "r10", "r8", "r9" );
 }
 
--
cgit v1.2.3

From 9f0b4807a44ff81cf59421c8a86641efec586610 Mon Sep 17 00:00:00 2001
From: Johannes Berg
Date: Wed, 13 Jan 2021 22:09:43 +0100
Subject: um: rework userspace stubs to not hard-code stub location

The userspace stubs mostly have a stack (and in the case of the syscall
stub we can just set their stack pointer) that points to the location of
the stub data page already. Rework the stubs to use the stack pointer to
derive the start of the data page, rather than requiring it to be
hard-coded.

In the clone stub, also integrate the int3 into the stack remap, since
we really must not use the stack while we remap it.

This prepares for putting the stub at a variable location that's not
part of the normal address space of the userspace processes running
inside the UML machine.

Signed-off-by: Johannes Berg
Signed-off-by: Richard Weinberger
---
 arch/um/include/shared/as-layout.h      | 16 ++++------------
 arch/um/include/shared/common-offsets.h |  6 ++++++
 arch/um/kernel/skas/clone.c             |  3 +--
 arch/um/os-Linux/skas/mem.c             |  2 ++
 arch/x86/um/shared/sysdep/stub_32.h     | 33 ++++++++++++++++++++----------
 arch/x86/um/shared/sysdep/stub_64.h     | 36 ++++++++++++++++++++-----------
 arch/x86/um/stub_32.S                   | 17 ++++++++++------
 arch/x86/um/stub_64.S                   |  5 ++---
 arch/x86/um/stub_segv.c                 |  5 +++--
 9 files changed, 75 insertions(+), 48 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/um/include/shared/as-layout.h b/arch/um/include/shared/as-layout.h
index 5f286ef2721b..56408bf3480d 100644
--- a/arch/um/include/shared/as-layout.h
+++ b/arch/um/include/shared/as-layout.h
@@ -20,18 +20,10 @@
  * 'UL' and other type specifiers unilaterally.  We
  * use the following macros to deal with this.
  */
-
-#ifdef __ASSEMBLY__
-#define _UML_AC(X, Y) (Y)
-#else
-#define __UML_AC(X, Y) (X(Y))
-#define _UML_AC(X, Y) __UML_AC(X, Y)
-#endif
-
-#define STUB_START _UML_AC(, 0x100000)
-#define STUB_CODE _UML_AC((unsigned long), STUB_START)
-#define STUB_DATA _UML_AC((unsigned long), STUB_CODE + UM_KERN_PAGE_SIZE)
-#define STUB_END _UML_AC((unsigned long), STUB_DATA + UM_KERN_PAGE_SIZE)
+#define STUB_START 0x100000UL
+#define STUB_CODE STUB_START
+#define STUB_DATA (STUB_CODE + UM_KERN_PAGE_SIZE)
+#define STUB_END (STUB_DATA + UM_KERN_PAGE_SIZE)
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/um/include/shared/common-offsets.h b/arch/um/include/shared/common-offsets.h
index 16a51a8c800f..edc90ab73734 100644
--- a/arch/um/include/shared/common-offsets.h
+++ b/arch/um/include/shared/common-offsets.h
@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /* for use by sys-$SUBARCH/kernel-offsets.c */
+#include
 
 DEFINE(KERNEL_MADV_REMOVE, MADV_REMOVE);
 
@@ -43,3 +44,8 @@ DEFINE(UML_CONFIG_64BIT, CONFIG_64BIT);
 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
 DEFINE(UML_CONFIG_UML_TIME_TRAVEL_SUPPORT, CONFIG_UML_TIME_TRAVEL_SUPPORT);
 #endif
+
+/* for stub */
+DEFINE(UML_STUB_FIELD_OFFSET, offsetof(struct stub_data, offset));
+DEFINE(UML_STUB_FIELD_CHILD_ERR, offsetof(struct stub_data, child_err));
+DEFINE(UML_STUB_FIELD_FD, offsetof(struct stub_data, fd));
diff --git a/arch/um/kernel/skas/clone.c b/arch/um/kernel/skas/clone.c
index 7c592c788cbf..592cdb138441 100644
--- a/arch/um/kernel/skas/clone.c
+++ b/arch/um/kernel/skas/clone.c
@@ -41,8 +41,7 @@ stub_clone_handler(void)
 		goto done;
 	}
 
-	remap_stack(data->fd, data->offset);
-	goto done;
+	remap_stack_and_trap();
 
 done:
 	trap_myself();
diff --git a/arch/um/os-Linux/skas/mem.c b/arch/um/os-Linux/skas/mem.c
index c546d16f8dfe..3b4975ee67e2 100644
--- a/arch/um/os-Linux/skas/mem.c
+++ b/arch/um/os-Linux/skas/mem.c
@@ -40,6 +40,8 @@ static int __init init_syscall_regs(void)
 	syscall_regs[REGS_IP_INDEX] = STUB_CODE +
 		((unsigned long) batch_syscall_stub -
 		 (unsigned long) __syscall_stub_start);
+	syscall_regs[REGS_SP_INDEX] = STUB_DATA;
+
 	return 0;
 }
 
diff --git a/arch/x86/um/shared/sysdep/stub_32.h b/arch/x86/um/shared/sysdep/stub_32.h
index 8ea69211e53c..c3891c1ada26 100644
--- a/arch/x86/um/shared/sysdep/stub_32.h
+++ b/arch/x86/um/shared/sysdep/stub_32.h
@@ -7,8 +7,8 @@
 #define __SYSDEP_STUB_H
 
 #include
+#include
 
-#define STUB_SYSCALL_RET EAX
 #define STUB_MMAP_NR __NR_mmap2
 #define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT)
 
@@ -77,17 +77,28 @@ static inline void trap_myself(void)
 	__asm("int3");
 }
 
-static inline void remap_stack(int fd, unsigned long offset)
+static void inline remap_stack_and_trap(void)
 {
-	__asm__ volatile ("movl %%eax,%%ebp ; movl %0,%%eax ; int $0x80 ;"
-			  "movl %7, %%ebx ; movl %%eax, (%%ebx)"
-			  : : "g" (STUB_MMAP_NR), "b" (STUB_DATA),
-			      "c" (UM_KERN_PAGE_SIZE),
-			      "d" (PROT_READ | PROT_WRITE),
-			      "S" (MAP_FIXED | MAP_SHARED), "D" (fd),
-			      "a" (offset),
-			      "i" (&((struct stub_data *) STUB_DATA)->child_err)
-			  : "memory");
+	__asm__ volatile (
+		"movl %%esp,%%ebx ;"
+		"andl %0,%%ebx ;"
+		"movl %1,%%eax ;"
+		"movl %%ebx,%%edi ; addl %2,%%edi ; movl (%%edi),%%edi ;"
+		"movl %%ebx,%%ebp ; addl %3,%%ebp ; movl (%%ebp),%%ebp ;"
+		"int $0x80 ;"
+		"addl %4,%%ebx ; movl %%eax, (%%ebx) ;"
+		"int $3"
+		: :
+		"g" (~(UM_KERN_PAGE_SIZE - 1)),
+		"g" (STUB_MMAP_NR),
+		"g" (UML_STUB_FIELD_FD),
+		"g" (UML_STUB_FIELD_OFFSET),
+		"g" (UML_STUB_FIELD_CHILD_ERR),
+		"c" (UM_KERN_PAGE_SIZE),
+		"d" (PROT_READ | PROT_WRITE),
+		"S" (MAP_FIXED | MAP_SHARED)
+		:
+		"memory");
 }
 
 #endif
diff --git a/arch/x86/um/shared/sysdep/stub_64.h b/arch/x86/um/shared/sysdep/stub_64.h
index b7b8b8e4359d..6e2626b77a2e 100644
--- a/arch/x86/um/shared/sysdep/stub_64.h
+++ b/arch/x86/um/shared/sysdep/stub_64.h
@@ -7,8 +7,8 @@
 #define __SYSDEP_STUB_H
 
 #include
+#include
 
-#define STUB_SYSCALL_RET PT_INDEX(RAX)
 #define STUB_MMAP_NR __NR_mmap
 #define MMAP_OFFSET(o) (o)
 
@@ -82,18 +82,30 @@ static inline void trap_myself(void)
 	__asm("int3");
 }
 
-static inline void remap_stack(long fd, unsigned long offset)
+static inline void remap_stack_and_trap(void)
 {
-	__asm__ volatile ("movq %4,%%r10 ; movq %5,%%r8 ; "
-		"movq %6, %%r9; " __syscall "; movq %7, %%rbx ; "
-		"movq %%rax, (%%rbx)":
-		: "a" (STUB_MMAP_NR), "D" (STUB_DATA),
-		  "S" (UM_KERN_PAGE_SIZE),
-		  "d" (PROT_READ | PROT_WRITE),
-		  "g" (MAP_FIXED | MAP_SHARED), "g" (fd),
-		  "g" (offset),
-		  "i" (&((struct stub_data *) STUB_DATA)->child_err)
-		: __syscall_clobber, "r10", "r8", "r9" );
+	__asm__ volatile (
+		"movq %0,%%rax ;"
+		"movq %%rsp,%%rdi ;"
+		"andq %1,%%rdi ;"
+		"movq %2,%%r10 ;"
+		"movq %%rdi,%%r8 ; addq %3,%%r8 ; movq (%%r8),%%r8 ;"
+		"movq %%rdi,%%r9 ; addq %4,%%r9 ; movq (%%r9),%%r9 ;"
+		__syscall ";"
+		"movq %%rsp,%%rdi ; andq %1,%%rdi ;"
+		"addq %5,%%rdi ; movq %%rax, (%%rdi) ;"
+		"int3"
+		: :
+		"g" (STUB_MMAP_NR),
+		"g" (~(UM_KERN_PAGE_SIZE - 1)),
+		"g" (MAP_FIXED | MAP_SHARED),
+		"g" (UML_STUB_FIELD_FD),
+		"g" (UML_STUB_FIELD_OFFSET),
+		"g" (UML_STUB_FIELD_CHILD_ERR),
+		"S" (UM_KERN_PAGE_SIZE),
+		"d" (PROT_READ | PROT_WRITE)
+		:
+		__syscall_clobber, "r10", "r8", "r9");
 }
 
 #endif
diff --git a/arch/x86/um/stub_32.S b/arch/x86/um/stub_32.S
index a193e88536a9..8291899e6aaf 100644
--- a/arch/x86/um/stub_32.S
+++ b/arch/x86/um/stub_32.S
@@ -5,21 +5,22 @@
 .globl batch_syscall_stub
 batch_syscall_stub:
-	/* load pointer to first operation */
-	mov	$(STUB_DATA+8), %esp
-
+	/* %esp comes in as "top of page" */
+	mov	%esp, %ecx
+	/* %esp has pointer to first operation */
+	add	$8, %esp
 again:
 	/* load length of additional data */
 	mov	0x0(%esp), %eax
 
 	/* if(length == 0) : end of list */
 	/* write possible 0 to header */
-	mov	%eax, STUB_DATA+4
+	mov	%eax, 0x4(%ecx)
 	cmpl	$0, %eax
 	jz	done
 
 	/* save current pointer */
-	mov	%esp, STUB_DATA+4
+	mov	%esp, 0x4(%ecx)
 
 	/* skip additional data */
 	add	%eax, %esp
 
@@ -38,6 +39,10 @@ again:
 	/* execute syscall */
 	int	$0x80
 
+	/* restore top of page pointer in %ecx */
+	mov	%esp, %ecx
+	andl	$(~UM_KERN_PAGE_SIZE) + 1, %ecx
+
 	/* check return value */
 	pop	%ebx
 	cmp	%ebx, %eax
@@ -45,7 +50,7 @@ again:
 
 done:
 	/* save return value */
-	mov	%eax, STUB_DATA
+	mov	%eax, (%ecx)
 
 	/* stop */
 	int3
diff --git a/arch/x86/um/stub_64.S b/arch/x86/um/stub_64.S
index 8a95c5b2eaf9..f3404640197a 100644
--- a/arch/x86/um/stub_64.S
+++ b/arch/x86/um/stub_64.S
@@ -4,9 +4,8 @@
 .section .__syscall_stub, "ax"
 .globl batch_syscall_stub
 batch_syscall_stub:
-	mov	$(STUB_DATA), %rbx
-	/* load pointer to first operation */
-	mov	%rbx, %rsp
+	/* %rsp has the pointer to first operation */
+	mov	%rsp, %rbx
 	add	$0x10, %rsp
 again:
 	/* load length of additional data */
diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c
index 27361cbb7ca9..21836eaf1725 100644
--- a/arch/x86/um/stub_segv.c
+++ b/arch/x86/um/stub_segv.c
@@ -11,10 +11,11 @@ void __attribute__ ((__section__ (".__syscall_stub")))
 stub_segv_handler(int sig, siginfo_t *info, void *p)
 {
+	int stack;
 	ucontext_t *uc = p;
+	struct faultinfo *f = (void *)(((unsigned long)&stack) & ~(UM_KERN_PAGE_SIZE - 1));
 
-	GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA),
-			      &uc->uc_mcontext);
+	GET_FAULTINFO_FROM_MC(*f, &uc->uc_mcontext);
 	trap_myself();
 }
 
--
cgit v1.2.3

From bfc58e2b98e99737409cd9f4d86a79677c5b887c Mon Sep 17 00:00:00 2001
From: Johannes Berg
Date: Wed, 13 Jan 2021 22:09:44 +0100
Subject: um: remove process stub VMA

This mostly reverts the old commit 3963333fe676 ("uml: cover stubs with
a VMA") which had added a VMA to the existing PTEs. However, there's no
real reason to have the PTEs in the first place and the VMA cannot be
'fixed' in place, which leads to bugs where userspace could try to unmap
them and be forcefully killed, or such. Also, there's a bit of an ugly
hole in userspace's address space.

Simplify all this: just install the stub code/data pages at the top of
the (inner) address space, i.e. put it just above TASK_SIZE. The pages
are simply hard-coded to be mapped in the userspace process we use to
implement an mm context, and they're out of reach of the inner
mmap/munmap/mprotect etc. since they're above TASK_SIZE.

Getting rid of the VMA also makes vma_merge() no longer hit one of the
VM_WARN_ON()s there because we installed a VMA while the code assumes
the stack VMA is the first one.

It also removes a lockdep warning about mmap_sem usage since we no
longer have uml_setup_stubs() and thus no longer need to do any
manipulation that would require mmap_sem in activate_mm().

Signed-off-by: Johannes Berg
Signed-off-by: Richard Weinberger
---
 arch/um/include/asm/Kbuild         |  1 +
 arch/um/include/asm/mmu_context.h  | 29 +-------------
 arch/um/include/shared/as-layout.h |  3 +-
 arch/um/kernel/exec.c              |  4 +-
 arch/um/kernel/skas/mmu.c          | 87 --------------------------------------
 arch/um/kernel/tlb.c               | 15 -------
 arch/um/kernel/um_arch.c           |  5 +++
 arch/um/os-Linux/skas/process.c    |  4 --
 arch/x86/um/os-Linux/task_size.c   |  2 +-
 9 files changed, 11 insertions(+), 139 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 314979467db1..a58811dc054c 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -25,3 +25,4 @@ generic-y += topology.h
 generic-y += trace_clock.h
 generic-y += word-at-a-time.h
 generic-y += kprobes.h
+generic-y += mm_hooks.h
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index f8a100770691..68e2eb9cfb47 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -10,33 +10,9 @@
 #include
 #include
+#include
 #include
 
-extern void uml_setup_stubs(struct mm_struct *mm);
-/*
- * Needed since we do not use the asm-generic/mm_hooks.h:
- */
-static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
-{
-	uml_setup_stubs(mm);
-	return 0;
-}
-extern void arch_exit_mmap(struct mm_struct *mm);
-static inline void arch_unmap(struct mm_struct *mm,
-			      unsigned long start, unsigned long end)
-{
-}
-static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
-		bool write, bool execute, bool foreign)
-{
-	/* by default, allow everything */
-	return true;
-}
-
-/*
- * end asm-generic/mm_hooks.h functions
- */
-
 extern void force_flush_all(void);
 
 #define activate_mm activate_mm
 
@@ -47,9 +23,6 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
 	 * when the new ->mm is used for the first time.
 	 */
 	__switch_mm(&new->context.id);
-	mmap_write_lock_nested(new, SINGLE_DEPTH_NESTING);
-	uml_setup_stubs(new);
-	mmap_write_unlock(new);
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
diff --git a/arch/um/include/shared/as-layout.h b/arch/um/include/shared/as-layout.h
index 56408bf3480d..9a0bd648d872 100644
--- a/arch/um/include/shared/as-layout.h
+++ b/arch/um/include/shared/as-layout.h
@@ -20,7 +20,7 @@
  * 'UL' and other type specifiers unilaterally.  We
  * use the following macros to deal with this.
  */
-#define STUB_START 0x100000UL
+#define STUB_START stub_start
 #define STUB_CODE STUB_START
 #define STUB_DATA (STUB_CODE + UM_KERN_PAGE_SIZE)
 #define STUB_END (STUB_DATA + UM_KERN_PAGE_SIZE)
@@ -46,6 +46,7 @@ extern unsigned long long highmem;
 extern unsigned long brk_start;
 
 extern unsigned long host_task_size;
+extern unsigned long stub_start;
 
 extern int linux_main(int argc, char **argv);
 extern void uml_finishsetup(void);
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index e8fd5d540b05..4d8498100341 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -26,9 +26,7 @@ void flush_thread(void)
 
 	arch_flush_thread(&current->thread.arch);
 
-	ret = unmap(&current->mm->context.id, 0, STUB_START, 0, &data);
-	ret = ret || unmap(&current->mm->context.id, STUB_END,
-			   host_task_size - STUB_END, 1, &data);
+	ret = unmap(&current->mm->context.id, 0, TASK_SIZE, 1, &data);
 	if (ret) {
 		printk(KERN_ERR "flush_thread - clearing address space failed, "
 		       "err = %d\n", ret);
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index d9961163da66..125df465e8ea 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -14,47 +14,6 @@
 #include
 #include
 
-static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
-			 unsigned long kernel)
-{
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-
-	pgd = pgd_offset(mm, proc);
-
-	p4d = p4d_alloc(mm, pgd, proc);
-	if (!p4d)
-		goto out;
-
-	pud = pud_alloc(mm, p4d, proc);
-	if (!pud)
-		goto out_pud;
-
-	pmd = pmd_alloc(mm, pud, proc);
-	if (!pmd)
-		goto out_pmd;
-
-	pte = pte_alloc_map(mm, pmd, proc);
-	if (!pte)
-		goto out_pte;
-
-	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
-	*pte = pte_mkread(*pte);
-	return 0;
-
- out_pte:
-	pmd_free(mm, pmd);
- out_pmd:
-	pud_free(mm, pud);
- out_pud:
-	p4d_free(mm, p4d);
- out:
-	return -ENOMEM;
-}
-
 int init_new_context(struct task_struct *task, struct mm_struct *mm)
 {
 	struct mm_context *from_mm = NULL;
@@ -98,52 +57,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 	return ret;
 }
 
-void uml_setup_stubs(struct mm_struct *mm)
-{
-	int err, ret;
-
-	ret = init_stub_pte(mm, STUB_CODE,
-			    (unsigned long) __syscall_stub_start);
-	if (ret)
-		goto out;
-
-	ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
-	if (ret)
-		goto out;
-
-	mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start);
-	mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);
-
-	/* dup_mmap already holds mmap_lock */
-	err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
-				      VM_READ | VM_MAYREAD | VM_EXEC |
-				      VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP,
-				      mm->context.stub_pages);
-	if (err) {
-		printk(KERN_ERR "install_special_mapping returned %d\n", err);
-		goto out;
-	}
-	return;
-
-out:
-	force_sigsegv(SIGSEGV);
-}
-
-void arch_exit_mmap(struct mm_struct *mm)
-{
-	pte_t *pte;
-
-	pte = virt_to_pte(mm, STUB_CODE);
-	if (pte != NULL)
-		pte_clear(mm, STUB_CODE, pte);
-
-	pte = virt_to_pte(mm, STUB_DATA);
-	if (pte == NULL)
-		return;
-
-	pte_clear(mm, STUB_DATA, pte);
-}
-
 void destroy_context(struct mm_struct *mm)
 {
 	struct mm_context *mmu = &mm->context;
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 5be1b0da9f3b..bc38f79ca3a3 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -125,9 +125,6 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
 	struct host_vm_op *last;
 	int fd = -1, ret = 0;
 
-	if (virt + len > STUB_START && virt < STUB_END)
-		return -EINVAL;
-
 	if (hvc->userspace)
 		fd = phys_mapping(phys, &offset);
 	else
@@ -165,9 +162,6 @@ static int add_munmap(unsigned long addr, unsigned long len,
 	struct host_vm_op *last;
 	int ret = 0;
 
-	if (addr + len > STUB_START && addr < STUB_END)
-		return -EINVAL;
-
 	if (hvc->index != 0) {
 		last = &hvc->ops[hvc->index - 1];
 		if ((last->type == MUNMAP) &&
@@ -195,9 +189,6 @@ static int add_mprotect(unsigned long addr, unsigned long len,
 	struct host_vm_op *last;
 	int ret = 0;
 
-	if (addr + len > STUB_START && addr < STUB_END)
-		return -EINVAL;
-
 	if (hvc->index != 0) {
 		last = &hvc->ops[hvc->index - 1];
 		if ((last->type == MPROTECT) &&
@@ -232,9 +223,6 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if ((addr >= STUB_START) && (addr < STUB_END))
-			continue;
-
 		r = pte_read(*pte);
 		w = pte_write(*pte);
 		x = pte_exec(*pte);
@@ -478,9 +466,6 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
 
 	address &= PAGE_MASK;
 
-	if (address >= STUB_START && address < STUB_END)
-		goto kill;
-
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
 		goto kill;
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 80e2660782a0..74e07e748a9b 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -249,6 +249,7 @@ void uml_finishsetup(void)
 }
 
 /* Set during early boot */
+unsigned long stub_start;
 unsigned long task_size;
 EXPORT_SYMBOL(task_size);
 
@@ -283,6 +284,10 @@ int __init linux_main(int argc, char **argv)
 		add_arg(DEFAULT_COMMAND_LINE_CONSOLE);
 
 	host_task_size = os_get_top_address();
+	/* reserve two pages for the stubs */
+	host_task_size -= 2 * PAGE_SIZE;
+	stub_start = host_task_size;
+
 	/*
 	 * TASK_SIZE needs to be PGDIR_SIZE aligned or else exit_mmap craps
 	 * out
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 623b0aeadf4c..fba674fac8b7 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -251,10 +251,6 @@ static int userspace_tramp(void *stack)
 	signal(SIGTERM, SIG_DFL);
 	signal(SIGWINCH, SIG_IGN);
 
-	/*
-	 * This has a pte, but it can't be mapped in with the usual
-	 * tlb_flush mechanism because this is part of that mechanism
-	 */
 	fd = phys_mapping(to_phys(__syscall_stub_start), &offset);
 	addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
 		      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
diff --git a/arch/x86/um/os-Linux/task_size.c b/arch/x86/um/os-Linux/task_size.c
index e62174638f00..1dc9adc20b1c 100644
--- a/arch/x86/um/os-Linux/task_size.c
+++ b/arch/x86/um/os-Linux/task_size.c
@@ -145,7 +145,7 @@ out:
 unsigned long os_get_top_address(void)
 {
 	/* The old value of CONFIG_TOP_ADDR */
-	return 0x7fc0000000;
+	return 0x7fc0002000;
 }
 
 #endif
--
cgit v1.2.3
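
Editor's note: a small, self-contained C sketch of the page-masking idea these
patches rely on (illustrative only, not part of the commits above; the helper
name page_base() is made up). Any address known to lie inside the stub data
page -- a local variable on the signal stack, or the stack pointer itself --
can be rounded down with ~(page_size - 1) to find the start of that page, so
the page address no longer needs to be hard-coded. The stubs do the equivalent
with %esp/%rsp in assembly, which is what lets the third patch move the stub
pages to a variable location just above TASK_SIZE.

/*
 * Illustrative sketch only -- not part of the patches above.
 * page_base() mirrors what the stubs do with %esp/%rsp: mask an address
 * that lies inside a page down to the start of that page.
 */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uintptr_t page_base(const void *addr, uintptr_t page_size)
{
	/* page_size must be a power of two, as UM_KERN_PAGE_SIZE is */
	return (uintptr_t)addr & ~(page_size - 1);
}

int main(void)
{
	int marker;	/* lives somewhere on the current stack page */
	uintptr_t psize = (uintptr_t)sysconf(_SC_PAGESIZE);

	printf("stack variable at %p, its page starts at 0x%lx\n",
	       (void *)&marker, (unsigned long)page_base(&marker, psize));
	return 0;
}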