author		Juergen Gross <jgross@suse.com>		2018-08-28 09:40:13 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2018-09-03 16:50:33 +0200
commit		f030aade9165080f3539fb86fc2ce9ffc391813c (patch)
tree		5fd74186e418d49b65ea15cb2b0022e95f247033 /arch/x86/xen/mmu_pv.c
parent		28c11b0f798c6727355bd44603b16bb04e3a044d (diff)
download	linux-f030aade9165080f3539fb86fc2ce9ffc391813c.tar.bz2
x86/xen: Move pv specific parts of arch/x86/xen/mmu.c to mmu_pv.c
There are some PV-specific functions in arch/x86/xen/mmu.c which can be
moved to mmu_pv.c. This in turn makes it possible to build multicalls.c
dependent on CONFIG_XEN_PV.
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: xen-devel@lists.xenproject.org
Cc: virtualization@lists.linux-foundation.org
Cc: akataria@vmware.com
Cc: rusty@rustcorp.com.au
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/20180828074026.820-3-jgross@suse.com
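The build-rule change that actually makes multicalls.o depend on CONFIG_XEN_PV is left to a follow-up patch. As a rough sketch only (the exact object lists in arch/x86/xen/Makefile at this point are an assumption, not part of this commit), it amounts to moving multicalls.o from the unconditional object list to the PV-only one:

	# sketch only; not the actual follow-up patch
	obj-y                   += mmu.o
	obj-$(CONFIG_XEN_PV)    += mmu_pv.o multicalls.o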
Diffstat (limited to 'arch/x86/xen/mmu_pv.c')
-rw-r--r--	arch/x86/xen/mmu_pv.c	138
1 file changed, 138 insertions(+), 0 deletions(-)
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 2fe5c9b1816b..b0691c7a3951 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -99,6 +99,12 @@ static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
 #endif /* CONFIG_X86_64 */
 
 /*
+ * Protects atomic reservation decrease/increase against concurrent increases.
+ * Also protects non-atomic updates of current_pages and balloon lists.
+ */
+DEFINE_SPINLOCK(xen_reservation_lock);
+
+/*
  * Note about cr3 (pagetable base) values:
  *
  * xen_cr3 contains the current logical cr3 value; it contains the
@@ -2662,6 +2668,138 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
 }
 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
 
+static noinline void xen_flush_tlb_all(void)
+{
+	struct mmuext_op *op;
+	struct multicall_space mcs;
+
+	preempt_disable();
+
+	mcs = xen_mc_entry(sizeof(*op));
+
+	op = mcs.args;
+	op->cmd = MMUEXT_TLB_FLUSH_ALL;
+	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+	preempt_enable();
+}
+
+#define REMAP_BATCH_SIZE 16
+
+struct remap_data {
+	xen_pfn_t *pfn;
+	bool contiguous;
+	bool no_translate;
+	pgprot_t prot;
+	struct mmu_update *mmu_update;
+};
+
+static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token,
+				 unsigned long addr, void *data)
+{
+	struct remap_data *rmd = data;
+	pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
+
+	/*
+	 * If we have a contiguous range, just update the pfn itself,
+	 * else update pointer to be "next pfn".
+	 */
+	if (rmd->contiguous)
+		(*rmd->pfn)++;
+	else
+		rmd->pfn++;
+
+	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
+	rmd->mmu_update->ptr |= rmd->no_translate ?
+		MMU_PT_UPDATE_NO_TRANSLATE :
+		MMU_NORMAL_PT_UPDATE;
+	rmd->mmu_update->val = pte_val_ma(pte);
+	rmd->mmu_update++;
+
+	return 0;
+}
+
+int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+		  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
+		  unsigned int domid, bool no_translate, struct page **pages)
+{
+	int err = 0;
+	struct remap_data rmd;
+	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
+	unsigned long range;
+	int mapped = 0;
+
+	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
+
+	rmd.pfn = pfn;
+	rmd.prot = prot;
+	/*
+	 * We use the err_ptr to indicate if there we are doing a contiguous
+	 * mapping or a discontigious mapping.
+	 */
+	rmd.contiguous = !err_ptr;
+	rmd.no_translate = no_translate;
+
+	while (nr) {
+		int index = 0;
+		int done = 0;
+		int batch = min(REMAP_BATCH_SIZE, nr);
+		int batch_left = batch;
+
+		range = (unsigned long)batch << PAGE_SHIFT;
+
+		rmd.mmu_update = mmu_update;
+		err = apply_to_page_range(vma->vm_mm, addr, range,
+					  remap_area_pfn_pte_fn, &rmd);
+		if (err)
+			goto out;
+
+		/*
+		 * We record the error for each page that gives an error, but
+		 * continue mapping until the whole set is done
+		 */
+		do {
+			int i;
+
+			err = HYPERVISOR_mmu_update(&mmu_update[index],
+						    batch_left, &done, domid);
+
+			/*
+			 * @err_ptr may be the same buffer as @gfn, so
+			 * only clear it after each chunk of @gfn is
+			 * used.
+			 */
+			if (err_ptr) {
+				for (i = index; i < index + done; i++)
+					err_ptr[i] = 0;
+			}
+			if (err < 0) {
+				if (!err_ptr)
+					goto out;
+				err_ptr[i] = err;
+				done++; /* Skip failed frame. */
+			} else
+				mapped += done;
+			batch_left -= done;
+			index += done;
+		} while (batch_left);
+
+		nr -= batch;
+		addr += range;
+		if (err_ptr)
+			err_ptr += batch;
+		cond_resched();
+	}
+out:
+
+	xen_flush_tlb_all();
+
+	return err < 0 ? err : mapped;
+}
+EXPORT_SYMBOL_GPL(xen_remap_pfn);
+
 #ifdef CONFIG_KEXEC_CORE
 phys_addr_t paddr_vmcoreinfo_note(void)
 {
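For context, the newly exported xen_remap_pfn() is consumed by the remap wrappers kept in arch/x86/xen/mmu.c. A simplified sketch of such a caller follows, modeled on xen_remap_domain_gfn_range(); the exact guard and argument handling are assumptions for illustration, not part of this patch:

	/* Simplified sketch of a caller; illustrative only, not part of this patch. */
	int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
				       unsigned long addr,
				       xen_pfn_t gfn, int nr,
				       pgprot_t prot, unsigned int domid,
				       struct page **pages)
	{
		/* PV-only path: auto-translated (HVM/PVH) guests remap elsewhere. */
		if (xen_feature(XENFEAT_auto_translated_physmap))
			return -EOPNOTSUPP;

		/*
		 * Passing err_ptr == NULL makes xen_remap_pfn() treat gfn as
		 * the start of a contiguous frame range and abort on the
		 * first error instead of recording per-frame errors.
		 */
		return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid,
				     false, pages);
	}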