From 5e25f5db6abb96ca8ee2aaedcb863daa6dfcc07a Mon Sep 17 00:00:00 2001 From: Dongli Zhang Date: Wed, 1 Nov 2017 09:46:33 +0800 Subject: xen/time: do not decrease steal time after live migration on xen After guest live migration on xen, steal time in /proc/stat (cpustat[CPUTIME_STEAL]) might decrease because steal returned by xen_steal_lock() might be less than this_rq()->prev_steal_time which is derived from previous return value of xen_steal_clock(). For instance, steal time of each vcpu is 335 before live migration. cpu 198 0 368 200064 1962 0 0 1340 0 0 cpu0 38 0 81 50063 492 0 0 335 0 0 cpu1 65 0 97 49763 634 0 0 335 0 0 cpu2 38 0 81 50098 462 0 0 335 0 0 cpu3 56 0 107 50138 374 0 0 335 0 0 After live migration, steal time is reduced to 312. cpu 200 0 370 200330 1971 0 0 1248 0 0 cpu0 38 0 82 50123 500 0 0 312 0 0 cpu1 65 0 97 49832 634 0 0 312 0 0 cpu2 39 0 82 50167 462 0 0 312 0 0 cpu3 56 0 107 50207 374 0 0 312 0 0 Since runstate times are cumulative and cleared during xen live migration by xen hypervisor, the idea of this patch is to accumulate runstate times to global percpu variables before live migration suspend. Once guest VM is resumed, xen_get_runstate_snapshot_cpu() would always return the sum of new runstate times and previously accumulated times stored in global percpu variables. Comment above HYPERVISOR_suspend() has been removed as it is inaccurate: the call can return an error code (e.g., possibly -EPERM in the future). Similar and more severe issue would impact prior linux 4.8-4.10 as discussed by Michael Las at https://0xstubs.org/debugging-a-flaky-cpu-steal-time-counter-on-a-paravirtualized-xen-guest, which would overflow steal time and lead to 100% st usage in top command for linux 4.8-4.10. A backport of this patch would fix that issue. [boris: added linux/slab.h to driver/xen/time.c, slightly reformatted commit message] References: https://0xstubs.org/debugging-a-flaky-cpu-steal-time-counter-on-a-paravirtualized-xen-guest Signed-off-by: Dongli Zhang Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- include/xen/xen-ops.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/xen') diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index 218e6aae5433..09072271f122 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -32,6 +32,7 @@ void xen_resume_notifier_unregister(struct notifier_block *nb); bool xen_vcpu_stolen(int vcpu); void xen_setup_runstate_info(int cpu); void xen_time_setup_guest(void); +void xen_manage_runstate_time(int action); void xen_get_runstate_snapshot(struct vcpu_runstate_info *res); u64 xen_steal_clock(int cpu); -- cgit v1.2.3 From ec4001c3f29ebb3d4147aaec7be9c687ddadb7c8 Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Fri, 3 Nov 2017 17:04:11 +0000 Subject: xen: support priv-mapping in an HVM tools domain If the domain has XENFEAT_auto_translated_physmap then use of the PV- specific HYPERVISOR_mmu_update hypercall is clearly incorrect. This patch adds checks in xen_remap_domain_gfn_array() and xen_unmap_domain_gfn_array() which call through to the approprate xlate_mmu function if the feature is present. A check is also added to xen_remap_domain_gfn_range() to fail with -EOPNOTSUPP since this should not be used in an HVM tools domain. 
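Illustration (not part of the patch): the remap path touched here is normally reached from userspace through /dev/xen/privcmd via libxenforeignmemory, so the practical effect of this change is that such mappings start working when the tools domain itself is HVM/PVH. A rough, hypothetical caller is sketched below; the domid, the guest frame numbers and the exact libxenforeignmemory signatures are assumptions to verify against the installed headers.
/*
 * Hypothetical toolstack-side caller, shown for illustration only.
 * Build with something like: gcc map_guest.c -lxenforeignmemory
 * The domid and guest frame numbers below are placeholders.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <xenforeignmemory.h>

int main(void)
{
	xenforeignmemory_handle *fmem = xenforeignmemory_open(NULL, 0);
	xen_pfn_t gfns[2] = { 0x1000, 0x1001 };	/* hypothetical guest frames */
	int err[2] = { 0, 0 };
	uint32_t domid = 5;			/* hypothetical guest domid */
	void *addr;

	if (!fmem) {
		perror("xenforeignmemory_open");
		return 1;
	}

	/*
	 * This goes through privcmd and ends up in
	 * xen_remap_domain_gfn_array(); with the patch above it also works
	 * when the mapping domain has XENFEAT_auto_translated_physmap.
	 */
	addr = xenforeignmemory_map(fmem, domid, PROT_READ, 2, gfns, err);
	if (!addr) {
		perror("xenforeignmemory_map");
	} else {
		printf("mapped 2 guest frames at %p (err: %d %d)\n",
		       addr, err[0], err[1]);
		xenforeignmemory_unmap(fmem, addr, 2);
	}

	xenforeignmemory_close(fmem);
	return 0;
}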
Signed-off-by: Paul Durrant Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- arch/x86/xen/mmu.c | 14 ++++++++++++-- include/xen/xen-ops.h | 24 ++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 2 deletions(-) (limited to 'include/xen') diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 3e15345abfe7..d33e7dbe3129 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -172,6 +172,9 @@ int xen_remap_domain_gfn_range(struct vm_area_struct *vma, pgprot_t prot, unsigned domid, struct page **pages) { + if (xen_feature(XENFEAT_auto_translated_physmap)) + return -EOPNOTSUPP; + return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages); } EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range); @@ -182,6 +185,10 @@ int xen_remap_domain_gfn_array(struct vm_area_struct *vma, int *err_ptr, pgprot_t prot, unsigned domid, struct page **pages) { + if (xen_feature(XENFEAT_auto_translated_physmap)) + return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, + prot, domid, pages); + /* We BUG_ON because it's a programmer error to pass a NULL err_ptr, * and the consequences later is quite hard to detect what the actual * cause of "wrong memory was mapped in". @@ -193,9 +200,12 @@ EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array); /* Returns: 0 success */ int xen_unmap_domain_gfn_range(struct vm_area_struct *vma, - int numpgs, struct page **pages) + int nr, struct page **pages) { - if (!pages || !xen_feature(XENFEAT_auto_translated_physmap)) + if (xen_feature(XENFEAT_auto_translated_physmap)) + return xen_xlate_unmap_gfn_range(vma, nr, pages); + + if (!pages) return 0; return -EINVAL; diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index 09072271f122..c278b21cad39 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -104,6 +104,8 @@ int xen_remap_domain_gfn_range(struct vm_area_struct *vma, struct page **pages); int xen_unmap_domain_gfn_range(struct vm_area_struct *vma, int numpgs, struct page **pages); + +#ifdef CONFIG_XEN_AUTO_XLATE int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, unsigned long addr, xen_pfn_t *gfn, int nr, @@ -112,6 +114,28 @@ int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, struct page **pages); int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, int nr, struct page **pages); +#else +/* + * These two functions are called from arch/x86/xen/mmu.c and so stubs + * are needed for a configuration not specifying CONFIG_XEN_AUTO_XLATE. + */ +static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, + unsigned long addr, + xen_pfn_t *gfn, int nr, + int *err_ptr, pgprot_t prot, + unsigned int domid, + struct page **pages) +{ + return -EOPNOTSUPP; +} + +static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, + int nr, struct page **pages) +{ + return -EOPNOTSUPP; +} +#endif + int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr, unsigned long nr_grant_frames); -- cgit v1.2.3 From b988b8ff072ab04abd62d10d3fe44ec544be8a7d Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Thu, 2 Nov 2017 10:19:17 +0100 Subject: xen: re-introduce support for grant v2 interface The grant v2 support was removed from the kernel with commit 438b33c7145ca8a5131a30c36d8f59bce119a19a ("xen/grant-table: remove support for V2 tables") as the higher memory footprint of v2 grants resulted in less grants being possible for a kernel compared to the v1 grant interface. 
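(With the v1 layout a grant entry stores the granted frame number in a 32-bit field, so only memory below 2^32 * 4 KiB = 16 TiB can be granted at all.)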
As machines with more than 16TB of memory are expected to be more common in the near future support of grant v2 is mandatory in order to be able to run a Xen pv domain at any memory location. So re-add grant v2 support basically by reverting above commit. Signed-off-by: Juergen Gross Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- arch/arm/xen/grant-table.c | 9 +- arch/x86/xen/grant-table.c | 60 ++++++++- drivers/xen/grant-table.c | 312 ++++++++++++++++++++++++++++++++++++++++++++- include/xen/grant_table.h | 30 ++++- 4 files changed, 398 insertions(+), 13 deletions(-) (limited to 'include/xen') diff --git a/arch/arm/xen/grant-table.c b/arch/arm/xen/grant-table.c index e43791829ace..91cf08ba1e95 100644 --- a/arch/arm/xen/grant-table.c +++ b/arch/arm/xen/grant-table.c @@ -45,7 +45,14 @@ void arch_gnttab_unmap(void *shared, unsigned long nr_gframes) return; } -int arch_gnttab_init(unsigned long nr_shared) +int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes, + unsigned long max_nr_gframes, + grant_status_t **__shared) +{ + return -ENOSYS; +} + +int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status) { return 0; } diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c index 809b6c812654..92ccc718152d 100644 --- a/arch/x86/xen/grant-table.c +++ b/arch/x86/xen/grant-table.c @@ -49,7 +49,7 @@ static struct gnttab_vm_area { struct vm_struct *area; pte_t **ptes; -} gnttab_shared_vm_area; +} gnttab_shared_vm_area, gnttab_status_vm_area; int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, unsigned long max_nr_gframes, @@ -73,16 +73,43 @@ int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, return 0; } +int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes, + unsigned long max_nr_gframes, + grant_status_t **__shared) +{ + grant_status_t *shared = *__shared; + unsigned long addr; + unsigned long i; + + if (shared == NULL) + *__shared = shared = gnttab_status_vm_area.area->addr; + + addr = (unsigned long)shared; + + for (i = 0; i < nr_gframes; i++) { + set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i], + mfn_pte(frames[i], PAGE_KERNEL)); + addr += PAGE_SIZE; + } + + return 0; +} + void arch_gnttab_unmap(void *shared, unsigned long nr_gframes) { + pte_t **ptes; unsigned long addr; unsigned long i; + if (shared == gnttab_status_vm_area.area->addr) + ptes = gnttab_status_vm_area.ptes; + else + ptes = gnttab_shared_vm_area.ptes; + addr = (unsigned long)shared; for (i = 0; i < nr_gframes; i++) { - set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i], - __pte(0)); + set_pte_at(&init_mm, addr, ptes[i], __pte(0)); addr += PAGE_SIZE; } } @@ -102,12 +129,35 @@ static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames) return 0; } -int arch_gnttab_init(unsigned long nr_shared) +static void arch_gnttab_vfree(struct gnttab_vm_area *area) { + free_vm_area(area->area); + kfree(area->ptes); +} + +int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status) +{ + int ret; + if (!xen_pv_domain()) return 0; - return arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared); + ret = arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared); + if (ret < 0) + return ret; + + /* + * Always allocate the space for the status frames in case + * we're migrated to a host with V2 support. 
+ */ + ret = arch_gnttab_valloc(&gnttab_status_vm_area, nr_status); + if (ret < 0) + goto err; + + return 0; +err: + arch_gnttab_vfree(&gnttab_shared_vm_area); + return -ENOMEM; } #ifdef CONFIG_XEN_PVH diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 2c6a9114d332..65c4bdb0b463 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -71,6 +71,7 @@ struct grant_frames xen_auto_xlat_grant_frames; static union { struct grant_entry_v1 *v1; + union grant_entry_v2 *v2; void *addr; } gnttab_shared; @@ -121,6 +122,29 @@ struct gnttab_ops { * by bit operations. */ int (*query_foreign_access)(grant_ref_t ref); + /* + * Grant a domain to access a range of bytes within the page referred by + * an available grant entry. Ref parameter is reference of a grant entry + * which will be sub-page accessed, domid is id of grantee domain, frame + * is frame address of subpage grant, flags is grant type and flag + * information, page_off is offset of the range of bytes, and length is + * length of bytes to be accessed. + */ + void (*update_subpage_entry)(grant_ref_t ref, domid_t domid, + unsigned long frame, int flags, + unsigned page_off, unsigned length); + /* + * Redirect an available grant entry on domain A to another grant + * reference of domain B, then allow domain C to use grant reference + * of domain B transitively. Ref parameter is an available grant entry + * reference on domain A, domid is id of domain C which accesses grant + * entry transitively, flags is grant type and flag information, + * trans_domid is id of domain B whose grant entry is finally accessed + * transitively, trans_gref is grant entry transitive reference of + * domain B. + */ + void (*update_trans_entry)(grant_ref_t ref, domid_t domid, int flags, + domid_t trans_domid, grant_ref_t trans_gref); }; struct unmap_refs_callback_data { @@ -130,6 +154,9 @@ struct unmap_refs_callback_data { static const struct gnttab_ops *gnttab_interface; +/* This reflects status of grant entries, so act as a global value. */ +static grant_status_t *grstatus; + static int grant_table_version; static int grefs_per_grant_frame; @@ -138,6 +165,7 @@ static struct gnttab_free_callback *gnttab_free_callback_list; static int gnttab_expand(unsigned int req_entries); #define RPP (PAGE_SIZE / sizeof(grant_ref_t)) +#define SPP (PAGE_SIZE / sizeof(grant_status_t)) static inline grant_ref_t *__gnttab_entry(grant_ref_t entry) { @@ -210,7 +238,7 @@ static void put_free_entry(grant_ref_t ref) } /* - * Following applies to gnttab_update_entry_v1. + * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2. * Introducing a valid entry into the grant table: * 1. Write ent->domid. * 2. Write ent->frame: @@ -229,6 +257,15 @@ static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid, gnttab_shared.v1[ref].flags = flags; } +static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid, + unsigned long frame, unsigned int flags) +{ + gnttab_shared.v2[ref].hdr.domid = domid; + gnttab_shared.v2[ref].full_page.frame = frame; + wmb(); /* Hypervisor concurrent accesses. 
*/ + gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags; +} + /* * Public grant-issuing interface functions */ @@ -255,11 +292,132 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); +static void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid, + unsigned long frame, int flags, + unsigned page_off, unsigned length) +{ + gnttab_shared.v2[ref].sub_page.frame = frame; + gnttab_shared.v2[ref].sub_page.page_off = page_off; + gnttab_shared.v2[ref].sub_page.length = length; + gnttab_shared.v2[ref].hdr.domid = domid; + wmb(); + gnttab_shared.v2[ref].hdr.flags = + GTF_permit_access | GTF_sub_page | flags; +} + +int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid, + unsigned long frame, int flags, + unsigned page_off, + unsigned length) +{ + if (flags & (GTF_accept_transfer | GTF_reading | + GTF_writing | GTF_transitive)) + return -EPERM; + + if (gnttab_interface->update_subpage_entry == NULL) + return -ENOSYS; + + gnttab_interface->update_subpage_entry(ref, domid, frame, flags, + page_off, length); + + return 0; +} +EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage_ref); + +int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame, + int flags, unsigned page_off, + unsigned length) +{ + int ref, rc; + + ref = get_free_entries(1); + if (unlikely(ref < 0)) + return -ENOSPC; + + rc = gnttab_grant_foreign_access_subpage_ref(ref, domid, frame, flags, + page_off, length); + if (rc < 0) { + put_free_entry(ref); + return rc; + } + + return ref; +} +EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage); + +bool gnttab_subpage_grants_available(void) +{ + return gnttab_interface->update_subpage_entry != NULL; +} +EXPORT_SYMBOL_GPL(gnttab_subpage_grants_available); + +static void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid, + int flags, domid_t trans_domid, + grant_ref_t trans_gref) +{ + gnttab_shared.v2[ref].transitive.trans_domid = trans_domid; + gnttab_shared.v2[ref].transitive.gref = trans_gref; + gnttab_shared.v2[ref].hdr.domid = domid; + wmb(); + gnttab_shared.v2[ref].hdr.flags = + GTF_permit_access | GTF_transitive | flags; +} + +int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid, + int flags, domid_t trans_domid, + grant_ref_t trans_gref) +{ + if (flags & (GTF_accept_transfer | GTF_reading | + GTF_writing | GTF_sub_page)) + return -EPERM; + + if (gnttab_interface->update_trans_entry == NULL) + return -ENOSYS; + + gnttab_interface->update_trans_entry(ref, domid, flags, trans_domid, + trans_gref); + + return 0; +} +EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans_ref); + +int gnttab_grant_foreign_access_trans(domid_t domid, int flags, + domid_t trans_domid, + grant_ref_t trans_gref) +{ + int ref, rc; + + ref = get_free_entries(1); + if (unlikely(ref < 0)) + return -ENOSPC; + + rc = gnttab_grant_foreign_access_trans_ref(ref, domid, flags, + trans_domid, trans_gref); + if (rc < 0) { + put_free_entry(ref); + return rc; + } + + return ref; +} +EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans); + +bool gnttab_trans_grants_available(void) +{ + return gnttab_interface->update_trans_entry != NULL; +} +EXPORT_SYMBOL_GPL(gnttab_trans_grants_available); + static int gnttab_query_foreign_access_v1(grant_ref_t ref) { return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing); } +static int gnttab_query_foreign_access_v2(grant_ref_t ref) +{ + return grstatus[ref] & (GTF_reading|GTF_writing); +} + int 
gnttab_query_foreign_access(grant_ref_t ref) { return gnttab_interface->query_foreign_access(ref); @@ -282,6 +440,29 @@ static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly) return 1; } +static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly) +{ + gnttab_shared.v2[ref].hdr.flags = 0; + mb(); /* Concurrent access by hypervisor. */ + if (grstatus[ref] & (GTF_reading|GTF_writing)) { + return 0; + } else { + /* + * The read of grstatus needs to have acquire semantics. + * On x86, reads already have that, and we just need to + * protect against compiler reorderings. + * On other architectures we may need a full barrier. + */ +#ifdef CONFIG_X86 + barrier(); +#else + mb(); +#endif + } + + return 1; +} + static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) { return gnttab_interface->end_foreign_access_ref(ref, readonly); @@ -442,6 +623,37 @@ static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref) return frame; } +static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref) +{ + unsigned long frame; + u16 flags; + u16 *pflags; + + pflags = &gnttab_shared.v2[ref].hdr.flags; + + /* + * If a transfer is not even yet started, try to reclaim the grant + * reference and return failure (== 0). + */ + while (!((flags = *pflags) & GTF_transfer_committed)) { + if (sync_cmpxchg(pflags, flags, 0) == flags) + return 0; + cpu_relax(); + } + + /* If a transfer is in progress then wait until it is completed. */ + while (!(flags & GTF_transfer_completed)) { + flags = *pflags; + cpu_relax(); + } + + rmb(); /* Read the frame number /after/ reading completion status. */ + frame = gnttab_shared.v2[ref].full_page.frame; + BUG_ON(frame == 0); + + return frame; +} + unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref) { return gnttab_interface->end_foreign_transfer_ref(ref); @@ -938,6 +1150,12 @@ int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item) } EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync); +static unsigned int nr_status_frames(unsigned int nr_grant_frames) +{ + BUG_ON(grefs_per_grant_frame == 0); + return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP; +} + static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) { int rc; @@ -955,6 +1173,55 @@ static void gnttab_unmap_frames_v1(void) arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames); } +static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes) +{ + uint64_t *sframes; + unsigned int nr_sframes; + struct gnttab_get_status_frames getframes; + int rc; + + nr_sframes = nr_status_frames(nr_gframes); + + /* No need for kzalloc as it is initialized in following hypercall + * GNTTABOP_get_status_frames. 
+ */ + sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC); + if (!sframes) + return -ENOMEM; + + getframes.dom = DOMID_SELF; + getframes.nr_frames = nr_sframes; + set_xen_guest_handle(getframes.frame_list, sframes); + + rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames, + &getframes, 1); + if (rc == -ENOSYS) { + kfree(sframes); + return -ENOSYS; + } + + BUG_ON(rc || getframes.status); + + rc = arch_gnttab_map_status(sframes, nr_sframes, + nr_status_frames(gnttab_max_grant_frames()), + &grstatus); + BUG_ON(rc); + kfree(sframes); + + rc = arch_gnttab_map_shared(frames, nr_gframes, + gnttab_max_grant_frames(), + &gnttab_shared.addr); + BUG_ON(rc); + + return 0; +} + +static void gnttab_unmap_frames_v2(void) +{ + arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames); + arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames)); +} + static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct gnttab_setup_table setup; @@ -1022,13 +1289,45 @@ static const struct gnttab_ops gnttab_v1_ops = { .query_foreign_access = gnttab_query_foreign_access_v1, }; +static const struct gnttab_ops gnttab_v2_ops = { + .map_frames = gnttab_map_frames_v2, + .unmap_frames = gnttab_unmap_frames_v2, + .update_entry = gnttab_update_entry_v2, + .end_foreign_access_ref = gnttab_end_foreign_access_ref_v2, + .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2, + .query_foreign_access = gnttab_query_foreign_access_v2, + .update_subpage_entry = gnttab_update_subpage_entry_v2, + .update_trans_entry = gnttab_update_trans_entry_v2, +}; + static void gnttab_request_version(void) { - /* Only version 1 is used, which will always be available. */ - grant_table_version = 1; - grefs_per_grant_frame = XEN_PAGE_SIZE / sizeof(struct grant_entry_v1); - gnttab_interface = &gnttab_v1_ops; + int rc; + struct gnttab_set_version gsv; + gsv.version = 1; + + rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1); + if (rc == 0 && gsv.version == 2) { + grant_table_version = 2; + grefs_per_grant_frame = XEN_PAGE_SIZE / + sizeof(union grant_entry_v2); + gnttab_interface = &gnttab_v2_ops; + } else if (grant_table_version == 2) { + /* + * If we've already used version 2 features, + * but then suddenly discover that they're not + * available (e.g. migrating to an older + * version of Xen), almost unbounded badness + * can happen. + */ + panic("we need grant tables version 2, but only version 1 is available"); + } else { + grant_table_version = 1; + grefs_per_grant_frame = XEN_PAGE_SIZE / + sizeof(struct grant_entry_v1); + gnttab_interface = &gnttab_v1_ops; + } pr_info("Grant tables using version %d layout\n", grant_table_version); } @@ -1122,7 +1421,8 @@ int gnttab_init(void) } } - ret = arch_gnttab_init(max_nr_grant_frames); + ret = arch_gnttab_init(max_nr_grant_frames, + nr_status_frames(max_nr_grant_frames)); if (ret < 0) goto ini_nomem; diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index 34b1379f9777..dd6c7a32ee32 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -84,6 +84,24 @@ int gnttab_resume(void); int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int readonly); +int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame, + int flags, unsigned page_off, + unsigned length); +int gnttab_grant_foreign_access_trans(domid_t domid, int flags, + domid_t trans_domid, + grant_ref_t trans_gref); + +/* + * Are sub-page grants available on this version of Xen? Returns true if they + * are, and false if they're not. 
+ */ +bool gnttab_subpage_grants_available(void); + +/* + * Are transitive grants available on this version of Xen? Returns true if they + * are, and false if they're not. + */ +bool gnttab_trans_grants_available(void); /* * End access through the given grant reference, iff the grant entry is no @@ -130,6 +148,13 @@ void gnttab_cancel_free_callback(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int readonly); +int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid, + unsigned long frame, int flags, + unsigned page_off, + unsigned length); +int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid, + int flags, domid_t trans_domid, + grant_ref_t trans_gref); /* Give access to the first 4K of the page */ static inline void gnttab_page_grant_foreign_access_ref_one( @@ -174,10 +199,13 @@ gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr, unmap->dev_bus_addr = 0; } -int arch_gnttab_init(unsigned long nr_shared); +int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status); int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes, unsigned long max_nr_gframes, void **__shared); +int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes, + unsigned long max_nr_gframes, + grant_status_t **__shared); void arch_gnttab_unmap(void *shared, unsigned long nr_gframes); struct grant_frames { -- cgit v1.2.3 From 56c9c700c4399858e50971d98ac44b5842b06a87 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Thu, 2 Nov 2017 10:19:18 +0100 Subject: xen: limit grant v2 interface to the v1 functionality As there is currently no user for sub-page grants or transient grants remove that functionality. This at once makes it possible to switch from grant v2 to grant v1 without restrictions, as there is no loss of functionality other than the limited frame number width related to the switch. Signed-off-by: Juergen Gross Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- drivers/xen/grant-table.c | 150 ---------------------------------------------- include/xen/grant_table.h | 25 -------- 2 files changed, 175 deletions(-) (limited to 'include/xen') diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 65c4bdb0b463..db6a1b9efd11 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -122,29 +122,6 @@ struct gnttab_ops { * by bit operations. */ int (*query_foreign_access)(grant_ref_t ref); - /* - * Grant a domain to access a range of bytes within the page referred by - * an available grant entry. Ref parameter is reference of a grant entry - * which will be sub-page accessed, domid is id of grantee domain, frame - * is frame address of subpage grant, flags is grant type and flag - * information, page_off is offset of the range of bytes, and length is - * length of bytes to be accessed. - */ - void (*update_subpage_entry)(grant_ref_t ref, domid_t domid, - unsigned long frame, int flags, - unsigned page_off, unsigned length); - /* - * Redirect an available grant entry on domain A to another grant - * reference of domain B, then allow domain C to use grant reference - * of domain B transitively. 
Ref parameter is an available grant entry - * reference on domain A, domid is id of domain C which accesses grant - * entry transitively, flags is grant type and flag information, - * trans_domid is id of domain B whose grant entry is finally accessed - * transitively, trans_gref is grant entry transitive reference of - * domain B. - */ - void (*update_trans_entry)(grant_ref_t ref, domid_t domid, int flags, - domid_t trans_domid, grant_ref_t trans_gref); }; struct unmap_refs_callback_data { @@ -292,122 +269,6 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); -static void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid, - unsigned long frame, int flags, - unsigned page_off, unsigned length) -{ - gnttab_shared.v2[ref].sub_page.frame = frame; - gnttab_shared.v2[ref].sub_page.page_off = page_off; - gnttab_shared.v2[ref].sub_page.length = length; - gnttab_shared.v2[ref].hdr.domid = domid; - wmb(); - gnttab_shared.v2[ref].hdr.flags = - GTF_permit_access | GTF_sub_page | flags; -} - -int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid, - unsigned long frame, int flags, - unsigned page_off, - unsigned length) -{ - if (flags & (GTF_accept_transfer | GTF_reading | - GTF_writing | GTF_transitive)) - return -EPERM; - - if (gnttab_interface->update_subpage_entry == NULL) - return -ENOSYS; - - gnttab_interface->update_subpage_entry(ref, domid, frame, flags, - page_off, length); - - return 0; -} -EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage_ref); - -int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame, - int flags, unsigned page_off, - unsigned length) -{ - int ref, rc; - - ref = get_free_entries(1); - if (unlikely(ref < 0)) - return -ENOSPC; - - rc = gnttab_grant_foreign_access_subpage_ref(ref, domid, frame, flags, - page_off, length); - if (rc < 0) { - put_free_entry(ref); - return rc; - } - - return ref; -} -EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage); - -bool gnttab_subpage_grants_available(void) -{ - return gnttab_interface->update_subpage_entry != NULL; -} -EXPORT_SYMBOL_GPL(gnttab_subpage_grants_available); - -static void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid, - int flags, domid_t trans_domid, - grant_ref_t trans_gref) -{ - gnttab_shared.v2[ref].transitive.trans_domid = trans_domid; - gnttab_shared.v2[ref].transitive.gref = trans_gref; - gnttab_shared.v2[ref].hdr.domid = domid; - wmb(); - gnttab_shared.v2[ref].hdr.flags = - GTF_permit_access | GTF_transitive | flags; -} - -int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid, - int flags, domid_t trans_domid, - grant_ref_t trans_gref) -{ - if (flags & (GTF_accept_transfer | GTF_reading | - GTF_writing | GTF_sub_page)) - return -EPERM; - - if (gnttab_interface->update_trans_entry == NULL) - return -ENOSYS; - - gnttab_interface->update_trans_entry(ref, domid, flags, trans_domid, - trans_gref); - - return 0; -} -EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans_ref); - -int gnttab_grant_foreign_access_trans(domid_t domid, int flags, - domid_t trans_domid, - grant_ref_t trans_gref) -{ - int ref, rc; - - ref = get_free_entries(1); - if (unlikely(ref < 0)) - return -ENOSPC; - - rc = gnttab_grant_foreign_access_trans_ref(ref, domid, flags, - trans_domid, trans_gref); - if (rc < 0) { - put_free_entry(ref); - return rc; - } - - return ref; -} -EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans); - -bool gnttab_trans_grants_available(void) -{ - return 
gnttab_interface->update_trans_entry != NULL; -} -EXPORT_SYMBOL_GPL(gnttab_trans_grants_available); - static int gnttab_query_foreign_access_v1(grant_ref_t ref) { return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing); @@ -1296,8 +1157,6 @@ static const struct gnttab_ops gnttab_v2_ops = { .end_foreign_access_ref = gnttab_end_foreign_access_ref_v2, .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2, .query_foreign_access = gnttab_query_foreign_access_v2, - .update_subpage_entry = gnttab_update_subpage_entry_v2, - .update_trans_entry = gnttab_update_trans_entry_v2, }; static void gnttab_request_version(void) @@ -1313,15 +1172,6 @@ static void gnttab_request_version(void) grefs_per_grant_frame = XEN_PAGE_SIZE / sizeof(union grant_entry_v2); gnttab_interface = &gnttab_v2_ops; - } else if (grant_table_version == 2) { - /* - * If we've already used version 2 features, - * but then suddenly discover that they're not - * available (e.g. migrating to an older - * version of Xen), almost unbounded badness - * can happen. - */ - panic("we need grant tables version 2, but only version 1 is available"); } else { grant_table_version = 1; grefs_per_grant_frame = XEN_PAGE_SIZE / diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index dd6c7a32ee32..2e37741f6b8d 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -84,24 +84,6 @@ int gnttab_resume(void); int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int readonly); -int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame, - int flags, unsigned page_off, - unsigned length); -int gnttab_grant_foreign_access_trans(domid_t domid, int flags, - domid_t trans_domid, - grant_ref_t trans_gref); - -/* - * Are sub-page grants available on this version of Xen? Returns true if they - * are, and false if they're not. - */ -bool gnttab_subpage_grants_available(void); - -/* - * Are transitive grants available on this version of Xen? Returns true if they - * are, and false if they're not. - */ -bool gnttab_trans_grants_available(void); /* * End access through the given grant reference, iff the grant entry is no @@ -148,13 +130,6 @@ void gnttab_cancel_free_callback(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int readonly); -int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid, - unsigned long frame, int flags, - unsigned page_off, - unsigned length); -int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid, - int flags, domid_t trans_domid, - grant_ref_t trans_gref); /* Give access to the first 4K of the page */ static inline void gnttab_page_grant_foreign_access_ref_one( -- cgit v1.2.3 From 2229f70b5bbb025e1394b61007938a68060afbfb Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Wed, 8 Nov 2017 17:19:57 +0000 Subject: x86/xen/time: setup vcpu 0 time info page In order to support pvclock vdso on xen we need to setup the time info page for vcpu 0 and register the page with Xen using the VCPUOP_register_vcpu_time_memory_area hypercall. This hypercall will also forcefully update the pvti which will set some of the necessary flags for vdso. Afterwards we check if it supports the PVCLOCK_TSC_STABLE_BIT flag which is mandatory for having vdso/vsyscall support. And if so, it will set the cpu 0 pvti that will be later on used when mapping the vdso image. 
The xen headers are also updated to include the new hypercall for registering the secondary vcpu_time_info struct. Signed-off-by: Joao Martins Reviewed-by: Juergen Gross Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- arch/x86/xen/suspend.c | 4 ++ arch/x86/xen/time.c | 90 +++++++++++++++++++++++++++++++++++++++++++- arch/x86/xen/xen-ops.h | 2 + include/xen/interface/vcpu.h | 42 +++++++++++++++++++++ 4 files changed, 137 insertions(+), 1 deletion(-) (limited to 'include/xen') diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index d6b1680693a9..800ed36ecfba 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c @@ -16,6 +16,8 @@ void xen_arch_pre_suspend(void) { + xen_save_time_memory_area(); + if (xen_pv_domain()) xen_pv_pre_suspend(); } @@ -26,6 +28,8 @@ void xen_arch_post_suspend(int cancelled) xen_pv_post_suspend(cancelled); else xen_hvm_post_suspend(cancelled); + + xen_restore_time_memory_area(); } static void xen_vcpu_notify_restore(void *data) diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index c96e61fd70e7..c2041043c606 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -370,6 +370,92 @@ static const struct pv_time_ops xen_time_ops __initconst = { .steal_clock = xen_steal_clock, }; +static struct pvclock_vsyscall_time_info *xen_clock __read_mostly; + +void xen_save_time_memory_area(void) +{ + struct vcpu_register_time_memory_area t; + int ret; + + if (!xen_clock) + return; + + t.addr.v = NULL; + + ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t); + if (ret != 0) + pr_notice("Cannot save secondary vcpu_time_info (err %d)", + ret); + else + clear_page(xen_clock); +} + +void xen_restore_time_memory_area(void) +{ + struct vcpu_register_time_memory_area t; + int ret; + + if (!xen_clock) + return; + + t.addr.v = &xen_clock->pvti; + + ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t); + + /* + * We don't disable VCLOCK_PVCLOCK entirely if it fails to register the + * secondary time info with Xen or if we migrated to a host without the + * necessary flags. On both of these cases what happens is either + * process seeing a zeroed out pvti or seeing no PVCLOCK_TSC_STABLE_BIT + * bit set. Userspace checks the latter and if 0, it discards the data + * in pvti and fallbacks to a system call for a reliable timestamp. + */ + if (ret != 0) + pr_notice("Cannot restore secondary vcpu_time_info (err %d)", + ret); +} + +static void xen_setup_vsyscall_time_info(void) +{ + struct vcpu_register_time_memory_area t; + struct pvclock_vsyscall_time_info *ti; + int ret; + + ti = (struct pvclock_vsyscall_time_info *)get_zeroed_page(GFP_KERNEL); + if (!ti) + return; + + t.addr.v = &ti->pvti; + + ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t); + if (ret) { + pr_notice("xen: VCLOCK_PVCLOCK not supported (err %d)\n", ret); + free_page((unsigned long)ti); + return; + } + + /* + * If primary time info had this bit set, secondary should too since + * it's the same data on both just different memory regions. But we + * still check it in case hypervisor is buggy. 
+ */ + if (!(ti->pvti.flags & PVCLOCK_TSC_STABLE_BIT)) { + t.addr.v = NULL; + ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, + 0, &t); + if (!ret) + free_page((unsigned long)ti); + + pr_notice("xen: VCLOCK_PVCLOCK not supported (tsc unstable)\n"); + return; + } + + xen_clock = ti; + pvclock_set_pvti_cpu0_va(xen_clock); + + xen_clocksource.archdata.vclock_mode = VCLOCK_PVCLOCK; +} + static void __init xen_time_init(void) { struct pvclock_vcpu_time_info *pvti; @@ -401,8 +487,10 @@ static void __init xen_time_init(void) * bit is supported hence speeding up Xen clocksource. */ pvti = &__this_cpu_read(xen_vcpu)->time; - if (pvti->flags & PVCLOCK_TSC_STABLE_BIT) + if (pvti->flags & PVCLOCK_TSC_STABLE_BIT) { pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT); + xen_setup_vsyscall_time_info(); + } xen_setup_runstate_info(cpu); xen_setup_timer(cpu); diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index c8a6d224f7ed..f96dbedb33d4 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -69,6 +69,8 @@ void xen_setup_runstate_info(int cpu); void xen_teardown_timer(int cpu); u64 xen_clocksource_read(void); void xen_setup_cpu_clockevents(void); +void xen_save_time_memory_area(void); +void xen_restore_time_memory_area(void); void __init xen_init_time_ops(void); void __init xen_hvm_init_time_ops(void); diff --git a/include/xen/interface/vcpu.h b/include/xen/interface/vcpu.h index 98188c87f5c1..504c71601511 100644 --- a/include/xen/interface/vcpu.h +++ b/include/xen/interface/vcpu.h @@ -178,4 +178,46 @@ DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_vcpu_info); /* Send an NMI to the specified VCPU. @extra_arg == NULL. */ #define VCPUOP_send_nmi 11 + +/* + * Get the physical ID information for a pinned vcpu's underlying physical + * processor. The physical ID informmation is architecture-specific. + * On x86: id[31:0]=apic_id, id[63:32]=acpi_id. + * This command returns -EINVAL if it is not a valid operation for this VCPU. + */ +#define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */ +struct vcpu_get_physid { + uint64_t phys_id; +}; +DEFINE_GUEST_HANDLE_STRUCT(vcpu_get_physid); +#define xen_vcpu_physid_to_x86_apicid(physid) ((uint32_t)(physid)) +#define xen_vcpu_physid_to_x86_acpiid(physid) ((uint32_t)((physid) >> 32)) + +/* + * Register a memory location to get a secondary copy of the vcpu time + * parameters. The master copy still exists as part of the vcpu shared + * memory area, and this secondary copy is updated whenever the master copy + * is updated (and using the same versioning scheme for synchronisation). + * + * The intent is that this copy may be mapped (RO) into userspace so + * that usermode can compute system time using the time info and the + * tsc. Usermode will see an array of vcpu_time_info structures, one + * for each vcpu, and choose the right one by an existing mechanism + * which allows it to get the current vcpu number (such as via a + * segment limit). It can then apply the normal algorithm to compute + * system time from the tsc. + * + * @extra_arg == pointer to vcpu_register_time_info_memory_area structure. + */ +#define VCPUOP_register_vcpu_time_memory_area 13 +DEFINE_GUEST_HANDLE_STRUCT(vcpu_time_info); +struct vcpu_register_time_memory_area { + union { + GUEST_HANDLE(vcpu_time_info) h; + struct pvclock_vcpu_time_info *v; + uint64_t p; + } addr; +}; +DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_time_memory_area); + #endif /* __XEN_PUBLIC_VCPU_H__ */ -- cgit v1.2.3
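Illustration (not part of the series): the user-visible payoff of the last patch can be checked from inside a guest. Assuming the secondary vcpu_time_info registration succeeded, PVCLOCK_TSC_STABLE_BIT is set and the complementary vDSO-mapping change referenced in the commit message is also applied, clock_gettime() should be resolved through the pvclock vDSO path without entering the kernel; running the sketch below under "strace -e trace=clock_gettime" should then show no clock_gettime system calls. The file and program names are placeholders.
/*
 * Minimal check, illustration only.
 * Build: gcc vdso_check.c -o vdso_check   (add -lrt on old glibc)
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;
	int i;

	for (i = 0; i < 5; i++) {
		if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
			perror("clock_gettime");
			return 1;
		}
		printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	}
	return 0;
}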