author    | Linus Torvalds <torvalds@linux-foundation.org> | 2015-11-04 17:32:42 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-11-04 17:32:42 -0800
commit    | 41ecf1404b34d9975eb97f5005d9e4274eaeb76a (patch)
tree      | 8582dec3a644cfbe178132acded5ebb458e0e32e /arch/arm
parent    | 2dc10ad81fc017837037e60439662e1b16bdffb9 (diff)
parent    | abed7d0710e8f892c267932a9492ccf447674fb8 (diff)
download  | linux-41ecf1404b34d9975eb97f5005d9e4274eaeb76a.tar.bz2
Merge tag 'for-linus-4.4-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen updates from David Vrabel:
- Improve balloon driver memory hotplug placement.
- Use unpopulated hotplugged memory for foreign pages (if
supported/enabled).
- Support 64 KiB guest pages on arm64.
- CPU hotplug support on arm/arm64.
* tag 'for-linus-4.4-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (44 commits)
xen: fix the check of e_pfn in xen_find_pfn_range
x86/xen: add reschedule point when mapping foreign GFNs
xen/arm: don't try to re-register vcpu_info on cpu_hotplug.
xen, cpu_hotplug: call device_offline instead of cpu_down
xen/arm: Enable cpu_hotplug.c
xenbus: Support multiple grants ring with 64KB
xen/grant-table: Add an helper to iterate over a specific number of grants
xen/xenbus: Rename *RING_PAGE* to *RING_GRANT*
xen/arm: correct comment in enlighten.c
xen/gntdev: use types from linux/types.h in userspace headers
xen/gntalloc: use types from linux/types.h in userspace headers
xen/balloon: Use the correct sizeof when declaring frame_list
xen/swiotlb: Add support for 64KB page granularity
xen/swiotlb: Pass addresses rather than frame numbers to xen_arch_need_swiotlb
arm/xen: Add support for 64KB page granularity
xen/privcmd: Add support for Linux 64KB page granularity
net/xen-netback: Make it running on 64KB page granularity
net/xen-netfront: Make it running on 64KB page granularity
block/xen-blkback: Make it running on 64KB page granularity
block/xen-blkfront: Make it running on 64KB page granularity
...
Diffstat (limited to 'arch/arm')
-rw-r--r-- | arch/arm/include/asm/xen/hypervisor.h    | 10
-rw-r--r-- | arch/arm/include/asm/xen/page-coherent.h | 26
-rw-r--r-- | arch/arm/include/asm/xen/page.h          | 22
-rw-r--r-- | arch/arm/xen/enlighten.c                 | 20
-rw-r--r-- | arch/arm/xen/mm.c                        | 39
-rw-r--r-- | arch/arm/xen/p2m.c                       |  6
6 files changed, 90 insertions, 33 deletions
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index 04ff8e7b37df..95251512e2c4 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -26,4 +26,14 @@ void __init xen_early_init(void);
 static inline void xen_early_init(void) { return; }
 #endif
 
+#ifdef CONFIG_HOTPLUG_CPU
+static inline void xen_arch_register_cpu(int num)
+{
+}
+
+static inline void xen_arch_unregister_cpu(int num)
+{
+}
+#endif
+
 #endif /* _ASM_ARM_XEN_HYPERVISOR_H */
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index efd562412850..0375c8caa061 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -35,11 +35,15 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
-	bool local = PFN_DOWN(dev_addr) == page_to_pfn(page);
-	/* Dom0 is mapped 1:1, so if pfn == mfn the page is local otherwise
-	 * is a foreign page grant-mapped in dom0. If the page is local we
-	 * can safely call the native dma_ops function, otherwise we call
-	 * the xen specific function. */
+	bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page);
+	/*
+	 * Dom0 is mapped 1:1, while the Linux page can be spanned accross
+	 * multiple Xen page, it's not possible to have a mix of local and
+	 * foreign Xen page. So if the first xen_pfn == mfn the page is local
+	 * otherwise it's a foreign page grant-mapped in dom0. If the page is
+	 * local we can safely call the native dma_ops function, otherwise we
+	 * call the xen specific function.
+	 */
 	if (local)
 		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 	else
@@ -51,10 +55,14 @@ static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		struct dma_attrs *attrs)
 {
 	unsigned long pfn = PFN_DOWN(handle);
-	/* Dom0 is mapped 1:1, so calling pfn_valid on a foreign mfn will
-	 * always return false. If the page is local we can safely call the
-	 * native dma_ops function, otherwise we call the xen specific
-	 * function. */
+	/*
+	 * Dom0 is mapped 1:1, while the Linux page can be spanned accross
+	 * multiple Xen page, it's not possible to have a mix of local and
+	 * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
+	 * foreign mfn will always return false. If the page is local we can
+	 * safely call the native dma_ops function, otherwise we call the xen
+	 * specific function.
+	 */
 	if (pfn_valid(pfn)) {
 		if (__generic_dma_ops(hwdev)->unmap_page)
 			__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 127956353b00..415dbc6e43fd 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -13,9 +13,6 @@
 
 #define phys_to_machine_mapping_valid(pfn) (1)
 
-#define pte_mfn	pte_pfn
-#define mfn_pte	pfn_pte
-
 /* Xen machine address */
 typedef struct xmaddr {
 	phys_addr_t maddr;
@@ -31,6 +28,17 @@ typedef struct xpaddr {
 
 #define INVALID_P2M_ENTRY      (~0UL)
 
+/*
+ * The pseudo-physical frame (pfn) used in all the helpers is always based
+ * on Xen page granularity (i.e 4KB).
+ *
+ * A Linux page may be split across multiple non-contiguous Xen page so we
+ * have to keep track with frame based on 4KB page granularity.
+ *
+ * PV drivers should never make a direct usage of those helpers (particularly
+ * pfn_to_gfn and gfn_to_pfn).
+ */
+
 unsigned long __pfn_to_mfn(unsigned long pfn);
 extern struct rb_root phys_to_mach;
 
@@ -67,8 +75,8 @@ static inline unsigned long bfn_to_pfn(unsigned long bfn)
 #define bfn_to_local_pfn(bfn)	bfn_to_pfn(bfn)
 
 /* VIRT <-> GUEST conversion */
-#define virt_to_gfn(v)		(pfn_to_gfn(virt_to_pfn(v)))
-#define gfn_to_virt(m)		(__va(gfn_to_pfn(m) << PAGE_SHIFT))
+#define virt_to_gfn(v)		(pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT))
+#define gfn_to_virt(m)		(__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT))
 
 /* Only used in PV code. But ARM guests are always HVM. */
 static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
@@ -107,8 +115,8 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 #define xen_unmap(cookie) iounmap((cookie))
 
 bool xen_arch_need_swiotlb(struct device *dev,
-			   unsigned long pfn,
-			   unsigned long bfn);
+			   phys_addr_t phys,
+			   dma_addr_t dev_addr);
 unsigned long xen_get_swiotlb_free_pages(unsigned int order);
 
 #endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index eeeab074e154..fc7ea529f462 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -86,16 +86,25 @@ static void xen_percpu_init(void)
 	int err;
 	int cpu = get_cpu();
 
+	/*
+	 * VCPUOP_register_vcpu_info cannot be called twice for the same
+	 * vcpu, so if vcpu_info is already registered, just get out. This
+	 * can happen with cpu-hotplug.
+	 */
+	if (per_cpu(xen_vcpu, cpu) != NULL)
+		goto after_register_vcpu_info;
+
 	pr_info("Xen: initializing cpu%d\n", cpu);
 	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
 
-	info.mfn = __pa(vcpup) >> PAGE_SHIFT;
-	info.offset = offset_in_page(vcpup);
+	info.mfn = virt_to_gfn(vcpup);
+	info.offset = xen_offset_in_page(vcpup);
 
 	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
 	BUG_ON(err);
 	per_cpu(xen_vcpu, cpu) = vcpup;
 
+after_register_vcpu_info:
 	enable_percpu_irq(xen_events_irq, 0);
 	put_cpu();
 }
@@ -124,6 +133,9 @@ static int xen_cpu_notification(struct notifier_block *self,
 	case CPU_STARTING:
 		xen_percpu_init();
 		break;
+	case CPU_DYING:
+		disable_percpu_irq(xen_events_irq);
+		break;
 	default:
 		break;
 	}
@@ -213,7 +225,7 @@ static int __init xen_guest_init(void)
 	xatp.domid = DOMID_SELF;
 	xatp.idx = 0;
 	xatp.space = XENMAPSPACE_shared_info;
-	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
+	xatp.gpfn = virt_to_gfn(shared_info_page);
 
 	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
 		BUG();
@@ -284,7 +296,7 @@ void xen_arch_resume(void) { }
 void xen_arch_suspend(void) { }
 
-/* In the hypervisor.S file. */
+/* In the hypercall.S file. */
 EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 6dd911d1f0ac..7c34f7126b04 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -48,22 +48,22 @@ static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
 {
 	struct gnttab_cache_flush cflush;
-	unsigned long pfn;
+	unsigned long xen_pfn;
 	size_t left = size;
 
-	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
-	offset %= PAGE_SIZE;
+	xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
+	offset %= XEN_PAGE_SIZE;
 
 	do {
 		size_t len = left;
 
 		/* buffers in highmem or foreign pages cannot cross page
		 * boundaries */
-		if (len + offset > PAGE_SIZE)
-			len = PAGE_SIZE - offset;
+		if (len + offset > XEN_PAGE_SIZE)
+			len = XEN_PAGE_SIZE - offset;
 
 		cflush.op = 0;
-		cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
+		cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
 		cflush.offset = offset;
 		cflush.length = len;
 
@@ -79,7 +79,7 @@ static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
 		HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
 
 		offset = 0;
-		pfn++;
+		xen_pfn++;
 		left -= len;
 	} while (left);
 }
@@ -138,10 +138,29 @@ void __xen_dma_sync_single_for_device(struct device *hwdev,
 }
 
 bool xen_arch_need_swiotlb(struct device *dev,
-			   unsigned long pfn,
-			   unsigned long bfn)
+			   phys_addr_t phys,
+			   dma_addr_t dev_addr)
 {
-	return (!hypercall_cflush && (pfn != bfn) && !is_device_dma_coherent(dev));
+	unsigned int xen_pfn = XEN_PFN_DOWN(phys);
+	unsigned int bfn = XEN_PFN_DOWN(dev_addr);
+
+	/*
+	 * The swiotlb buffer should be used if
+	 *	- Xen doesn't have the cache flush hypercall
+	 *	- The Linux page refers to foreign memory
+	 *	- The device doesn't support coherent DMA request
+	 *
+	 * The Linux page may be spanned acrros multiple Xen page, although
+	 * it's not possible to have a mix of local and foreign Xen page.
+	 * Furthermore, range_straddles_page_boundary is already checking
+	 * if buffer is physically contiguous in the host RAM.
+	 *
+	 * Therefore we only need to check the first Xen page to know if we
+	 * require a bounce buffer because the device doesn't support coherent
+	 * memory and we are not able to flush the cache.
+	 */
+	return (!hypercall_cflush && (xen_pfn != bfn) &&
+		!is_device_dma_coherent(dev));
 }
 
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 887596c67b12..0ed01f2d5ee4 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -93,8 +93,8 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 	for (i = 0; i < count; i++) {
 		if (map_ops[i].status)
 			continue;
-		set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
-				    map_ops[i].dev_bus_addr >> PAGE_SHIFT);
+		set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
+				    map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT);
 	}
 
 	return 0;
@@ -108,7 +108,7 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 	int i;
 
 	for (i = 0; i < count; i++) {
-		set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
+		set_phys_to_machine(unmap_ops[i].host_addr >> XEN_PAGE_SHIFT,
 				    INVALID_P2M_ENTRY);
 	}
 
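The recurring change across these arch/arm files is that frame arithmetic now uses the fixed 4KB Xen granularity (XEN_PAGE_SHIFT, XEN_PFN_DOWN, page_to_xen_pfn) instead of the Linux PAGE_SHIFT, so that on arm64 with 64KB pages one Linux page is treated as a run of sixteen 4KB Xen frames. The standalone sketch below only illustrates that arithmetic; the macro values, the sample address, and the toy program structure are assumptions made for the example (the macro names mirror the helpers in include/xen/page.h), not code from the series.

/*
 * Illustrative sketch only -- not part of the patch series above. Shows how
 * a 64KB Linux page decomposes into 4KB Xen frames, which is why the diff
 * iterates and flushes in XEN_PAGE_SIZE steps rather than PAGE_SIZE steps.
 */
#include <stdio.h>

#define LINUX_PAGE_SHIFT 16UL			/* assumed: 64KB Linux pages (arm64) */
#define XEN_PAGE_SHIFT	 12UL			/* Xen frames are always 4KB */
#define XEN_PFN_DOWN(x)	 ((x) >> XEN_PAGE_SHIFT)
#define XEN_PFN_PER_PAGE (1UL << (LINUX_PAGE_SHIFT - XEN_PAGE_SHIFT))

int main(void)
{
	unsigned long phys = 0x40010000UL;	/* assumed start of one 64KB Linux page */
	unsigned long first_xen_pfn = XEN_PFN_DOWN(phys);
	unsigned long i;

	/* One 64KB Linux page is backed by 16 consecutive 4KB Xen frames. */
	printf("Linux page at %#lx -> %lu Xen frames\n", phys, XEN_PFN_PER_PAGE);
	for (i = 0; i < XEN_PFN_PER_PAGE; i++)
		printf("  xen_pfn %#lx covers %#lx-%#lx\n",
		       first_xen_pfn + i,
		       (first_xen_pfn + i) << XEN_PAGE_SHIFT,
		       ((first_xen_pfn + i + 1) << XEN_PAGE_SHIFT) - 1);
	return 0;
}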