From 62cedb9f135794ec26a93ae29e5f0231ab263c84 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Thu, 25 Jun 2015 16:35:49 +0100 Subject: mm: memory hotplug with an existing resource Add add_memory_resource() to add memory using an existing "System RAM" resource. This is useful if the memory region is being located by finding a free resource slot with allocate_resource(). Xen guests will make use of this in their balloon driver to hotplug arbitrary amounts of memory in response to toolstack requests. Signed-off-by: David Vrabel Reviewed-by: Daniel Kiper Reviewed-by: Tang Chen --- include/linux/memory_hotplug.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 8f60e899b33c..2ea574ff9714 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -11,6 +11,7 @@ struct zone; struct pglist_data; struct mem_section; struct memory_block; +struct resource; #ifdef CONFIG_MEMORY_HOTPLUG @@ -266,6 +267,7 @@ static inline void remove_memory(int nid, u64 start, u64 size) {} extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, void *arg, int (*func)(struct memory_block *, void *)); extern int add_memory(int nid, u64 start, u64 size); +extern int add_memory_resource(int nid, struct resource *resource); extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default, bool for_device); extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device); -- cgit v1.2.3 From f6a6cb1afe74d6ccc81aa70aa4ac3953762e7e6e Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Thu, 25 Jun 2015 13:18:12 +0100 Subject: xen/balloon: remove scratch page left overs Commit 0bb599fd30108883b00c7d4a226eeb49111e6932 (xen: remove scratch frames for ballooned pages and m2p override) removed the use of the scratch page for ballooned out pages. Remove some left over function definitions. Signed-off-by: David Vrabel Reviewed-by: Daniel Kiper --- include/xen/balloon.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include') diff --git a/include/xen/balloon.h b/include/xen/balloon.h index a4c1c6a93691..cc2e1a7e44ec 100644 --- a/include/xen/balloon.h +++ b/include/xen/balloon.h @@ -29,9 +29,6 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem); void free_xenballooned_pages(int nr_pages, struct page **pages); -struct page *get_balloon_scratch_page(void); -void put_balloon_scratch_page(void); - struct device; #ifdef CONFIG_XEN_SELFBALLOONING extern int register_xen_selfballooning(struct device *dev); -- cgit v1.2.3 From de5a77d8422fc7ed0b2f4349bceb65a1a639e5b2 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Thu, 25 Jun 2015 12:08:20 +0100 Subject: xen/balloon: rationalize memory hotplug stats The stats used for memory hotplug make no sense and are fiddled with in odd ways. Remove them and introduce total_pages to track the total number of pages (both populated and unpopulated) including those within hotplugged regions (note that this includes not yet onlined pages). This will be used in a subsequent commit (xen/balloon: only hotplug additional memory if required) when deciding whether additional memory needs to be hotplugged. 
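For context before the diff below: the reserve_additional_memory() path ultimately consumes the add_memory_resource() interface added by the first patch in this series. A minimal sketch of that pairing (find a free slot with allocate_resource(), then hot-add it) looks roughly like this; the hotadd_region() wrapper, its alignment choice and its error handling are illustrative assumptions, not code from these patches:

#include <linux/ioport.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

/* Sketch: hot-add @size bytes of "System RAM" on node @nid. */
static int hotadd_region(int nid, resource_size_t size)
{
	struct resource *res;
	int rc;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "System RAM";
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	/* Find a free, section-aligned slot anywhere in iomem_resource. */
	rc = allocate_resource(&iomem_resource, res, size, 0, -1,
			       PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (rc) {
		kfree(res);
		return rc;
	}

	/* New in this series: hotplug memory backed by an existing resource. */
	rc = add_memory_resource(nid, res);
	if (rc) {
		release_resource(res);
		kfree(res);
	}
	return rc;
}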
Signed-off-by: David Vrabel Reviewed-by: Daniel Kiper --- drivers/xen/balloon.c | 75 +++++++++------------------------------------------ include/xen/balloon.h | 5 +--- 2 files changed, 13 insertions(+), 67 deletions(-) (limited to 'include') diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 095bb6789731..ac8054765127 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -194,21 +194,6 @@ static enum bp_state update_schedule(enum bp_state state) } #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG -static long current_credit(void) -{ - return balloon_stats.target_pages - balloon_stats.current_pages - - balloon_stats.hotplug_pages; -} - -static bool balloon_is_inflated(void) -{ - if (balloon_stats.balloon_low || balloon_stats.balloon_high || - balloon_stats.balloon_hotplug) - return true; - else - return false; -} - static struct resource *additional_memory_resource(phys_addr_t size) { struct resource *res; @@ -289,10 +274,7 @@ static enum bp_state reserve_additional_memory(long credit) goto err; } - balloon_hotplug -= credit; - - balloon_stats.hotplug_pages += credit; - balloon_stats.balloon_hotplug = balloon_hotplug; + balloon_stats.total_pages += balloon_hotplug; return BP_DONE; err: @@ -308,11 +290,6 @@ static void xen_online_page(struct page *page) __balloon_append(page); - if (balloon_stats.hotplug_pages) - --balloon_stats.hotplug_pages; - else - --balloon_stats.balloon_hotplug; - mutex_unlock(&balloon_mutex); } @@ -329,32 +306,22 @@ static struct notifier_block xen_memory_nb = { .priority = 0 }; #else -static long current_credit(void) +static enum bp_state reserve_additional_memory(long credit) { - unsigned long target = balloon_stats.target_pages; - - target = min(target, - balloon_stats.current_pages + - balloon_stats.balloon_low + - balloon_stats.balloon_high); - - return target - balloon_stats.current_pages; + balloon_stats.target_pages = balloon_stats.current_pages; + return BP_DONE; } +#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */ -static bool balloon_is_inflated(void) +static long current_credit(void) { - if (balloon_stats.balloon_low || balloon_stats.balloon_high) - return true; - else - return false; + return balloon_stats.target_pages - balloon_stats.current_pages; } -static enum bp_state reserve_additional_memory(long credit) +static bool balloon_is_inflated(void) { - balloon_stats.target_pages = balloon_stats.current_pages; - return BP_DONE; + return balloon_stats.balloon_low || balloon_stats.balloon_high; } -#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */ static enum bp_state increase_reservation(unsigned long nr_pages) { @@ -367,15 +334,6 @@ static enum bp_state increase_reservation(unsigned long nr_pages) .domid = DOMID_SELF }; -#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG - if (!balloon_stats.balloon_low && !balloon_stats.balloon_high) { - nr_pages = min(nr_pages, balloon_stats.balloon_hotplug); - balloon_stats.hotplug_pages += nr_pages; - balloon_stats.balloon_hotplug -= nr_pages; - return BP_DONE; - } -#endif - if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); @@ -438,15 +396,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) .domid = DOMID_SELF }; -#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG - if (balloon_stats.hotplug_pages) { - nr_pages = min(nr_pages, balloon_stats.hotplug_pages); - balloon_stats.hotplug_pages -= nr_pages; - balloon_stats.balloon_hotplug += nr_pages; - return BP_DONE; - } -#endif - if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); @@ -635,6 +584,8 @@ static 
void __init balloon_add_region(unsigned long start_pfn, don't subtract from it. */ __balloon_append(page); } + + balloon_stats.total_pages += extra_pfn_end - start_pfn; } static int __init balloon_init(void) @@ -652,6 +603,7 @@ static int __init balloon_init(void) balloon_stats.target_pages = balloon_stats.current_pages; balloon_stats.balloon_low = 0; balloon_stats.balloon_high = 0; + balloon_stats.total_pages = balloon_stats.current_pages; balloon_stats.schedule_delay = 1; balloon_stats.max_schedule_delay = 32; @@ -659,9 +611,6 @@ static int __init balloon_init(void) balloon_stats.max_retry_count = RETRY_UNLIMITED; #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG - balloon_stats.hotplug_pages = 0; - balloon_stats.balloon_hotplug = 0; - set_online_page_callback(&xen_online_page); register_memory_notifier(&xen_memory_nb); #endif diff --git a/include/xen/balloon.h b/include/xen/balloon.h index cc2e1a7e44ec..c8aee7a8b8d2 100644 --- a/include/xen/balloon.h +++ b/include/xen/balloon.h @@ -11,14 +11,11 @@ struct balloon_stats { /* Number of pages in high- and low-memory balloons. */ unsigned long balloon_low; unsigned long balloon_high; + unsigned long total_pages; unsigned long schedule_delay; unsigned long max_schedule_delay; unsigned long retry_count; unsigned long max_retry_count; -#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG - unsigned long hotplug_pages; - unsigned long balloon_hotplug; -#endif }; extern struct balloon_stats balloon_stats; -- cgit v1.2.3 From 81b286e0f1fe520f2a96f736ffa7e508ac9139ba Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Thu, 25 Jun 2015 13:12:46 +0100 Subject: xen/balloon: make alloc_xenballoon_pages() always allocate low pages All users of alloc_xenballoon_pages() wanted low memory pages, so remove the option for high memory. Signed-off-by: David Vrabel Reviewed-by: Daniel Kiper --- arch/x86/xen/grant-table.c | 2 +- drivers/xen/balloon.c | 21 ++++++++------------- drivers/xen/grant-table.c | 2 +- drivers/xen/privcmd.c | 2 +- drivers/xen/xenbus/xenbus_client.c | 3 +-- include/xen/balloon.h | 3 +-- 6 files changed, 13 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c index 1580e7a5a4cf..e079500b17f3 100644 --- a/arch/x86/xen/grant-table.c +++ b/arch/x86/xen/grant-table.c @@ -133,7 +133,7 @@ static int __init xlated_setup_gnttab_pages(void) kfree(pages); return -ENOMEM; } - rc = alloc_xenballooned_pages(nr_grant_frames, pages, 0 /* lowmem */); + rc = alloc_xenballooned_pages(nr_grant_frames, pages); if (rc) { pr_warn("%s Couldn't balloon alloc %ld pfns rc:%d\n", __func__, nr_grant_frames, rc); diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index ac6391bd8029..7ec933d505d2 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -136,17 +136,16 @@ static void balloon_append(struct page *page) } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. 
*/ -static struct page *balloon_retrieve(bool prefer_highmem) +static struct page *balloon_retrieve(bool require_lowmem) { struct page *page; if (list_empty(&ballooned_pages)) return NULL; - if (prefer_highmem) - page = list_entry(ballooned_pages.prev, struct page, lru); - else - page = list_entry(ballooned_pages.next, struct page, lru); + page = list_entry(ballooned_pages.next, struct page, lru); + if (require_lowmem && PageHighMem(page)) + return NULL; list_del(&page->lru); if (PageHighMem(page)) @@ -521,24 +520,20 @@ EXPORT_SYMBOL_GPL(balloon_set_new_target); * alloc_xenballooned_pages - get pages that have been ballooned out * @nr_pages: Number of pages to get * @pages: pages returned - * @highmem: allow highmem pages * @return 0 on success, error otherwise */ -int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem) +int alloc_xenballooned_pages(int nr_pages, struct page **pages) { int pgno = 0; struct page *page; mutex_lock(&balloon_mutex); while (pgno < nr_pages) { - page = balloon_retrieve(highmem); - if (page && (highmem || !PageHighMem(page))) { + page = balloon_retrieve(true); + if (page) { pages[pgno++] = page; } else { enum bp_state st; - if (page) - balloon_append(page); - st = decrease_reservation(nr_pages - pgno, - highmem ? GFP_HIGHUSER : GFP_USER); + st = decrease_reservation(nr_pages - pgno, GFP_USER); if (st != BP_DONE) goto out_undo; } diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 62f591f8763c..a4b702c9ac68 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -687,7 +687,7 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages) int i; int ret; - ret = alloc_xenballooned_pages(nr_pages, pages, false); + ret = alloc_xenballooned_pages(nr_pages, pages); if (ret < 0) return ret; diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 5e9adac928e6..b199ad3d4587 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -401,7 +401,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs) if (pages == NULL) return -ENOMEM; - rc = alloc_xenballooned_pages(numpgs, pages, 0); + rc = alloc_xenballooned_pages(numpgs, pages); if (rc != 0) { pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__, numpgs, rc); diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 2ba09c1195c8..aa304d05101b 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -614,8 +614,7 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, if (!node) return -ENOMEM; - err = alloc_xenballooned_pages(nr_grefs, node->hvm.pages, - false /* lowmem */); + err = alloc_xenballooned_pages(nr_grefs, node->hvm.pages); if (err) goto out_err; diff --git a/include/xen/balloon.h b/include/xen/balloon.h index c8aee7a8b8d2..83efdeb243bf 100644 --- a/include/xen/balloon.h +++ b/include/xen/balloon.h @@ -22,8 +22,7 @@ extern struct balloon_stats balloon_stats; void balloon_set_new_target(unsigned long target); -int alloc_xenballooned_pages(int nr_pages, struct page **pages, - bool highmem); +int alloc_xenballooned_pages(int nr_pages, struct page **pages); void free_xenballooned_pages(int nr_pages, struct page **pages); struct device; -- cgit v1.2.3 From 1cf6a6c82918c9aad4bb73a7e7379a649e4d8e50 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Thu, 25 Jun 2015 16:29:18 +0100 Subject: xen/balloon: use hotplugged pages for foreign mappings etc. alloc_xenballooned_pages() is used to get ballooned pages to back foreign mappings etc. 
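As a quick illustration of that call pattern with the two-argument signature introduced by the previous patch (a generic sketch of a caller, not code from any driver in this series):

#include <linux/slab.h>
#include <xen/balloon.h>

/* Sketch: grab @nr ballooned-out pages to back a foreign mapping. */
static struct page **get_backing_pages(int nr)
{
	struct page **pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);

	if (!pages)
		return NULL;

	/* No highmem argument any more; low pages are always returned. */
	if (alloc_xenballooned_pages(nr, pages)) {
		kfree(pages);
		return NULL;
	}
	return pages;
}

static void put_backing_pages(struct page **pages, int nr)
{
	free_xenballooned_pages(nr, pages);
	kfree(pages);
}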
Instead of having to balloon out real pages, use (if supported) hotplugged memory. This makes more memory available to the guest and reduces fragmentation in the p2m. This is only enabled if the xen.balloon.hotplug_unpopulated sysctl is set to 1. This sysctl defaults to 0 in case the udev rules to automatically online hotplugged memory do not exist. Signed-off-by: David Vrabel Reviewed-by: Daniel Kiper --- v3: - Add xen.balloon.hotplug_unpopulated sysctl to enable use of hotplug for unpopulated pages. --- drivers/xen/balloon.c | 90 +++++++++++++++++++++++++++++++++++++++++++++------ include/xen/balloon.h | 1 + 2 files changed, 81 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 7ec933d505d2..25fd1bd949d8 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -55,6 +55,7 @@ #include #include #include +#include #include #include @@ -71,6 +72,46 @@ #include #include +static int xen_hotplug_unpopulated; + +#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG + +static int zero; +static int one = 1; + +static struct ctl_table balloon_table[] = { + { + .procname = "hotplug_unpopulated", + .data = &xen_hotplug_unpopulated, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, + { } +}; + +static struct ctl_table balloon_root[] = { + { + .procname = "balloon", + .mode = 0555, + .child = balloon_table, + }, + { } +}; + +static struct ctl_table xen_root[] = { + { + .procname = "xen", + .mode = 0555, + .child = balloon_root, + }, + { } +}; + +#endif + /* * balloon_process() state: * @@ -99,6 +140,7 @@ static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)]; /* List of ballooned pages, threaded through the mem_map array. */ static LIST_HEAD(ballooned_pages); +static DECLARE_WAIT_QUEUE_HEAD(balloon_wq); /* Main work function, always executed in process context. */ static void balloon_process(struct work_struct *work); @@ -127,6 +169,7 @@ static void __balloon_append(struct page *page) list_add(&page->lru, &ballooned_pages); balloon_stats.balloon_low++; } + wake_up(&balloon_wq); } static void balloon_append(struct page *page) @@ -242,7 +285,8 @@ static enum bp_state reserve_additional_memory(void) int nid, rc; unsigned long balloon_hotplug; - credit = balloon_stats.target_pages - balloon_stats.total_pages; + credit = balloon_stats.target_pages + balloon_stats.target_unpopulated + - balloon_stats.total_pages; /* * Already hotplugged enough pages? 
Wait for them to be @@ -323,7 +367,7 @@ static struct notifier_block xen_memory_nb = { static enum bp_state reserve_additional_memory(void) { balloon_stats.target_pages = balloon_stats.current_pages; - return BP_DONE; + return BP_ECANCELED; } #endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */ @@ -516,6 +560,28 @@ void balloon_set_new_target(unsigned long target) } EXPORT_SYMBOL_GPL(balloon_set_new_target); +static int add_ballooned_pages(int nr_pages) +{ + enum bp_state st; + + if (xen_hotplug_unpopulated) { + st = reserve_additional_memory(); + if (st != BP_ECANCELED) { + mutex_unlock(&balloon_mutex); + wait_event(balloon_wq, + !list_empty(&ballooned_pages)); + mutex_lock(&balloon_mutex); + return 0; + } + } + + st = decrease_reservation(nr_pages, GFP_USER); + if (st != BP_DONE) + return -ENOMEM; + + return 0; +} + /** * alloc_xenballooned_pages - get pages that have been ballooned out * @nr_pages: Number of pages to get @@ -526,27 +592,28 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages) { int pgno = 0; struct page *page; + int ret; + mutex_lock(&balloon_mutex); + + balloon_stats.target_unpopulated += nr_pages; + while (pgno < nr_pages) { page = balloon_retrieve(true); if (page) { pages[pgno++] = page; } else { - enum bp_state st; - st = decrease_reservation(nr_pages - pgno, GFP_USER); - if (st != BP_DONE) + ret = add_ballooned_pages(nr_pages - pgno); + if (ret < 0) goto out_undo; } } mutex_unlock(&balloon_mutex); return 0; out_undo: - while (pgno) - balloon_append(pages[--pgno]); - /* Free the memory back to the kernel soon */ - schedule_delayed_work(&balloon_worker, 0); mutex_unlock(&balloon_mutex); - return -ENOMEM; + free_xenballooned_pages(pgno, pages); + return ret; } EXPORT_SYMBOL(alloc_xenballooned_pages); @@ -566,6 +633,8 @@ void free_xenballooned_pages(int nr_pages, struct page **pages) balloon_append(pages[i]); } + balloon_stats.target_unpopulated -= nr_pages; + /* The balloon may be too large now. Shrink it if needed. */ if (current_credit()) schedule_delayed_work(&balloon_worker, 0); @@ -623,6 +692,7 @@ static int __init balloon_init(void) #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG set_online_page_callback(&xen_online_page); register_memory_notifier(&xen_memory_nb); + register_sysctl_table(xen_root); #endif /* diff --git a/include/xen/balloon.h b/include/xen/balloon.h index 83efdeb243bf..d1767dfb0d95 100644 --- a/include/xen/balloon.h +++ b/include/xen/balloon.h @@ -8,6 +8,7 @@ struct balloon_stats { /* We aim for 'current allocation' == 'target allocation'. */ unsigned long current_pages; unsigned long target_pages; + unsigned long target_unpopulated; /* Number of pages in high- and low-memory balloons. */ unsigned long balloon_low; unsigned long balloon_high; -- cgit v1.2.3 From 1084b1988d22dc165c9dbbc2b0e057f9248ac4db Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Mon, 4 May 2015 15:47:16 +0100 Subject: xen: Add Xen specific page definition The Xen hypercall interface is always using 4K page granularity on ARM and x86 architecture. With the incoming support of 64K page granularity for ARM64 guest, it won't be possible to re-use the Linux page definition in Xen drivers. Introduce Xen page definition helpers based on the Linux page definition. They have exactly the same name but prefixed with XEN_/xen_ prefix. Also modify xen_page_to_gfn to use new Xen page definition. 
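Before the diff, a rough illustration of how these helpers compose (a sketch, not code from the patch): with 64KB Linux pages XEN_PFN_PER_PAGE is 16, and a walk over the 4KB Xen frames backing a single Linux page can be written with the new macros plus the existing pfn_to_gfn() translation:

#include <xen/page.h>

/*
 * Sketch: visit every 4KB Xen frame backing one Linux page.
 * With 4KB Linux pages this loop runs once; with 64KB pages, 16 times.
 */
static void for_each_gfn_of_page(struct page *page,
				 void (*cb)(unsigned long gfn, void *data),
				 void *data)
{
	unsigned long xen_pfn = page_to_xen_pfn(page);
	unsigned int i;

	for (i = 0; i < XEN_PFN_PER_PAGE; i++)
		cb(pfn_to_gfn(xen_pfn + i), data);
}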
Signed-off-by: Julien Grall Reviewed-by: Stefano Stabellini Signed-off-by: David Vrabel --- include/xen/page.h | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/xen/page.h b/include/xen/page.h index 1daae485e336..96294ac93755 100644 --- a/include/xen/page.h +++ b/include/xen/page.h @@ -1,11 +1,36 @@ #ifndef _XEN_PAGE_H #define _XEN_PAGE_H +#include + +/* The hypercall interface supports only 4KB page */ +#define XEN_PAGE_SHIFT 12 +#define XEN_PAGE_SIZE (_AC(1, UL) << XEN_PAGE_SHIFT) +#define XEN_PAGE_MASK (~(XEN_PAGE_SIZE-1)) +#define xen_offset_in_page(p) ((unsigned long)(p) & ~XEN_PAGE_MASK) + +/* + * We assume that PAGE_SIZE is a multiple of XEN_PAGE_SIZE + * XXX: Add a BUILD_BUG_ON? + */ + +#define xen_pfn_to_page(xen_pfn) \ + ((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT))) +#define page_to_xen_pfn(page) \ + (((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT) + +#define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE) + +#define XEN_PFN_DOWN(x) ((x) >> XEN_PAGE_SHIFT) +#define XEN_PFN_UP(x) (((x) + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT) +#define XEN_PFN_PHYS(x) ((phys_addr_t)(x) << XEN_PAGE_SHIFT) + #include +/* Return the GFN associated to the first 4KB of the page */ static inline unsigned long xen_page_to_gfn(struct page *page) { - return pfn_to_gfn(page_to_pfn(page)); + return pfn_to_gfn(page_to_xen_pfn(page)); } struct xen_memory_region { -- cgit v1.2.3 From 008c320a96d218712043f8db0111d5472697785c Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Fri, 19 Jun 2015 17:49:03 +0100 Subject: xen/grant: Introduce helpers to split a page into grants Currently, a grant is always based on the Xen page granularity (i.e. 4KB). When Linux is using a different page granularity, a single page will be split between multiple grants. The new helpers will be in charge of splitting the Linux page into grants and calling a function, given by the caller, on each grant. Also provide a helper to count the number of grants within a given contiguous region. Note that arch/x86/include/asm/xen/page.h now includes xen/interface/grant_table.h rather than xen/grant_table.h. This is necessary because xen/grant_table.h depends on asm/xen/page.h and would break the compilation. Furthermore, only the definitions in interface/grant_table.h are required. 
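The intended calling pattern is sketched below; the struct grant_chunk bookkeeping and the split_into_grants() wrapper are made-up names for illustration, but the callback signature and the two helpers match the ones added by this patch. A caller sizes its descriptor array with gnttab_count_grant() and then lets gnttab_foreach_grant_in_range() do the splitting:

#include <xen/grant_table.h>
#include <xen/page.h>

/* Illustrative bookkeeping for one grant-sized chunk of a buffer. */
struct grant_chunk {
	unsigned long gfn;
	unsigned int offset;
	unsigned int len;
};

struct fill_ctx {
	struct grant_chunk *chunks;
	unsigned int n;
};

/* xen_grant_fn_t callback: record one descriptor per grant. */
static void record_chunk(unsigned long gfn, unsigned int offset,
			 unsigned int len, void *data)
{
	struct fill_ctx *ctx = data;

	ctx->chunks[ctx->n].gfn = gfn;
	ctx->chunks[ctx->n].offset = offset;
	ctx->chunks[ctx->n].len = len;
	ctx->n++;
}

/*
 * Split up to one Linux page worth of data into grant-sized descriptors;
 * chunks[] must have room for gnttab_count_grant(offset, len) entries.
 */
static unsigned int split_into_grants(struct page *page, unsigned int offset,
				      unsigned int len,
				      struct grant_chunk *chunks)
{
	struct fill_ctx ctx = { .chunks = chunks, .n = 0 };

	gnttab_foreach_grant_in_range(page, offset, len, record_chunk, &ctx);
	return ctx.n;
}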
Signed-off-by: Julien Grall Reviewed-by: David Vrabel Reviewed-by: Stefano Stabellini Signed-off-by: David Vrabel --- arch/x86/include/asm/xen/page.h | 2 +- drivers/xen/grant-table.c | 26 +++++++++++++++++++++++++ include/xen/grant_table.h | 42 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 69 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index b922fa4bb4a1..fe58e3a935de 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -12,7 +12,7 @@ #include #include -#include +#include #include /* Xen machine address */ diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index a4b702c9ac68..dbeaa67dec47 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -776,6 +776,32 @@ void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count) } EXPORT_SYMBOL_GPL(gnttab_batch_copy); +void gnttab_foreach_grant_in_range(struct page *page, + unsigned int offset, + unsigned int len, + xen_grant_fn_t fn, + void *data) +{ + unsigned int goffset; + unsigned int glen; + unsigned long xen_pfn; + + len = min_t(unsigned int, PAGE_SIZE - offset, len); + goffset = xen_offset_in_page(offset); + + xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset); + + while (len) { + glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len); + fn(pfn_to_gfn(xen_pfn), goffset, glen, data); + + goffset = 0; + xen_pfn++; + len -= glen; + } +} +EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range); + int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, struct gnttab_map_grant_ref *kmap_ops, struct page **pages, unsigned int count) diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index 4478f4b4aae2..05b5b08c2afc 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -45,8 +45,10 @@ #include #include +#include #include #include +#include #define GNTTAB_RESERVED_XENSTORE 1 @@ -224,4 +226,44 @@ static inline struct xen_page_foreign *xen_page_foreign(struct page *page) #endif } +/* Split Linux page in chunk of the size of the grant and call fn + * + * Parameters of fn: + * gfn: guest frame number + * offset: offset in the grant + * len: length of the data in the grant. 
+ * data: internal information + */ +typedef void (*xen_grant_fn_t)(unsigned long gfn, unsigned int offset, + unsigned int len, void *data); + +void gnttab_foreach_grant_in_range(struct page *page, + unsigned int offset, + unsigned int len, + xen_grant_fn_t fn, + void *data); + +/* Helper to get to call fn only on the first "grant chunk" */ +static inline void gnttab_for_one_grant(struct page *page, unsigned int offset, + unsigned len, xen_grant_fn_t fn, + void *data) +{ + /* The first request is limited to the size of one grant */ + len = min_t(unsigned int, XEN_PAGE_SIZE - (offset & ~XEN_PAGE_MASK), + len); + + gnttab_foreach_grant_in_range(page, offset, len, fn, data); +} + +/* Get the number of grant in a specified region + * + * start: Offset from the beginning of the first page + * len: total length of data (can cross multiple page) + */ +static inline unsigned int gnttab_count_grant(unsigned int start, + unsigned int len) +{ + return XEN_PFN_UP(xen_offset_in_page(start) + len); +} + #endif /* __ASM_GNTTAB_H__ */ -- cgit v1.2.3 From 3922f32c1e6db2e096ff095a5b8af0b940b97508 Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Fri, 19 Jun 2015 18:05:06 +0100 Subject: xen/grant: Add helper gnttab_page_grant_foreign_access_ref_one Many PV drivers contain the idiom: pfn = page_to_gfn(...) /* Or similar */ gnttab_grant_foreign_access_ref Replace it by a new helper. Note that when Linux is using a different page granularity than Xen, the helper only gives access to the first 4KB grant. This is useful where drivers are allocating a full Linux page for each grant. Also include xen/interface/grant_table.h rather than xen/grant_table.h in asm/page.h for x86 to fix a compilation issue [1]. Only the former is useful in order to get the structure definition. [1] Interdependency between asm/page.h and xen/grant_table.h which result to page_mfn not being defined when necessary. Signed-off-by: Julien Grall Reviewed-by: David Vrabel Reviewed-by: Stefano Stabellini Signed-off-by: David Vrabel --- include/xen/grant_table.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include') diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index 05b5b08c2afc..e17a4b381a16 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -131,6 +131,15 @@ void gnttab_cancel_free_callback(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int readonly); +/* Give access to the first 4K of the page */ +static inline void gnttab_page_grant_foreign_access_ref_one( + grant_ref_t ref, domid_t domid, + struct page *page, int readonly) +{ + gnttab_grant_foreign_access_ref(ref, domid, xen_page_to_gfn(page), + readonly); +} + void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, unsigned long pfn); -- cgit v1.2.3 From 91afb7c373e881d5038a78e1206a0f6469440ec3 Mon Sep 17 00:00:00 2001 From: Mikko Rapeli Date: Thu, 15 Oct 2015 07:56:07 +0200 Subject: xen/gntalloc: use types from linux/types.h in userspace headers __u32, __u64 etc. are preferred for userspace API headers. 
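As an aside on the gnttab_page_grant_foreign_access_ref_one() helper introduced two patches above, the before/after shape of the idiom it replaces looks roughly like this (a sketch; the surrounding function names and parameters are assumptions):

#include <xen/grant_table.h>
#include <xen/page.h>

/* Before: open-coded translation plus grant (the old idiom). */
static void grant_page_old(grant_ref_t ref, domid_t otherend,
			   struct page *page, int readonly)
{
	unsigned long gfn = xen_page_to_gfn(page);

	gnttab_grant_foreign_access_ref(ref, otherend, gfn, readonly);
}

/* After: one call; only the first 4KB grant of the page is exposed. */
static void grant_page_new(grant_ref_t ref, domid_t otherend,
			   struct page *page, int readonly)
{
	gnttab_page_grant_foreign_access_ref_one(ref, otherend, page,
						 readonly);
}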
Signed-off-by: Mikko Rapeli Signed-off-by: David Vrabel --- include/uapi/xen/gntalloc.h | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/uapi/xen/gntalloc.h b/include/uapi/xen/gntalloc.h index 76bd58065f4f..48d2790ef928 100644 --- a/include/uapi/xen/gntalloc.h +++ b/include/uapi/xen/gntalloc.h @@ -11,6 +11,8 @@ #ifndef __LINUX_PUBLIC_GNTALLOC_H__ #define __LINUX_PUBLIC_GNTALLOC_H__ +#include + /* * Allocates a new page and creates a new grant reference. */ @@ -19,17 +21,17 @@ _IOC(_IOC_NONE, 'G', 5, sizeof(struct ioctl_gntalloc_alloc_gref)) struct ioctl_gntalloc_alloc_gref { /* IN parameters */ /* The ID of the domain to be given access to the grants. */ - uint16_t domid; + __u16 domid; /* Flags for this mapping */ - uint16_t flags; + __u16 flags; /* Number of pages to map */ - uint32_t count; + __u32 count; /* OUT parameters */ /* The offset to be used on a subsequent call to mmap(). */ - uint64_t index; + __u64 index; /* The grant references of the newly created grant, one per page */ /* Variable size, depending on count */ - uint32_t gref_ids[1]; + __u32 gref_ids[1]; }; #define GNTALLOC_FLAG_WRITABLE 1 @@ -43,9 +45,9 @@ _IOC(_IOC_NONE, 'G', 6, sizeof(struct ioctl_gntalloc_dealloc_gref)) struct ioctl_gntalloc_dealloc_gref { /* IN parameters */ /* The offset returned in the map operation */ - uint64_t index; + __u64 index; /* Number of references to unmap */ - uint32_t count; + __u32 count; }; /* @@ -67,11 +69,11 @@ struct ioctl_gntalloc_unmap_notify { * be cleared. Otherwise, it can be any byte in the page whose * notification we are adjusting. */ - uint64_t index; + __u64 index; /* Action(s) to take on unmap */ - uint32_t action; + __u32 action; /* Event channel to notify */ - uint32_t event_channel_port; + __u32 event_channel_port; }; /* Clear (set to zero) the byte specified by index */ -- cgit v1.2.3 From a36012be64e65760d208c23ea68dc12a895001d8 Mon Sep 17 00:00:00 2001 From: Mikko Rapeli Date: Thu, 15 Oct 2015 07:56:08 +0200 Subject: xen/gntdev: use types from linux/types.h in userspace headers __u32, __u64 etc. are preferred for userspace API headers. Signed-off-by: Mikko Rapeli Signed-off-by: David Vrabel --- include/uapi/xen/gntdev.h | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/include/uapi/xen/gntdev.h b/include/uapi/xen/gntdev.h index 5304bd3c84c5..aa7610a9b867 100644 --- a/include/uapi/xen/gntdev.h +++ b/include/uapi/xen/gntdev.h @@ -33,11 +33,13 @@ #ifndef __LINUX_PUBLIC_GNTDEV_H__ #define __LINUX_PUBLIC_GNTDEV_H__ +#include + struct ioctl_gntdev_grant_ref { /* The domain ID of the grant to be mapped. */ - uint32_t domid; + __u32 domid; /* The grant reference of the grant to be mapped. */ - uint32_t ref; + __u32 ref; }; /* @@ -50,11 +52,11 @@ _IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref)) struct ioctl_gntdev_map_grant_ref { /* IN parameters */ /* The number of grants to be mapped. */ - uint32_t count; - uint32_t pad; + __u32 count; + __u32 pad; /* OUT parameters */ /* The offset to be used on a subsequent call to mmap(). */ - uint64_t index; + __u64 index; /* Variable IN parameter. */ /* Array of grant references, of size @count. */ struct ioctl_gntdev_grant_ref refs[1]; @@ -70,10 +72,10 @@ _IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref)) struct ioctl_gntdev_unmap_grant_ref { /* IN parameters */ /* The offset was returned by the corresponding map operation. 
*/ - uint64_t index; + __u64 index; /* The number of pages to be unmapped. */ - uint32_t count; - uint32_t pad; + __u32 count; + __u32 pad; }; /* @@ -93,13 +95,13 @@ _IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr)) struct ioctl_gntdev_get_offset_for_vaddr { /* IN parameters */ /* The virtual address of the first mapped page in a range. */ - uint64_t vaddr; + __u64 vaddr; /* OUT parameters */ /* The offset that was used in the initial mmap() operation. */ - uint64_t offset; + __u64 offset; /* The number of pages mapped in the VM area that begins at @vaddr. */ - uint32_t count; - uint32_t pad; + __u32 count; + __u32 pad; }; /* @@ -113,7 +115,7 @@ _IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_gntdev_set_max_grants)) struct ioctl_gntdev_set_max_grants { /* IN parameter */ /* The maximum number of grants that may be mapped at once. */ - uint32_t count; + __u32 count; }; /* @@ -135,11 +137,11 @@ struct ioctl_gntdev_unmap_notify { * be cleared. Otherwise, it can be any byte in the page whose * notification we are adjusting. */ - uint64_t index; + __u64 index; /* Action(s) to take on unmap */ - uint32_t action; + __u32 action; /* Event channel to notify */ - uint32_t event_channel_port; + __u32 event_channel_port; }; /* Clear (set to zero) the byte specified by index */ -- cgit v1.2.3 From 9cce2914e2b21339dca12c91dc9f35790366cc4c Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Tue, 13 Oct 2015 17:50:11 +0100 Subject: xen/xenbus: Rename *RING_PAGE* to *RING_GRANT* Linux may use a different page size than the size of grant. So make clear that the order is actually in number of grant. Signed-off-by: Julien Grall Signed-off-by: David Vrabel --- drivers/block/xen-blkback/blkback.c | 8 ++++---- drivers/block/xen-blkback/xenbus.c | 2 +- drivers/block/xen-blkfront.c | 10 +++++----- drivers/xen/xenbus/xenbus_client.c | 34 +++++++++++++++++----------------- include/xen/xenbus.h | 4 ++-- 5 files changed, 29 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 809634ce3b67..f9099940c272 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -87,7 +87,7 @@ MODULE_PARM_DESC(max_persistent_grants, * Maximum order of pages to be used for the shared ring between front and * backend, 4KB page granularity is used. 
*/ -unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_PAGE_ORDER; +unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER; module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO); MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring"); /* @@ -1446,10 +1446,10 @@ static int __init xen_blkif_init(void) if (!xen_domain()) return -ENODEV; - if (xen_blkif_max_ring_order > XENBUS_MAX_RING_PAGE_ORDER) { + if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) { pr_info("Invalid max_ring_order (%d), will use default max: %d.\n", - xen_blkif_max_ring_order, XENBUS_MAX_RING_PAGE_ORDER); - xen_blkif_max_ring_order = XENBUS_MAX_RING_PAGE_ORDER; + xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER); + xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER; } rc = xen_blkif_interface_init(); diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 01c6b41de4e5..f53cff42f8da 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -829,7 +829,7 @@ again: static int connect_ring(struct backend_info *be) { struct xenbus_device *dev = be->dev; - unsigned int ring_ref[XENBUS_MAX_RING_PAGES]; + unsigned int ring_ref[XENBUS_MAX_RING_GRANTS]; unsigned int evtchn, nr_grefs, ring_page_order; unsigned int pers_grants; char protocol[64] = ""; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 057e05da83d1..833955f32430 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -111,7 +111,7 @@ MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages) #define BLK_MAX_RING_SIZE \ - __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_PAGES) + __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_GRANTS) /* * ring-ref%i i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19 @@ -133,7 +133,7 @@ struct blkfront_info int vdevice; blkif_vdev_t handle; enum blkif_state connected; - int ring_ref[XENBUS_MAX_RING_PAGES]; + int ring_ref[XENBUS_MAX_RING_GRANTS]; unsigned int nr_ring_pages; struct blkif_front_ring ring; unsigned int evtchn, irq; @@ -1413,7 +1413,7 @@ static int setup_blkring(struct xenbus_device *dev, struct blkif_sring *sring; int err, i; unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE; - grant_ref_t gref[XENBUS_MAX_RING_PAGES]; + grant_ref_t gref[XENBUS_MAX_RING_GRANTS]; for (i = 0; i < info->nr_ring_pages; i++) info->ring_ref[i] = GRANT_INVALID_REF; @@ -2284,9 +2284,9 @@ static int __init xlblk_init(void) if (!xen_domain()) return -ENODEV; - if (xen_blkif_max_ring_order > XENBUS_MAX_RING_PAGE_ORDER) { + if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) { pr_info("Invalid max_ring_order (%d), will use default max: %d.\n", - xen_blkif_max_ring_order, XENBUS_MAX_RING_PAGE_ORDER); + xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER); xen_blkif_max_ring_order = 0; } diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 42abee3bbb27..b77643361853 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -56,11 +56,11 @@ struct xenbus_map_node { struct vm_struct *area; } pv; struct { - struct page *pages[XENBUS_MAX_RING_PAGES]; + struct page *pages[XENBUS_MAX_RING_GRANTS]; void *addr; } hvm; }; - grant_handle_t handles[XENBUS_MAX_RING_PAGES]; + grant_handle_t handles[XENBUS_MAX_RING_GRANTS]; unsigned int nr_handles; 
}; @@ -479,12 +479,12 @@ static int __xenbus_map_ring(struct xenbus_device *dev, unsigned int flags, bool *leaked) { - struct gnttab_map_grant_ref map[XENBUS_MAX_RING_PAGES]; - struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES]; + struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS]; + struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS]; int i, j; int err = GNTST_okay; - if (nr_grefs > XENBUS_MAX_RING_PAGES) + if (nr_grefs > XENBUS_MAX_RING_GRANTS) return -EINVAL; for (i = 0; i < nr_grefs; i++) { @@ -540,15 +540,15 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev, { struct xenbus_map_node *node; struct vm_struct *area; - pte_t *ptes[XENBUS_MAX_RING_PAGES]; - phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES]; + pte_t *ptes[XENBUS_MAX_RING_GRANTS]; + phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS]; int err = GNTST_okay; int i; bool leaked; *vaddr = NULL; - if (nr_grefs > XENBUS_MAX_RING_PAGES) + if (nr_grefs > XENBUS_MAX_RING_GRANTS) return -EINVAL; node = kzalloc(sizeof(*node), GFP_KERNEL); @@ -602,10 +602,10 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, void *addr; bool leaked = false; /* Why do we need two arrays? See comment of __xenbus_map_ring */ - phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES]; - unsigned long addrs[XENBUS_MAX_RING_PAGES]; + phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS]; + unsigned long addrs[XENBUS_MAX_RING_GRANTS]; - if (nr_grefs > XENBUS_MAX_RING_PAGES) + if (nr_grefs > XENBUS_MAX_RING_GRANTS) return -EINVAL; *vaddr = NULL; @@ -686,10 +686,10 @@ int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs, unsigned int nr_grefs, grant_handle_t *handles, unsigned long *vaddrs, bool *leaked) { - phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES]; + phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS]; int i; - if (nr_grefs > XENBUS_MAX_RING_PAGES) + if (nr_grefs > XENBUS_MAX_RING_GRANTS) return -EINVAL; for (i = 0; i < nr_grefs; i++) @@ -722,7 +722,7 @@ EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr) { struct xenbus_map_node *node; - struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES]; + struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS]; unsigned int level; int i; bool leaked = false; @@ -787,7 +787,7 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr) int rv; struct xenbus_map_node *node; void *addr; - unsigned long addrs[XENBUS_MAX_RING_PAGES]; + unsigned long addrs[XENBUS_MAX_RING_GRANTS]; int i; spin_lock(&xenbus_valloc_lock); @@ -840,11 +840,11 @@ int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles, unsigned int nr_handles, unsigned long *vaddrs) { - struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES]; + struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS]; int i; int err; - if (nr_handles > XENBUS_MAX_RING_PAGES) + if (nr_handles > XENBUS_MAX_RING_GRANTS) return -EINVAL; for (i = 0; i < nr_handles; i++) diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index 289c0b5f08fe..32b944b7cebd 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h @@ -46,8 +46,8 @@ #include #include -#define XENBUS_MAX_RING_PAGE_ORDER 4 -#define XENBUS_MAX_RING_PAGES (1U << XENBUS_MAX_RING_PAGE_ORDER) +#define XENBUS_MAX_RING_GRANT_ORDER 4 +#define XENBUS_MAX_RING_GRANTS (1U << XENBUS_MAX_RING_GRANT_ORDER) #define INVALID_GRANT_HANDLE (~0U) /* Register callback to watch this node. 
*/ -- cgit v1.2.3 From f73314b28148f9ee9f89a0ae961c8fb36e3269fa Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Tue, 13 Oct 2015 17:50:12 +0100 Subject: xen/grant-table: Add a helper to iterate over a specific number of grants With the 64KB page granularity support on ARM64, a Linux page may be split across multiple grants. Currently we have the helper gnttab_foreach_grant_in_range to break a Linux page into grants based on an offset and a length, but it doesn't fit when we only have a number of grants in hand. Introduce a new helper which takes an array of Linux pages and a number of grants and figures out the address of each grant. Signed-off-by: Julien Grall Signed-off-by: David Vrabel --- drivers/xen/grant-table.c | 22 ++++++++++++++++++++++ include/xen/grant_table.h | 6 ++++++ 2 files changed, 28 insertions(+) (limited to 'include') diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 72d633962095..c49f79ed58c5 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -802,6 +802,28 @@ void gnttab_foreach_grant_in_range(struct page *page, } EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range); +void gnttab_foreach_grant(struct page **pages, + unsigned int nr_grefs, + xen_grant_fn_t fn, + void *data) +{ + unsigned int goffset = 0; + unsigned long xen_pfn = 0; + unsigned int i; + + for (i = 0; i < nr_grefs; i++) { + if ((i % XEN_PFN_PER_PAGE) == 0) { + xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]); + goffset = 0; + } + + fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data); + + goffset += XEN_PAGE_SIZE; + xen_pfn++; + } +} + int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, struct gnttab_map_grant_ref *kmap_ops, struct page **pages, unsigned int count) diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index e17a4b381a16..34b1379f9777 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -264,6 +264,12 @@ static inline void gnttab_for_one_grant(struct page *page, unsigned int offset, gnttab_foreach_grant_in_range(page, offset, len, fn, data); } +/* Get @nr_grefs grants from an array of page and call fn for each grant */ +void gnttab_foreach_grant(struct page **pages, + unsigned int nr_grefs, + xen_grant_fn_t fn, + void *data); + /* Get the number of grant in a specified region * * start: Offset from the beginning of the first page * len: total length of data (can cross multiple page) -- cgit v1.2.3
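A sketch of the intended use of gnttab_foreach_grant() (an assumed caller, not part of the patch): granting every 4KB frame of a multi-page ring whose backing pages are already collected in an array. The callback is invoked once per grant with a full XEN_PAGE_SIZE chunk and the chunk's offset within its Linux page:

#include <xen/grant_table.h>
#include <xen/page.h>

struct ring_grants {
	grant_ref_t *refs;	/* pre-allocated, one per grant */
	unsigned int next;
	domid_t otherend;
};

/* xen_grant_fn_t callback: grant one 4KB frame of the ring. */
static void grant_one_frame(unsigned long gfn, unsigned int offset,
			    unsigned int len, void *data)
{
	struct ring_grants *rg = data;

	/*
	 * offset is the chunk's offset inside its Linux page; len is
	 * always XEN_PAGE_SIZE on this path.
	 */
	gnttab_grant_foreign_access_ref(rg->refs[rg->next++], rg->otherend,
					gfn, 0 /* read-write */);
}

/* Grant nr_grefs frames spread over the pages[] array. */
static void grant_ring(struct page **pages, unsigned int nr_grefs,
		       struct ring_grants *rg)
{
	gnttab_foreach_grant(pages, nr_grefs, grant_one_frame, rg);
}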