From 24e6d5a59ac7d31adc0322de2d0117dfa370936f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 29 Dec 2017 08:53:53 +0100 Subject: mm: pass the vmem_altmap to arch_add_memory and __add_pages We can just pass this on instead of having to do a radix tree lookup without proper locking 2 levels into the callchain. Signed-off-by: Christoph Hellwig Signed-off-by: Dan Williams --- kernel/memremap.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/memremap.c b/kernel/memremap.c index 403ab9cdb949..8488cdeead16 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -382,6 +382,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, if (altmap) { memcpy(&page_map->altmap, altmap, sizeof(*altmap)); pgmap->altmap = &page_map->altmap; + altmap = pgmap->altmap; } pgmap->ref = ref; pgmap->res = &page_map->res; @@ -427,7 +428,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, goto err_pfn_remap; mem_hotplug_begin(); - error = arch_add_memory(nid, align_start, align_size, false); + error = arch_add_memory(nid, align_start, align_size, altmap, false); if (!error) move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], align_start >> PAGE_SHIFT, -- cgit v1.2.3 From da024512a1fa5c979257e442130ee1d468285057 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 29 Dec 2017 08:53:55 +0100 Subject: mm: pass the vmem_altmap to arch_remove_memory and __remove_pages We can just pass this on instead of having to do a radix tree lookup without proper locking 2 levels into the callchain. Signed-off-by: Christoph Hellwig Signed-off-by: Dan Williams --- arch/ia64/mm/init.c | 4 ++-- arch/powerpc/mm/mem.c | 6 ++---- arch/s390/mm/init.c | 2 +- arch/sh/mm/init.c | 4 ++-- arch/x86/mm/init_32.c | 4 ++-- arch/x86/mm/init_64.c | 6 ++---- include/linux/memory_hotplug.h | 5 +++-- kernel/memremap.c | 2 +- mm/hmm.c | 4 ++-- mm/memory_hotplug.c | 8 ++------ 10 files changed, 19 insertions(+), 26 deletions(-) (limited to 'kernel') diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 2e2e4f532204..6a8ce9e1536e 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -663,7 +663,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, } #ifdef CONFIG_MEMORY_HOTREMOVE -int arch_remove_memory(u64 start, u64 size) +int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; @@ -671,7 +671,7 @@ int arch_remove_memory(u64 start, u64 size) int ret; zone = page_zone(pfn_to_page(start_pfn)); - ret = __remove_pages(zone, start_pfn, nr_pages); + ret = __remove_pages(zone, start_pfn, nr_pages, altmap); if (ret) pr_warn("%s: Problem encountered in __remove_pages() as" " ret=%d\n", __func__, ret); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index e670cfc2766e..22aa528b78a2 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -149,11 +149,10 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, } #ifdef CONFIG_MEMORY_HOTREMOVE -int arch_remove_memory(u64 start, u64 size) +int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct vmem_altmap *altmap; struct page *page; int ret; @@ -162,11 +161,10 @@ int arch_remove_memory(u64 start, u64 size) * when querying the zone. 
*/ page = pfn_to_page(start_pfn); - altmap = to_vmem_altmap((unsigned long) page); if (altmap) page += vmem_altmap_offset(altmap); - ret = __remove_pages(page_zone(page), start_pfn, nr_pages); + ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap); if (ret) return ret; diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index e12c5af50cd7..3fa3e5323612 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -240,7 +240,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, } #ifdef CONFIG_MEMORY_HOTREMOVE -int arch_remove_memory(u64 start, u64 size) +int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) { /* * There is no hardware or firmware interface which could trigger a diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 552afbf55bad..ce0bbaa7e404 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -510,7 +510,7 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); #endif #ifdef CONFIG_MEMORY_HOTREMOVE -int arch_remove_memory(u64 start, u64 size) +int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) { unsigned long start_pfn = PFN_DOWN(start); unsigned long nr_pages = size >> PAGE_SHIFT; @@ -518,7 +518,7 @@ int arch_remove_memory(u64 start, u64 size) int ret; zone = page_zone(pfn_to_page(start_pfn)); - ret = __remove_pages(zone, start_pfn, nr_pages); + ret = __remove_pages(zone, start_pfn, nr_pages, altmap); if (unlikely(ret)) pr_warn("%s: Failed, __remove_pages() == %d\n", __func__, ret); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 8a3091511a71..79cb066f40c0 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -839,14 +839,14 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, } #ifdef CONFIG_MEMORY_HOTREMOVE -int arch_remove_memory(u64 start, u64 size) +int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; struct zone *zone; zone = page_zone(pfn_to_page(start_pfn)); - return __remove_pages(zone, start_pfn, nr_pages); + return __remove_pages(zone, start_pfn, nr_pages, altmap); } #endif #endif diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 594902ef56ef..3c046618cc7e 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1132,21 +1132,19 @@ kernel_physical_mapping_remove(unsigned long start, unsigned long end) remove_pagetable(start, end, true); } -int __ref arch_remove_memory(u64 start, u64 size) +int __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; struct page *page = pfn_to_page(start_pfn); - struct vmem_altmap *altmap; struct zone *zone; int ret; /* With altmap the first mapped page is offset from @start */ - altmap = to_vmem_altmap((unsigned long) page); if (altmap) page += vmem_altmap_offset(altmap); zone = page_zone(page); - ret = __remove_pages(zone, start_pfn, nr_pages); + ret = __remove_pages(zone, start_pfn, nr_pages, altmap); WARN_ON_ONCE(ret); kernel_physical_mapping_remove(start, start + size); diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index cbdd6d52e877..e71927d0d46b 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -126,9 +126,10 @@ static inline bool movable_node_is_enabled(void) #ifdef CONFIG_MEMORY_HOTREMOVE extern bool is_pageblock_removable_nolock(struct page *page); -extern int 
arch_remove_memory(u64 start, u64 size); +extern int arch_remove_memory(u64 start, u64 size, + struct vmem_altmap *altmap); extern int __remove_pages(struct zone *zone, unsigned long start_pfn, - unsigned long nr_pages); + unsigned long nr_pages, struct vmem_altmap *altmap); #endif /* CONFIG_MEMORY_HOTREMOVE */ /* reasonably generic interface to expand the physical pages */ diff --git a/kernel/memremap.c b/kernel/memremap.c index 8488cdeead16..380fca1c4a02 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -304,7 +304,7 @@ static void devm_memremap_pages_release(struct device *dev, void *data) align_size = ALIGN(resource_size(res), SECTION_SIZE); mem_hotplug_begin(); - arch_remove_memory(align_start, align_size); + arch_remove_memory(align_start, align_size, pgmap->altmap); mem_hotplug_done(); untrack_pfn(NULL, PHYS_PFN(align_start), align_size); diff --git a/mm/hmm.c b/mm/hmm.c index 231aaacd1997..5d17ba89062f 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -838,10 +838,10 @@ static void hmm_devmem_release(struct device *dev, void *data) mem_hotplug_begin(); if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) - __remove_pages(zone, start_pfn, npages); + __remove_pages(zone, start_pfn, npages, NULL); else arch_remove_memory(start_pfn << PAGE_SHIFT, - npages << PAGE_SHIFT); + npages << PAGE_SHIFT, NULL); mem_hotplug_done(); hmm_devmem_radix_release(resource); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index b36f1822c432..eae6bf47caf7 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -569,7 +569,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms, * calling offline_pages(). */ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn, - unsigned long nr_pages) + unsigned long nr_pages, struct vmem_altmap *altmap) { unsigned long i; unsigned long map_offset = 0; @@ -577,10 +577,6 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn, /* In the ZONE_DEVICE case device driver owns the memory region */ if (is_dev_zone(zone)) { - struct page *page = pfn_to_page(phys_start_pfn); - struct vmem_altmap *altmap; - - altmap = to_vmem_altmap((unsigned long) page); if (altmap) map_offset = vmem_altmap_offset(altmap); } else { @@ -1890,7 +1886,7 @@ void __ref remove_memory(int nid, u64 start, u64 size) memblock_free(start, size); memblock_remove(start, size); - arch_remove_memory(start, size); + arch_remove_memory(start, size, NULL); try_offline_node(nid); -- cgit v1.2.3 From a99583e780c751003ac9c0105eec9a3b23ec3bc4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 29 Dec 2017 08:53:57 +0100 Subject: mm: pass the vmem_altmap to memmap_init_zone Pass the vmem_altmap two levels down instead of needing a lookup. 
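To make the shape of this refactoring concrete, here is a minimal userspace C sketch of the pattern (all names here, such as global_lookup() and init_range(), are hypothetical illustrations, not kernel APIs): the context is resolved once at the top of the call chain and passed down explicitly, instead of each level re-deriving it through a global lookup.

	#include <stdio.h>

	struct ctx { int offset; };		/* stands in for struct vmem_altmap */

	static struct ctx *global_lookup(void)	/* the lookup being removed */
	{
		static struct ctx c = { 64 };
		return &c;
	}

	static void init_one(unsigned long pfn, struct ctx *ctx)
	{
		printf("init pfn %lu (offset %d)\n", pfn, ctx ? ctx->offset : 0);
	}

	/* the context is threaded through every level, never re-derived */
	static void init_range(unsigned long start, unsigned long n, struct ctx *ctx)
	{
		unsigned long pfn;

		for (pfn = start; pfn < start + n; pfn++)
			init_one(pfn, ctx);
	}

	int main(void)
	{
		struct ctx *ctx = global_lookup();	/* resolved once, at the top */

		init_range(1024, 4, ctx);
		return 0;
	}

The kernel change is the same idea with struct vmem_altmap as the context: devm_memremap_pages() already knows the altmap, so memmap_init_zone() can simply receive it as an argument.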
Signed-off-by: Christoph Hellwig Signed-off-by: Dan Williams --- arch/ia64/mm/init.c | 9 +++++---- include/linux/memory_hotplug.h | 2 +- include/linux/mm.h | 4 ++-- kernel/memremap.c | 2 +- mm/hmm.c | 2 +- mm/memory_hotplug.c | 9 +++++---- mm/page_alloc.c | 6 +++--- 7 files changed, 18 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 6a8ce9e1536e..18278b448530 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -501,7 +501,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg) if (map_start < map_end) memmap_init_zone((unsigned long)(map_end - map_start), args->nid, args->zone, page_to_pfn(map_start), - MEMMAP_EARLY); + MEMMAP_EARLY, NULL); return 0; } @@ -509,9 +509,10 @@ void __meminit memmap_init (unsigned long size, int nid, unsigned long zone, unsigned long start_pfn) { - if (!vmem_map) - memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY); - else { + if (!vmem_map) { + memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, + NULL); + } else { struct page *start; struct memmap_init_callback_data args; diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 20dd98ad44a0..aba5f86eb038 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -324,7 +324,7 @@ extern int add_memory_resource(int nid, struct resource *resource, bool online); extern int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, bool want_memblock); extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, - unsigned long nr_pages); + unsigned long nr_pages, struct vmem_altmap *altmap); extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); extern bool is_memblock_offlined(struct memory_block *mem); extern void remove_memory(int nid, u64 start, u64 size); diff --git a/include/linux/mm.h b/include/linux/mm.h index 9d4cd4c1dc6d..fd01135324b6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2069,8 +2069,8 @@ static inline void zero_resv_unavail(void) {} #endif extern void set_dma_reserve(unsigned long new_dma_reserve); -extern void memmap_init_zone(unsigned long, int, unsigned long, - unsigned long, enum memmap_context); +extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, + enum memmap_context, struct vmem_altmap *); extern void setup_per_zone_wmarks(void); extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); diff --git a/kernel/memremap.c b/kernel/memremap.c index 380fca1c4a02..64b12c806cc5 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -432,7 +432,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, if (!error) move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], align_start >> PAGE_SHIFT, - align_size >> PAGE_SHIFT); + align_size >> PAGE_SHIFT, altmap); mem_hotplug_done(); if (error) goto err_add_memory; diff --git a/mm/hmm.c b/mm/hmm.c index 5d17ba89062f..2f2e13c61040 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -942,7 +942,7 @@ static int hmm_devmem_pages_create(struct hmm_devmem *devmem) } move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], align_start >> PAGE_SHIFT, - align_size >> PAGE_SHIFT); + align_size >> PAGE_SHIFT, NULL); mem_hotplug_done(); for (pfn = devmem->pfn_first; pfn < devmem->pfn_last; pfn++) { diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index a8dde9734120..12df8a5fadcc 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -798,8 +798,8 @@ static void __meminit 
resize_pgdat_range(struct pglist_data *pgdat, unsigned lon pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn; } -void __ref move_pfn_range_to_zone(struct zone *zone, - unsigned long start_pfn, unsigned long nr_pages) +void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, + unsigned long nr_pages, struct vmem_altmap *altmap) { struct pglist_data *pgdat = zone->zone_pgdat; int nid = pgdat->node_id; @@ -824,7 +824,8 @@ void __ref move_pfn_range_to_zone(struct zone *zone, * expects the zone spans the pfn range. All the pages in the range * are reserved so nobody should be touching them so we should be safe */ - memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, MEMMAP_HOTPLUG); + memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, + MEMMAP_HOTPLUG, altmap); set_zone_contiguous(zone); } @@ -896,7 +897,7 @@ static struct zone * __meminit move_pfn_range(int online_type, int nid, struct zone *zone; zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages); - move_pfn_range_to_zone(zone, start_pfn, nr_pages); + move_pfn_range_to_zone(zone, start_pfn, nr_pages, NULL); return zone; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7e5e775e97f4..1748dd4a4b1b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5314,9 +5314,9 @@ void __ref build_all_zonelists(pg_data_t *pgdat) * done. Non-atomic initialization, single-pass. */ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, - unsigned long start_pfn, enum memmap_context context) + unsigned long start_pfn, enum memmap_context context, + struct vmem_altmap *altmap) { - struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn)); unsigned long end_pfn = start_pfn + size; pg_data_t *pgdat = NODE_DATA(nid); unsigned long pfn; @@ -5417,7 +5417,7 @@ static void __meminit zone_init_free_lists(struct zone *zone) #ifndef __HAVE_ARCH_MEMMAP_INIT #define memmap_init(size, nid, zone, start_pfn) \ - memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) + memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY, NULL) #endif static int zone_batchsize(struct zone *zone) -- cgit v1.2.3 From 0822acb86cf340cd45b3af6436cec7e3bb24ebd2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 29 Dec 2017 08:54:00 +0100 Subject: mm: move get_dev_pagemap out of line MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a pretty big function, which should be out of line in general, and a no-op stub if CONFIG_ZONE_DEVICE is not set. 
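The header-side idiom this relies on is a real declaration when the config option is enabled and a static inline no-op stub when it is not. A compilable single-file sketch of the idiom (CONFIG_ZONE_DEVICE is mocked here as a plain #define; with it enabled you would also have to link an out-of-line definition):

	#include <stdio.h>

	/* #define CONFIG_ZONE_DEVICE */	/* toggle to exercise both variants */

	struct dev_pagemap { int dummy; };

	#ifdef CONFIG_ZONE_DEVICE
	/* declaration only: the large body lives out of line in one .c file */
	struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
			struct dev_pagemap *pgmap);
	#else
	/* no-op stub: trivially inlined and optimized away when the option is off */
	static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
			struct dev_pagemap *pgmap)
	{
		(void)pfn;
		(void)pgmap;
		return NULL;
	}
	#endif

	int main(void)
	{
		printf("pgmap: %p\n", (void *)get_dev_pagemap(0, NULL));
		return 0;
	}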
Signed-off-by: Christoph Hellwig Reviewed-by: Logan Gunthorpe Signed-off-by: Dan Williams --- include/linux/memremap.h | 39 ++++----------------------------------- kernel/memremap.c | 36 ++++++++++++++++++++++++++++++++++-- 2 files changed, 38 insertions(+), 37 deletions(-) (limited to 'kernel') diff --git a/include/linux/memremap.h b/include/linux/memremap.h index d5a6736d9737..26e8aaba27d5 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -133,7 +133,8 @@ struct dev_pagemap { #ifdef CONFIG_ZONE_DEVICE void *devm_memremap_pages(struct device *dev, struct resource *res, struct percpu_ref *ref, struct vmem_altmap *altmap); -struct dev_pagemap *find_dev_pagemap(resource_size_t phys); +struct dev_pagemap *get_dev_pagemap(unsigned long pfn, + struct dev_pagemap *pgmap); unsigned long vmem_altmap_offset(struct vmem_altmap *altmap); void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns); @@ -153,7 +154,8 @@ static inline void *devm_memremap_pages(struct device *dev, return ERR_PTR(-ENXIO); } -static inline struct dev_pagemap *find_dev_pagemap(resource_size_t phys) +static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn, + struct dev_pagemap *pgmap) { return NULL; } @@ -183,39 +185,6 @@ static inline bool is_device_public_page(const struct page *page) } #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */ -/** - * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn - * @pfn: page frame number to lookup page_map - * @pgmap: optional known pgmap that already has a reference - * - * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the - * same mapping. - */ -static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn, - struct dev_pagemap *pgmap) -{ - const struct resource *res = pgmap ? pgmap->res : NULL; - resource_size_t phys = PFN_PHYS(pfn); - - /* - * In the cached case we're already holding a live reference so - * we can simply do a blind increment - */ - if (res && phys >= res->start && phys <= res->end) { - percpu_ref_get(pgmap->ref); - return pgmap; - } - - /* fall back to slow path lookup */ - rcu_read_lock(); - pgmap = find_dev_pagemap(phys); - if (pgmap && !percpu_ref_tryget_live(pgmap->ref)) - pgmap = NULL; - rcu_read_unlock(); - - return pgmap; -} - static inline void put_dev_pagemap(struct dev_pagemap *pgmap) { if (pgmap) diff --git a/kernel/memremap.c b/kernel/memremap.c index 64b12c806cc5..3df6cd4ffb40 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -314,7 +314,7 @@ static void devm_memremap_pages_release(struct device *dev, void *data) } /* assumes rcu_read_lock() held at entry */ -struct dev_pagemap *find_dev_pagemap(resource_size_t phys) +static struct dev_pagemap *find_dev_pagemap(resource_size_t phys) { struct page_map *page_map; @@ -501,8 +501,40 @@ struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start) return pgmap ? pgmap->altmap : NULL; } -#endif /* CONFIG_ZONE_DEVICE */ +/** + * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn + * @pfn: page frame number to lookup page_map + * @pgmap: optional known pgmap that already has a reference + * + * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the + * same mapping. + */ +struct dev_pagemap *get_dev_pagemap(unsigned long pfn, + struct dev_pagemap *pgmap) +{ + const struct resource *res = pgmap ? 
pgmap->res : NULL; + resource_size_t phys = PFN_PHYS(pfn); + + /* + * In the cached case we're already holding a live reference so + * we can simply do a blind increment + */ + if (res && phys >= res->start && phys <= res->end) { + percpu_ref_get(pgmap->ref); + return pgmap; + } + + /* fall back to slow path lookup */ + rcu_read_lock(); + pgmap = find_dev_pagemap(phys); + if (pgmap && !percpu_ref_tryget_live(pgmap->ref)) + pgmap = NULL; + rcu_read_unlock(); + + return pgmap; +} +#endif /* CONFIG_ZONE_DEVICE */ #if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC) void put_zone_device_private_or_public_page(struct page *page) -- cgit v1.2.3 From 832d7aa051106c927cae05ced29d3fd31459ed21 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 29 Dec 2017 08:54:01 +0100 Subject: mm: optimize dev_pagemap reference counting around get_dev_pagemap Change the calling convention so that get_dev_pagemap always consumes the previous reference instead of doing this using an explicit earlier call to put_dev_pagemap in the callers. The callers will still need to put the final reference after finishing the loop over the pages. Signed-off-by: Christoph Hellwig Reviewed-by: Logan Gunthorpe Signed-off-by: Dan Williams --- kernel/memremap.c | 17 +++++++++-------- mm/gup.c | 7 +++++-- 2 files changed, 14 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/memremap.c b/kernel/memremap.c index 3df6cd4ffb40..891c77487a6a 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -507,22 +507,23 @@ struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start) * @pfn: page frame number to lookup page_map * @pgmap: optional known pgmap that already has a reference * - * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the - * same mapping. + * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap + * is non-NULL but does not cover @pfn the reference to it will be released. */ struct dev_pagemap *get_dev_pagemap(unsigned long pfn, struct dev_pagemap *pgmap) { - const struct resource *res = pgmap ? pgmap->res : NULL; resource_size_t phys = PFN_PHYS(pfn); /* - * In the cached case we're already holding a live reference so - * we can simply do a blind increment + * In the cached case we're already holding a live reference. */ - if (res && phys >= res->start && phys <= res->end) { - percpu_ref_get(pgmap->ref); - return pgmap; + if (pgmap) { + const struct resource *res = pgmap ? 
pgmap->res : NULL; + + if (res && phys >= res->start && phys <= res->end) + return pgmap; + put_dev_pagemap(pgmap); } /* fall back to slow path lookup */ diff --git a/mm/gup.c b/mm/gup.c index e0d82b6706d7..3affe7544b0c 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1410,7 +1410,6 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, VM_BUG_ON_PAGE(compound_head(page) != head, page); - put_dev_pagemap(pgmap); SetPageReferenced(page); pages[*nr] = page; (*nr)++; @@ -1420,6 +1419,8 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, ret = 1; pte_unmap: + if (pgmap) + put_dev_pagemap(pgmap); pte_unmap(ptem); return ret; } @@ -1459,10 +1460,12 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr, SetPageReferenced(page); pages[*nr] = page; get_page(page); - put_dev_pagemap(pgmap); (*nr)++; pfn++; } while (addr += PAGE_SIZE, addr != end); + + if (pgmap) + put_dev_pagemap(pgmap); return 1; } -- cgit v1.2.3 From 0628b8c650718f4dfedfcdc9ed136bf7e394aae7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 29 Dec 2017 08:54:02 +0100 Subject: memremap: remove to_vmem_altmap All callers are gone now. Signed-off-by: Christoph Hellwig Signed-off-by: Dan Williams --- include/linux/memremap.h | 9 --------- kernel/memremap.c | 26 -------------------------- 2 files changed, 35 deletions(-) (limited to 'kernel') diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 26e8aaba27d5..3fddcfe57bb0 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -26,15 +26,6 @@ struct vmem_altmap { unsigned long alloc; }; -#ifdef CONFIG_ZONE_DEVICE -struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start); -#else -static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start) -{ - return NULL; -} -#endif - /* * Specialize ZONE_DEVICE memory into multiple types each having differents * usage. diff --git a/kernel/memremap.c b/kernel/memremap.c index 891c77487a6a..b09517439dec 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -476,32 +476,6 @@ void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns) altmap->alloc -= nr_pfns; } -struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start) -{ - /* - * 'memmap_start' is the virtual address for the first "struct - * page" in this range of the vmemmap array. In the case of - * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple - * pointer arithmetic, so we can perform this to_vmem_altmap() - * conversion without concern for the initialization state of - * the struct page fields. - */ - struct page *page = (struct page *) memmap_start; - struct dev_pagemap *pgmap; - - /* - * Unconditionally retrieve a dev_pagemap associated with the - * given physical address, this is only for use in the - * arch_{add|remove}_memory() for setting up and tearing down - * the memmap. - */ - rcu_read_lock(); - pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page))); - rcu_read_unlock(); - - return pgmap ? pgmap->altmap : NULL; -} - /** * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn * @pfn: page frame number to lookup page_map -- cgit v1.2.3 From 7003e3b1f64d0195ea9d31aed0b096ad38f3cb54 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 29 Dec 2017 08:54:03 +0100 Subject: memremap: simplify duplicate region handling in devm_memremap_pages __radix_tree_insert already checks for duplicates and returns -EEXIST in that case, so remove the duplicate (and racy) duplicates check. 
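The removed code was a classic check-then-act sequence: look the key up, then insert only if the lookup found nothing, leaving a window between the two steps. A toy sketch of why a single insert that reports -EEXIST itself is the safer primitive (table_insert() is a stand-in for __radix_tree_insert(), not a real API):

	#include <stdio.h>
	#include <errno.h>

	#define NSLOTS 8

	static const char *table[NSLOTS];

	/* the insert detects duplicates itself, like __radix_tree_insert */
	static int table_insert(int key, const char *val)
	{
		if (table[key])
			return -EEXIST;	/* single authoritative check */
		table[key] = val;
		return 0;
	}

	int main(void)
	{
		/*
		 * racy version (removed): lookup first, window for another
		 * inserter, then insert:
		 *	if (table[key]) return -EBUSY;  ...window...  table[key] = val;
		 */
		printf("first:  %d\n", table_insert(3, "a"));	/* 0 */
		printf("second: %d\n", table_insert(3, "b"));	/* -EEXIST */
		return 0;
	}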
Signed-off-by: Christoph Hellwig Reviewed-by: Logan Gunthorpe Signed-off-by: Dan Williams --- kernel/memremap.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'kernel') diff --git a/kernel/memremap.c b/kernel/memremap.c index b09517439dec..12e78528fea4 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -396,17 +396,6 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, align_end = align_start + align_size - 1; foreach_order_pgoff(res, order, pgoff) { - struct dev_pagemap *dup; - - rcu_read_lock(); - dup = find_dev_pagemap(res->start + PFN_PHYS(pgoff)); - rcu_read_unlock(); - if (dup) { - dev_err(dev, "%s: %pr collides with mapping for %s\n", - __func__, res, dev_name(dup->dev)); - error = -EBUSY; - break; - } error = __radix_tree_insert(&pgmap_radix, PHYS_PFN(res->start) + pgoff, order, page_map); if (error) { -- cgit v1.2.3 From e7744aa25cffe26d3767c9ffcf4e130cca1dff00 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Fri, 29 Dec 2017 08:54:04 +0100 Subject: memremap: drop private struct page_map 'struct page_map' is a private structure of 'struct dev_pagemap' but the latter replicates all the same fields as the former so there isn't much value in it. Thus drop it in favour of a completely public struct. This is a clean up in preparation for a more generally useful 'devm_memremap_pages' interface. Signed-off-by: Logan Gunthorpe Signed-off-by: Christoph Hellwig Signed-off-by: Dan Williams --- include/linux/memremap.h | 5 ++-- kernel/memremap.c | 68 ++++++++++++++++++------------------------------ mm/hmm.c | 2 +- 3 files changed, 30 insertions(+), 45 deletions(-) (limited to 'kernel') diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 3fddcfe57bb0..1cb5f39d25c1 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -113,8 +113,9 @@ typedef void (*dev_page_free_t)(struct page *page, void *data); struct dev_pagemap { dev_page_fault_t page_fault; dev_page_free_t page_free; - struct vmem_altmap *altmap; - const struct resource *res; + struct vmem_altmap altmap; + bool altmap_valid; + struct resource res; struct percpu_ref *ref; struct device *dev; void *data; diff --git a/kernel/memremap.c b/kernel/memremap.c index 12e78528fea4..9207c44cce20 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -188,13 +188,6 @@ static RADIX_TREE(pgmap_radix, GFP_KERNEL); #define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1) #define SECTION_SIZE (1UL << PA_SECTION_SHIFT) -struct page_map { - struct resource res; - struct percpu_ref *ref; - struct dev_pagemap pgmap; - struct vmem_altmap altmap; -}; - static unsigned long order_at(struct resource *res, unsigned long pgoff) { unsigned long phys_pgoff = PHYS_PFN(res->start) + pgoff; @@ -260,22 +253,21 @@ static void pgmap_radix_release(struct resource *res) synchronize_rcu(); } -static unsigned long pfn_first(struct page_map *page_map) +static unsigned long pfn_first(struct dev_pagemap *pgmap) { - struct dev_pagemap *pgmap = &page_map->pgmap; - const struct resource *res = &page_map->res; - struct vmem_altmap *altmap = pgmap->altmap; + const struct resource *res = &pgmap->res; + struct vmem_altmap *altmap = &pgmap->altmap; unsigned long pfn; pfn = res->start >> PAGE_SHIFT; - if (altmap) + if (pgmap->altmap_valid) pfn += vmem_altmap_offset(altmap); return pfn; } -static unsigned long pfn_end(struct page_map *page_map) +static unsigned long pfn_end(struct dev_pagemap *pgmap) { - const struct resource *res = &page_map->res; + const struct resource *res = &pgmap->res; return (res->start 
+ resource_size(res)) >> PAGE_SHIFT; } @@ -285,13 +277,12 @@ static unsigned long pfn_end(struct page_map *page_map) static void devm_memremap_pages_release(struct device *dev, void *data) { - struct page_map *page_map = data; - struct resource *res = &page_map->res; + struct dev_pagemap *pgmap = data; + struct resource *res = &pgmap->res; resource_size_t align_start, align_size; - struct dev_pagemap *pgmap = &page_map->pgmap; unsigned long pfn; - for_each_device_pfn(pfn, page_map) + for_each_device_pfn(pfn, pgmap) put_page(pfn_to_page(pfn)); if (percpu_ref_tryget_live(pgmap->ref)) { @@ -304,24 +295,22 @@ static void devm_memremap_pages_release(struct device *dev, void *data) align_size = ALIGN(resource_size(res), SECTION_SIZE); mem_hotplug_begin(); - arch_remove_memory(align_start, align_size, pgmap->altmap); + arch_remove_memory(align_start, align_size, pgmap->altmap_valid ? + &pgmap->altmap : NULL); mem_hotplug_done(); untrack_pfn(NULL, PHYS_PFN(align_start), align_size); pgmap_radix_release(res); - dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc, - "%s: failed to free all reserved pages\n", __func__); + dev_WARN_ONCE(dev, pgmap->altmap.alloc, + "%s: failed to free all reserved pages\n", __func__); } /* assumes rcu_read_lock() held at entry */ static struct dev_pagemap *find_dev_pagemap(resource_size_t phys) { - struct page_map *page_map; - WARN_ON_ONCE(!rcu_read_lock_held()); - page_map = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys)); - return page_map ? &page_map->pgmap : NULL; + return radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys)); } /** @@ -349,7 +338,6 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, unsigned long pfn, pgoff, order; pgprot_t pgprot = PAGE_KERNEL; struct dev_pagemap *pgmap; - struct page_map *page_map; int error, nid, is_ram, i = 0; align_start = res->start & ~(SECTION_SIZE - 1); @@ -370,22 +358,20 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, if (!ref) return ERR_PTR(-EINVAL); - page_map = devres_alloc_node(devm_memremap_pages_release, - sizeof(*page_map), GFP_KERNEL, dev_to_node(dev)); - if (!page_map) + pgmap = devres_alloc_node(devm_memremap_pages_release, + sizeof(*pgmap), GFP_KERNEL, dev_to_node(dev)); + if (!pgmap) return ERR_PTR(-ENOMEM); - pgmap = &page_map->pgmap; - memcpy(&page_map->res, res, sizeof(*res)); + memcpy(&pgmap->res, res, sizeof(*res)); pgmap->dev = dev; if (altmap) { - memcpy(&page_map->altmap, altmap, sizeof(*altmap)); - pgmap->altmap = &page_map->altmap; - altmap = pgmap->altmap; + memcpy(&pgmap->altmap, altmap, sizeof(*altmap)); + pgmap->altmap_valid = true; + altmap = &pgmap->altmap; } pgmap->ref = ref; - pgmap->res = &page_map->res; pgmap->type = MEMORY_DEVICE_HOST; pgmap->page_fault = NULL; pgmap->page_free = NULL; @@ -397,7 +383,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, foreach_order_pgoff(res, order, pgoff) { error = __radix_tree_insert(&pgmap_radix, - PHYS_PFN(res->start) + pgoff, order, page_map); + PHYS_PFN(res->start) + pgoff, order, pgmap); if (error) { dev_err(dev, "%s: failed: %d\n", __func__, error); break; @@ -426,7 +412,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, if (error) goto err_add_memory; - for_each_device_pfn(pfn, page_map) { + for_each_device_pfn(pfn, pgmap) { struct page *page = pfn_to_page(pfn); /* @@ -441,7 +427,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, if (!(++i % 1024)) cond_resched(); } - devres_add(dev, page_map); + devres_add(dev, pgmap); return 
__va(res->start); err_add_memory: @@ -449,7 +435,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, err_pfn_remap: err_radix: pgmap_radix_release(res); - devres_free(page_map); + devres_free(pgmap); return ERR_PTR(error); } EXPORT_SYMBOL(devm_memremap_pages); @@ -482,9 +468,7 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn, * In the cached case we're already holding a live reference. */ if (pgmap) { - const struct resource *res = pgmap ? pgmap->res : NULL; - - if (res && phys >= res->start && phys <= res->end) + if (phys >= pgmap->res.start && phys <= pgmap->res.end) return pgmap; put_dev_pagemap(pgmap); } diff --git a/mm/hmm.c b/mm/hmm.c index 2f2e13c61040..320fdc87f064 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -882,7 +882,7 @@ static int hmm_devmem_pages_create(struct hmm_devmem *devmem) else devmem->pagemap.type = MEMORY_DEVICE_PRIVATE; - devmem->pagemap.res = devmem->resource; + devmem->pagemap.res = *devmem->resource; devmem->pagemap.page_fault = hmm_devmem_fault; devmem->pagemap.page_free = hmm_devmem_free; devmem->pagemap.dev = devmem->device; -- cgit v1.2.3 From e8d5134833006a46fcbefc5f4a84d0b62bd520e7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 29 Dec 2017 08:54:05 +0100 Subject: memremap: change devm_memremap_pages interface to use struct dev_pagemap This new interface is similar to how struct device (and many others) work. The caller initializes a 'struct dev_pagemap' as required and calls 'devm_memremap_pages'. This allows the pagemap structure to be embedded in another structure and thus container_of can be used. In this way application-specific members can be stored in a containing struct. This will be used by the P2P infrastructure and HMM could probably be cleaned up to use it as well (instead of having its own, similar 'hmm_devmem_pages_create' function). 
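The embedding this enables is the standard container_of idiom: the caller places the dev_pagemap inside its own structure and later recovers the outer object from a pointer to the inner member. A self-contained sketch (struct my_driver is a hypothetical containing struct, not a real driver):

	#include <stdio.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct dev_pagemap { unsigned long nr_pages; };

	struct my_driver {			/* hypothetical containing struct */
		int id;
		struct dev_pagemap pgmap;	/* embedded, not pointed-to */
	};

	static void callback(struct dev_pagemap *pgmap)
	{
		/* recover the driver-private state from the embedded member */
		struct my_driver *drv = container_of(pgmap, struct my_driver, pgmap);

		printf("driver %d, %lu pages\n", drv->id, pgmap->nr_pages);
	}

	int main(void)
	{
		struct my_driver drv = { .id = 42, .pgmap = { .nr_pages = 512 } };

		callback(&drv.pgmap);
		return 0;
	}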
Signed-off-by: Logan Gunthorpe Signed-off-by: Christoph Hellwig Signed-off-by: Dan Williams --- drivers/dax/pmem.c | 20 ++++++++------- drivers/nvdimm/nd.h | 9 +++---- drivers/nvdimm/pfn_devs.c | 27 ++++++++++++--------- drivers/nvdimm/pmem.c | 37 +++++++++++++++------------- drivers/nvdimm/pmem.h | 1 + include/linux/memremap.h | 6 ++--- kernel/memremap.c | 51 ++++++++++++++++----------------------- tools/testing/nvdimm/test/iomap.c | 7 +++--- 8 files changed, 77 insertions(+), 81 deletions(-) (limited to 'kernel') diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c index 8d8c852ba8f2..31b6ecce4c64 100644 --- a/drivers/dax/pmem.c +++ b/drivers/dax/pmem.c @@ -21,6 +21,7 @@ struct dax_pmem { struct device *dev; struct percpu_ref ref; + struct dev_pagemap pgmap; struct completion cmp; }; @@ -69,20 +70,23 @@ static int dax_pmem_probe(struct device *dev) struct nd_namespace_common *ndns; struct nd_dax *nd_dax = to_nd_dax(dev); struct nd_pfn *nd_pfn = &nd_dax->nd_pfn; - struct vmem_altmap __altmap, *altmap = NULL; ndns = nvdimm_namespace_common_probe(dev); if (IS_ERR(ndns)) return PTR_ERR(ndns); nsio = to_nd_namespace_io(&ndns->dev); + dax_pmem = devm_kzalloc(dev, sizeof(*dax_pmem), GFP_KERNEL); + if (!dax_pmem) + return -ENOMEM; + /* parse the 'pfn' info block via ->rw_bytes */ rc = devm_nsio_enable(dev, nsio); if (rc) return rc; - altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap); - if (IS_ERR(altmap)) - return PTR_ERR(altmap); + rc = nvdimm_setup_pfn(nd_pfn, &dax_pmem->pgmap); + if (rc) + return rc; devm_nsio_disable(dev, nsio); pfn_sb = nd_pfn->pfn_sb; @@ -94,10 +98,6 @@ static int dax_pmem_probe(struct device *dev) return -EBUSY; } - dax_pmem = devm_kzalloc(dev, sizeof(*dax_pmem), GFP_KERNEL); - if (!dax_pmem) - return -ENOMEM; - dax_pmem->dev = dev; init_completion(&dax_pmem->cmp); rc = percpu_ref_init(&dax_pmem->ref, dax_pmem_percpu_release, 0, @@ -110,7 +110,8 @@ static int dax_pmem_probe(struct device *dev) if (rc) return rc; - addr = devm_memremap_pages(dev, &res, &dax_pmem->ref, altmap); + dax_pmem->pgmap.ref = &dax_pmem->ref; + addr = devm_memremap_pages(dev, &dax_pmem->pgmap); if (IS_ERR(addr)) return PTR_ERR(addr); @@ -120,6 +121,7 @@ static int dax_pmem_probe(struct device *dev) return rc; /* adjust the dax_region resource to the start of data */ + memcpy(&res, &dax_pmem->pgmap.res, sizeof(res)); res.start += le64_to_cpu(pfn_sb->dataoff); rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", ®ion_id, &id); diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index e958f3724c41..8d6375ee0fda 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -368,15 +368,14 @@ unsigned int pmem_sector_size(struct nd_namespace_common *ndns); void nvdimm_badblocks_populate(struct nd_region *nd_region, struct badblocks *bb, const struct resource *res); #if IS_ENABLED(CONFIG_ND_CLAIM) -struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn, - struct resource *res, struct vmem_altmap *altmap); +int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap); int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio); void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio); #else -static inline struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn, - struct resource *res, struct vmem_altmap *altmap) +static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, + struct dev_pagemap *pgmap) { - return ERR_PTR(-ENXIO); + return -ENXIO; } static inline int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio) diff --git 
a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 2adada1a5855..f5c4e8c6e29d 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -542,9 +542,10 @@ static unsigned long init_altmap_reserve(resource_size_t base) return reserve; } -static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn, - struct resource *res, struct vmem_altmap *altmap) +static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap) { + struct resource *res = &pgmap->res; + struct vmem_altmap *altmap = &pgmap->altmap; struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; u64 offset = le64_to_cpu(pfn_sb->dataoff); u32 start_pad = __le32_to_cpu(pfn_sb->start_pad); @@ -561,11 +562,13 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn, res->start += start_pad; res->end -= end_trunc; + pgmap->type = MEMORY_DEVICE_HOST; + if (nd_pfn->mode == PFN_MODE_RAM) { if (offset < SZ_8K) - return ERR_PTR(-EINVAL); + return -EINVAL; nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns); - altmap = NULL; + pgmap->altmap_valid = false; } else if (nd_pfn->mode == PFN_MODE_PMEM) { nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res) - offset) / PAGE_SIZE); @@ -577,10 +580,11 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn, memcpy(altmap, &__altmap, sizeof(*altmap)); altmap->free = PHYS_PFN(offset - SZ_8K); altmap->alloc = 0; + pgmap->altmap_valid = true; } else - return ERR_PTR(-ENXIO); + return -ENXIO; - return altmap; + return 0; } static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys) @@ -708,19 +712,18 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) * Determine the effective resource range and vmem_altmap from an nd_pfn * instance. */ -struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn, - struct resource *res, struct vmem_altmap *altmap) +int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap) { int rc; if (!nd_pfn->uuid || !nd_pfn->ndns) - return ERR_PTR(-ENODEV); + return -ENODEV; rc = nd_pfn_init(nd_pfn); if (rc) - return ERR_PTR(rc); + return rc; - /* we need a valid pfn_sb before we can init a vmem_altmap */ - return __nvdimm_setup_pfn(nd_pfn, res, altmap); + /* we need a valid pfn_sb before we can init a dev_pagemap */ + return __nvdimm_setup_pfn(nd_pfn, pgmap); } EXPORT_SYMBOL_GPL(nvdimm_setup_pfn); diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 7fbc5c5dc8e1..cf074b1ce219 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -298,34 +298,34 @@ static int pmem_attach_disk(struct device *dev, { struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); struct nd_region *nd_region = to_nd_region(dev->parent); - struct vmem_altmap __altmap, *altmap = NULL; int nid = dev_to_node(dev), fua, wbc; struct resource *res = &nsio->res; + struct resource bb_res; struct nd_pfn *nd_pfn = NULL; struct dax_device *dax_dev; struct nd_pfn_sb *pfn_sb; struct pmem_device *pmem; - struct resource pfn_res; struct request_queue *q; struct device *gendev; struct gendisk *disk; void *addr; + int rc; + + pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL); + if (!pmem) + return -ENOMEM; /* while nsio_rw_bytes is active, parse a pfn info block if present */ if (is_nd_pfn(dev)) { nd_pfn = to_nd_pfn(dev); - altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap); - if (IS_ERR(altmap)) - return PTR_ERR(altmap); + rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap); + if (rc) + return rc; } /* we're attaching a block device, disable raw namespace access */ devm_nsio_disable(dev, nsio); - pmem = devm_kzalloc(dev, 
sizeof(*pmem), GFP_KERNEL); - if (!pmem) - return -ENOMEM; - dev_set_drvdata(dev, pmem); pmem->phys_addr = res->start; pmem->size = resource_size(res); @@ -350,19 +350,22 @@ static int pmem_attach_disk(struct device *dev, return -ENOMEM; pmem->pfn_flags = PFN_DEV; + pmem->pgmap.ref = &q->q_usage_counter; if (is_nd_pfn(dev)) { - addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter, - altmap); + addr = devm_memremap_pages(dev, &pmem->pgmap); pfn_sb = nd_pfn->pfn_sb; pmem->data_offset = le64_to_cpu(pfn_sb->dataoff); - pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res); + pmem->pfn_pad = resource_size(res) - + resource_size(&pmem->pgmap.res); pmem->pfn_flags |= PFN_MAP; - res = &pfn_res; /* for badblocks populate */ - res->start += pmem->data_offset; + memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res)); + bb_res.start += pmem->data_offset; } else if (pmem_should_map_pages(dev)) { - addr = devm_memremap_pages(dev, &nsio->res, - &q->q_usage_counter, NULL); + memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res)); + pmem->pgmap.altmap_valid = false; + addr = devm_memremap_pages(dev, &pmem->pgmap); pmem->pfn_flags |= PFN_MAP; + memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res)); } else addr = devm_memremap(dev, pmem->phys_addr, pmem->size, ARCH_MEMREMAP_PMEM); @@ -401,7 +404,7 @@ static int pmem_attach_disk(struct device *dev, / 512); if (devm_init_badblocks(dev, &pmem->bb)) return -ENOMEM; - nvdimm_badblocks_populate(nd_region, &pmem->bb, res); + nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res); disk->bb = &pmem->bb; dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops); diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h index 6a3cd2a10db6..a64ebc78b5df 100644 --- a/drivers/nvdimm/pmem.h +++ b/drivers/nvdimm/pmem.h @@ -22,6 +22,7 @@ struct pmem_device { struct badblocks bb; struct dax_device *dax_dev; struct gendisk *disk; + struct dev_pagemap pgmap; }; long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff, diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 1cb5f39d25c1..7b4899c06f49 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -123,8 +123,7 @@ struct dev_pagemap { }; #ifdef CONFIG_ZONE_DEVICE -void *devm_memremap_pages(struct device *dev, struct resource *res, - struct percpu_ref *ref, struct vmem_altmap *altmap); +void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap); struct dev_pagemap *get_dev_pagemap(unsigned long pfn, struct dev_pagemap *pgmap); @@ -134,8 +133,7 @@ void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns); static inline bool is_zone_device_page(const struct page *page); #else static inline void *devm_memremap_pages(struct device *dev, - struct resource *res, struct percpu_ref *ref, - struct vmem_altmap *altmap) + struct dev_pagemap *pgmap) { /* * Fail attempts to call devm_memremap_pages() without diff --git a/kernel/memremap.c b/kernel/memremap.c index 9207c44cce20..a9a948cd3d7f 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -275,9 +275,10 @@ static unsigned long pfn_end(struct dev_pagemap *pgmap) #define for_each_device_pfn(pfn, map) \ for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++) -static void devm_memremap_pages_release(struct device *dev, void *data) +static void devm_memremap_pages_release(void *data) { struct dev_pagemap *pgmap = data; + struct device *dev = pgmap->dev; struct resource *res = &pgmap->res; resource_size_t align_start, align_size; unsigned long pfn; @@ -316,29 +317,34 @@ static struct dev_pagemap 
*find_dev_pagemap(resource_size_t phys) /** * devm_memremap_pages - remap and provide memmap backing for the given resource * @dev: hosting device for @res - * @res: "host memory" address range - * @ref: a live per-cpu reference count - * @altmap: optional descriptor for allocating the memmap from @res + * @pgmap: pointer to a struct dev_pgmap * * Notes: - * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time - * (or devm release event). The expected order of events is that @ref has + * 1/ At a minimum the res, ref and type members of @pgmap must be initialized + * by the caller before passing it to this function + * + * 2/ The altmap field may optionally be initialized, in which case altmap_valid + * must be set to true + * + * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages() + * time (or devm release event). The expected order of events is that ref has * been through percpu_ref_kill() before devm_memremap_pages_release(). The * wait for the completion of all references being dropped and * percpu_ref_exit() must occur after devm_memremap_pages_release(). * - * 2/ @res is expected to be a host memory range that could feasibly be + * 4/ res is expected to be a host memory range that could feasibly be * treated as a "System RAM" range, i.e. not a device mmio range, but * this is not enforced. */ -void *devm_memremap_pages(struct device *dev, struct resource *res, - struct percpu_ref *ref, struct vmem_altmap *altmap) +void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) { resource_size_t align_start, align_size, align_end; + struct vmem_altmap *altmap = pgmap->altmap_valid ? + &pgmap->altmap : NULL; unsigned long pfn, pgoff, order; pgprot_t pgprot = PAGE_KERNEL; - struct dev_pagemap *pgmap; int error, nid, is_ram, i = 0; + struct resource *res = &pgmap->res; align_start = res->start & ~(SECTION_SIZE - 1); align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) @@ -355,27 +361,10 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, if (is_ram == REGION_INTERSECTS) return __va(res->start); - if (!ref) + if (!pgmap->ref) return ERR_PTR(-EINVAL); - pgmap = devres_alloc_node(devm_memremap_pages_release, - sizeof(*pgmap), GFP_KERNEL, dev_to_node(dev)); - if (!pgmap) - return ERR_PTR(-ENOMEM); - - memcpy(&pgmap->res, res, sizeof(*res)); - pgmap->dev = dev; - if (altmap) { - memcpy(&pgmap->altmap, altmap, sizeof(*altmap)); - pgmap->altmap_valid = true; - altmap = &pgmap->altmap; - } - pgmap->ref = ref; - pgmap->type = MEMORY_DEVICE_HOST; - pgmap->page_fault = NULL; - pgmap->page_free = NULL; - pgmap->data = NULL; mutex_lock(&pgmap_lock); error = 0; @@ -423,11 +412,13 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, */ list_del(&page->lru); page->pgmap = pgmap; - percpu_ref_get(ref); + percpu_ref_get(pgmap->ref); if (!(++i % 1024)) cond_resched(); } - devres_add(dev, pgmap); + + devm_add_action(dev, devm_memremap_pages_release, pgmap); + return __va(res->start); err_add_memory: diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c index e1f75a1914a1..ff9d3a5825e1 100644 --- a/tools/testing/nvdimm/test/iomap.c +++ b/tools/testing/nvdimm/test/iomap.c @@ -104,15 +104,14 @@ void *__wrap_devm_memremap(struct device *dev, resource_size_t offset, } EXPORT_SYMBOL(__wrap_devm_memremap); -void *__wrap_devm_memremap_pages(struct device *dev, struct resource *res, - struct percpu_ref *ref, struct vmem_altmap *altmap) +void *__wrap_devm_memremap_pages(struct device 
*dev, struct dev_pagemap *pgmap) { - resource_size_t offset = res->start; + resource_size_t offset = pgmap->res.start; struct nfit_test_resource *nfit_res = get_nfit_res(offset); if (nfit_res) return nfit_res->buf + offset - nfit_res->res.start; - return devm_memremap_pages(dev, res, ref, altmap); + return devm_memremap_pages(dev, pgmap); } EXPORT_SYMBOL(__wrap_devm_memremap_pages); -- cgit v1.2.3 From e697c5b90e97792187e45f8d78fb2bfa62eb0496 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 29 Dec 2017 08:54:06 +0100 Subject: memremap: merge find_dev_pagemap into get_dev_pagemap There is only one caller of the trivial function find_dev_pagemap left, so just merge it into the caller. Signed-off-by: Christoph Hellwig Signed-off-by: Dan Williams --- kernel/memremap.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/memremap.c b/kernel/memremap.c index a9a948cd3d7f..ada31b0d76d4 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -306,14 +306,6 @@ static void devm_memremap_pages_release(void *data) "%s: failed to free all reserved pages\n", __func__); } -/* assumes rcu_read_lock() held at entry */ -static struct dev_pagemap *find_dev_pagemap(resource_size_t phys) -{ - WARN_ON_ONCE(!rcu_read_lock_held()); - - return radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys)); -} - /** * devm_memremap_pages - remap and provide memmap backing for the given resource * @dev: hosting device for @res @@ -466,7 +458,7 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn, /* fall back to slow path lookup */ rcu_read_lock(); - pgmap = find_dev_pagemap(phys); + pgmap = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys)); if (pgmap && !percpu_ref_tryget_live(pgmap->ref)) pgmap = NULL; rcu_read_unlock(); -- cgit v1.2.3 From 10a0cd6e4932b5078215b1ec2c896597eec0eff9 Mon Sep 17 00:00:00 2001 From: "Jan H. Schönherr" Date: Fri, 19 Jan 2018 16:27:54 -0800 Subject: mm: Fix memory size alignment in devm_memremap_pages_release() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The functions devm_memremap_pages() and devm_memremap_pages_release() use different ways to calculate the section-aligned amount of memory. The latter function may use an incorrect size if the memory region is small but straddles a section border. Use the same code for both. Cc: Fixes: 5f29a77cd957 ("mm: fix mixed zone detection in devm_memremap_pages") Signed-off-by: Jan H. Schönherr Signed-off-by: Dan Williams --- kernel/memremap.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/memremap.c b/kernel/memremap.c index ada31b0d76d4..4ef97525a4ff 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -293,7 +293,8 @@ static void devm_memremap_pages_release(void *data) /* pages are dead and unused, undo the arch mapping */ align_start = res->start & ~(SECTION_SIZE - 1); - align_size = ALIGN(resource_size(res), SECTION_SIZE); + align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) + - align_start; mem_hotplug_begin(); arch_remove_memory(align_start, align_size, pgmap->altmap_valid ? -- cgit v1.2.3 From 77dd66a3c67c93ab401ccc15efff25578be281fd Mon Sep 17 00:00:00 2001 From: "Jan H. 
Schönherr" Date: Fri, 19 Jan 2018 16:26:33 -0800 Subject: mm: Fix devm_memremap_pages() collision handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If devm_memremap_pages() detects a collision while adding entries to the radix-tree, we call pgmap_radix_release(). Unfortunately, the function removes *all* entries for the range -- including the entries that caused the collision in the first place. Modify pgmap_radix_release() to take an additional argument to indicate where to stop, so that only newly added entries are removed from the tree. Cc: Fixes: 9476df7d80df ("mm: introduce find_dev_pagemap()") Signed-off-by: Jan H. Schönherr Signed-off-by: Dan Williams --- kernel/memremap.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/memremap.c b/kernel/memremap.c index 4ef97525a4ff..4849be5f9b3c 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -241,13 +241,16 @@ int device_private_entry_fault(struct vm_area_struct *vma, EXPORT_SYMBOL(device_private_entry_fault); #endif /* CONFIG_DEVICE_PRIVATE */ -static void pgmap_radix_release(struct resource *res) +static void pgmap_radix_release(struct resource *res, unsigned long end_pgoff) { unsigned long pgoff, order; mutex_lock(&pgmap_lock); - foreach_order_pgoff(res, order, pgoff) + foreach_order_pgoff(res, order, pgoff) { + if (pgoff >= end_pgoff) + break; radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff); + } mutex_unlock(&pgmap_lock); synchronize_rcu(); @@ -302,7 +305,7 @@ static void devm_memremap_pages_release(void *data) mem_hotplug_done(); untrack_pfn(NULL, PHYS_PFN(align_start), align_size); - pgmap_radix_release(res); + pgmap_radix_release(res, -1); dev_WARN_ONCE(dev, pgmap->altmap.alloc, "%s: failed to free all reserved pages\n", __func__); } @@ -418,7 +421,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) untrack_pfn(NULL, PHYS_PFN(align_start), align_size); err_pfn_remap: err_radix: - pgmap_radix_release(res); + pgmap_radix_release(res, pgoff); devres_free(pgmap); return ERR_PTR(error); } -- cgit v1.2.3