From 815744d75152078cde5391fc1e3c2d4424323fb6 Mon Sep 17 00:00:00 2001
From: Johannes Weiner
Date: Thu, 13 Jun 2019 15:55:46 -0700
Subject: mm: memcontrol: don't batch updates of local VM stats and events

The kernel test robot noticed a 26% will-it-scale pagefault regression from
commit 42a300353577 ("mm: memcontrol: fix recursive statistics correctness &
scalabilty"). This appears to be caused by bouncing the additional cachelines
from the new hierarchical statistics counters. We can fix this by getting rid
of the batched local counters instead.

Originally, there were *only* group-local counters, and they were fully
maintained per cpu. A reader of a stats file high up in the cgroup tree would
have to walk the entire subtree and collect each level's per-cpu counters to
get the recursive view. This was prohibitively expensive, and so we switched
to per-cpu batched updates of the local counters during a983b5ebee57 ("mm:
memcontrol: fix excessive complexity in memory.stat reporting"), reducing the
complexity from nr_subgroups * nr_cpus to nr_subgroups.

With growing machines and cgroup trees, the tree walk itself became too
expensive for monitoring top-level groups, and this is when the culprit patch
added hierarchy counters on each cgroup level. When the per-cpu batch size
would be reached, both the local and the hierarchy counters would get
batch-updated from the per-cpu delta simultaneously.

This makes local and hierarchical counter reads blazingly fast, but it
unfortunately makes the write-side too cache line intense.

Since local counter reads were never a problem - we only centralized them to
accelerate the hierarchy walk - and use of the local counters is becoming
rarer due to replacement with hierarchical views (ongoing rework in the page
reclaim and workingset code), we can make those local counters unbatched
per-cpu counters again.

The scheme will then be as such:

   when a memcg statistic changes, the writer will:
   - update the local counter (per-cpu)
   - update the batch counter (per-cpu). If the batch is full:
     - spill the batch into the group's atomic_t
     - spill the batch into all ancestors' atomic_ts
     - empty out the batch counter (per-cpu)

   when a local memcg counter is read, the reader will:
   - collect the local counter from all cpus

   when a hierarchy memcg counter is read, the reader will:
   - read the atomic_t

We might be able to simplify this further and make the recursive counters
unbatched per-cpu counters as well (batch upward propagation, but leave
per-cpu collection to the readers), but that will require a more in-depth
analysis and testing of all the callsites. Deal with the immediate
regression for now.
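The scheme above can be summarized in a small standalone C sketch. This is
illustrative only: struct group, stat_update(), stat_read_local(),
stat_read_subtree(), NR_CPUS and BATCH are made-up stand-ins, and the real
implementation is the __mod_memcg_state()/memcg_page_state_local() hunks in
the diff below.

    #include <stdatomic.h>
    #include <stdlib.h>

    #define NR_CPUS 4        /* illustrative; the kernel walks possible CPUs */
    #define BATCH   32       /* stand-in for MEMCG_CHARGE_BATCH */

    struct group {
        struct group *parent;
        long local[NR_CPUS];    /* unbatched local counter, one slot per cpu */
        long batch[NR_CPUS];    /* per-cpu delta not yet spilled upwards */
        _Atomic long vmstat;    /* recursive (subtree) counter */
    };

    /* writer: cheap per-cpu adds; only a full batch touches shared cachelines */
    static void stat_update(struct group *g, int cpu, long val)
    {
        long x;

        g->local[cpu] += val;
        x = (g->batch[cpu] += val);
        if (labs(x) > BATCH) {
            for (struct group *p = g; p; p = p->parent)
                atomic_fetch_add(&p->vmstat, x);
            g->batch[cpu] = 0;
        }
    }

    /* local read: fold the per-cpu slots */
    static long stat_read_local(struct group *g)
    {
        long x = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            x += g->local[cpu];
        return x;
    }

    /* hierarchy read: a single atomic load */
    static long stat_read_subtree(struct group *g)
    {
        return atomic_load(&g->vmstat);
    }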
Link: http://lkml.kernel.org/r/20190521151647.GB2870@cmpxchg.org Fixes: 42a300353577 ("mm: memcontrol: fix recursive statistics correctness & scalabilty") Signed-off-by: Johannes Weiner Reported-by: kernel test robot Tested-by: kernel test robot Cc: Michal Hocko Cc: Shakeel Butt Cc: Roman Gushchin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 26 ++++++++++++++++++-------- mm/memcontrol.c | 41 ++++++++++++++++++++++++++++------------- 2 files changed, 46 insertions(+), 21 deletions(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index edf9e8f32d70..1dcb763bb610 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -117,9 +117,12 @@ struct memcg_shrinker_map { struct mem_cgroup_per_node { struct lruvec lruvec; + /* Legacy local VM stats */ + struct lruvec_stat __percpu *lruvec_stat_local; + + /* Subtree VM stats (batched updates) */ struct lruvec_stat __percpu *lruvec_stat_cpu; atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS]; - atomic_long_t lruvec_stat_local[NR_VM_NODE_STAT_ITEMS]; unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; @@ -265,17 +268,18 @@ struct mem_cgroup { atomic_t moving_account; struct task_struct *move_lock_task; - /* memory.stat */ + /* Legacy local VM stats and events */ + struct memcg_vmstats_percpu __percpu *vmstats_local; + + /* Subtree VM stats and events (batched updates) */ struct memcg_vmstats_percpu __percpu *vmstats_percpu; MEMCG_PADDING(_pad2_); atomic_long_t vmstats[MEMCG_NR_STAT]; - atomic_long_t vmstats_local[MEMCG_NR_STAT]; - atomic_long_t vmevents[NR_VM_EVENT_ITEMS]; - atomic_long_t vmevents_local[NR_VM_EVENT_ITEMS]; + /* memory.events */ atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS]; unsigned long socket_pressure; @@ -567,7 +571,11 @@ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx) { - long x = atomic_long_read(&memcg->vmstats_local[idx]); + long x = 0; + int cpu; + + for_each_possible_cpu(cpu) + x += per_cpu(memcg->vmstats_local->stat[idx], cpu); #ifdef CONFIG_SMP if (x < 0) x = 0; @@ -641,13 +649,15 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, enum node_stat_item idx) { struct mem_cgroup_per_node *pn; - long x; + long x = 0; + int cpu; if (mem_cgroup_disabled()) return node_page_state(lruvec_pgdat(lruvec), idx); pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - x = atomic_long_read(&pn->lruvec_stat_local[idx]); + for_each_possible_cpu(cpu) + x += per_cpu(pn->lruvec_stat_local->count[idx], cpu); #ifdef CONFIG_SMP if (x < 0) x = 0; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ca0bc6e6be13..ba9138a4a1de 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -691,11 +691,12 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) if (mem_cgroup_disabled()) return; + __this_cpu_add(memcg->vmstats_local->stat[idx], val); + x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { struct mem_cgroup *mi; - atomic_long_add(x, &memcg->vmstats_local[idx]); for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) atomic_long_add(x, &mi->vmstats[idx]); x = 0; @@ -745,11 +746,12 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, __mod_memcg_state(memcg, idx, val); /* Update lruvec */ + __this_cpu_add(pn->lruvec_stat_local->count[idx], val); + x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); if 
(unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { struct mem_cgroup_per_node *pi; - atomic_long_add(x, &pn->lruvec_stat_local[idx]); for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id)) atomic_long_add(x, &pi->lruvec_stat[idx]); x = 0; @@ -771,11 +773,12 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, if (mem_cgroup_disabled()) return; + __this_cpu_add(memcg->vmstats_local->events[idx], count); + x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]); if (unlikely(x > MEMCG_CHARGE_BATCH)) { struct mem_cgroup *mi; - atomic_long_add(x, &memcg->vmevents_local[idx]); for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) atomic_long_add(x, &mi->vmevents[idx]); x = 0; @@ -790,7 +793,12 @@ static unsigned long memcg_events(struct mem_cgroup *memcg, int event) static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) { - return atomic_long_read(&memcg->vmevents_local[event]); + long x = 0; + int cpu; + + for_each_possible_cpu(cpu) + x += per_cpu(memcg->vmstats_local->events[event], cpu); + return x; } static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, @@ -2191,11 +2199,9 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu) long x; x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0); - if (x) { - atomic_long_add(x, &memcg->vmstats_local[i]); + if (x) for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) atomic_long_add(x, &memcg->vmstats[i]); - } if (i >= NR_VM_NODE_STAT_ITEMS) continue; @@ -2205,12 +2211,10 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu) pn = mem_cgroup_nodeinfo(memcg, nid); x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0); - if (x) { - atomic_long_add(x, &pn->lruvec_stat_local[i]); + if (x) do { atomic_long_add(x, &pn->lruvec_stat[i]); } while ((pn = parent_nodeinfo(pn, nid))); - } } } @@ -2218,11 +2222,9 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu) long x; x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0); - if (x) { - atomic_long_add(x, &memcg->vmevents_local[i]); + if (x) for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) atomic_long_add(x, &memcg->vmevents[i]); - } } } @@ -4483,8 +4485,15 @@ static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) if (!pn) return 1; + pn->lruvec_stat_local = alloc_percpu(struct lruvec_stat); + if (!pn->lruvec_stat_local) { + kfree(pn); + return 1; + } + pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat); if (!pn->lruvec_stat_cpu) { + free_percpu(pn->lruvec_stat_local); kfree(pn); return 1; } @@ -4506,6 +4515,7 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) return; free_percpu(pn->lruvec_stat_cpu); + free_percpu(pn->lruvec_stat_local); kfree(pn); } @@ -4516,6 +4526,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg) for_each_node(node) free_mem_cgroup_per_node_info(memcg, node); free_percpu(memcg->vmstats_percpu); + free_percpu(memcg->vmstats_local); kfree(memcg); } @@ -4544,6 +4555,10 @@ static struct mem_cgroup *mem_cgroup_alloc(void) if (memcg->id.id < 0) goto fail; + memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu); + if (!memcg->vmstats_local) + goto fail; + memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu); if (!memcg->vmstats_percpu) goto fail; -- cgit v1.2.3 From 3510955b327176fd4cbab5baa75b449f077722a2 Mon Sep 17 00:00:00 2001 From: Shakeel Butt Date: Thu, 13 Jun 2019 15:55:49 -0700 Subject: mm/list_lru.c: fix memory leak in __memcg_init_list_lru_node Syzbot reported following memory leak: ffffffffda RBX: 0000000000000003 RCX: 0000000000441f79 BUG: 
memory leak unreferenced object 0xffff888114f26040 (size 32): comm "syz-executor626", pid 7056, jiffies 4294948701 (age 39.410s) hex dump (first 32 bytes): 40 60 f2 14 81 88 ff ff 40 60 f2 14 81 88 ff ff @`......@`...... 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ backtrace: slab_post_alloc_hook mm/slab.h:439 [inline] slab_alloc mm/slab.c:3326 [inline] kmem_cache_alloc_trace+0x13d/0x280 mm/slab.c:3553 kmalloc include/linux/slab.h:547 [inline] __memcg_init_list_lru_node+0x58/0xf0 mm/list_lru.c:352 memcg_init_list_lru_node mm/list_lru.c:375 [inline] memcg_init_list_lru mm/list_lru.c:459 [inline] __list_lru_init+0x193/0x2a0 mm/list_lru.c:626 alloc_super+0x2e0/0x310 fs/super.c:269 sget_userns+0x94/0x2a0 fs/super.c:609 sget+0x8d/0xb0 fs/super.c:660 mount_nodev+0x31/0xb0 fs/super.c:1387 fuse_mount+0x2d/0x40 fs/fuse/inode.c:1236 legacy_get_tree+0x27/0x80 fs/fs_context.c:661 vfs_get_tree+0x2e/0x120 fs/super.c:1476 do_new_mount fs/namespace.c:2790 [inline] do_mount+0x932/0xc50 fs/namespace.c:3110 ksys_mount+0xab/0x120 fs/namespace.c:3319 __do_sys_mount fs/namespace.c:3333 [inline] __se_sys_mount fs/namespace.c:3330 [inline] __x64_sys_mount+0x26/0x30 fs/namespace.c:3330 do_syscall_64+0x76/0x1a0 arch/x86/entry/common.c:301 entry_SYSCALL_64_after_hwframe+0x44/0xa9 This is a simple off by one bug on the error path. Link: http://lkml.kernel.org/r/20190528043202.99980-1-shakeelb@google.com Fixes: 60d3fd32a7a9 ("list_lru: introduce per-memcg lists") Reported-by: syzbot+f90a420dfe2b1b03cb2c@syzkaller.appspotmail.com Signed-off-by: Shakeel Butt Acked-by: Michal Hocko Reviewed-by: Kirill Tkhai Cc: [4.0+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/list_lru.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/list_lru.c b/mm/list_lru.c index e4709fdaa8e6..927d85be32f6 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -354,7 +354,7 @@ static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus, } return 0; fail: - __memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1); + __memcg_destroy_list_lru_node(memcg_lrus, begin, i); return -ENOMEM; } -- cgit v1.2.3 From c04e32e911653442fc834be6e92e072aeebe01a1 Mon Sep 17 00:00:00 2001 From: Manuel Traut Date: Thu, 13 Jun 2019 15:55:52 -0700 Subject: scripts/decode_stacktrace.sh: prefix addr2line with $CROSS_COMPILE At least for ARM64 kernels compiled with the crosstoolchain from Debian/stretch or with the toolchain from kernel.org the line number is not decoded correctly by 'decode_stacktrace.sh': $ echo "[ 136.513051] f1+0x0/0xc [kcrash]" | \ CROSS_COMPILE=/opt/gcc-8.1.0-nolibc/aarch64-linux/bin/aarch64-linux- \ ./scripts/decode_stacktrace.sh /scratch/linux-arm64/vmlinux \ /scratch/linux-arm64 \ /nfs/debian/lib/modules/4.20.0-devel [ 136.513051] f1 (/linux/drivers/staging/kcrash/kcrash.c:68) kcrash If addr2line from the toolchain is used the decoded line number is correct: [ 136.513051] f1 (/linux/drivers/staging/kcrash/kcrash.c:57) kcrash Link: http://lkml.kernel.org/r/20190527083425.3763-1-manut@linutronix.de Signed-off-by: Manuel Traut Acked-by: Konstantin Khlebnikov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/decode_stacktrace.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh index bcdd45df3f51..a7a36209a193 100755 --- a/scripts/decode_stacktrace.sh +++ b/scripts/decode_stacktrace.sh @@ -73,7 +73,7 @@ parse_symbol() { if [[ "${cache[$module,$address]+isset}" == "isset" ]]; then local 
code=${cache[$module,$address]} else - local code=$(addr2line -i -e "$objfile" "$address") + local code=$(${CROSS_COMPILE}addr2line -i -e "$objfile" "$address") cache[$module,$address]=$code fi -- cgit v1.2.3 From dedca63504a204dc8410d98883fdc16dffa8cb80 Mon Sep 17 00:00:00 2001 From: "Potyra, Stefan" Date: Thu, 13 Jun 2019 15:55:55 -0700 Subject: mm/mlock.c: mlockall error for flag MCL_ONFAULT If mlockall() is called with only MCL_ONFAULT as flag, it removes any previously applied lockings and does nothing else. This behavior is counter-intuitive and doesn't match the Linux man page. For mlockall(): EINVAL Unknown flags were specified or MCL_ONFAULT was specified without either MCL_FUTURE or MCL_CURRENT. Consequently, return the error EINVAL, if only MCL_ONFAULT is passed. That way, applications will at least detect that they are calling mlockall() incorrectly. Link: http://lkml.kernel.org/r/20190527075333.GA6339@er01809n.ebgroup.elektrobit.com Fixes: b0f205c2a308 ("mm: mlock: add mlock flags to enable VM_LOCKONFAULT usage") Signed-off-by: Stefan Potyra Reviewed-by: Daniel Jordan Acked-by: Michal Hocko Acked-by: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mlock.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm/mlock.c b/mm/mlock.c index 080f3b36415b..cef65bf3964c 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -797,7 +797,8 @@ SYSCALL_DEFINE1(mlockall, int, flags) unsigned long lock_limit; int ret; - if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT))) + if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) || + flags == MCL_ONFAULT) return -EINVAL; if (!can_do_mlock()) -- cgit v1.2.3 From b17f18aff2876ceb3f1fde580c96cd489babf28e Mon Sep 17 00:00:00 2001 From: Kirill Tkhai Date: Thu, 13 Jun 2019 15:55:58 -0700 Subject: mm/vmscan.c: fix recent_rotated history Johannes pointed out that after commit 886cf1901db9 ("mm: move recent_rotated pages calculation to shrink_inactive_list()") we lost all zone_reclaim_stat::recent_rotated history. This fixes it. Link: http://lkml.kernel.org/r/155905972210.26456.11178359431724024112.stgit@localhost.localdomain Fixes: 886cf1901db9 ("mm: move recent_rotated pages calculation to shrink_inactive_list()") Signed-off-by: Kirill Tkhai Reported-by: Johannes Weiner Acked-by: Michal Hocko Acked-by: Johannes Weiner Cc: Daniel Jordan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index 7acd0afdfc2a..ffa82b1d39a2 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1953,8 +1953,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, if (global_reclaim(sc)) __count_vm_events(item, nr_reclaimed); __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed); - reclaim_stat->recent_rotated[0] = stat.nr_activate[0]; - reclaim_stat->recent_rotated[1] = stat.nr_activate[1]; + reclaim_stat->recent_rotated[0] += stat.nr_activate[0]; + reclaim_stat->recent_rotated[1] += stat.nr_activate[1]; move_pages_to_lru(lruvec, &page_list); -- cgit v1.2.3 From be99ca2716972a712cde46092c54dee5e6192bf8 Mon Sep 17 00:00:00 2001 From: Wengang Wang Date: Thu, 13 Jun 2019 15:56:01 -0700 Subject: fs/ocfs2: fix race in ocfs2_dentry_attach_lock() ocfs2_dentry_attach_lock() can be executed in parallel threads against the same dentry. Make that race safe. 
The race is like this: thread A thread B (A1) enter ocfs2_dentry_attach_lock, seeing dentry->d_fsdata is NULL, and no alias found by ocfs2_find_local_alias, so kmalloc a new ocfs2_dentry_lock structure to local variable "dl", dl1 ..... (B1) enter ocfs2_dentry_attach_lock, seeing dentry->d_fsdata is NULL, and no alias found by ocfs2_find_local_alias so kmalloc a new ocfs2_dentry_lock structure to local variable "dl", dl2. ...... (A2) set dentry->d_fsdata with dl1, call ocfs2_dentry_lock() and increase dl1->dl_lockres.l_ro_holders to 1 on success. ...... (B2) set dentry->d_fsdata with dl2 call ocfs2_dentry_lock() and increase dl2->dl_lockres.l_ro_holders to 1 on success. ...... (A3) call ocfs2_dentry_unlock() and decrease dl2->dl_lockres.l_ro_holders to 0 on success. .... (B3) call ocfs2_dentry_unlock(), decreasing dl2->dl_lockres.l_ro_holders, but see it's zero now, panic Link: http://lkml.kernel.org/r/20190529174636.22364-1-wen.gang.wang@oracle.com Signed-off-by: Wengang Wang Reported-by: Daniel Sobe Tested-by: Daniel Sobe Reviewed-by: Changwei Ge Reviewed-by: Joseph Qi Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Cc: Gang He Cc: Jun Piao Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/dcache.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c index 2d016937fdda..42a61eecdacd 100644 --- a/fs/ocfs2/dcache.c +++ b/fs/ocfs2/dcache.c @@ -296,6 +296,18 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry, out_attach: spin_lock(&dentry_attach_lock); + if (unlikely(dentry->d_fsdata && !alias)) { + /* d_fsdata is set by a racing thread which is doing + * the same thing as this thread is doing. Leave the racing + * thread going ahead and we return here. + */ + spin_unlock(&dentry_attach_lock); + iput(dl->dl_inode); + ocfs2_lock_res_free(&dl->dl_lockres); + kfree(dl); + return 0; + } + dentry->d_fsdata = dl; dl->dl_count++; spin_unlock(&dentry_attach_lock); -- cgit v1.2.3 From 7a30df49f63ad92318ddf1f7498d1129a77dd4bd Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Thu, 13 Jun 2019 15:56:05 -0700 Subject: mm: mmu_gather: remove __tlb_reset_range() for force flush A few new fields were added to mmu_gather to make TLB flush smarter for huge page by telling what level of page table is changed. __tlb_reset_range() is used to reset all these page table state to unchanged, which is called by TLB flush for parallel mapping changes for the same range under non-exclusive lock (i.e. read mmap_sem). Before commit dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in munmap"), the syscalls (e.g. MADV_DONTNEED, MADV_FREE) which may update PTEs in parallel don't remove page tables. But, the forementioned commit may do munmap() under read mmap_sem and free page tables. This may result in program hang on aarch64 reported by Jan Stancek. The problem could be reproduced by his test program with slightly modified below. 
---8<---
static int map_size = 4096;
static int num_iter = 500;
static long threads_total;

static void *distant_area;

void *map_write_unmap(void *ptr)
{
    int *fd = ptr;
    unsigned char *map_address;
    int i, j = 0;

    for (i = 0; i < num_iter; i++) {
        map_address = mmap(distant_area, (size_t) map_size,
                PROT_WRITE | PROT_READ,
                MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (map_address == MAP_FAILED) {
            perror("mmap");
            exit(1);
        }

        for (j = 0; j < map_size; j++)
            map_address[j] = 'b';

        if (munmap(map_address, map_size) == -1) {
            perror("munmap");
            exit(1);
        }
    }

    return NULL;
}

void *dummy(void *ptr)
{
    return NULL;
}

int main(void)
{
    pthread_t thid[2];

    /* hint for mmap in map_write_unmap() */
    distant_area = mmap(0, DISTANT_MMAP_SIZE, PROT_WRITE | PROT_READ,
            MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    munmap(distant_area, (size_t)DISTANT_MMAP_SIZE);
    distant_area += DISTANT_MMAP_SIZE / 2;

    while (1) {
        pthread_create(&thid[0], NULL, map_write_unmap, NULL);
        pthread_create(&thid[1], NULL, dummy, NULL);

        pthread_join(thid[0], NULL);
        pthread_join(thid[1], NULL);
    }
}
---8<---

The program may bring in parallel execution like below:

          t1                                  t2
    munmap(map_address)
      downgrade_write(&mm->mmap_sem);
      unmap_region()
        tlb_gather_mmu()
          inc_tlb_flush_pending(tlb->mm);
        free_pgtables()
          tlb->freed_tables = 1
          tlb->cleared_pmds = 1

                                  pthread_exit()
                                  madvise(thread_stack, 8M, MADV_DONTNEED)
                                    zap_page_range()
                                      tlb_gather_mmu()
                                        inc_tlb_flush_pending(tlb->mm);

      tlb_finish_mmu()
        if (mm_tlb_flush_nested(tlb->mm))
          __tlb_reset_range()

__tlb_reset_range() would reset the freed_tables and cleared_* bits, which is
inconsistent for munmap(), since it does free page tables. As a result, some
architectures, e.g. aarch64, may not flush the TLB as completely as expected
and stale TLB entries remain.

Use a fullmm flush instead, since it yields much better performance on
aarch64, while non-fullmm doesn't yield a significant difference on x86.

The original proposed fix came from Jan Stancek, who mainly debugged this
issue; I just wrapped up everything together.
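The intent of the fix can be restated as a compact standalone sketch. This is
illustrative only: struct tlb_sketch, flush_is_nested() and finish_flush()
are made-up stand-ins, not the kernel's mmu_gather API; the real change is
the tlb_finish_mmu() hunk in the diff below.

    #include <stdbool.h>

    struct tlb_sketch {
        bool fullmm;             /* flush the whole address space */
        bool freed_tables;       /* page-table pages were freed */
        int nr_pending_flushes;  /* stand-in for mm->tlb_flush_pending */
    };

    static bool flush_is_nested(const struct tlb_sketch *tlb)
    {
        /* another thread is batching PTE changes on the same mm */
        return tlb->nr_pending_flushes > 1;
    }

    static void finish_flush(struct tlb_sketch *tlb)
    {
        if (flush_is_nested(tlb)) {
            /*
             * Don't trust the accumulated range/level hints: force a
             * full-mm flush and assume page tables were freed, so no
             * architecture can leave stale entries behind.
             */
            tlb->fullmm = true;
            tlb->freed_tables = true;
        }
        /* ...issue the architecture TLB flush based on the fields above... */
    }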
Jan's testing results: v5.2-rc2-24-gbec7550cca10 -------------------------- mean stddev real 37.382 2.780 user 1.420 0.078 sys 54.658 1.855 v5.2-rc2-24-gbec7550cca10 + "mm: mmu_gather: remove __tlb_reset_range() for force flush" ---------------------------------------------------------------------------------------_ mean stddev real 37.119 2.105 user 1.548 0.087 sys 55.698 1.357 [akpm@linux-foundation.org: coding-style fixes] Link: http://lkml.kernel.org/r/1558322252-113575-1-git-send-email-yang.shi@linux.alibaba.com Fixes: dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in munmap") Signed-off-by: Yang Shi Signed-off-by: Jan Stancek Reported-by: Jan Stancek Tested-by: Jan Stancek Suggested-by: Will Deacon Tested-by: Will Deacon Acked-by: Will Deacon Cc: Peter Zijlstra Cc: Nick Piggin Cc: "Aneesh Kumar K.V" Cc: Nadav Amit Cc: Minchan Kim Cc: Mel Gorman Cc: [4.20+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mmu_gather.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c index 99740e1dd273..8c943a6e1696 100644 --- a/mm/mmu_gather.c +++ b/mm/mmu_gather.c @@ -245,14 +245,28 @@ void tlb_finish_mmu(struct mmu_gather *tlb, { /* * If there are parallel threads are doing PTE changes on same range - * under non-exclusive lock(e.g., mmap_sem read-side) but defer TLB - * flush by batching, a thread has stable TLB entry can fail to flush - * the TLB by observing pte_none|!pte_dirty, for example so flush TLB - * forcefully if we detect parallel PTE batching threads. + * under non-exclusive lock (e.g., mmap_sem read-side) but defer TLB + * flush by batching, one thread may end up seeing inconsistent PTEs + * and result in having stale TLB entries. So flush TLB forcefully + * if we detect parallel PTE batching threads. + * + * However, some syscalls, e.g. munmap(), may free page tables, this + * needs force flush everything in the given range. Otherwise this + * may result in having stale TLB entries for some architectures, + * e.g. aarch64, that could specify flush what level TLB. */ if (mm_tlb_flush_nested(tlb->mm)) { + /* + * The aarch64 yields better performance with fullmm by + * avoiding multiple CPUs spamming TLBI messages at the + * same time. + * + * On x86 non-fullmm doesn't yield significant difference + * against fullmm. + */ + tlb->fullmm = 1; __tlb_reset_range(tlb); - __tlb_adjust_range(tlb, start, end - start); + tlb->freed_tables = 1; } tlb_flush_mmu(tlb); -- cgit v1.2.3 From 0874bb49bb21bf24deda853e8bf61b8325e24bcb Mon Sep 17 00:00:00 2001 From: swkhack Date: Thu, 13 Jun 2019 15:56:08 -0700 Subject: mm/mlock.c: change count_mm_mlocked_page_nr return type On a 64-bit machine the value of "vma->vm_end - vma->vm_start" may be negative when using 32 bit ints and the "count >> PAGE_SHIFT"'s result will be wrong. So change the local variable and return value to unsigned long to fix the problem. Link: http://lkml.kernel.org/r/20190513023701.83056-1-swkhack@gmail.com Fixes: 0cf2f6f6dc60 ("mm: mlock: check against vma for actual mlock() size") Signed-off-by: swkhack Acked-by: Michal Hocko Reviewed-by: Andrew Morton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mlock.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/mlock.c b/mm/mlock.c index cef65bf3964c..a90099da4fb4 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -636,11 +636,11 @@ static int apply_vma_lock_flags(unsigned long start, size_t len, * is also counted. 
* Return value: previously mlocked page counts */ -static int count_mm_mlocked_page_nr(struct mm_struct *mm, +static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm, unsigned long start, size_t len) { struct vm_area_struct *vma; - int count = 0; + unsigned long count = 0; if (mm == NULL) mm = current->mm; -- cgit v1.2.3 From 59ea6d06cfa9247b586a695c21f94afa7183af74 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Thu, 13 Jun 2019 15:56:11 -0700 Subject: coredump: fix race condition between collapse_huge_page() and core dumping When fixing the race conditions between the coredump and the mmap_sem holders outside the context of the process, we focused on mmget_not_zero()/get_task_mm() callers in 04f5866e41fb70 ("coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping"), but those aren't the only cases where the mmap_sem can be taken outside of the context of the process as Michal Hocko noticed while backporting that commit to older -stable kernels. If mmgrab() is called in the context of the process, but then the mm_count reference is transferred outside the context of the process, that can also be a problem if the mmap_sem has to be taken for writing through that mm_count reference. khugepaged registration calls mmgrab() in the context of the process, but the mmap_sem for writing is taken later in the context of the khugepaged kernel thread. collapse_huge_page() after taking the mmap_sem for writing doesn't modify any vma, so it's not obvious that it could cause a problem to the coredump, but it happens to modify the pmd in a way that breaks an invariant that pmd_trans_huge_lock() relies upon. collapse_huge_page() needs the mmap_sem for writing just to block concurrent page faults that call pmd_trans_huge_lock(). Specifically the invariant that "!pmd_trans_huge()" cannot become a "pmd_trans_huge()" doesn't hold while collapse_huge_page() runs. The coredump will call __get_user_pages() without mmap_sem for reading, which eventually can invoke a lockless page fault which will need a functional pmd_trans_huge_lock(). So collapse_huge_page() needs to use mmget_still_valid() to check it's not running concurrently with the coredump... as long as the coredump can invoke page faults without holding the mmap_sem for reading. This has "Fixes: khugepaged" to facilitate backporting, but in my view it's more a bug in the coredump code that will eventually have to be rewritten to stop invoking page faults without the mmap_sem for reading. So the long term plan is still to drop all mmget_still_valid(). Link: http://lkml.kernel.org/r/20190607161558.32104-1-aarcange@redhat.com Fixes: ba76149f47d8 ("thp: khugepaged") Signed-off-by: Andrea Arcangeli Reported-by: Michal Hocko Acked-by: Michal Hocko Acked-by: Kirill A. Shutemov Cc: Oleg Nesterov Cc: Jann Horn Cc: Hugh Dickins Cc: Mike Rapoport Cc: Mike Kravetz Cc: Peter Xu Cc: Jason Gunthorpe Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched/mm.h | 4 ++++ mm/khugepaged.c | 3 +++ 2 files changed, 7 insertions(+) diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index a3fda9f024c3..4a7944078cc3 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -54,6 +54,10 @@ static inline void mmdrop(struct mm_struct *mm) * followed by taking the mmap_sem for writing before modifying the * vmas or anything the coredump pretends not to change from under it. 
* + * It also has to be called when mmgrab() is used in the context of + * the process, but then the mm_count refcount is transferred outside + * the context of the process to run down_write() on that pinned mm. + * * NOTE: find_extend_vma() called from GUP context is the only place * that can modify the "mm" (notably the vm_start/end) under mmap_sem * for reading and outside the context of the process, so it is also diff --git a/mm/khugepaged.c b/mm/khugepaged.c index a335f7c1fac4..0f7419938008 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1004,6 +1004,9 @@ static void collapse_huge_page(struct mm_struct *mm, * handled by the anon_vma lock + PG_lock. */ down_write(&mm->mmap_sem); + result = SCAN_ANY_PROCESS; + if (!mmget_still_valid(mm)) + goto out; result = hugepage_vma_revalidate(mm, address, &vma); if (result) goto out; -- cgit v1.2.3 From a58f2cef26e1ca44182c8b22f4f4395e702a5795 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Thu, 13 Jun 2019 15:56:15 -0700 Subject: mm/vmscan.c: fix trying to reclaim unevictable LRU page There was the below bug report from Wu Fangsuo. On the CMA allocation path, isolate_migratepages_range() could isolate unevictable LRU pages and reclaim_clean_page_from_list() can try to reclaim them if they are clean file-backed pages. page:ffffffbf02f33b40 count:86 mapcount:84 mapping:ffffffc08fa7a810 index:0x24 flags: 0x19040c(referenced|uptodate|arch_1|mappedtodisk|unevictable|mlocked) raw: 000000000019040c ffffffc08fa7a810 0000000000000024 0000005600000053 raw: ffffffc009b05b20 ffffffc009b05b20 0000000000000000 ffffffc09bf3ee80 page dumped because: VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page)) page->mem_cgroup:ffffffc09bf3ee80 ------------[ cut here ]------------ kernel BUG at /home/build/farmland/adroid9.0/kernel/linux/mm/vmscan.c:1350! Internal error: Oops - BUG: 0 [#1] PREEMPT SMP Modules linked in: CPU: 0 PID: 7125 Comm: syz-executor Tainted: G S 4.14.81 #3 Hardware name: ASR AQUILAC EVB (DT) task: ffffffc00a54cd00 task.stack: ffffffc009b00000 PC is at shrink_page_list+0x1998/0x3240 LR is at shrink_page_list+0x1998/0x3240 pc : [] lr : [] pstate: 60400045 sp : ffffffc009b05940 .. shrink_page_list+0x1998/0x3240 reclaim_clean_pages_from_list+0x3c0/0x4f0 alloc_contig_range+0x3bc/0x650 cma_alloc+0x214/0x668 ion_cma_allocate+0x98/0x1d8 ion_alloc+0x200/0x7e0 ion_ioctl+0x18c/0x378 do_vfs_ioctl+0x17c/0x1780 SyS_ioctl+0xac/0xc0 Wu found it's due to commit ad6b67041a45 ("mm: remove SWAP_MLOCK in ttu"). Before that, unevictable pages go to cull_mlocked so that we can't reach the VM_BUG_ON_PAGE line. To fix the issue, this patch filters out unevictable LRU pages from the reclaim_clean_pages_from_list in CMA. 
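The filter described above can be sketched standalone. struct page_sketch and
cma_may_reclaim_clean() are illustrative stand-ins; the actual change is the
one-line condition added to reclaim_clean_pages_from_list() in the diff below.

    #include <stdbool.h>

    /* stand-in for the page state consulted on the CMA allocation path */
    struct page_sketch {
        bool file_cache;     /* page_is_file_cache() */
        bool dirty;          /* PageDirty() */
        bool movable;        /* __PageMovable(): non-LRU movable pages */
        bool unevictable;    /* PageUnevictable(): e.g. mlocked pages */
    };

    /*
     * Only clean, file-backed, ordinary LRU pages that are evictable may be
     * reclaimed directly; unevictable (e.g. mlocked) pages stay on the list
     * and are migrated instead of tripping the VM_BUG_ON in shrink_page_list().
     */
    static bool cma_may_reclaim_clean(const struct page_sketch *page)
    {
        return page->file_cache && !page->dirty &&
               !page->movable && !page->unevictable;
    }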
Link: http://lkml.kernel.org/r/20190524071114.74202-1-minchan@kernel.org Fixes: ad6b67041a45 ("mm: remove SWAP_MLOCK in ttu") Signed-off-by: Minchan Kim Reported-by: Wu Fangsuo Debugged-by: Wu Fangsuo Tested-by: Wu Fangsuo Reviewed-by: Andrew Morton Acked-by: Michal Hocko Cc: Pankaj Suryawanshi Cc: [4.12+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index ffa82b1d39a2..7889f583ced9 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1505,7 +1505,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, list_for_each_entry_safe(page, next, page_list, lru) { if (page_is_file_cache(page) && !PageDirty(page) && - !__PageMovable(page)) { + !__PageMovable(page) && !PageUnevictable(page)) { ClearPageActive(page); list_move(&page->lru, &clean_pages); } -- cgit v1.2.3 From 2374b682255184d7ef75fcb507ce5af4995ead32 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 13 Jun 2019 15:56:18 -0700 Subject: drivers/base/devres: introduce devm_release_action() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "mm/devm_memremap_pages: Fix page release race", v2. Logan audited the devm_memremap_pages() shutdown path and noticed that it was possible to proceed to arch_remove_memory() before all potential page references have been reaped. Introduce a new ->cleanup() callback to do the work of waiting for any straggling page references and then perform the percpu_ref_exit() in devm_memremap_pages_release() context. For p2pdma this involves some deeper reworks to reference count resources on a per-instance basis rather than a per pci-device basis. A modified genalloc api is introduced to convey a driver-private pointer through gen_pool_{alloc,free}() interfaces. Also, a devm_memunmap_pages() api is introduced since p2pdma does not auto-release resources on a setup failure. The dax and pmem changes pass the nvdimm unit tests, and the p2pdma changes should now pass testing with the pci_p2pdma_release() fix. Jrme, how does this look for HMM? This patch (of 6): The devm_add_action() facility allows a resource allocation routine to add custom devm semantics. One such user is devm_memremap_pages(). There is now a need to manually trigger devm_memremap_pages_release(). Introduce devm_release_action() so the release action can be triggered via a new devm_memunmap_pages() api in a follow-on change. Link: http://lkml.kernel.org/r/155727336530.292046.2926860263201336366.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams Reviewed-by: Ira Weiny Reviewed-by: Logan Gunthorpe Cc: Bjorn Helgaas Cc: Christoph Hellwig Cc: Greg Kroah-Hartman Cc: "Rafael J. 
Wysocki" Cc: "Jérôme Glisse" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/devres.c | 24 +++++++++++++++++++++++- include/linux/device.h | 1 + 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/drivers/base/devres.c b/drivers/base/devres.c index e038e2b3b7ea..0bbb328bd17f 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -755,10 +755,32 @@ void devm_remove_action(struct device *dev, void (*action)(void *), void *data) WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match, &devres)); - } EXPORT_SYMBOL_GPL(devm_remove_action); +/** + * devm_release_action() - release previously added custom action + * @dev: Device that owns the action + * @action: Function implementing the action + * @data: Pointer to data passed to @action implementation + * + * Releases and removes instance of @action previously added by + * devm_add_action(). Both action and data should match one of the + * existing entries. + */ +void devm_release_action(struct device *dev, void (*action)(void *), void *data) +{ + struct action_devres devres = { + .data = data, + .action = action, + }; + + WARN_ON(devres_release(dev, devm_action_release, devm_action_match, + &devres)); + +} +EXPORT_SYMBOL_GPL(devm_release_action); + /* * Managed kmalloc/kfree */ diff --git a/include/linux/device.h b/include/linux/device.h index e85264fb6616..848fc71c6ba6 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -713,6 +713,7 @@ void __iomem *devm_of_iomap(struct device *dev, /* allows to add/remove a custom action to devres stack */ int devm_add_action(struct device *dev, void (*action)(void *), void *data); void devm_remove_action(struct device *dev, void (*action)(void *), void *data); +void devm_release_action(struct device *dev, void (*action)(void *), void *data); static inline int devm_add_action_or_reset(struct device *dev, void (*action)(void *), void *data) -- cgit v1.2.3 From 2e3f139e8ecebf177fe01299285a56855e93fb84 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 13 Jun 2019 15:56:21 -0700 Subject: mm/devm_memremap_pages: introduce devm_memunmap_pages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the new devm_release_action() facility to allow devm_memremap_pages_release() to be manually triggered. Link: http://lkml.kernel.org/r/155727337088.292046.5774214552136776763.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams Reviewed-by: Ira Weiny Reviewed-by: Logan Gunthorpe Cc: Bjorn Helgaas Cc: Christoph Hellwig Cc: Greg Kroah-Hartman Cc: "Jérôme Glisse" Cc: "Rafael J. 
Wysocki" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memremap.h | 6 ++++++ kernel/memremap.c | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/include/linux/memremap.h b/include/linux/memremap.h index f0628660d541..7601ee314c4a 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -100,6 +100,7 @@ struct dev_pagemap { #ifdef CONFIG_ZONE_DEVICE void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap); +void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap); struct dev_pagemap *get_dev_pagemap(unsigned long pfn, struct dev_pagemap *pgmap); @@ -118,6 +119,11 @@ static inline void *devm_memremap_pages(struct device *dev, return ERR_PTR(-ENXIO); } +static inline void devm_memunmap_pages(struct device *dev, + struct dev_pagemap *pgmap) +{ +} + static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn, struct dev_pagemap *pgmap) { diff --git a/kernel/memremap.c b/kernel/memremap.c index 1490e63f69a9..715b434bd316 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -271,6 +271,12 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) } EXPORT_SYMBOL_GPL(devm_memremap_pages); +void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap) +{ + devm_release_action(dev, devm_memremap_pages_release, pgmap); +} +EXPORT_SYMBOL_GPL(devm_memunmap_pages); + unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) { /* number of pfns from base where pfn_to_page() is valid */ -- cgit v1.2.3 From e615a191216e3fb4e9c0d239007f2b0cd48f28bf Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 13 Jun 2019 15:56:24 -0700 Subject: PCI/P2PDMA: fix the gen_pool_add_virt() failure path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The pci_p2pdma_add_resource() implementation immediately frees the pgmap if gen_pool_add_virt() fails. However, that means that when @dev triggers a devres release devm_memremap_pages_release() will crash trying to access the freed @pgmap. Use the new devm_memunmap_pages() to manually free the mapping in the error path. Link: http://lkml.kernel.org/r/155727337603.292046.13101332703665246702.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams Fixes: 52916982af48 ("PCI/P2PDMA: Support peer-to-peer memory") Reviewed-by: Ira Weiny Acked-by: Bjorn Helgaas Reviewed-by: Logan Gunthorpe Cc: Christoph Hellwig Cc: Greg Kroah-Hartman Cc: "Jérôme Glisse" Cc: "Rafael J. 
Wysocki" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/pci/p2pdma.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index 742928d0053e..d5736b31ce66 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -208,13 +208,15 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, pci_bus_address(pdev, bar) + offset, resource_size(&pgmap->res), dev_to_node(&pdev->dev)); if (error) - goto pgmap_free; + goto pages_free; pci_info(pdev, "added peer-to-peer DMA memory %pR\n", &pgmap->res); return 0; +pages_free: + devm_memunmap_pages(&pdev->dev, pgmap); pgmap_free: devm_kfree(&pdev->dev, pgmap); return error; -- cgit v1.2.3 From 795ee30648c708502da9df637f83c33361d68dcc Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 13 Jun 2019 15:56:27 -0700 Subject: lib/genalloc: introduce chunk owners MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The p2pdma facility enables a provider to publish a pool of dma addresses for a consumer to allocate. A genpool is used internally by p2pdma to collect dma resources, 'chunks', to be handed out to consumers. Whenever a consumer allocates a resource it needs to pin the 'struct dev_pagemap' instance that backs the chunk selected by pci_alloc_p2pmem(). Currently that reference is taken globally on the entire provider device. That sets up a lifetime mismatch whereby the p2pdma core needs to maintain hacks to make sure the percpu_ref is not released twice. This lifetime mismatch also stands in the way of a fix to devm_memremap_pages() whereby devm_memremap_pages_release() must wait for the percpu_ref ->release() callback to complete before it can proceed to teardown pages. So, towards fixing this situation, introduce the ability to store a 'chunk owner' at gen_pool_add() time, and a facility to retrieve the owner at gen_pool_{alloc,free}() time. For p2pdma this will be used to store and recall individual dev_pagemap reference counter instances per-chunk. Link: http://lkml.kernel.org/r/155727338118.292046.13407378933221579644.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams Reviewed-by: Ira Weiny Reviewed-by: Logan Gunthorpe Cc: Bjorn Helgaas Cc: "Jérôme Glisse" Cc: Christoph Hellwig Cc: Greg Kroah-Hartman Cc: "Rafael J. 
Wysocki" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/genalloc.h | 55 ++++++++++++++++++++++++++++++++++++++++++------ lib/genalloc.c | 51 ++++++++++++++++++++++---------------------- 2 files changed, 74 insertions(+), 32 deletions(-) diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index dd0a452373e7..a337313e064f 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -75,6 +75,7 @@ struct gen_pool_chunk { struct list_head next_chunk; /* next chunk in pool */ atomic_long_t avail; phys_addr_t phys_addr; /* physical starting address of memory chunk */ + void *owner; /* private data to retrieve at alloc time */ unsigned long start_addr; /* start address of memory chunk */ unsigned long end_addr; /* end address of memory chunk (inclusive) */ unsigned long bits[0]; /* bitmap for allocating memory chunk */ @@ -96,8 +97,15 @@ struct genpool_data_fixed { extern struct gen_pool *gen_pool_create(int, int); extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long); -extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t, - size_t, int); +extern int gen_pool_add_owner(struct gen_pool *, unsigned long, phys_addr_t, + size_t, int, void *); + +static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr, + phys_addr_t phys, size_t size, int nid) +{ + return gen_pool_add_owner(pool, addr, phys, size, nid, NULL); +} + /** * gen_pool_add - add a new chunk of special memory to the pool * @pool: pool to add new memory chunk to @@ -116,12 +124,47 @@ static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr, return gen_pool_add_virt(pool, addr, -1, size, nid); } extern void gen_pool_destroy(struct gen_pool *); -extern unsigned long gen_pool_alloc(struct gen_pool *, size_t); -extern unsigned long gen_pool_alloc_algo(struct gen_pool *, size_t, - genpool_algo_t algo, void *data); +unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size, + genpool_algo_t algo, void *data, void **owner); + +static inline unsigned long gen_pool_alloc_owner(struct gen_pool *pool, + size_t size, void **owner) +{ + return gen_pool_alloc_algo_owner(pool, size, pool->algo, pool->data, + owner); +} + +static inline unsigned long gen_pool_alloc_algo(struct gen_pool *pool, + size_t size, genpool_algo_t algo, void *data) +{ + return gen_pool_alloc_algo_owner(pool, size, algo, data, NULL); +} + +/** + * gen_pool_alloc - allocate special memory from the pool + * @pool: pool to allocate from + * @size: number of bytes to allocate from the pool + * + * Allocate the requested number of bytes from the specified pool. + * Uses the pool allocation function (with first-fit algorithm by default). + * Can not be used in NMI handler on architectures without + * NMI-safe cmpxchg implementation. 
+ */ +static inline unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) +{ + return gen_pool_alloc_algo(pool, size, pool->algo, pool->data); +} + extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma); -extern void gen_pool_free(struct gen_pool *, unsigned long, size_t); +extern void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, + size_t size, void **owner); +static inline void gen_pool_free(struct gen_pool *pool, unsigned long addr, + size_t size) +{ + gen_pool_free_owner(pool, addr, size, NULL); +} + extern void gen_pool_for_each_chunk(struct gen_pool *, void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *); extern size_t gen_pool_avail(struct gen_pool *); diff --git a/lib/genalloc.c b/lib/genalloc.c index 7e85d1e37a6e..770c769d7cb7 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -168,20 +168,21 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid) EXPORT_SYMBOL(gen_pool_create); /** - * gen_pool_add_virt - add a new chunk of special memory to the pool + * gen_pool_add_owner- add a new chunk of special memory to the pool * @pool: pool to add new memory chunk to * @virt: virtual starting address of memory chunk to add to pool * @phys: physical starting address of memory chunk to add to pool * @size: size in bytes of the memory chunk to add to pool * @nid: node id of the node the chunk structure and bitmap should be * allocated on, or -1 + * @owner: private data the publisher would like to recall at alloc time * * Add a new chunk of special memory to the specified pool. * * Returns 0 on success or a -ve errno on failure. */ -int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys, - size_t size, int nid) +int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys, + size_t size, int nid, void *owner) { struct gen_pool_chunk *chunk; int nbits = size >> pool->min_alloc_order; @@ -195,6 +196,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy chunk->phys_addr = phys; chunk->start_addr = virt; chunk->end_addr = virt + size - 1; + chunk->owner = owner; atomic_long_set(&chunk->avail, size); spin_lock(&pool->lock); @@ -203,7 +205,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy return 0; } -EXPORT_SYMBOL(gen_pool_add_virt); +EXPORT_SYMBOL(gen_pool_add_owner); /** * gen_pool_virt_to_phys - return the physical address of memory @@ -260,35 +262,20 @@ void gen_pool_destroy(struct gen_pool *pool) EXPORT_SYMBOL(gen_pool_destroy); /** - * gen_pool_alloc - allocate special memory from the pool - * @pool: pool to allocate from - * @size: number of bytes to allocate from the pool - * - * Allocate the requested number of bytes from the specified pool. - * Uses the pool allocation function (with first-fit algorithm by default). - * Can not be used in NMI handler on architectures without - * NMI-safe cmpxchg implementation. - */ -unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) -{ - return gen_pool_alloc_algo(pool, size, pool->algo, pool->data); -} -EXPORT_SYMBOL(gen_pool_alloc); - -/** - * gen_pool_alloc_algo - allocate special memory from the pool + * gen_pool_alloc_algo_owner - allocate special memory from the pool * @pool: pool to allocate from * @size: number of bytes to allocate from the pool * @algo: algorithm passed from caller * @data: data passed to algorithm + * @owner: optionally retrieve the chunk owner * * Allocate the requested number of bytes from the specified pool. 
* Uses the pool allocation function (with first-fit algorithm by default). * Can not be used in NMI handler on architectures without * NMI-safe cmpxchg implementation. */ -unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size, - genpool_algo_t algo, void *data) +unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size, + genpool_algo_t algo, void *data, void **owner) { struct gen_pool_chunk *chunk; unsigned long addr = 0; @@ -299,6 +286,9 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size, BUG_ON(in_nmi()); #endif + if (owner) + *owner = NULL; + if (size == 0) return 0; @@ -326,12 +316,14 @@ retry: addr = chunk->start_addr + ((unsigned long)start_bit << order); size = nbits << order; atomic_long_sub(size, &chunk->avail); + if (owner) + *owner = chunk->owner; break; } rcu_read_unlock(); return addr; } -EXPORT_SYMBOL(gen_pool_alloc_algo); +EXPORT_SYMBOL(gen_pool_alloc_algo_owner); /** * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage @@ -367,12 +359,14 @@ EXPORT_SYMBOL(gen_pool_dma_alloc); * @pool: pool to free to * @addr: starting address of memory to free back to pool * @size: size in bytes of memory to free + * @owner: private data stashed at gen_pool_add() time * * Free previously allocated special memory back to the specified * pool. Can not be used in NMI handler on architectures without * NMI-safe cmpxchg implementation. */ -void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) +void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size, + void **owner) { struct gen_pool_chunk *chunk; int order = pool->min_alloc_order; @@ -382,6 +376,9 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) BUG_ON(in_nmi()); #endif + if (owner) + *owner = NULL; + nbits = (size + (1UL << order) - 1) >> order; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { @@ -392,6 +389,8 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) BUG_ON(remain); size = nbits << order; atomic_long_add(size, &chunk->avail); + if (owner) + *owner = chunk->owner; rcu_read_unlock(); return; } @@ -399,7 +398,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) rcu_read_unlock(); BUG(); } -EXPORT_SYMBOL(gen_pool_free); +EXPORT_SYMBOL(gen_pool_free_owner); /** * gen_pool_for_each_chunk - call func for every chunk of generic memory pool -- cgit v1.2.3 From 1570175abd164b32ea1cc677f9dfd2dc6bd093f5 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 13 Jun 2019 15:56:30 -0700 Subject: PCI/P2PDMA: track pgmap references per resource, not globally MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In preparation for fixing a race between devm_memremap_pages_release() and the final put of a page from the device-page-map, allocate a percpu-ref per p2pdma resource mapping. Link: http://lkml.kernel.org/r/155727338646.292046.9922678317501435597.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams Reviewed-by: Ira Weiny Reviewed-by: Logan Gunthorpe Cc: Bjorn Helgaas Cc: Christoph Hellwig Cc: Greg Kroah-Hartman Cc: "Jérôme Glisse" Cc: "Rafael J. 
Wysocki" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/pci/p2pdma.c | 124 +++++++++++++++++++++++++++++++++------------------ 1 file changed, 81 insertions(+), 43 deletions(-) diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index d5736b31ce66..eecba8fbe251 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -20,12 +20,16 @@ #include struct pci_p2pdma { - struct percpu_ref devmap_ref; - struct completion devmap_ref_done; struct gen_pool *pool; bool p2pmem_published; }; +struct p2pdma_pagemap { + struct dev_pagemap pgmap; + struct percpu_ref ref; + struct completion ref_done; +}; + static ssize_t size_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -74,41 +78,45 @@ static const struct attribute_group p2pmem_group = { .name = "p2pmem", }; +static struct p2pdma_pagemap *to_p2p_pgmap(struct percpu_ref *ref) +{ + return container_of(ref, struct p2pdma_pagemap, ref); +} + static void pci_p2pdma_percpu_release(struct percpu_ref *ref) { - struct pci_p2pdma *p2p = - container_of(ref, struct pci_p2pdma, devmap_ref); + struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref); - complete_all(&p2p->devmap_ref_done); + complete(&p2p_pgmap->ref_done); } static void pci_p2pdma_percpu_kill(struct percpu_ref *ref) { - /* - * pci_p2pdma_add_resource() may be called multiple times - * by a driver and may register the percpu_kill devm action multiple - * times. We only want the first action to actually kill the - * percpu_ref. - */ - if (percpu_ref_is_dying(ref)) - return; - percpu_ref_kill(ref); } +static void pci_p2pdma_percpu_cleanup(void *ref) +{ + struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref); + + wait_for_completion(&p2p_pgmap->ref_done); + percpu_ref_exit(&p2p_pgmap->ref); +} + static void pci_p2pdma_release(void *data) { struct pci_dev *pdev = data; + struct pci_p2pdma *p2pdma = pdev->p2pdma; - if (!pdev->p2pdma) + if (!p2pdma) return; - wait_for_completion(&pdev->p2pdma->devmap_ref_done); - percpu_ref_exit(&pdev->p2pdma->devmap_ref); + /* Flush and disable pci_alloc_p2p_mem() */ + pdev->p2pdma = NULL; + synchronize_rcu(); - gen_pool_destroy(pdev->p2pdma->pool); + gen_pool_destroy(p2pdma->pool); sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group); - pdev->p2pdma = NULL; } static int pci_p2pdma_setup(struct pci_dev *pdev) @@ -124,12 +132,6 @@ static int pci_p2pdma_setup(struct pci_dev *pdev) if (!p2p->pool) goto out; - init_completion(&p2p->devmap_ref_done); - error = percpu_ref_init(&p2p->devmap_ref, - pci_p2pdma_percpu_release, 0, GFP_KERNEL); - if (error) - goto out_pool_destroy; - error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev); if (error) goto out_pool_destroy; @@ -163,6 +165,7 @@ out: int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, u64 offset) { + struct p2pdma_pagemap *p2p_pgmap; struct dev_pagemap *pgmap; void *addr; int error; @@ -185,14 +188,32 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, return error; } - pgmap = devm_kzalloc(&pdev->dev, sizeof(*pgmap), GFP_KERNEL); - if (!pgmap) + p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL); + if (!p2p_pgmap) return -ENOMEM; + init_completion(&p2p_pgmap->ref_done); + error = percpu_ref_init(&p2p_pgmap->ref, + pci_p2pdma_percpu_release, 0, GFP_KERNEL); + if (error) + goto pgmap_free; + + /* + * FIXME: the percpu_ref_exit needs to be coordinated internal + * to devm_memremap_pages_release(). Duplicate the same ordering + * as other devm_memremap_pages() users for now. 
+ */ + error = devm_add_action(&pdev->dev, pci_p2pdma_percpu_cleanup, + &p2p_pgmap->ref); + if (error) + goto ref_cleanup; + + pgmap = &p2p_pgmap->pgmap; + pgmap->res.start = pci_resource_start(pdev, bar) + offset; pgmap->res.end = pgmap->res.start + size - 1; pgmap->res.flags = pci_resource_flags(pdev, bar); - pgmap->ref = &pdev->p2pdma->devmap_ref; + pgmap->ref = &p2p_pgmap->ref; pgmap->type = MEMORY_DEVICE_PCI_P2PDMA; pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) - pci_resource_start(pdev, bar); @@ -201,12 +222,13 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, addr = devm_memremap_pages(&pdev->dev, pgmap); if (IS_ERR(addr)) { error = PTR_ERR(addr); - goto pgmap_free; + goto ref_exit; } - error = gen_pool_add_virt(pdev->p2pdma->pool, (unsigned long)addr, + error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr, pci_bus_address(pdev, bar) + offset, - resource_size(&pgmap->res), dev_to_node(&pdev->dev)); + resource_size(&pgmap->res), dev_to_node(&pdev->dev), + &p2p_pgmap->ref); if (error) goto pages_free; @@ -217,8 +239,10 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, pages_free: devm_memunmap_pages(&pdev->dev, pgmap); +ref_cleanup: + percpu_ref_exit(&p2p_pgmap->ref); pgmap_free: - devm_kfree(&pdev->dev, pgmap); + devm_kfree(&pdev->dev, p2p_pgmap); return error; } EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource); @@ -587,19 +611,30 @@ EXPORT_SYMBOL_GPL(pci_p2pmem_find_many); */ void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size) { - void *ret; + void *ret = NULL; + struct percpu_ref *ref; + /* + * Pairs with synchronize_rcu() in pci_p2pdma_release() to + * ensure pdev->p2pdma is non-NULL for the duration of the + * read-lock. + */ + rcu_read_lock(); if (unlikely(!pdev->p2pdma)) - return NULL; - - if (unlikely(!percpu_ref_tryget_live(&pdev->p2pdma->devmap_ref))) - return NULL; - - ret = (void *)gen_pool_alloc(pdev->p2pdma->pool, size); + goto out; - if (unlikely(!ret)) - percpu_ref_put(&pdev->p2pdma->devmap_ref); + ret = (void *)gen_pool_alloc_owner(pdev->p2pdma->pool, size, + (void **) &ref); + if (!ret) + goto out; + if (unlikely(!percpu_ref_tryget_live(ref))) { + gen_pool_free(pdev->p2pdma->pool, (unsigned long) ret, size); + ret = NULL; + goto out; + } +out: + rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(pci_alloc_p2pmem); @@ -612,8 +647,11 @@ EXPORT_SYMBOL_GPL(pci_alloc_p2pmem); */ void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size) { - gen_pool_free(pdev->p2pdma->pool, (uintptr_t)addr, size); - percpu_ref_put(&pdev->p2pdma->devmap_ref); + struct percpu_ref *ref; + + gen_pool_free_owner(pdev->p2pdma->pool, (uintptr_t)addr, size, + (void **) &ref); + percpu_ref_put(ref); } EXPORT_SYMBOL_GPL(pci_free_p2pmem); -- cgit v1.2.3 From 50f44ee7248ad2f7984ef081974a6ecd09724b3e Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 13 Jun 2019 15:56:33 -0700 Subject: mm/devm_memremap_pages: fix final page put race MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Logan noticed that devm_memremap_pages_release() kills the percpu_ref drops all the page references that were acquired at init and then immediately proceeds to unplug, arch_remove_memory(), the backing pages for the pagemap. If for some reason device shutdown actually collides with a busy / elevated-ref-count page then arch_remove_memory() should be deferred until after that reference is dropped. 
As it stands the "wait for last page ref drop" happens *after* devm_memremap_pages_release() returns, which is obviously too late and can lead to crashes. Fix this situation by assigning the responsibility to wait for the percpu_ref to go idle to devm_memremap_pages() with a new ->cleanup() callback. Implement the new cleanup callback for all devm_memremap_pages() users: pmem, devdax, hmm, and p2pdma. Link: http://lkml.kernel.org/r/155727339156.292046.5432007428235387859.stgit@dwillia2-desk3.amr.corp.intel.com Fixes: 41e94a851304 ("add devm_memremap_pages") Signed-off-by: Dan Williams Reported-by: Logan Gunthorpe Reviewed-by: Ira Weiny Reviewed-by: Logan Gunthorpe Cc: Bjorn Helgaas Cc: "Jérôme Glisse" Cc: Christoph Hellwig Cc: Greg Kroah-Hartman Cc: "Rafael J. Wysocki" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/dax/device.c | 13 +++---------- drivers/nvdimm/pmem.c | 17 +++++++++++++---- drivers/pci/p2pdma.c | 17 +++-------------- include/linux/memremap.h | 2 ++ kernel/memremap.c | 17 ++++++++++++----- mm/hmm.c | 14 +++----------- tools/testing/nvdimm/test/iomap.c | 2 ++ 7 files changed, 38 insertions(+), 44 deletions(-) diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 996d68ff992a..8465d12fecba 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -27,9 +27,8 @@ static void dev_dax_percpu_release(struct percpu_ref *ref) complete(&dev_dax->cmp); } -static void dev_dax_percpu_exit(void *data) +static void dev_dax_percpu_exit(struct percpu_ref *ref) { - struct percpu_ref *ref = data; struct dev_dax *dev_dax = ref_to_dev_dax(ref); dev_dbg(&dev_dax->dev, "%s\n", __func__); @@ -466,18 +465,12 @@ int dev_dax_probe(struct device *dev) if (rc) return rc; - rc = devm_add_action_or_reset(dev, dev_dax_percpu_exit, &dev_dax->ref); - if (rc) - return rc; - dev_dax->pgmap.ref = &dev_dax->ref; dev_dax->pgmap.kill = dev_dax_percpu_kill; + dev_dax->pgmap.cleanup = dev_dax_percpu_exit; addr = devm_memremap_pages(dev, &dev_dax->pgmap); - if (IS_ERR(addr)) { - devm_remove_action(dev, dev_dax_percpu_exit, &dev_dax->ref); - percpu_ref_exit(&dev_dax->ref); + if (IS_ERR(addr)) return PTR_ERR(addr); - } inode = dax_inode(dax_dev); cdev = inode->i_cdev; diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 28cb44c61d4a..24d7fe7c74ed 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -303,11 +303,19 @@ static const struct attribute_group *pmem_attribute_groups[] = { NULL, }; -static void pmem_release_queue(void *q) +static void __pmem_release_queue(struct percpu_ref *ref) { + struct request_queue *q; + + q = container_of(ref, typeof(*q), q_usage_counter); blk_cleanup_queue(q); } +static void pmem_release_queue(void *ref) +{ + __pmem_release_queue(ref); +} + static void pmem_freeze_queue(struct percpu_ref *ref) { struct request_queue *q; @@ -399,12 +407,10 @@ static int pmem_attach_disk(struct device *dev, if (!q) return -ENOMEM; - if (devm_add_action_or_reset(dev, pmem_release_queue, q)) - return -ENOMEM; - pmem->pfn_flags = PFN_DEV; pmem->pgmap.ref = &q->q_usage_counter; pmem->pgmap.kill = pmem_freeze_queue; + pmem->pgmap.cleanup = __pmem_release_queue; if (is_nd_pfn(dev)) { if (setup_pagemap_fsdax(dev, &pmem->pgmap)) return -ENOMEM; @@ -425,6 +431,9 @@ static int pmem_attach_disk(struct device *dev, pmem->pfn_flags |= PFN_MAP; memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res)); } else { + if (devm_add_action_or_reset(dev, pmem_release_queue, + &q->q_usage_counter)) + return -ENOMEM; addr = devm_memremap(dev, pmem->phys_addr, pmem->size, 
ARCH_MEMREMAP_PMEM); memcpy(&bb_res, &nsio->res, sizeof(bb_res)); diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index eecba8fbe251..a98126ad9c3a 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -95,7 +95,7 @@ static void pci_p2pdma_percpu_kill(struct percpu_ref *ref) percpu_ref_kill(ref); } -static void pci_p2pdma_percpu_cleanup(void *ref) +static void pci_p2pdma_percpu_cleanup(struct percpu_ref *ref) { struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref); @@ -198,16 +198,6 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, if (error) goto pgmap_free; - /* - * FIXME: the percpu_ref_exit needs to be coordinated internal - * to devm_memremap_pages_release(). Duplicate the same ordering - * as other devm_memremap_pages() users for now. - */ - error = devm_add_action(&pdev->dev, pci_p2pdma_percpu_cleanup, - &p2p_pgmap->ref); - if (error) - goto ref_cleanup; - pgmap = &p2p_pgmap->pgmap; pgmap->res.start = pci_resource_start(pdev, bar) + offset; @@ -218,11 +208,12 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) - pci_resource_start(pdev, bar); pgmap->kill = pci_p2pdma_percpu_kill; + pgmap->cleanup = pci_p2pdma_percpu_cleanup; addr = devm_memremap_pages(&pdev->dev, pgmap); if (IS_ERR(addr)) { error = PTR_ERR(addr); - goto ref_exit; + goto pgmap_free; } error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr, @@ -239,8 +230,6 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, pages_free: devm_memunmap_pages(&pdev->dev, pgmap); -ref_cleanup: - percpu_ref_exit(&p2p_pgmap->ref); pgmap_free: devm_kfree(&pdev->dev, p2p_pgmap); return error; diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 7601ee314c4a..1732dea030b2 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -81,6 +81,7 @@ typedef void (*dev_page_free_t)(struct page *page, void *data); * @res: physical address range covered by @ref * @ref: reference count that pins the devm_memremap_pages() mapping * @kill: callback to transition @ref to the dead state + * @cleanup: callback to wait for @ref to be idle and reap it * @dev: host device of the mapping for debug * @data: private data pointer for page_free() * @type: memory type: see MEMORY_* in memory_hotplug.h @@ -92,6 +93,7 @@ struct dev_pagemap { struct resource res; struct percpu_ref *ref; void (*kill)(struct percpu_ref *ref); + void (*cleanup)(struct percpu_ref *ref); struct device *dev; void *data; enum memory_type type; diff --git a/kernel/memremap.c b/kernel/memremap.c index 715b434bd316..6e1970719dc2 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -95,6 +95,7 @@ static void devm_memremap_pages_release(void *data) pgmap->kill(pgmap->ref); for_each_device_pfn(pfn, pgmap) put_page(pfn_to_page(pfn)); + pgmap->cleanup(pgmap->ref); /* pages are dead and unused, undo the arch mapping */ align_start = res->start & ~(SECTION_SIZE - 1); @@ -133,8 +134,8 @@ static void devm_memremap_pages_release(void *data) * 2/ The altmap field may optionally be initialized, in which case altmap_valid * must be set to true * - * 3/ pgmap->ref must be 'live' on entry and will be killed at - * devm_memremap_pages_release() time, or if this routine fails. + * 3/ pgmap->ref must be 'live' on entry and will be killed and reaped + * at devm_memremap_pages_release() time, or if this routine fails. 
* * 4/ res is expected to be a host memory range that could feasibly be * treated as a "System RAM" range, i.e. not a device mmio range, but @@ -156,8 +157,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) pgprot_t pgprot = PAGE_KERNEL; int error, nid, is_ram; - if (!pgmap->ref || !pgmap->kill) + if (!pgmap->ref || !pgmap->kill || !pgmap->cleanup) { + WARN(1, "Missing reference count teardown definition\n"); return ERR_PTR(-EINVAL); + } align_start = res->start & ~(SECTION_SIZE - 1); align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) @@ -168,14 +171,16 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) if (conflict_pgmap) { dev_WARN(dev, "Conflicting mapping in same section\n"); put_dev_pagemap(conflict_pgmap); - return ERR_PTR(-ENOMEM); + error = -ENOMEM; + goto err_array; } conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL); if (conflict_pgmap) { dev_WARN(dev, "Conflicting mapping in same section\n"); put_dev_pagemap(conflict_pgmap); - return ERR_PTR(-ENOMEM); + error = -ENOMEM; + goto err_array; } is_ram = region_intersects(align_start, align_size, @@ -267,6 +272,8 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) pgmap_array_delete(res); err_array: pgmap->kill(pgmap->ref); + pgmap->cleanup(pgmap->ref); + return ERR_PTR(error); } EXPORT_SYMBOL_GPL(devm_memremap_pages); diff --git a/mm/hmm.c b/mm/hmm.c index c5d840e34b28..f702a3895d05 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -1354,9 +1354,8 @@ static void hmm_devmem_ref_release(struct percpu_ref *ref) complete(&devmem->completion); } -static void hmm_devmem_ref_exit(void *data) +static void hmm_devmem_ref_exit(struct percpu_ref *ref) { - struct percpu_ref *ref = data; struct hmm_devmem *devmem; devmem = container_of(ref, struct hmm_devmem, ref); @@ -1433,10 +1432,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, if (ret) return ERR_PTR(ret); - ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref); - if (ret) - return ERR_PTR(ret); - size = ALIGN(size, PA_SECTION_SIZE); addr = min((unsigned long)iomem_resource.end, (1UL << MAX_PHYSMEM_BITS) - 1); @@ -1475,6 +1470,7 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, devmem->pagemap.ref = &devmem->ref; devmem->pagemap.data = devmem; devmem->pagemap.kill = hmm_devmem_ref_kill; + devmem->pagemap.cleanup = hmm_devmem_ref_exit; result = devm_memremap_pages(devmem->device, &devmem->pagemap); if (IS_ERR(result)) @@ -1512,11 +1508,6 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, if (ret) return ERR_PTR(ret); - ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, - &devmem->ref); - if (ret) - return ERR_PTR(ret); - devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT; devmem->pfn_last = devmem->pfn_first + (resource_size(devmem->resource) >> PAGE_SHIFT); @@ -1529,6 +1520,7 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, devmem->pagemap.ref = &devmem->ref; devmem->pagemap.data = devmem; devmem->pagemap.kill = hmm_devmem_ref_kill; + devmem->pagemap.cleanup = hmm_devmem_ref_exit; result = devm_memremap_pages(devmem->device, &devmem->pagemap); if (IS_ERR(result)) diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c index 280015c22598..076df22e4bda 100644 --- a/tools/testing/nvdimm/test/iomap.c +++ b/tools/testing/nvdimm/test/iomap.c @@ -100,7 +100,9 @@ static void nfit_test_kill(void *_pgmap) { struct dev_pagemap *pgmap = _pgmap; + 
WARN_ON(!pgmap || !pgmap->ref || !pgmap->kill || !pgmap->cleanup); pgmap->kill(pgmap->ref); + pgmap->cleanup(pgmap->ref); } void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) -- cgit v1.2.3
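Editor's note: taken together, the two patches above converge on one teardown contract for devm_memremap_pages() users: the driver owns a percpu_ref and a completion, supplies ->kill and ->cleanup callbacks on the dev_pagemap, and the memremap core (not a per-driver devm action) invokes both in the release path and in the devm_memremap_pages() error path. The sketch below shows the shape of that contract for a hypothetical driver "foo"; the struct and function names are illustrative only (the real implementations are the p2pdma_pagemap, device-dax, pmem and hmm hunks above), and the resource/type setup is reduced to a minimum, not a definitive implementation.

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>

/* Hypothetical per-device state, mirroring struct p2pdma_pagemap above */
struct foo_dev {
	struct dev_pagemap pgmap;
	struct percpu_ref ref;
	struct completion ref_done;
};

/* Runs when the last page reference is dropped */
static void foo_ref_release(struct percpu_ref *ref)
{
	struct foo_dev *foo = container_of(ref, struct foo_dev, ref);

	complete(&foo->ref_done);
}

/* ->kill: stop new references; called first by devm_memremap_pages_release() */
static void foo_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill(ref);
}

/*
 * ->cleanup: called by the core after the init-time page references have
 * been put; wait for the final reference to drain, then reap the ref.
 */
static void foo_ref_cleanup(struct percpu_ref *ref)
{
	struct foo_dev *foo = container_of(ref, struct foo_dev, ref);

	wait_for_completion(&foo->ref_done);
	percpu_ref_exit(ref);
}

static int foo_probe(struct device *dev, struct foo_dev *foo,
		     struct resource *res)
{
	void *addr;
	int rc;

	init_completion(&foo->ref_done);
	rc = percpu_ref_init(&foo->ref, foo_ref_release, 0, GFP_KERNEL);
	if (rc)
		return rc;

	foo->pgmap.res = *res;
	foo->pgmap.type = MEMORY_DEVICE_FS_DAX;	/* illustrative choice */
	foo->pgmap.ref = &foo->ref;
	foo->pgmap.kill = foo_ref_kill;
	foo->pgmap.cleanup = foo_ref_cleanup;

	addr = devm_memremap_pages(dev, &foo->pgmap);
	if (IS_ERR(addr))
		/* the core already ran ->kill and ->cleanup on this path */
		return PTR_ERR(addr);

	return 0;
}

The design point is that waiting for the final page reference now happens inside devm_memremap_pages_release(), between the put_page() loop and arch_remove_memory(), so the backing pages cannot be unplugged while a reference is still elevated. That is also what allows the per-driver devm_add_action() registrations for percpu_ref_exit() (and the ordering FIXME carried in the p2pdma patch above) to be deleted.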