From 453431a54934d917153c65211b2dabf45562ca88 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Thu, 6 Aug 2020 23:18:13 -0700 Subject: mm, treewide: rename kzfree() to kfree_sensitive() As said by Linus: A symmetric naming is only helpful if it implies symmetries in use. Otherwise it's actively misleading. In "kzalloc()", the z is meaningful and an important part of what the caller wants. In "kzfree()", the z is actively detrimental, because maybe in the future we really _might_ want to use that "memfill(0xdeadbeef)" or something. The "zero" part of the interface isn't even _relevant_. The main reason that kzfree() exists is to clear sensitive information that should not be leaked to other future users of the same memory objects. Rename kzfree() to kfree_sensitive() to follow the example of the recently added kvfree_sensitive() and make the intention of the API more explicit. In addition, memzero_explicit() is used to clear the memory to make sure that it won't get optimized away by the compiler. The renaming is done by using the command sequence: git grep -w --name-only kzfree |\ xargs sed -i 's/kzfree/kfree_sensitive/' followed by some editing of the kfree_sensitive() kerneldoc and adding a kzfree backward compatibility macro in slab.h. [akpm@linux-foundation.org: fs/crypto/inline_crypt.c needs linux/slab.h] [akpm@linux-foundation.org: fix fs/crypto/inline_crypt.c some more] Suggested-by: Joe Perches Signed-off-by: Waiman Long Signed-off-by: Andrew Morton Acked-by: David Howells Acked-by: Michal Hocko Acked-by: Johannes Weiner Cc: Jarkko Sakkinen Cc: James Morris Cc: "Serge E. Hallyn" Cc: Joe Perches Cc: Matthew Wilcox Cc: David Rientjes Cc: Dan Carpenter Cc: "Jason A . Donenfeld" Link: http://lkml.kernel.org/r/20200616154311.12314-3-longman@redhat.com Signed-off-by: Linus Torvalds --- mm/slab_common.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'mm/slab_common.c') diff --git a/mm/slab_common.c b/mm/slab_common.c index fe8b68482670..f47a097bb4b8 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1729,17 +1729,17 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags) EXPORT_SYMBOL(krealloc); /** - * kzfree - like kfree but zero memory + * kfree_sensitive - Clear sensitive information in memory before freeing * @p: object to free memory of * * The memory of the object @p points to is zeroed before freed. - * If @p is %NULL, kzfree() does nothing. + * If @p is %NULL, kfree_sensitive() does nothing. * * Note: this function zeroes the whole allocated buffer which can be a good * deal bigger than the requested buffer size passed to kmalloc(). So be * careful when using this function in performance sensitive code. */ -void kzfree(const void *p) +void kfree_sensitive(const void *p) { size_t ks; void *mem = (void *)p; @@ -1750,7 +1750,7 @@ void kzfree(const void *p) memzero_explicit(mem, ks); kfree(mem); } -EXPORT_SYMBOL(kzfree); +EXPORT_SYMBOL(kfree_sensitive); /** * ksize - get the actual amount of memory allocated for a given object -- cgit v1.2.3 From fa9ba3aa89f9f1c003b5f5cde893bbbc140c7223 Mon Sep 17 00:00:00 2001 From: William Kucharski Date: Thu, 6 Aug 2020 23:18:17 -0700 Subject: mm: ksize() should silently accept a NULL pointer Other mm routines such as kfree() and kzfree() silently do the right thing if passed a NULL pointer, so ksize() should do the same. 
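As a usage sketch tying the two changes above together: kfree_sensitive() keeps kfree()'s tolerance of a NULL pointer while clearing the whole ksize()-sized allocation with memzero_explicit() before freeing it. The structure and helper below are invented for illustration and are not code from either patch; only kfree_sensitive(), kfree() and ksize() are the real interfaces discussed here.

/*
 * Illustrative sketch only -- not part of the patches in this log.
 * kfree_sensitive() accepts NULL like kfree(), and zeroes the whole
 * allocation (ksize() bytes, which may exceed the requested size)
 * with memzero_explicit(), so the compiler cannot elide the stores.
 */
#include <linux/slab.h>
#include <linux/types.h>

struct my_keybuf {			/* invented for illustration */
	u8 *data;			/* key material, may be NULL */
	size_t len;
};

static void my_keybuf_free(struct my_keybuf *kb)
{
	if (!kb)
		return;
	kfree_sensitive(kb->data);	/* NULL-safe, clears before freeing */
	kfree(kb);			/* wrapper holds nothing sensitive */
}

The whole-buffer clearing is also why the kerneldoc above warns about performance-sensitive code: ksize() can report a good deal more than the size originally passed to kmalloc().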
Signed-off-by: William Kucharski Signed-off-by: Andrew Morton Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Link: http://lkml.kernel.org/r/20200616225409.4670-1-william.kucharski@oracle.com Signed-off-by: Linus Torvalds --- mm/slab_common.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'mm/slab_common.c') diff --git a/mm/slab_common.c b/mm/slab_common.c index f47a097bb4b8..e493203b5002 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1681,10 +1681,9 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size, gfp_t flags) { void *ret; - size_t ks = 0; + size_t ks; - if (p) - ks = ksize(p); + ks = ksize(p); if (ks >= new_size) { p = kasan_krealloc((void *)p, new_size, flags); @@ -1744,10 +1743,9 @@ void kfree_sensitive(const void *p) size_t ks; void *mem = (void *)p; - if (unlikely(ZERO_OR_NULL_PTR(mem))) - return; ks = ksize(mem); - memzero_explicit(mem, ks); + if (ks) + memzero_explicit(mem, ks); kfree(mem); } EXPORT_SYMBOL(kfree_sensitive); @@ -1770,8 +1768,6 @@ size_t ksize(const void *objp) { size_t size; - if (WARN_ON_ONCE(!objp)) - return 0; /* * We need to check that the pointed to object is valid, and only then * unpoison the shadow memory below. We use __kasan_check_read(), to @@ -1785,7 +1781,7 @@ size_t ksize(const void *objp) * We want to perform the check before __ksize(), to avoid potentially * crashing in __ksize() due to accessing invalid metadata. */ - if (unlikely(objp == ZERO_SIZE_PTR) || !__kasan_check_read(objp, 1)) + if (unlikely(ZERO_OR_NULL_PTR(objp)) || !__kasan_check_read(objp, 1)) return 0; size = __ksize(objp); -- cgit v1.2.3 From 444050990db4abdf0c85ca6050542b888ba316a1 Mon Sep 17 00:00:00 2001 From: Long Li Date: Thu, 6 Aug 2020 23:18:28 -0700 Subject: mm, slab: check GFP_SLAB_BUG_MASK before alloc_pages in kmalloc_order kmalloc cannot allocate memory from HIGHMEM. Allocating large amounts of memory currently bypasses the check and will simply leak the memory when page_address() returns NULL. To fix this, factor the GFP_SLAB_BUG_MASK check out of slab & slub, and call it from kmalloc_order() as well. In order to make the code clear, the warning message is put in one place. Signed-off-by: Long Li Signed-off-by: Andrew Morton Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Pekka Enberg Acked-by: David Rientjes Cc: Christoph Lameter Cc: Joonsoo Kim Link: http://lkml.kernel.org/r/20200704035027.GA62481@lilong Signed-off-by: Linus Torvalds --- mm/slab.c | 10 +++------- mm/slab.h | 1 + mm/slab_common.c | 17 +++++++++++++++++ mm/slub.c | 9 ++------- 4 files changed, 23 insertions(+), 14 deletions(-) (limited to 'mm/slab_common.c') diff --git a/mm/slab.c b/mm/slab.c index 77e90f9de9c0..0781a2384441 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2589,13 +2589,9 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep, * Be lazy and only check for valid flags here, keeping it out of the * critical path in kmem_cache_alloc(). */ - if (unlikely(flags & GFP_SLAB_BUG_MASK)) { - gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK; - flags &= ~GFP_SLAB_BUG_MASK; - pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). 
Fix your code!\n", - invalid_mask, &invalid_mask, flags, &flags); - dump_stack(); - } + if (unlikely(flags & GFP_SLAB_BUG_MASK)) + flags = kmalloc_fix_flags(flags); + WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO)); local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); diff --git a/mm/slab.h b/mm/slab.h index 74f7e09a7cfd..58ed025a0b23 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -152,6 +152,7 @@ void create_kmalloc_caches(slab_flags_t); struct kmem_cache *kmalloc_slab(size_t, gfp_t); #endif +gfp_t kmalloc_fix_flags(gfp_t flags); /* Functions provided by the slab allocators */ int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags); diff --git a/mm/slab_common.c b/mm/slab_common.c index e493203b5002..616ec8a0d91a 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -26,6 +26,8 @@ #define CREATE_TRACE_POINTS #include +#include "internal.h" + #include "slab.h" enum slab_state slab_state; @@ -1332,6 +1334,18 @@ void __init create_kmalloc_caches(slab_flags_t flags) } #endif /* !CONFIG_SLOB */ +gfp_t kmalloc_fix_flags(gfp_t flags) +{ + gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK; + + flags &= ~GFP_SLAB_BUG_MASK; + pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n", + invalid_mask, &invalid_mask, flags, &flags); + dump_stack(); + + return flags; +} + /* * To avoid unnecessary overhead, we pass through large allocation requests * directly to the page allocator. We use __GFP_COMP, because we will need to @@ -1342,6 +1356,9 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) void *ret = NULL; struct page *page; + if (unlikely(flags & GFP_SLAB_BUG_MASK)) + flags = kmalloc_fix_flags(flags); + flags |= __GFP_COMP; page = alloc_pages(flags, order); if (likely(page)) { diff --git a/mm/slub.c b/mm/slub.c index f226d66408ee..7f0764b6a3df 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1745,13 +1745,8 @@ out: static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) { - if (unlikely(flags & GFP_SLAB_BUG_MASK)) { - gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK; - flags &= ~GFP_SLAB_BUG_MASK; - pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n", - invalid_mask, &invalid_mask, flags, &flags); - dump_stack(); - } + if (unlikely(flags & GFP_SLAB_BUG_MASK)) + flags = kmalloc_fix_flags(flags); return allocate_slab(s, flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); -- cgit v1.2.3 From d42f3245c7e299e017213fa028c319316bcdb7f4 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:20:39 -0700 Subject: mm: memcg: convert vmstat slab counters to bytes In order to prepare for per-object slab memory accounting, convert NR_SLAB_RECLAIMABLE and NR_SLAB_UNRECLAIMABLE vmstat items to bytes. To make it obvious, rename them to NR_SLAB_RECLAIMABLE_B and NR_SLAB_UNRECLAIMABLE_B (similar to NR_KERNEL_STACK_KB). Internally global and per-node counters are stored in pages, however memcg and lruvec counters are stored in bytes. This scheme may look weird, but only for now. As soon as slab pages will be shared between multiple cgroups, global and node counters will reflect the total number of slab pages. However memcg and lruvec counters will be used for per-memcg slab memory tracking, which will take separate kernel objects in the account. Keeping global and node counters in pages helps to avoid additional overhead. The size of slab memory shouldn't exceed 4Gb on 32-bit machines, so it will fit into atomic_long_t we use for vmstats. 
Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Shakeel Butt Acked-by: Johannes Weiner Acked-by: Vlastimil Babka Cc: Christoph Lameter Cc: Michal Hocko Cc: Tejun Heo Link: http://lkml.kernel.org/r/20200623174037.3951353-4-guro@fb.com Signed-off-by: Linus Torvalds --- drivers/base/node.c | 4 ++-- fs/proc/meminfo.c | 4 ++-- include/linux/mmzone.h | 16 +++++++++++++--- kernel/power/snapshot.c | 2 +- mm/memcontrol.c | 11 ++++------- mm/oom_kill.c | 2 +- mm/page_alloc.c | 8 ++++---- mm/slab.h | 15 ++++++++------- mm/slab_common.c | 4 ++-- mm/slob.c | 12 ++++++------ mm/slub.c | 8 ++++---- mm/vmscan.c | 3 ++- mm/workingset.c | 6 ++++-- 13 files changed, 53 insertions(+), 42 deletions(-) (limited to 'mm/slab_common.c') diff --git a/drivers/base/node.c b/drivers/base/node.c index e21e31359297..0cf13e31603c 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -368,8 +368,8 @@ static ssize_t node_read_meminfo(struct device *dev, unsigned long sreclaimable, sunreclaimable; si_meminfo_node(&i, nid); - sreclaimable = node_page_state(pgdat, NR_SLAB_RECLAIMABLE); - sunreclaimable = node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE); + sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B); + sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B); n = sprintf(buf, "Node %d MemTotal: %8lu kB\n" "Node %d MemFree: %8lu kB\n" diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index e9a6841fc25b..38ea95fd919a 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -52,8 +52,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v) pages[lru] = global_node_page_state(NR_LRU_BASE + lru); available = si_mem_available(); - sreclaimable = global_node_page_state(NR_SLAB_RECLAIMABLE); - sunreclaim = global_node_page_state(NR_SLAB_UNRECLAIMABLE); + sreclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B); + sunreclaim = global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B); show_val_kb(m, "MemTotal: ", i.totalram); show_val_kb(m, "MemFree: ", i.freeram); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index f16306e15b98..b79100edd228 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -174,8 +174,8 @@ enum node_stat_item { NR_INACTIVE_FILE, /* " " " " " */ NR_ACTIVE_FILE, /* " " " " " */ NR_UNEVICTABLE, /* " " " " " */ - NR_SLAB_RECLAIMABLE, - NR_SLAB_UNRECLAIMABLE, + NR_SLAB_RECLAIMABLE_B, + NR_SLAB_UNRECLAIMABLE_B, NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ WORKINGSET_NODES, @@ -213,7 +213,17 @@ enum node_stat_item { */ static __always_inline bool vmstat_item_in_bytes(int idx) { - return false; + /* + * Global and per-node slab counters track slab pages. + * It's expected that changes are multiples of PAGE_SIZE. + * Internally values are stored in pages. + * + * Per-memcg and per-lruvec counters track memory, consumed + * by individual slab objects. These counters are actually + * byte-precise. 
+ */ + return (idx == NR_SLAB_RECLAIMABLE_B || + idx == NR_SLAB_UNRECLAIMABLE_B); } /* diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index cef154261fe2..d25749bce7cf 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1663,7 +1663,7 @@ static unsigned long minimum_image_size(unsigned long saveable) { unsigned long size; - size = global_node_page_state(NR_SLAB_RECLAIMABLE) + size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + global_node_page_state(NR_ACTIVE_ANON) + global_node_page_state(NR_INACTIVE_ANON) + global_node_page_state(NR_ACTIVE_FILE) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 61ae6658d59f..328b7e7bf9ab 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1391,9 +1391,8 @@ static char *memory_stat_format(struct mem_cgroup *memcg) (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) * 1024); seq_buf_printf(&s, "slab %llu\n", - (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) + - memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE)) * - PAGE_SIZE); + (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) + + memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B))); seq_buf_printf(&s, "sock %llu\n", (u64)memcg_page_state(memcg, MEMCG_SOCK) * PAGE_SIZE); @@ -1423,11 +1422,9 @@ static char *memory_stat_format(struct mem_cgroup *memcg) PAGE_SIZE); seq_buf_printf(&s, "slab_reclaimable %llu\n", - (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) * - PAGE_SIZE); + (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B)); seq_buf_printf(&s, "slab_unreclaimable %llu\n", - (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE) * - PAGE_SIZE); + (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B)); /* Accumulated memory events */ diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 6e94962893ee..d30ce75f23fb 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -184,7 +184,7 @@ static bool is_dump_unreclaim_slabs(void) global_node_page_state(NR_ISOLATED_FILE) + global_node_page_state(NR_UNEVICTABLE); - return (global_node_page_state(NR_SLAB_UNRECLAIMABLE) > nr_lru); + return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru); } /** diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 901a21f61d68..f9ad093814d2 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5220,8 +5220,8 @@ long si_mem_available(void) * items that are in use, and cannot be freed. Cap this estimate at the * low watermark. */ - reclaimable = global_node_page_state(NR_SLAB_RECLAIMABLE) + - global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE); + reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + + global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE); available += reclaimable - min(reclaimable / 2, wmark_low); if (available < 0) @@ -5364,8 +5364,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask) global_node_page_state(NR_UNEVICTABLE), global_node_page_state(NR_FILE_DIRTY), global_node_page_state(NR_WRITEBACK), - global_node_page_state(NR_SLAB_RECLAIMABLE), - global_node_page_state(NR_SLAB_UNRECLAIMABLE), + global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B), + global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B), global_node_page_state(NR_FILE_MAPPED), global_node_page_state(NR_SHMEM), global_zone_page_state(NR_PAGETABLE), diff --git a/mm/slab.h b/mm/slab.h index fceb4341ba91..09be3ca6fe87 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -273,7 +273,7 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); static inline int cache_vmstat_idx(struct kmem_cache *s) { return (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
- NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE; + NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B; } #ifdef CONFIG_SLUB_DEBUG @@ -390,7 +390,7 @@ static __always_inline int memcg_charge_slab(struct page *page, if (unlikely(!memcg || mem_cgroup_is_root(memcg))) { mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), - nr_pages); + nr_pages << PAGE_SHIFT); percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages); return 0; } @@ -400,7 +400,7 @@ static __always_inline int memcg_charge_slab(struct page *page, goto out; lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page)); - mod_lruvec_state(lruvec, cache_vmstat_idx(s), nr_pages); + mod_lruvec_state(lruvec, cache_vmstat_idx(s), nr_pages << PAGE_SHIFT); /* transer try_charge() page references to kmem_cache */ percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages); @@ -425,11 +425,12 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order, memcg = READ_ONCE(s->memcg_params.memcg); if (likely(!mem_cgroup_is_root(memcg))) { lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page)); - mod_lruvec_state(lruvec, cache_vmstat_idx(s), -nr_pages); + mod_lruvec_state(lruvec, cache_vmstat_idx(s), + -(nr_pages << PAGE_SHIFT)); memcg_kmem_uncharge(memcg, nr_pages); } else { mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), - -nr_pages); + -(nr_pages << PAGE_SHIFT)); } rcu_read_unlock(); @@ -513,7 +514,7 @@ static __always_inline int charge_slab_page(struct page *page, { if (is_root_cache(s)) { mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), - 1 << order); + PAGE_SIZE << order); return 0; } @@ -525,7 +526,7 @@ static __always_inline void uncharge_slab_page(struct page *page, int order, { if (is_root_cache(s)) { mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), - -(1 << order)); + -(PAGE_SIZE << order)); return; } diff --git a/mm/slab_common.c b/mm/slab_common.c index 616ec8a0d91a..a73f168b1035 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1363,8 +1363,8 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) page = alloc_pages(flags, order); if (likely(page)) { ret = page_address(page); - mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE, - 1 << order); + mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, + PAGE_SIZE << order); } ret = kasan_kmalloc_large(ret, size, flags); /* As ret might get tagged, call kmemleak hook after KASAN. 
*/ diff --git a/mm/slob.c b/mm/slob.c index ac2aecfbc7a8..7cc9805c8091 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -202,8 +202,8 @@ static void *slob_new_pages(gfp_t gfp, int order, int node) if (!page) return NULL; - mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE, - 1 << order); + mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, + PAGE_SIZE << order); return page_address(page); } @@ -214,8 +214,8 @@ static void slob_free_pages(void *b, int order) if (current->reclaim_state) current->reclaim_state->reclaimed_slab += 1 << order; - mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE, - -(1 << order)); + mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B, + -(PAGE_SIZE << order)); __free_pages(sp, order); } @@ -552,8 +552,8 @@ void kfree(const void *block) slob_free(m, *m + align); } else { unsigned int order = compound_order(sp); - mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE, - -(1 << order)); + mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B, + -(PAGE_SIZE << order)); __free_pages(sp, order); } diff --git a/mm/slub.c b/mm/slub.c index ae39eb392396..2d73d677f7ac 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3991,8 +3991,8 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node) page = alloc_pages_node(node, flags, order); if (page) { ptr = page_address(page); - mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE, - 1 << order); + mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, + PAGE_SIZE << order); } return kmalloc_large_node_hook(ptr, size, flags); @@ -4123,8 +4123,8 @@ void kfree(const void *x) BUG_ON(!PageCompound(page)); kfree_hook(object); - mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE, - -(1 << order)); + mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, + -(PAGE_SIZE << order)); __free_pages(page, order); return; } diff --git a/mm/vmscan.c b/mm/vmscan.c index 749d239c62b2..2ac43664aba4 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4222,7 +4222,8 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) * unmapped file backed pages. */ if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && - node_page_state(pgdat, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages) + node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <= + pgdat->min_slab_pages) return NODE_RECLAIM_FULL; /* diff --git a/mm/workingset.c b/mm/workingset.c index 50b7937bab32..b199726924dd 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -486,8 +486,10 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker, for (pages = 0, i = 0; i < NR_LRU_LISTS; i++) pages += lruvec_page_state_local(lruvec, NR_LRU_BASE + i); - pages += lruvec_page_state_local(lruvec, NR_SLAB_RECLAIMABLE); - pages += lruvec_page_state_local(lruvec, NR_SLAB_UNRECLAIMABLE); + pages += lruvec_page_state_local( + lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT; + pages += lruvec_page_state_local( + lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT; } else #endif pages = node_present_pages(sc->nid); -- cgit v1.2.3 From 4330a26bc4527f1d8918c398ebc983574f761cca Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:21:03 -0700 Subject: mm: memcg/slab: deprecate memory.kmem.slabinfo Deprecate memory.kmem.slabinfo. An empty file will be presented if corresponding config options are enabled. The interface is implementation dependent, isn't present in cgroup v2, and is generally useful only for core mm debugging purposes. 
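A brief aside on the preceding counter-conversion patch, which is what keeps coarse per-cgroup slab figures available through the generic statistics: the memcontrol.c hunk above feeds the byte-precise NR_SLAB_*_B counters directly into the slab, slab_reclaimable and slab_unreclaimable lines produced by memory_stat_format(), while node-level readers go through the *_pages() accessors. The helper below is only a sketch of that page/byte convention; its name is invented, and only mod_node_page_state() and node_page_state_pages() are the real interfaces used by the patch.

#include <linux/mm.h>
#include <linux/vmstat.h>

/*
 * Sketch only: byte-based slab items are updated in multiples of
 * PAGE_SIZE; memcg/lruvec readers see raw bytes, while node-level
 * readers use the *_pages() accessors, which shift back to pages.
 */
static inline void account_slab_pages(struct pglist_data *pgdat,
				      enum node_stat_item item,
				      long nr_pages)
{
	mod_node_page_state(pgdat, item, nr_pages << PAGE_SHIFT);
}

/* e.g. node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B) reports
 * the same counter back in pages. */

memory.kmem.slabinfo, by contrast, is a cgroup v1-only file with one row per kmem_cache, detail that matters mainly when debugging the slab allocator itself.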
In other words, it doesn't provide any value for the absolute majority of users. A drgn-based replacement can be found in tools/cgroup/memcg_slabinfo.py. It does support cgroup v1 and v2, mimics memory.kmem.slabinfo output and also allows to get any additional information without a need to recompile the kernel. If a drgn-based solution is too slow for a task, a bpf-based tracing tool can be used, which can easily keep track of all slab allocations belonging to a memory cgroup. Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Reviewed-by: Shakeel Butt Acked-by: Johannes Weiner Cc: Christoph Lameter Cc: Michal Hocko Cc: Tejun Heo Link: http://lkml.kernel.org/r/20200623174037.3951353-11-guro@fb.com Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 3 --- mm/slab_common.c | 31 ++++--------------------------- 2 files changed, 4 insertions(+), 30 deletions(-) (limited to 'mm/slab_common.c') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ab96a120e630..0356e05bc6e6 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5114,9 +5114,6 @@ static struct cftype mem_cgroup_legacy_files[] = { (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) { .name = "kmem.slabinfo", - .seq_start = memcg_slab_start, - .seq_next = memcg_slab_next, - .seq_stop = memcg_slab_stop, .seq_show = memcg_slab_show, }, #endif diff --git a/mm/slab_common.c b/mm/slab_common.c index a73f168b1035..f86431d0de73 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1561,35 +1561,12 @@ void dump_unreclaimable_slab(void) } #if defined(CONFIG_MEMCG_KMEM) -void *memcg_slab_start(struct seq_file *m, loff_t *pos) -{ - struct mem_cgroup *memcg = mem_cgroup_from_seq(m); - - mutex_lock(&slab_mutex); - return seq_list_start(&memcg->kmem_caches, *pos); -} - -void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos) -{ - struct mem_cgroup *memcg = mem_cgroup_from_seq(m); - - return seq_list_next(p, &memcg->kmem_caches, pos); -} - -void memcg_slab_stop(struct seq_file *m, void *p) -{ - mutex_unlock(&slab_mutex); -} - int memcg_slab_show(struct seq_file *m, void *p) { - struct kmem_cache *s = list_entry(p, struct kmem_cache, - memcg_params.kmem_caches_node); - struct mem_cgroup *memcg = mem_cgroup_from_seq(m); - - if (p == memcg->kmem_caches.next) - print_slabinfo_header(m); - cache_show(s, m); + /* + * Deprecated. + * Please, take a look at tools/cgroup/slabinfo.py . + */ return 0; } #endif -- cgit v1.2.3 From 9855609bde03e2472b99a95e869d29ee1e78a751 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:21:10 -0700 Subject: mm: memcg/slab: use a single set of kmem_caches for all accounted allocations This is fairly big but mostly red patch, which makes all accounted slab allocations use a single set of kmem_caches instead of creating a separate set for each memory cgroup. Because the number of non-root kmem_caches is now capped by the number of root kmem_caches, there is no need to shrink or destroy them prematurely. They can be perfectly destroyed together with their root counterparts. This allows to dramatically simplify the management of non-root kmem_caches and delete a ton of code. This patch performs the following changes: 1) introduces memcg_params.memcg_cache pointer to represent the kmem_cache which will be used for all non-root allocations 2) reuses the existing memcg kmem_cache creation mechanism to create memcg kmem_cache on the first allocation attempt 3) memcg kmem_caches are named -memcg, e.g. 
dentry-memcg 4) simplifies memcg_kmem_get_cache() to just return memcg kmem_cache or schedule it's creation and return the root cache 5) removes almost all non-root kmem_cache management code (separate refcounter, reparenting, shrinking, etc) 6) makes slab debugfs to display root_mem_cgroup css id and never show :dead and :deact flags in the memcg_slabinfo attribute. Following patches in the series will simplify the kmem_cache creation. Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Reviewed-by: Shakeel Butt Cc: Christoph Lameter Cc: Johannes Weiner Cc: Michal Hocko Cc: Tejun Heo Link: http://lkml.kernel.org/r/20200623174037.3951353-13-guro@fb.com Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 5 +- include/linux/slab.h | 5 +- mm/memcontrol.c | 163 ++++------------ mm/slab.c | 16 +- mm/slab.h | 146 +++++--------- mm/slab_common.c | 459 +++++---------------------------------------- mm/slub.c | 38 +--- 7 files changed, 134 insertions(+), 698 deletions(-) (limited to 'mm/slab_common.c') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 83e2858aecf2..11fd18b3d6c6 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -317,7 +317,6 @@ struct mem_cgroup { /* Index in the kmem_cache->memcg_params.memcg_caches array */ int kmemcg_id; enum memcg_kmem_state kmem_state; - struct list_head kmem_caches; struct obj_cgroup __rcu *objcg; struct list_head objcg_list; /* list of inherited objcgs */ #endif @@ -1404,9 +1403,7 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg, } #endif -struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep, - struct obj_cgroup **objcgp); -void memcg_kmem_put_cache(struct kmem_cache *cachep); +struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); #ifdef CONFIG_MEMCG_KMEM int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, diff --git a/include/linux/slab.h b/include/linux/slab.h index 0884d82c55ee..8b1f91e320f9 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -155,8 +155,7 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name, void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); -void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *); -void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *); +void memcg_create_kmem_cache(struct kmem_cache *cachep); /* * Please use this macro to create slab caches. Simply specify the @@ -580,8 +579,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) return __kmalloc_node(size, flags, node); } -int memcg_update_all_caches(int num_memcgs); - /** * kmalloc_array - allocate memory for an array. * @n: number of elements. diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 5cb2a588cc10..874704c4a48a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -350,7 +350,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg, } /* - * This will be the memcg's index in each cache's ->memcg_params.memcg_caches. + * This will be used as a shrinker list's index. * The main reason for not using cgroup id for this: * this works better in sparse environments, where we have a lot of memcgs, * but only a few kmem-limited. 
Or also, if we have, for instance, 200 @@ -569,20 +569,16 @@ ino_t page_cgroup_ino(struct page *page) unsigned long ino = 0; rcu_read_lock(); - if (PageSlab(page) && !PageTail(page)) { - memcg = memcg_from_slab_page(page); - } else { - memcg = page->mem_cgroup; + memcg = page->mem_cgroup; - /* - * The lowest bit set means that memcg isn't a valid - * memcg pointer, but a obj_cgroups pointer. - * In this case the page is shared and doesn't belong - * to any specific memory cgroup. - */ - if ((unsigned long) memcg & 0x1UL) - memcg = NULL; - } + /* + * The lowest bit set means that memcg isn't a valid + * memcg pointer, but a obj_cgroups pointer. + * In this case the page is shared and doesn't belong + * to any specific memory cgroup. + */ + if ((unsigned long) memcg & 0x1UL) + memcg = NULL; while (memcg && !(memcg->css.flags & CSS_ONLINE)) memcg = parent_mem_cgroup(memcg); @@ -2822,12 +2818,18 @@ struct mem_cgroup *mem_cgroup_from_obj(void *p) page = virt_to_head_page(p); /* - * Slab pages don't have page->mem_cgroup set because corresponding - * kmem caches can be reparented during the lifetime. That's why - * memcg_from_slab_page() should be used instead. + * Slab objects are accounted individually, not per-page. + * Memcg membership data for each individual object is saved in + * the page->obj_cgroups. */ - if (PageSlab(page)) - return memcg_from_slab_page(page); + if (page_has_obj_cgroups(page)) { + struct obj_cgroup *objcg; + unsigned int off; + + off = obj_to_index(page->slab_cache, page, p); + objcg = page_obj_cgroups(page)[off]; + return obj_cgroup_memcg(objcg); + } /* All other pages use page->mem_cgroup */ return page->mem_cgroup; @@ -2882,9 +2884,7 @@ static int memcg_alloc_cache_id(void) else if (size > MEMCG_CACHES_MAX_SIZE) size = MEMCG_CACHES_MAX_SIZE; - err = memcg_update_all_caches(size); - if (!err) - err = memcg_update_all_list_lrus(size); + err = memcg_update_all_list_lrus(size); if (!err) memcg_nr_cache_ids = size; @@ -2903,7 +2903,6 @@ static void memcg_free_cache_id(int id) } struct memcg_kmem_cache_create_work { - struct mem_cgroup *memcg; struct kmem_cache *cachep; struct work_struct work; }; @@ -2912,33 +2911,24 @@ static void memcg_kmem_cache_create_func(struct work_struct *w) { struct memcg_kmem_cache_create_work *cw = container_of(w, struct memcg_kmem_cache_create_work, work); - struct mem_cgroup *memcg = cw->memcg; struct kmem_cache *cachep = cw->cachep; - memcg_create_kmem_cache(memcg, cachep); + memcg_create_kmem_cache(cachep); - css_put(&memcg->css); kfree(cw); } /* * Enqueue the creation of a per-memcg kmem_cache. */ -static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, - struct kmem_cache *cachep) +static void memcg_schedule_kmem_cache_create(struct kmem_cache *cachep) { struct memcg_kmem_cache_create_work *cw; - if (!css_tryget_online(&memcg->css)) - return; - cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); - if (!cw) { - css_put(&memcg->css); + if (!cw) return; - } - cw->memcg = memcg; cw->cachep = cachep; INIT_WORK(&cw->work, memcg_kmem_cache_create_func); @@ -2946,102 +2936,26 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, } /** - * memcg_kmem_get_cache: select the correct per-memcg cache for allocation + * memcg_kmem_get_cache: select memcg or root cache for allocation * @cachep: the original global kmem cache * * Return the kmem_cache we're supposed to use for a slab allocation. - * We try to use the current memcg's version of the cache. 
* * If the cache does not exist yet, if we are the first user of it, we * create it asynchronously in a workqueue and let the current allocation * go through with the original cache. - * - * This function takes a reference to the cache it returns to assure it - * won't get destroyed while we are working with it. Once the caller is - * done with it, memcg_kmem_put_cache() must be called to release the - * reference. */ -struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep, - struct obj_cgroup **objcgp) +struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep) { - struct mem_cgroup *memcg; struct kmem_cache *memcg_cachep; - struct memcg_cache_array *arr; - int kmemcg_id; - VM_BUG_ON(!is_root_cache(cachep)); - - if (memcg_kmem_bypass()) + memcg_cachep = READ_ONCE(cachep->memcg_params.memcg_cache); + if (unlikely(!memcg_cachep)) { + memcg_schedule_kmem_cache_create(cachep); return cachep; - - rcu_read_lock(); - - if (unlikely(current->active_memcg)) - memcg = current->active_memcg; - else - memcg = mem_cgroup_from_task(current); - - if (!memcg || memcg == root_mem_cgroup) - goto out_unlock; - - kmemcg_id = READ_ONCE(memcg->kmemcg_id); - if (kmemcg_id < 0) - goto out_unlock; - - arr = rcu_dereference(cachep->memcg_params.memcg_caches); - - /* - * Make sure we will access the up-to-date value. The code updating - * memcg_caches issues a write barrier to match the data dependency - * barrier inside READ_ONCE() (see memcg_create_kmem_cache()). - */ - memcg_cachep = READ_ONCE(arr->entries[kmemcg_id]); - - /* - * If we are in a safe context (can wait, and not in interrupt - * context), we could be be predictable and return right away. - * This would guarantee that the allocation being performed - * already belongs in the new cache. - * - * However, there are some clashes that can arrive from locking. - * For instance, because we acquire the slab_mutex while doing - * memcg_create_kmem_cache, this means no further allocation - * could happen with the slab_mutex held. So it's better to - * defer everything. - * - * If the memcg is dying or memcg_cache is about to be released, - * don't bother creating new kmem_caches. Because memcg_cachep - * is ZEROed as the fist step of kmem offlining, we don't need - * percpu_ref_tryget_live() here. css_tryget_online() check in - * memcg_schedule_kmem_cache_create() will prevent us from - * creation of a new kmem_cache. - */ - if (unlikely(!memcg_cachep)) - memcg_schedule_kmem_cache_create(memcg, cachep); - else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt)) { - struct obj_cgroup *objcg = rcu_dereference(memcg->objcg); - - if (!objcg || !obj_cgroup_tryget(objcg)) { - percpu_ref_put(&memcg_cachep->memcg_params.refcnt); - goto out_unlock; - } - - *objcgp = objcg; - cachep = memcg_cachep; } -out_unlock: - rcu_read_unlock(); - return cachep; -} -/** - * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache - * @cachep: the cache returned by memcg_kmem_get_cache - */ -void memcg_kmem_put_cache(struct kmem_cache *cachep) -{ - if (!is_root_cache(cachep)) - percpu_ref_put(&cachep->memcg_params.refcnt); + return memcg_cachep; } /** @@ -3731,7 +3645,6 @@ static int memcg_online_kmem(struct mem_cgroup *memcg) */ memcg->kmemcg_id = memcg_id; memcg->kmem_state = KMEM_ONLINE; - INIT_LIST_HEAD(&memcg->kmem_caches); return 0; } @@ -3744,22 +3657,13 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) if (memcg->kmem_state != KMEM_ONLINE) return; - /* - * Clear the online state before clearing memcg_caches array - * entries. 
The slab_mutex in memcg_deactivate_kmem_caches() - * guarantees that no cache will be created for this cgroup - * after we are done (see memcg_create_kmem_cache()). - */ + memcg->kmem_state = KMEM_ALLOCATED; parent = parent_mem_cgroup(memcg); if (!parent) parent = root_mem_cgroup; - /* - * Deactivate and reparent kmem_caches and objcgs. - */ - memcg_deactivate_kmem_caches(memcg, parent); memcg_reparent_objcgs(memcg, parent); kmemcg_id = memcg->kmemcg_id; @@ -5384,9 +5288,6 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) /* The following stuff does not apply to the root */ if (!parent) { -#ifdef CONFIG_MEMCG_KMEM - INIT_LIST_HEAD(&memcg->kmem_caches); -#endif root_mem_cgroup = memcg; return &memcg->css; } diff --git a/mm/slab.c b/mm/slab.c index 1e90b67735aa..0dd6956585dc 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1249,7 +1249,7 @@ void __init kmem_cache_init(void) nr_node_ids * sizeof(struct kmem_cache_node *), SLAB_HWCACHE_ALIGN, 0, 0); list_add(&kmem_cache->list, &slab_caches); - memcg_link_cache(kmem_cache, NULL); + memcg_link_cache(kmem_cache); slab_state = PARTIAL; /* @@ -2253,17 +2253,6 @@ int __kmem_cache_shrink(struct kmem_cache *cachep) return (ret ? 1 : 0); } -#ifdef CONFIG_MEMCG -void __kmemcg_cache_deactivate(struct kmem_cache *cachep) -{ - __kmem_cache_shrink(cachep); -} - -void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s) -{ -} -#endif - int __kmem_cache_shutdown(struct kmem_cache *cachep) { return __kmem_cache_shrink(cachep); @@ -3872,7 +3861,8 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, return ret; lockdep_assert_held(&slab_mutex); - for_each_memcg_cache(c, cachep) { + c = memcg_cache(cachep); + if (c) { /* return value determined by the root cache only */ __do_tune_cpucache(c, limit, batchcount, shared, gfp); } diff --git a/mm/slab.h b/mm/slab.h index 1f067e8bc377..e716b80befc2 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -32,66 +32,25 @@ struct kmem_cache { #else /* !CONFIG_SLOB */ -struct memcg_cache_array { - struct rcu_head rcu; - struct kmem_cache *entries[0]; -}; - /* * This is the main placeholder for memcg-related information in kmem caches. - * Both the root cache and the child caches will have it. For the root cache, - * this will hold a dynamically allocated array large enough to hold - * information about the currently limited memcgs in the system. To allow the - * array to be accessed without taking any locks, on relocation we free the old - * version only after a grace period. - * - * Root and child caches hold different metadata. + * Both the root cache and the child cache will have it. Some fields are used + * in both cases, other are specific to root caches. * * @root_cache: Common to root and child caches. NULL for root, pointer to * the root cache for children. * * The following fields are specific to root caches. * - * @memcg_caches: kmemcg ID indexed table of child caches. This table is - * used to index child cachces during allocation and cleared - * early during shutdown. - * - * @root_caches_node: List node for slab_root_caches list. - * - * @children: List of all child caches. While the child caches are also - * reachable through @memcg_caches, a child cache remains on - * this list until it is actually destroyed. - * - * The following fields are specific to child caches. - * - * @memcg: Pointer to the memcg this cache belongs to. - * - * @children_node: List node for @root_cache->children list. - * - * @kmem_caches_node: List node for @memcg->kmem_caches list. 
+ * @memcg_cache: pointer to memcg kmem cache, used by all non-root memory + * cgroups. + * @root_caches_node: list node for slab_root_caches list. */ struct memcg_cache_params { struct kmem_cache *root_cache; - union { - struct { - struct memcg_cache_array __rcu *memcg_caches; - struct list_head __root_caches_node; - struct list_head children; - bool dying; - }; - struct { - struct mem_cgroup *memcg; - struct list_head children_node; - struct list_head kmem_caches_node; - struct percpu_ref refcnt; - - void (*work_fn)(struct kmem_cache *); - union { - struct rcu_head rcu_head; - struct work_struct work; - }; - }; - }; + + struct kmem_cache *memcg_cache; + struct list_head __root_caches_node; }; #endif /* CONFIG_SLOB */ @@ -236,8 +195,6 @@ bool __kmem_cache_empty(struct kmem_cache *); int __kmem_cache_shutdown(struct kmem_cache *); void __kmem_cache_release(struct kmem_cache *); int __kmem_cache_shrink(struct kmem_cache *); -void __kmemcg_cache_deactivate(struct kmem_cache *s); -void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s); void slab_kmem_cache_release(struct kmem_cache *); void kmem_cache_shrink_all(struct kmem_cache *s); @@ -311,14 +268,6 @@ static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t fla extern struct list_head slab_root_caches; #define root_caches_node memcg_params.__root_caches_node -/* - * Iterate over all memcg caches of the given root cache. The caller must hold - * slab_mutex. - */ -#define for_each_memcg_cache(iter, root) \ - list_for_each_entry(iter, &(root)->memcg_params.children, \ - memcg_params.children_node) - static inline bool is_root_cache(struct kmem_cache *s) { return !s->memcg_params.root_cache; @@ -349,6 +298,13 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) return s->memcg_params.root_cache; } +static inline struct kmem_cache *memcg_cache(struct kmem_cache *s) +{ + if (is_root_cache(s)) + return s->memcg_params.memcg_cache; + return NULL; +} + static inline struct obj_cgroup **page_obj_cgroups(struct page *page) { /* @@ -361,25 +317,9 @@ static inline struct obj_cgroup **page_obj_cgroups(struct page *page) ((unsigned long)page->obj_cgroups & ~0x1UL); } -/* - * Expects a pointer to a slab page. Please note, that PageSlab() check - * isn't sufficient, as it returns true also for tail compound slab pages, - * which do not have slab_cache pointer set. - * So this function assumes that the page can pass PageSlab() && !PageTail() - * check. - * - * The kmem_cache can be reparented asynchronously. The caller must ensure - * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex. 
- */ -static inline struct mem_cgroup *memcg_from_slab_page(struct page *page) +static inline bool page_has_obj_cgroups(struct page *page) { - struct kmem_cache *s; - - s = READ_ONCE(page->slab_cache); - if (s && !is_root_cache(s)) - return READ_ONCE(s->memcg_params.memcg); - - return NULL; + return ((unsigned long)page->obj_cgroups & 0x1UL); } static inline int memcg_alloc_page_obj_cgroups(struct page *page, @@ -418,17 +358,25 @@ static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s, size_t objects, gfp_t flags) { struct kmem_cache *cachep; + struct obj_cgroup *objcg; + + if (memcg_kmem_bypass()) + return s; - cachep = memcg_kmem_get_cache(s, objcgp); + cachep = memcg_kmem_get_cache(s); if (is_root_cache(cachep)) return s; - if (obj_cgroup_charge(*objcgp, flags, objects * obj_full_size(s))) { - obj_cgroup_put(*objcgp); - memcg_kmem_put_cache(cachep); + objcg = get_obj_cgroup_from_current(); + if (!objcg) + return s; + + if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) { + obj_cgroup_put(objcg); cachep = NULL; } + *objcgp = objcg; return cachep; } @@ -467,7 +415,6 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s, } } obj_cgroup_put(objcg); - memcg_kmem_put_cache(s); } static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page, @@ -491,7 +438,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page, } extern void slab_init_memcg_params(struct kmem_cache *); -extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg); +extern void memcg_link_cache(struct kmem_cache *s); #else /* CONFIG_MEMCG_KMEM */ @@ -499,9 +446,6 @@ extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg); #define slab_root_caches slab_caches #define root_caches_node list -#define for_each_memcg_cache(iter, root) \ - for ((void)(iter), (void)(root); 0; ) - static inline bool is_root_cache(struct kmem_cache *s) { return true; @@ -523,7 +467,17 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) return s; } -static inline struct mem_cgroup *memcg_from_slab_page(struct page *page) +static inline struct kmem_cache *memcg_cache(struct kmem_cache *s) +{ + return NULL; +} + +static inline bool page_has_obj_cgroups(struct page *page) +{ + return false; +} + +static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr) { return NULL; } @@ -560,8 +514,7 @@ static inline void slab_init_memcg_params(struct kmem_cache *s) { } -static inline void memcg_link_cache(struct kmem_cache *s, - struct mem_cgroup *memcg) +static inline void memcg_link_cache(struct kmem_cache *s) { } @@ -582,17 +535,14 @@ static __always_inline int charge_slab_page(struct page *page, gfp_t gfp, int order, struct kmem_cache *s) { -#ifdef CONFIG_MEMCG_KMEM if (memcg_kmem_enabled() && !is_root_cache(s)) { int ret; ret = memcg_alloc_page_obj_cgroups(page, s, gfp); if (ret) return ret; - - percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order); } -#endif + mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), PAGE_SIZE << order); return 0; @@ -601,12 +551,9 @@ static __always_inline int charge_slab_page(struct page *page, static __always_inline void uncharge_slab_page(struct page *page, int order, struct kmem_cache *s) { -#ifdef CONFIG_MEMCG_KMEM - if (memcg_kmem_enabled() && !is_root_cache(s)) { + if (memcg_kmem_enabled() && !is_root_cache(s)) memcg_free_page_obj_cgroups(page); - percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order); - } -#endif + 
mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), -(PAGE_SIZE << order)); } @@ -749,9 +696,6 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) void *slab_start(struct seq_file *m, loff_t *pos); void *slab_next(struct seq_file *m, void *p, loff_t *pos); void slab_stop(struct seq_file *m, void *p); -void *memcg_slab_start(struct seq_file *m, loff_t *pos); -void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos); -void memcg_slab_stop(struct seq_file *m, void *p); int memcg_slab_show(struct seq_file *m, void *p); #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG) diff --git a/mm/slab_common.c b/mm/slab_common.c index f86431d0de73..e752132eb64d 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -133,141 +133,36 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, #ifdef CONFIG_MEMCG_KMEM LIST_HEAD(slab_root_caches); -static DEFINE_SPINLOCK(memcg_kmem_wq_lock); - -static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref); void slab_init_memcg_params(struct kmem_cache *s) { s->memcg_params.root_cache = NULL; - RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL); - INIT_LIST_HEAD(&s->memcg_params.children); - s->memcg_params.dying = false; + s->memcg_params.memcg_cache = NULL; } -static int init_memcg_params(struct kmem_cache *s, - struct kmem_cache *root_cache) +static void init_memcg_params(struct kmem_cache *s, + struct kmem_cache *root_cache) { - struct memcg_cache_array *arr; - - if (root_cache) { - int ret = percpu_ref_init(&s->memcg_params.refcnt, - kmemcg_cache_shutdown, - 0, GFP_KERNEL); - if (ret) - return ret; - + if (root_cache) s->memcg_params.root_cache = root_cache; - INIT_LIST_HEAD(&s->memcg_params.children_node); - INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node); - return 0; - } - - slab_init_memcg_params(s); - - if (!memcg_nr_cache_ids) - return 0; - - arr = kvzalloc(sizeof(struct memcg_cache_array) + - memcg_nr_cache_ids * sizeof(void *), - GFP_KERNEL); - if (!arr) - return -ENOMEM; - - RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr); - return 0; -} - -static void destroy_memcg_params(struct kmem_cache *s) -{ - if (is_root_cache(s)) { - kvfree(rcu_access_pointer(s->memcg_params.memcg_caches)); - } else { - mem_cgroup_put(s->memcg_params.memcg); - WRITE_ONCE(s->memcg_params.memcg, NULL); - percpu_ref_exit(&s->memcg_params.refcnt); - } -} - -static void free_memcg_params(struct rcu_head *rcu) -{ - struct memcg_cache_array *old; - - old = container_of(rcu, struct memcg_cache_array, rcu); - kvfree(old); -} - -static int update_memcg_params(struct kmem_cache *s, int new_array_size) -{ - struct memcg_cache_array *old, *new; - - new = kvzalloc(sizeof(struct memcg_cache_array) + - new_array_size * sizeof(void *), GFP_KERNEL); - if (!new) - return -ENOMEM; - - old = rcu_dereference_protected(s->memcg_params.memcg_caches, - lockdep_is_held(&slab_mutex)); - if (old) - memcpy(new->entries, old->entries, - memcg_nr_cache_ids * sizeof(void *)); - - rcu_assign_pointer(s->memcg_params.memcg_caches, new); - if (old) - call_rcu(&old->rcu, free_memcg_params); - return 0; + else + slab_init_memcg_params(s); } -int memcg_update_all_caches(int num_memcgs) +void memcg_link_cache(struct kmem_cache *s) { - struct kmem_cache *s; - int ret = 0; - - mutex_lock(&slab_mutex); - list_for_each_entry(s, &slab_root_caches, root_caches_node) { - ret = update_memcg_params(s, num_memcgs); - /* - * Instead of freeing the memory, we'll just leave the caches - * up to this point in an updated state. 
- */ - if (ret) - break; - } - mutex_unlock(&slab_mutex); - return ret; -} - -void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg) -{ - if (is_root_cache(s)) { + if (is_root_cache(s)) list_add(&s->root_caches_node, &slab_root_caches); - } else { - css_get(&memcg->css); - s->memcg_params.memcg = memcg; - list_add(&s->memcg_params.children_node, - &s->memcg_params.root_cache->memcg_params.children); - list_add(&s->memcg_params.kmem_caches_node, - &s->memcg_params.memcg->kmem_caches); - } } static void memcg_unlink_cache(struct kmem_cache *s) { - if (is_root_cache(s)) { + if (is_root_cache(s)) list_del(&s->root_caches_node); - } else { - list_del(&s->memcg_params.children_node); - list_del(&s->memcg_params.kmem_caches_node); - } } #else -static inline int init_memcg_params(struct kmem_cache *s, - struct kmem_cache *root_cache) -{ - return 0; -} - -static inline void destroy_memcg_params(struct kmem_cache *s) +static inline void init_memcg_params(struct kmem_cache *s, + struct kmem_cache *root_cache) { } @@ -328,14 +223,6 @@ int slab_unmergeable(struct kmem_cache *s) if (s->refcount < 0) return 1; -#ifdef CONFIG_MEMCG_KMEM - /* - * Skip the dying kmem_cache. - */ - if (s->memcg_params.dying) - return 1; -#endif - return 0; } @@ -390,7 +277,7 @@ static struct kmem_cache *create_cache(const char *name, unsigned int object_size, unsigned int align, slab_flags_t flags, unsigned int useroffset, unsigned int usersize, void (*ctor)(void *), - struct mem_cgroup *memcg, struct kmem_cache *root_cache) + struct kmem_cache *root_cache) { struct kmem_cache *s; int err; @@ -410,24 +297,20 @@ static struct kmem_cache *create_cache(const char *name, s->useroffset = useroffset; s->usersize = usersize; - err = init_memcg_params(s, root_cache); - if (err) - goto out_free_cache; - + init_memcg_params(s, root_cache); err = __kmem_cache_create(s, flags); if (err) goto out_free_cache; s->refcount = 1; list_add(&s->list, &slab_caches); - memcg_link_cache(s, memcg); + memcg_link_cache(s); out: if (err) return ERR_PTR(err); return s; out_free_cache: - destroy_memcg_params(s); kmem_cache_free(kmem_cache, s); goto out; } @@ -514,7 +397,7 @@ kmem_cache_create_usercopy(const char *name, s = create_cache(cache_name, size, calculate_alignment(flags, align, size), - flags, useroffset, usersize, ctor, NULL, NULL); + flags, useroffset, usersize, ctor, NULL); if (IS_ERR(s)) { err = PTR_ERR(s); kfree_const(cache_name); @@ -639,51 +522,27 @@ static int shutdown_cache(struct kmem_cache *s) #ifdef CONFIG_MEMCG_KMEM /* - * memcg_create_kmem_cache - Create a cache for a memory cgroup. - * @memcg: The memory cgroup the new cache is for. + * memcg_create_kmem_cache - Create a cache for non-root memory cgroups. * @root_cache: The parent of the new cache. * * This function attempts to create a kmem cache that will serve allocation - * requests going from @memcg to @root_cache. The new cache inherits properties - * from its parent. + * requests going all non-root memory cgroups to @root_cache. The new cache + * inherits properties from its parent. 
*/ -void memcg_create_kmem_cache(struct mem_cgroup *memcg, - struct kmem_cache *root_cache) +void memcg_create_kmem_cache(struct kmem_cache *root_cache) { - static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */ - struct cgroup_subsys_state *css = &memcg->css; - struct memcg_cache_array *arr; struct kmem_cache *s = NULL; char *cache_name; - int idx; get_online_cpus(); get_online_mems(); mutex_lock(&slab_mutex); - /* - * The memory cgroup could have been offlined while the cache - * creation work was pending. - */ - if (memcg->kmem_state != KMEM_ONLINE) + if (root_cache->memcg_params.memcg_cache) goto out_unlock; - idx = memcg_cache_id(memcg); - arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches, - lockdep_is_held(&slab_mutex)); - - /* - * Since per-memcg caches are created asynchronously on first - * allocation (see memcg_kmem_get_cache()), several threads can try to - * create the same cache, but only one of them may succeed. - */ - if (arr->entries[idx]) - goto out_unlock; - - cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf)); - cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name, - css->serial_nr, memcg_name_buf); + cache_name = kasprintf(GFP_KERNEL, "%s-memcg", root_cache->name); if (!cache_name) goto out_unlock; @@ -691,7 +550,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg, root_cache->align, root_cache->flags & CACHE_CREATE_MASK, root_cache->useroffset, root_cache->usersize, - root_cache->ctor, memcg, root_cache); + root_cache->ctor, root_cache); /* * If we could not create a memcg cache, do not complain, because * that's not critical at all as we can always proceed with the root @@ -708,7 +567,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg, * initialized. */ smp_wmb(); - arr->entries[idx] = s; + root_cache->memcg_params.memcg_cache = s; out_unlock: mutex_unlock(&slab_mutex); @@ -717,200 +576,18 @@ out_unlock: put_online_cpus(); } -static void kmemcg_workfn(struct work_struct *work) -{ - struct kmem_cache *s = container_of(work, struct kmem_cache, - memcg_params.work); - - get_online_cpus(); - get_online_mems(); - - mutex_lock(&slab_mutex); - s->memcg_params.work_fn(s); - mutex_unlock(&slab_mutex); - - put_online_mems(); - put_online_cpus(); -} - -static void kmemcg_rcufn(struct rcu_head *head) -{ - struct kmem_cache *s = container_of(head, struct kmem_cache, - memcg_params.rcu_head); - - /* - * We need to grab blocking locks. Bounce to ->work. The - * work item shares the space with the RCU head and can't be - * initialized earlier. 
- */ - INIT_WORK(&s->memcg_params.work, kmemcg_workfn); - queue_work(memcg_kmem_cache_wq, &s->memcg_params.work); -} - -static void kmemcg_cache_shutdown_fn(struct kmem_cache *s) -{ - WARN_ON(shutdown_cache(s)); -} - -static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref) -{ - struct kmem_cache *s = container_of(percpu_ref, struct kmem_cache, - memcg_params.refcnt); - unsigned long flags; - - spin_lock_irqsave(&memcg_kmem_wq_lock, flags); - if (s->memcg_params.root_cache->memcg_params.dying) - goto unlock; - - s->memcg_params.work_fn = kmemcg_cache_shutdown_fn; - INIT_WORK(&s->memcg_params.work, kmemcg_workfn); - queue_work(memcg_kmem_cache_wq, &s->memcg_params.work); - -unlock: - spin_unlock_irqrestore(&memcg_kmem_wq_lock, flags); -} - -static void kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s) -{ - __kmemcg_cache_deactivate_after_rcu(s); - percpu_ref_kill(&s->memcg_params.refcnt); -} - -static void kmemcg_cache_deactivate(struct kmem_cache *s) -{ - if (WARN_ON_ONCE(is_root_cache(s))) - return; - - __kmemcg_cache_deactivate(s); - s->flags |= SLAB_DEACTIVATED; - - /* - * memcg_kmem_wq_lock is used to synchronize memcg_params.dying - * flag and make sure that no new kmem_cache deactivation tasks - * are queued (see flush_memcg_workqueue() ). - */ - spin_lock_irq(&memcg_kmem_wq_lock); - if (s->memcg_params.root_cache->memcg_params.dying) - goto unlock; - - s->memcg_params.work_fn = kmemcg_cache_deactivate_after_rcu; - call_rcu(&s->memcg_params.rcu_head, kmemcg_rcufn); -unlock: - spin_unlock_irq(&memcg_kmem_wq_lock); -} - -void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg, - struct mem_cgroup *parent) -{ - int idx; - struct memcg_cache_array *arr; - struct kmem_cache *s, *c; - unsigned int nr_reparented; - - idx = memcg_cache_id(memcg); - - get_online_cpus(); - get_online_mems(); - - mutex_lock(&slab_mutex); - list_for_each_entry(s, &slab_root_caches, root_caches_node) { - arr = rcu_dereference_protected(s->memcg_params.memcg_caches, - lockdep_is_held(&slab_mutex)); - c = arr->entries[idx]; - if (!c) - continue; - - kmemcg_cache_deactivate(c); - arr->entries[idx] = NULL; - } - nr_reparented = 0; - list_for_each_entry(s, &memcg->kmem_caches, - memcg_params.kmem_caches_node) { - WRITE_ONCE(s->memcg_params.memcg, parent); - css_put(&memcg->css); - nr_reparented++; - } - if (nr_reparented) { - list_splice_init(&memcg->kmem_caches, - &parent->kmem_caches); - css_get_many(&parent->css, nr_reparented); - } - mutex_unlock(&slab_mutex); - - put_online_mems(); - put_online_cpus(); -} - static int shutdown_memcg_caches(struct kmem_cache *s) { - struct memcg_cache_array *arr; - struct kmem_cache *c, *c2; - LIST_HEAD(busy); - int i; - BUG_ON(!is_root_cache(s)); - /* - * First, shutdown active caches, i.e. caches that belong to online - * memory cgroups. - */ - arr = rcu_dereference_protected(s->memcg_params.memcg_caches, - lockdep_is_held(&slab_mutex)); - for_each_memcg_cache_index(i) { - c = arr->entries[i]; - if (!c) - continue; - if (shutdown_cache(c)) - /* - * The cache still has objects. Move it to a temporary - * list so as not to try to destroy it for a second - * time while iterating over inactive caches below. - */ - list_move(&c->memcg_params.children_node, &busy); - else - /* - * The cache is empty and will be destroyed soon. Clear - * the pointer to it in the memcg_caches array so that - * it will never be accessed even if the root cache - * stays alive. 
- */ - arr->entries[i] = NULL; - } - - /* - * Second, shutdown all caches left from memory cgroups that are now - * offline. - */ - list_for_each_entry_safe(c, c2, &s->memcg_params.children, - memcg_params.children_node) - shutdown_cache(c); - - list_splice(&busy, &s->memcg_params.children); + if (s->memcg_params.memcg_cache) + WARN_ON(shutdown_cache(s->memcg_params.memcg_cache)); - /* - * A cache being destroyed must be empty. In particular, this means - * that all per memcg caches attached to it must be empty too. - */ - if (!list_empty(&s->memcg_params.children)) - return -EBUSY; return 0; } -static void memcg_set_kmem_cache_dying(struct kmem_cache *s) -{ - spin_lock_irq(&memcg_kmem_wq_lock); - s->memcg_params.dying = true; - spin_unlock_irq(&memcg_kmem_wq_lock); -} - static void flush_memcg_workqueue(struct kmem_cache *s) { - /* - * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make - * sure all registered rcu callbacks have been invoked. - */ - rcu_barrier(); - /* * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB * deactivates the memcg kmem_caches through workqueue. Make sure all @@ -918,30 +595,21 @@ static void flush_memcg_workqueue(struct kmem_cache *s) */ if (likely(memcg_kmem_cache_wq)) flush_workqueue(memcg_kmem_cache_wq); - - /* - * If we're racing with children kmem_cache deactivation, it might - * take another rcu grace period to complete their destruction. - * At this moment the corresponding percpu_ref_kill() call should be - * done, but it might take another rcu grace period to complete - * switching to the atomic mode. - * Please, note that we check without grabbing the slab_mutex. It's safe - * because at this moment the children list can't grow. - */ - if (!list_empty(&s->memcg_params.children)) - rcu_barrier(); } #else static inline int shutdown_memcg_caches(struct kmem_cache *s) { return 0; } + +static inline void flush_memcg_workqueue(struct kmem_cache *s) +{ +} #endif /* CONFIG_MEMCG_KMEM */ void slab_kmem_cache_release(struct kmem_cache *s) { __kmem_cache_release(s); - destroy_memcg_params(s); kfree_const(s->name); kmem_cache_free(kmem_cache, s); } @@ -953,6 +621,8 @@ void kmem_cache_destroy(struct kmem_cache *s) if (unlikely(!s)) return; + flush_memcg_workqueue(s); + get_online_cpus(); get_online_mems(); @@ -962,22 +632,6 @@ void kmem_cache_destroy(struct kmem_cache *s) if (s->refcount) goto out_unlock; -#ifdef CONFIG_MEMCG_KMEM - memcg_set_kmem_cache_dying(s); - - mutex_unlock(&slab_mutex); - - put_online_mems(); - put_online_cpus(); - - flush_memcg_workqueue(s); - - get_online_cpus(); - get_online_mems(); - - mutex_lock(&slab_mutex); -#endif - err = shutdown_memcg_caches(s); if (!err) err = shutdown_cache(s); @@ -1019,7 +673,7 @@ int kmem_cache_shrink(struct kmem_cache *cachep) EXPORT_SYMBOL(kmem_cache_shrink); /** - * kmem_cache_shrink_all - shrink a cache and all memcg caches for root cache + * kmem_cache_shrink_all - shrink root and memcg caches * @s: The cache pointer */ void kmem_cache_shrink_all(struct kmem_cache *s) @@ -1036,21 +690,11 @@ void kmem_cache_shrink_all(struct kmem_cache *s) kasan_cache_shrink(s); __kmem_cache_shrink(s); - /* - * We have to take the slab_mutex to protect from the memcg list - * modification. - */ - mutex_lock(&slab_mutex); - for_each_memcg_cache(c, s) { - /* - * Don't need to shrink deactivated memcg caches. 
- */ - if (s->flags & SLAB_DEACTIVATED) - continue; + c = memcg_cache(s); + if (c) { kasan_cache_shrink(c); __kmem_cache_shrink(c); } - mutex_unlock(&slab_mutex); put_online_mems(); put_online_cpus(); } @@ -1105,7 +749,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, create_boot_cache(s, name, size, flags, useroffset, usersize); list_add(&s->list, &slab_caches); - memcg_link_cache(s, NULL); + memcg_link_cache(s); s->refcount = 1; return s; } @@ -1483,7 +1127,8 @@ memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info) if (!is_root_cache(s)) return; - for_each_memcg_cache(c, s) { + c = memcg_cache(s); + if (c) { memset(&sinfo, 0, sizeof(sinfo)); get_slabinfo(c, &sinfo); @@ -1614,7 +1259,7 @@ module_init(slab_proc_init); #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MEMCG_KMEM) /* - * Display information about kmem caches that have child memcg caches. + * Display information about kmem caches that have memcg cache. */ static int memcg_slabinfo_show(struct seq_file *m, void *unused) { @@ -1626,9 +1271,9 @@ static int memcg_slabinfo_show(struct seq_file *m, void *unused) seq_puts(m, " \n"); list_for_each_entry(s, &slab_root_caches, root_caches_node) { /* - * Skip kmem caches that don't have any memcg children. + * Skip kmem caches that don't have the memcg cache. */ - if (list_empty(&s->memcg_params.children)) + if (!s->memcg_params.memcg_cache) continue; memset(&sinfo, 0, sizeof(sinfo)); @@ -1637,23 +1282,13 @@ static int memcg_slabinfo_show(struct seq_file *m, void *unused) cache_name(s), sinfo.active_objs, sinfo.num_objs, sinfo.active_slabs, sinfo.num_slabs); - for_each_memcg_cache(c, s) { - struct cgroup_subsys_state *css; - char *status = ""; - - css = &c->memcg_params.memcg->css; - if (!(css->flags & CSS_ONLINE)) - status = ":dead"; - else if (c->flags & SLAB_DEACTIVATED) - status = ":deact"; - - memset(&sinfo, 0, sizeof(sinfo)); - get_slabinfo(c, &sinfo); - seq_printf(m, "%-17s %4d%-6s %6lu %6lu %6lu %6lu\n", - cache_name(c), css->id, status, - sinfo.active_objs, sinfo.num_objs, - sinfo.active_slabs, sinfo.num_slabs); - } + c = s->memcg_params.memcg_cache; + memset(&sinfo, 0, sizeof(sinfo)); + get_slabinfo(c, &sinfo); + seq_printf(m, "%-17s %4d %6lu %6lu %6lu %6lu\n", + cache_name(c), root_mem_cgroup->css.id, + sinfo.active_objs, sinfo.num_objs, + sinfo.active_slabs, sinfo.num_slabs); } mutex_unlock(&slab_mutex); return 0; diff --git a/mm/slub.c b/mm/slub.c index 47e63b1100d4..44a48a08a691 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -4204,36 +4204,6 @@ int __kmem_cache_shrink(struct kmem_cache *s) return ret; } -#ifdef CONFIG_MEMCG -void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s) -{ - /* - * Called with all the locks held after a sched RCU grace period. - * Even if @s becomes empty after shrinking, we can't know that @s - * doesn't have allocations already in-flight and thus can't - * destroy @s until the associated memcg is released. - * - * However, let's remove the sysfs files for empty caches here. - * Each cache has a lot of interface files which aren't - * particularly useful for empty draining caches; otherwise, we can - * easily end up with millions of unnecessary sysfs files on - * systems which have a lot of memory and transient cgroups. - */ - if (!__kmem_cache_shrink(s)) - sysfs_slab_remove(s); -} - -void __kmemcg_cache_deactivate(struct kmem_cache *s) -{ - /* - * Disable empty slabs caching. Used to avoid pinning offline - * memory cgroups by kmem pages that can be freed. 
- */ - slub_set_cpu_partial(s, 0); - s->min_partial = 0; -} -#endif /* CONFIG_MEMCG */ - static int slab_mem_going_offline_callback(void *arg) { struct kmem_cache *s; @@ -4390,7 +4360,7 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) } slab_init_memcg_params(s); list_add(&s->list, &slab_caches); - memcg_link_cache(s, NULL); + memcg_link_cache(s); return s; } @@ -4458,7 +4428,8 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, s->object_size = max(s->object_size, size); s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); - for_each_memcg_cache(c, s) { + c = memcg_cache(s); + if (c) { c->object_size = s->object_size; c->inuse = max(c->inuse, ALIGN(size, sizeof(void *))); } @@ -5591,7 +5562,8 @@ static ssize_t slab_attr_store(struct kobject *kobj, * directly either failed or succeeded, in which case we loop * through the descendants with best-effort propagation. */ - for_each_memcg_cache(c, s) + c = memcg_cache(s); + if (c) attribute->store(c, buf, len); mutex_unlock(&slab_mutex); } -- cgit v1.2.3 From d797b7d05405c519f7b62ea69a75cea1883863b2 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:21:14 -0700 Subject: mm: memcg/slab: simplify memcg cache creation Because the number of non-root kmem_caches doesn't depend on the number of memory cgroups anymore and is generally not very big, there is no more need for a dedicated workqueue. Also, as there is no more need to pass any arguments to the memcg_create_kmem_cache() except the root kmem_cache, it's possible to just embed the work structure into the kmem_cache and avoid the dynamic allocation of the work structure. This will also simplify the synchronization: for each root kmem_cache there is only one work. So there will be no more concurrent attempts to create a non-root kmem_cache for a root kmem_cache: the second and all following attempts to queue the work will fail. On the kmem_cache destruction path there is no more need to call the expensive flush_workqueue() and wait for all pending works to be finished. Instead, cancel_work_sync() can be used to cancel/wait for only one work. 
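To make the "only one pending work per root kmem_cache" property concrete, here is a minimal kernel-style sketch of the pattern (the names my_cache, my_cache_init, my_request_create and my_destroy are illustrative only and do not exist in the tree; the real code is in the mm/slab_common.c hunks below). It relies on the documented workqueue behaviour that queue_work() returns false and does nothing when the given work_struct is already pending:

#include <linux/workqueue.h>

struct my_cache {
    struct work_struct create_work;    /* embedded: no dynamic allocation */
};

static void my_create_workfn(struct work_struct *work)
{
    struct my_cache *c = container_of(work, struct my_cache, create_work);

    /* ... create the per-memcg clone of 'c' under slab_mutex ... */
}

static void my_cache_init(struct my_cache *c)
{
    INIT_WORK(&c->create_work, my_create_workfn);
}

/* allocation slow path: request the clone, do not wait for it */
static void my_request_create(struct my_cache *c)
{
    /* second and later callers find the work already pending and return */
    queue_work(system_wq, &c->create_work);
}

/* destruction path: wait for (at most) one work instead of a whole queue */
static void my_destroy(struct my_cache *c)
{
    cancel_work_sync(&c->create_work);
    /* the work can no longer run; 'c' can be torn down safely */
}

Because the work item is embedded in the cache, its lifetime is tied to the cache itself, which is what makes the single cancel_work_sync() on the destruction path sufficient.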
Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Reviewed-by: Shakeel Butt Cc: Christoph Lameter Cc: Johannes Weiner Cc: Michal Hocko Cc: Tejun Heo Link: http://lkml.kernel.org/r/20200623174037.3951353-14-guro@fb.com Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 1 - mm/memcontrol.c | 48 +--------------------------------------------- mm/slab.h | 2 ++ mm/slab_common.c | 22 +++++++++++---------- 4 files changed, 15 insertions(+), 58 deletions(-) (limited to 'mm/slab_common.c') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 11fd18b3d6c6..2ac84dcfc9e5 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1418,7 +1418,6 @@ int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size); void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size); extern struct static_key_false memcg_kmem_enabled_key; -extern struct workqueue_struct *memcg_kmem_cache_wq; extern int memcg_nr_cache_ids; void memcg_get_cache_ids(void); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 874704c4a48a..c713867e496d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -399,8 +399,6 @@ void memcg_put_cache_ids(void) */ DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key); EXPORT_SYMBOL(memcg_kmem_enabled_key); - -struct workqueue_struct *memcg_kmem_cache_wq; #endif static int memcg_shrinker_map_size; @@ -2902,39 +2900,6 @@ static void memcg_free_cache_id(int id) ida_simple_remove(&memcg_cache_ida, id); } -struct memcg_kmem_cache_create_work { - struct kmem_cache *cachep; - struct work_struct work; -}; - -static void memcg_kmem_cache_create_func(struct work_struct *w) -{ - struct memcg_kmem_cache_create_work *cw = - container_of(w, struct memcg_kmem_cache_create_work, work); - struct kmem_cache *cachep = cw->cachep; - - memcg_create_kmem_cache(cachep); - - kfree(cw); -} - -/* - * Enqueue the creation of a per-memcg kmem_cache. - */ -static void memcg_schedule_kmem_cache_create(struct kmem_cache *cachep) -{ - struct memcg_kmem_cache_create_work *cw; - - cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); - if (!cw) - return; - - cw->cachep = cachep; - INIT_WORK(&cw->work, memcg_kmem_cache_create_func); - - queue_work(memcg_kmem_cache_wq, &cw->work); -} - /** * memcg_kmem_get_cache: select memcg or root cache for allocation * @cachep: the original global kmem cache @@ -2951,7 +2916,7 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep) memcg_cachep = READ_ONCE(cachep->memcg_params.memcg_cache); if (unlikely(!memcg_cachep)) { - memcg_schedule_kmem_cache_create(cachep); + queue_work(system_wq, &cachep->memcg_params.work); return cachep; } @@ -7022,17 +6987,6 @@ static int __init mem_cgroup_init(void) { int cpu, node; -#ifdef CONFIG_MEMCG_KMEM - /* - * Kmem cache creation is mostly done with the slab_mutex held, - * so use a workqueue with limited concurrency to avoid stalling - * all worker threads in case lots of cgroups are created and - * destroyed simultaneously. - */ - memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1); - BUG_ON(!memcg_kmem_cache_wq); -#endif - cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, memcg_hotplug_cpu_dead); diff --git a/mm/slab.h b/mm/slab.h index e716b80befc2..fd9fcdfb3789 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -45,12 +45,14 @@ struct kmem_cache { * @memcg_cache: pointer to memcg kmem cache, used by all non-root memory * cgroups. * @root_caches_node: list node for slab_root_caches list. 
+ * @work: work struct used to create the non-root cache. */ struct memcg_cache_params { struct kmem_cache *root_cache; struct kmem_cache *memcg_cache; struct list_head __root_caches_node; + struct work_struct work; }; #endif /* CONFIG_SLOB */ diff --git a/mm/slab_common.c b/mm/slab_common.c index e752132eb64d..b898698f6c8a 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -134,10 +134,18 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, LIST_HEAD(slab_root_caches); +static void memcg_kmem_cache_create_func(struct work_struct *work) +{ + struct kmem_cache *cachep = container_of(work, struct kmem_cache, + memcg_params.work); + memcg_create_kmem_cache(cachep); +} + void slab_init_memcg_params(struct kmem_cache *s) { s->memcg_params.root_cache = NULL; s->memcg_params.memcg_cache = NULL; + INIT_WORK(&s->memcg_params.work, memcg_kmem_cache_create_func); } static void init_memcg_params(struct kmem_cache *s, @@ -586,15 +594,9 @@ static int shutdown_memcg_caches(struct kmem_cache *s) return 0; } -static void flush_memcg_workqueue(struct kmem_cache *s) +static void cancel_memcg_cache_creation(struct kmem_cache *s) { - /* - * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB - * deactivates the memcg kmem_caches through workqueue. Make sure all - * previous workitems on workqueue are processed. - */ - if (likely(memcg_kmem_cache_wq)) - flush_workqueue(memcg_kmem_cache_wq); + cancel_work_sync(&s->memcg_params.work); } #else static inline int shutdown_memcg_caches(struct kmem_cache *s) @@ -602,7 +604,7 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s) return 0; } -static inline void flush_memcg_workqueue(struct kmem_cache *s) +static inline void cancel_memcg_cache_creation(struct kmem_cache *s) { } #endif /* CONFIG_MEMCG_KMEM */ @@ -621,7 +623,7 @@ void kmem_cache_destroy(struct kmem_cache *s) if (unlikely(!s)) return; - flush_memcg_workqueue(s); + cancel_memcg_cache_creation(s); get_online_cpus(); get_online_mems(); -- cgit v1.2.3 From 272911a4ad18c48f8bc449a5db945a54987dd687 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:21:17 -0700 Subject: mm: memcg/slab: remove memcg_kmem_get_cache() The memcg_kmem_get_cache() function became really trivial, so let's just inline it into the single call point: memcg_slab_pre_alloc_hook(). It will make the code less bulky and can also help the compiler to generate a better code. 
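For reference, the fast path that remains once the helper is folded into memcg_slab_pre_alloc_hook() can be condensed into a few lines (the helper name pick_memcg_cache below is illustrative only; the real code is the mm/slab.h hunk in this patch). The lockless READ_ONCE() pairs with the smp_wmb() performed before the memcg_cache pointer is published in memcg_create_kmem_cache(), so a non-NULL pointer always refers to a fully initialized cache:

static inline struct kmem_cache *pick_memcg_cache(struct kmem_cache *s)
{
    struct kmem_cache *memcg_cachep;

    /* lockless read of the published pointer */
    memcg_cachep = READ_ONCE(s->memcg_params.memcg_cache);
    if (unlikely(!memcg_cachep)) {
        /*
         * Not created yet: kick asynchronous creation and let the
         * current allocation proceed from the root cache.
         */
        queue_work(system_wq, &s->memcg_params.work);
        return s;
    }
    return memcg_cachep;
}

With the helper gone, this logic sits right next to the obj_cgroup charging code, so the whole accounting path is visible to the compiler as one unit.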
Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Reviewed-by: Shakeel Butt Cc: Christoph Lameter Cc: Johannes Weiner Cc: Michal Hocko Cc: Tejun Heo Link: http://lkml.kernel.org/r/20200623174037.3951353-15-guro@fb.com Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 2 -- mm/memcontrol.c | 25 +------------------------ mm/slab.h | 11 +++++++++-- mm/slab_common.c | 2 +- 4 files changed, 11 insertions(+), 29 deletions(-) (limited to 'mm/slab_common.c') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 2ac84dcfc9e5..5a8b62d075e6 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1403,8 +1403,6 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg, } #endif -struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); - #ifdef CONFIG_MEMCG_KMEM int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, unsigned int nr_pages); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c713867e496d..a8113b77b23a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -393,7 +393,7 @@ void memcg_put_cache_ids(void) /* * A lot of the calls to the cache allocation functions are expected to be - * inlined by the compiler. Since the calls to memcg_kmem_get_cache are + * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are * conditional to this static branch, we'll have to allow modules that does * kmem_cache_alloc and the such to see this symbol as well */ @@ -2900,29 +2900,6 @@ static void memcg_free_cache_id(int id) ida_simple_remove(&memcg_cache_ida, id); } -/** - * memcg_kmem_get_cache: select memcg or root cache for allocation - * @cachep: the original global kmem cache - * - * Return the kmem_cache we're supposed to use for a slab allocation. - * - * If the cache does not exist yet, if we are the first user of it, we - * create it asynchronously in a workqueue and let the current allocation - * go through with the original cache. - */ -struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep) -{ - struct kmem_cache *memcg_cachep; - - memcg_cachep = READ_ONCE(cachep->memcg_params.memcg_cache); - if (unlikely(!memcg_cachep)) { - queue_work(system_wq, &cachep->memcg_params.work); - return cachep; - } - - return memcg_cachep; -} - /** * __memcg_kmem_charge: charge a number of kernel pages to a memcg * @memcg: memory cgroup to charge diff --git a/mm/slab.h b/mm/slab.h index fd9fcdfb3789..342eac852967 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -365,9 +365,16 @@ static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s, if (memcg_kmem_bypass()) return s; - cachep = memcg_kmem_get_cache(s); - if (is_root_cache(cachep)) + cachep = READ_ONCE(s->memcg_params.memcg_cache); + if (unlikely(!cachep)) { + /* + * If memcg cache does not exist yet, we schedule it's + * asynchronous creation and let the current allocation + * go through with the root cache. + */ + queue_work(system_wq, &s->memcg_params.work); return s; + } objcg = get_obj_cgroup_from_current(); if (!objcg) diff --git a/mm/slab_common.c b/mm/slab_common.c index b898698f6c8a..de0a46cf974a 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -570,7 +570,7 @@ void memcg_create_kmem_cache(struct kmem_cache *root_cache) } /* - * Since readers won't lock (see memcg_kmem_get_cache()), we need a + * Since readers won't lock (see memcg_slab_pre_alloc_hook()), we need a * barrier here to ensure nobody will see the kmem_cache partially * initialized. 
*/ -- cgit v1.2.3 From c7094406fcb7cdf4fe1de8893f0613b75349773d Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:21:20 -0700 Subject: mm: memcg/slab: deprecate slab_root_caches Currently there are two lists of kmem_caches: 1) slab_caches, which contains all kmem_caches, 2) slab_root_caches, which contains only root kmem_caches. And there is some preprocessor magic to have a single list if CONFIG_MEMCG_KMEM isn't enabled. It was required earlier because the number of non-root kmem_caches was proportional to the number of memory cgroups and could reach really big values. Now, when it cannot exceed the number of root kmem_caches, there is really no reason to maintain two lists. We never iterate over the slab_root_caches list on any hot paths, so it's perfectly fine to iterate over slab_caches and filter out non-root kmem_caches. It allows to remove a lot of config-dependent code and two pointers from the kmem_cache structure. Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Reviewed-by: Shakeel Butt Cc: Christoph Lameter Cc: Johannes Weiner Cc: Michal Hocko Cc: Tejun Heo Link: http://lkml.kernel.org/r/20200623174037.3951353-16-guro@fb.com Signed-off-by: Linus Torvalds --- mm/slab.c | 1 - mm/slab.h | 17 ----------------- mm/slab_common.c | 37 ++++++++----------------------------- mm/slub.c | 1 - 4 files changed, 8 insertions(+), 48 deletions(-) (limited to 'mm/slab_common.c') diff --git a/mm/slab.c b/mm/slab.c index 0dd6956585dc..f40e5c95e11a 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1249,7 +1249,6 @@ void __init kmem_cache_init(void) nr_node_ids * sizeof(struct kmem_cache_node *), SLAB_HWCACHE_ALIGN, 0, 0); list_add(&kmem_cache->list, &slab_caches); - memcg_link_cache(kmem_cache); slab_state = PARTIAL; /* diff --git a/mm/slab.h b/mm/slab.h index 342eac852967..7500a707121b 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -44,14 +44,12 @@ struct kmem_cache { * * @memcg_cache: pointer to memcg kmem cache, used by all non-root memory * cgroups. - * @root_caches_node: list node for slab_root_caches list. * @work: work struct used to create the non-root cache. */ struct memcg_cache_params { struct kmem_cache *root_cache; struct kmem_cache *memcg_cache; - struct list_head __root_caches_node; struct work_struct work; }; #endif /* CONFIG_SLOB */ @@ -265,11 +263,6 @@ static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t fla } #ifdef CONFIG_MEMCG_KMEM - -/* List of all root caches. */ -extern struct list_head slab_root_caches; -#define root_caches_node memcg_params.__root_caches_node - static inline bool is_root_cache(struct kmem_cache *s) { return !s->memcg_params.root_cache; @@ -447,14 +440,8 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page, } extern void slab_init_memcg_params(struct kmem_cache *); -extern void memcg_link_cache(struct kmem_cache *s); #else /* CONFIG_MEMCG_KMEM */ - -/* If !memcg, all caches are root. 
*/ -#define slab_root_caches slab_caches -#define root_caches_node list - static inline bool is_root_cache(struct kmem_cache *s) { return true; @@ -523,10 +510,6 @@ static inline void slab_init_memcg_params(struct kmem_cache *s) { } -static inline void memcg_link_cache(struct kmem_cache *s) -{ -} - #endif /* CONFIG_MEMCG_KMEM */ static inline struct kmem_cache *virt_to_cache(const void *obj) diff --git a/mm/slab_common.c b/mm/slab_common.c index de0a46cf974a..a9f6ab452ce5 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -131,9 +131,6 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, } #ifdef CONFIG_MEMCG_KMEM - -LIST_HEAD(slab_root_caches); - static void memcg_kmem_cache_create_func(struct work_struct *work) { struct kmem_cache *cachep = container_of(work, struct kmem_cache, @@ -156,27 +153,11 @@ static void init_memcg_params(struct kmem_cache *s, else slab_init_memcg_params(s); } - -void memcg_link_cache(struct kmem_cache *s) -{ - if (is_root_cache(s)) - list_add(&s->root_caches_node, &slab_root_caches); -} - -static void memcg_unlink_cache(struct kmem_cache *s) -{ - if (is_root_cache(s)) - list_del(&s->root_caches_node); -} #else static inline void init_memcg_params(struct kmem_cache *s, struct kmem_cache *root_cache) { } - -static inline void memcg_unlink_cache(struct kmem_cache *s) -{ -} #endif /* CONFIG_MEMCG_KMEM */ /* @@ -253,7 +234,7 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align, if (flags & SLAB_NEVER_MERGE) return NULL; - list_for_each_entry_reverse(s, &slab_root_caches, root_caches_node) { + list_for_each_entry_reverse(s, &slab_caches, list) { if (slab_unmergeable(s)) continue; @@ -312,7 +293,6 @@ static struct kmem_cache *create_cache(const char *name, s->refcount = 1; list_add(&s->list, &slab_caches); - memcg_link_cache(s); out: if (err) return ERR_PTR(err); @@ -507,7 +487,6 @@ static int shutdown_cache(struct kmem_cache *s) if (__kmem_cache_shutdown(s) != 0) return -EBUSY; - memcg_unlink_cache(s); list_del(&s->list); if (s->flags & SLAB_TYPESAFE_BY_RCU) { @@ -751,7 +730,6 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, create_boot_cache(s, name, size, flags, useroffset, usersize); list_add(&s->list, &slab_caches); - memcg_link_cache(s); s->refcount = 1; return s; } @@ -1107,12 +1085,12 @@ static void print_slabinfo_header(struct seq_file *m) void *slab_start(struct seq_file *m, loff_t *pos) { mutex_lock(&slab_mutex); - return seq_list_start(&slab_root_caches, *pos); + return seq_list_start(&slab_caches, *pos); } void *slab_next(struct seq_file *m, void *p, loff_t *pos) { - return seq_list_next(p, &slab_root_caches, pos); + return seq_list_next(p, &slab_caches, pos); } void slab_stop(struct seq_file *m, void *p) @@ -1165,11 +1143,12 @@ static void cache_show(struct kmem_cache *s, struct seq_file *m) static int slab_show(struct seq_file *m, void *p) { - struct kmem_cache *s = list_entry(p, struct kmem_cache, root_caches_node); + struct kmem_cache *s = list_entry(p, struct kmem_cache, list); - if (p == slab_root_caches.next) + if (p == slab_caches.next) print_slabinfo_header(m); - cache_show(s, m); + if (is_root_cache(s)) + cache_show(s, m); return 0; } @@ -1271,7 +1250,7 @@ static int memcg_slabinfo_show(struct seq_file *m, void *unused) mutex_lock(&slab_mutex); seq_puts(m, "# "); seq_puts(m, " \n"); - list_for_each_entry(s, &slab_root_caches, root_caches_node) { + list_for_each_entry(s, &slab_caches, list) { /* * Skip kmem caches that don't have the memcg cache. 
*/ diff --git a/mm/slub.c b/mm/slub.c index 44a48a08a691..9cd724fe37d8 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -4360,7 +4360,6 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) } slab_init_memcg_params(s); list_add(&s->list, &slab_caches); - memcg_link_cache(s); return s; } -- cgit v1.2.3 From 15999eef7f25e2ea6a1c33f026166f472c5714e9 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:21:24 -0700 Subject: mm: memcg/slab: remove redundant check in memcg_accumulate_slabinfo() memcg_accumulate_slabinfo() is never called with a non-root kmem_cache as a first argument, so the is_root_cache(s) check is redundant and can be removed without any functional change. Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Reviewed-by: Shakeel Butt Cc: Christoph Lameter Cc: Johannes Weiner Cc: Michal Hocko Cc: Tejun Heo Link: http://lkml.kernel.org/r/20200623174037.3951353-17-guro@fb.com Signed-off-by: Linus Torvalds --- mm/slab_common.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'mm/slab_common.c') diff --git a/mm/slab_common.c b/mm/slab_common.c index a9f6ab452ce5..ad67a03c592a 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1104,9 +1104,6 @@ memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info) struct kmem_cache *c; struct slabinfo sinfo; - if (!is_root_cache(s)) - return; - c = memcg_cache(s); if (c) { memset(&sinfo, 0, sizeof(sinfo)); -- cgit v1.2.3 From 10befea91b61c4e2c2d1df06a2e978d182fcf792 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:21:27 -0700 Subject: mm: memcg/slab: use a single set of kmem_caches for all allocations Instead of having two sets of kmem_caches: one for system-wide and non-accounted allocations and the second one shared by all accounted allocations, we can use just one. The idea is simple: space for obj_cgroup metadata can be allocated on demand and filled only for accounted allocations. It allows to remove a bunch of code which is required to handle kmem_cache clones for accounted allocations. There is no more need to create them, accumulate statistics, propagate attributes, etc. It's a quite significant simplification. Also, because the total number of slab_caches is reduced almost twice (not all kmem_caches have a memcg clone), some additional memory savings are expected. On my devvm it additionally saves about 3.5% of slab memory. 
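The key mechanism is the on-demand obj_cgroup vector attached to a slab page the first time an accounted object is allocated from it. Here is an annotated paraphrase of the memcg_alloc_page_obj_cgroups() hunk added below (the name attach_obj_cgroups is used only to mark this as a sketch, not the literal code):

static int attach_obj_cgroups(struct page *page, struct kmem_cache *s,
                              gfp_t gfp)
{
    unsigned int objects = objs_per_slab_page(s, page);
    void *vec;

    /* one obj_cgroup pointer per object that fits on this slab page */
    vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
                       page_to_nid(page));
    if (!vec)
        return -ENOMEM;

    /*
     * Publish the vector with cmpxchg() so that two tasks racing to
     * account objects on the same page do not leak memory: the loser
     * simply frees its copy. The low bit (0x1UL) tags the stored
     * pointer, which is what page_has_obj_cgroups() tests for.
     */
    if (cmpxchg(&page->obj_cgroups, NULL,
                (struct obj_cgroup **)((unsigned long)vec | 0x1UL)))
        kfree(vec);
    else
        kmemleak_not_leak(vec);    /* only the tagged pointer references it */

    return 0;
}

Only pages that ever see an accounted allocation pay for the vector; slabs used solely for unaccounted allocations carry no per-object metadata at all.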
[guro@fb.com: fix build on MIPS] Link: http://lkml.kernel.org/r/20200717214810.3733082-1-guro@fb.com Suggested-by: Johannes Weiner Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Reviewed-by: Shakeel Butt Cc: Christoph Lameter Cc: Michal Hocko Cc: Tejun Heo Cc: Naresh Kamboju Link: http://lkml.kernel.org/r/20200623174037.3951353-18-guro@fb.com Signed-off-by: Linus Torvalds --- include/linux/slab.h | 2 - include/linux/slab_def.h | 3 - include/linux/slub_def.h | 10 --- mm/memcontrol.c | 25 +++++- mm/slab.c | 41 +-------- mm/slab.h | 194 +++++++++------------------------------ mm/slab_common.c | 230 ++--------------------------------------------- mm/slub.c | 163 +-------------------------------- 8 files changed, 78 insertions(+), 590 deletions(-) (limited to 'mm/slab_common.c') diff --git a/include/linux/slab.h b/include/linux/slab.h index 8b1f91e320f9..24df2393ec03 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -155,8 +155,6 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name, void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); -void memcg_create_kmem_cache(struct kmem_cache *cachep); - /* * Please use this macro to create slab caches. Simply specify the * name of the structure and maybe some flags that are listed above. diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index ccda7b9669a5..9eb430c163c2 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -72,9 +72,6 @@ struct kmem_cache { int obj_offset; #endif /* CONFIG_DEBUG_SLAB */ -#ifdef CONFIG_MEMCG - struct memcg_cache_params memcg_params; -#endif #ifdef CONFIG_KASAN struct kasan_cache kasan_info; #endif diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index f87302dcfe8c..1be0ed5befa1 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -108,17 +108,7 @@ struct kmem_cache { struct list_head list; /* List of slab caches */ #ifdef CONFIG_SYSFS struct kobject kobj; /* For sysfs */ - struct work_struct kobj_remove_work; #endif -#ifdef CONFIG_MEMCG - struct memcg_cache_params memcg_params; - /* For propagation, maximum size of a stored attr */ - unsigned int max_attr_size; -#ifdef CONFIG_SYSFS - struct kset *memcg_kset; -#endif -#endif - #ifdef CONFIG_SLAB_FREELIST_HARDENED unsigned long random; #endif diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a8113b77b23a..473f9b91d51f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2800,6 +2800,26 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg) } #ifdef CONFIG_MEMCG_KMEM +int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s, + gfp_t gfp) +{ + unsigned int objects = objs_per_slab_page(s, page); + void *vec; + + vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, + page_to_nid(page)); + if (!vec) + return -ENOMEM; + + if (cmpxchg(&page->obj_cgroups, NULL, + (struct obj_cgroup **) ((unsigned long)vec | 0x1UL))) + kfree(vec); + else + kmemleak_not_leak(vec); + + return 0; +} + /* * Returns a pointer to the memory cgroup to which the kernel object is charged. 
* @@ -2826,7 +2846,10 @@ struct mem_cgroup *mem_cgroup_from_obj(void *p) off = obj_to_index(page->slab_cache, page, p); objcg = page_obj_cgroups(page)[off]; - return obj_cgroup_memcg(objcg); + if (objcg) + return obj_cgroup_memcg(objcg); + + return NULL; } /* All other pages use page->mem_cgroup */ diff --git a/mm/slab.c b/mm/slab.c index f40e5c95e11a..684ebe5b0c7a 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1379,11 +1379,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, return NULL; } - if (charge_slab_page(page, flags, cachep->gfporder, cachep)) { - __free_pages(page, cachep->gfporder); - return NULL; - } - + charge_slab_page(page, flags, cachep->gfporder, cachep); __SetPageSlab(page); /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */ if (sk_memalloc_socks() && page_is_pfmemalloc(page)) @@ -3799,8 +3795,8 @@ fail: } /* Always called with the slab_mutex held */ -static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, - int batchcount, int shared, gfp_t gfp) +static int do_tune_cpucache(struct kmem_cache *cachep, int limit, + int batchcount, int shared, gfp_t gfp) { struct array_cache __percpu *cpu_cache, *prev; int cpu; @@ -3845,30 +3841,6 @@ setup_node: return setup_kmem_cache_nodes(cachep, gfp); } -static int do_tune_cpucache(struct kmem_cache *cachep, int limit, - int batchcount, int shared, gfp_t gfp) -{ - int ret; - struct kmem_cache *c; - - ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp); - - if (slab_state < FULL) - return ret; - - if ((ret < 0) || !is_root_cache(cachep)) - return ret; - - lockdep_assert_held(&slab_mutex); - c = memcg_cache(cachep); - if (c) { - /* return value determined by the root cache only */ - __do_tune_cpucache(c, limit, batchcount, shared, gfp); - } - - return ret; -} - /* Called with slab_mutex held always */ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) { @@ -3881,13 +3853,6 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) if (err) goto end; - if (!is_root_cache(cachep)) { - struct kmem_cache *root = memcg_root_cache(cachep); - limit = root->limit; - shared = root->shared; - batchcount = root->batchcount; - } - if (limit && shared && batchcount) goto skip_setup; /* diff --git a/mm/slab.h b/mm/slab.h index 7500a707121b..ec8e22ee6544 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -30,28 +30,6 @@ struct kmem_cache { struct list_head list; /* List of all slab caches on the system */ }; -#else /* !CONFIG_SLOB */ - -/* - * This is the main placeholder for memcg-related information in kmem caches. - * Both the root cache and the child cache will have it. Some fields are used - * in both cases, other are specific to root caches. - * - * @root_cache: Common to root and child caches. NULL for root, pointer to - * the root cache for children. - * - * The following fields are specific to root caches. - * - * @memcg_cache: pointer to memcg kmem cache, used by all non-root memory - * cgroups. - * @work: work struct used to create the non-root cache. 
- */ -struct memcg_cache_params { - struct kmem_cache *root_cache; - - struct kmem_cache *memcg_cache; - struct work_struct work; -}; #endif /* CONFIG_SLOB */ #ifdef CONFIG_SLAB @@ -196,7 +174,6 @@ int __kmem_cache_shutdown(struct kmem_cache *); void __kmem_cache_release(struct kmem_cache *); int __kmem_cache_shrink(struct kmem_cache *); void slab_kmem_cache_release(struct kmem_cache *); -void kmem_cache_shrink_all(struct kmem_cache *s); struct seq_file; struct file; @@ -263,43 +240,6 @@ static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t fla } #ifdef CONFIG_MEMCG_KMEM -static inline bool is_root_cache(struct kmem_cache *s) -{ - return !s->memcg_params.root_cache; -} - -static inline bool slab_equal_or_root(struct kmem_cache *s, - struct kmem_cache *p) -{ - return p == s || p == s->memcg_params.root_cache; -} - -/* - * We use suffixes to the name in memcg because we can't have caches - * created in the system with the same name. But when we print them - * locally, better refer to them with the base name - */ -static inline const char *cache_name(struct kmem_cache *s) -{ - if (!is_root_cache(s)) - s = s->memcg_params.root_cache; - return s->name; -} - -static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) -{ - if (is_root_cache(s)) - return s; - return s->memcg_params.root_cache; -} - -static inline struct kmem_cache *memcg_cache(struct kmem_cache *s) -{ - if (is_root_cache(s)) - return s->memcg_params.memcg_cache; - return NULL; -} - static inline struct obj_cgroup **page_obj_cgroups(struct page *page) { /* @@ -317,21 +257,8 @@ static inline bool page_has_obj_cgroups(struct page *page) return ((unsigned long)page->obj_cgroups & 0x1UL); } -static inline int memcg_alloc_page_obj_cgroups(struct page *page, - struct kmem_cache *s, gfp_t gfp) -{ - unsigned int objects = objs_per_slab_page(s, page); - void *vec; - - vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, - page_to_nid(page)); - if (!vec) - return -ENOMEM; - - kmemleak_not_leak(vec); - page->obj_cgroups = (struct obj_cgroup **) ((unsigned long)vec | 0x1UL); - return 0; -} +int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s, + gfp_t gfp); static inline void memcg_free_page_obj_cgroups(struct page *page) { @@ -348,38 +275,25 @@ static inline size_t obj_full_size(struct kmem_cache *s) return s->size + sizeof(struct obj_cgroup *); } -static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s, - struct obj_cgroup **objcgp, - size_t objects, gfp_t flags) +static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s, + size_t objects, + gfp_t flags) { - struct kmem_cache *cachep; struct obj_cgroup *objcg; if (memcg_kmem_bypass()) - return s; - - cachep = READ_ONCE(s->memcg_params.memcg_cache); - if (unlikely(!cachep)) { - /* - * If memcg cache does not exist yet, we schedule it's - * asynchronous creation and let the current allocation - * go through with the root cache. 
- */ - queue_work(system_wq, &s->memcg_params.work); - return s; - } + return NULL; objcg = get_obj_cgroup_from_current(); if (!objcg) - return s; + return NULL; if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) { obj_cgroup_put(objcg); - cachep = NULL; + return NULL; } - *objcgp = objcg; - return cachep; + return objcg; } static inline void mod_objcg_state(struct obj_cgroup *objcg, @@ -398,15 +312,27 @@ static inline void mod_objcg_state(struct obj_cgroup *objcg, static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg, - size_t size, void **p) + gfp_t flags, size_t size, + void **p) { struct page *page; unsigned long off; size_t i; + if (!objcg) + return; + + flags &= ~__GFP_ACCOUNT; for (i = 0; i < size; i++) { if (likely(p[i])) { page = virt_to_head_page(p[i]); + + if (!page_has_obj_cgroups(page) && + memcg_alloc_page_obj_cgroups(page, s, flags)) { + obj_cgroup_uncharge(objcg, obj_full_size(s)); + continue; + } + off = obj_to_index(s, page, p[i]); obj_cgroup_get(objcg); page_obj_cgroups(page)[off] = objcg; @@ -425,13 +351,19 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page, struct obj_cgroup *objcg; unsigned int off; - if (!memcg_kmem_enabled() || is_root_cache(s)) + if (!memcg_kmem_enabled()) + return; + + if (!page_has_obj_cgroups(page)) return; off = obj_to_index(s, page, p); objcg = page_obj_cgroups(page)[off]; page_obj_cgroups(page)[off] = NULL; + if (!objcg) + return; + obj_cgroup_uncharge(objcg, obj_full_size(s)); mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s), -obj_full_size(s)); @@ -439,35 +371,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page, obj_cgroup_put(objcg); } -extern void slab_init_memcg_params(struct kmem_cache *); - #else /* CONFIG_MEMCG_KMEM */ -static inline bool is_root_cache(struct kmem_cache *s) -{ - return true; -} - -static inline bool slab_equal_or_root(struct kmem_cache *s, - struct kmem_cache *p) -{ - return s == p; -} - -static inline const char *cache_name(struct kmem_cache *s) -{ - return s->name; -} - -static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) -{ - return s; -} - -static inline struct kmem_cache *memcg_cache(struct kmem_cache *s) -{ - return NULL; -} - static inline bool page_has_obj_cgroups(struct page *page) { return false; @@ -488,16 +392,17 @@ static inline void memcg_free_page_obj_cgroups(struct page *page) { } -static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s, - struct obj_cgroup **objcgp, - size_t objects, gfp_t flags) +static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s, + size_t objects, + gfp_t flags) { return NULL; } static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg, - size_t size, void **p) + gfp_t flags, size_t size, + void **p) { } @@ -505,11 +410,6 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page, void *p) { } - -static inline void slab_init_memcg_params(struct kmem_cache *s) -{ -} - #endif /* CONFIG_MEMCG_KMEM */ static inline struct kmem_cache *virt_to_cache(const void *obj) @@ -523,27 +423,18 @@ static inline struct kmem_cache *virt_to_cache(const void *obj) return page->slab_cache; } -static __always_inline int charge_slab_page(struct page *page, - gfp_t gfp, int order, - struct kmem_cache *s) +static __always_inline void charge_slab_page(struct page *page, + gfp_t gfp, int order, + struct kmem_cache *s) { - if (memcg_kmem_enabled() && 
!is_root_cache(s)) { - int ret; - - ret = memcg_alloc_page_obj_cgroups(page, s, gfp); - if (ret) - return ret; - } - mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), PAGE_SIZE << order); - return 0; } static __always_inline void uncharge_slab_page(struct page *page, int order, struct kmem_cache *s) { - if (memcg_kmem_enabled() && !is_root_cache(s)) + if (memcg_kmem_enabled()) memcg_free_page_obj_cgroups(page); mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), @@ -555,12 +446,11 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) struct kmem_cache *cachep; if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) && - !memcg_kmem_enabled() && !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) return s; cachep = virt_to_cache(x); - if (WARN(cachep && !slab_equal_or_root(cachep, s), + if (WARN(cachep && cachep != s, "%s: Wrong slab cache. %s but object is from %s\n", __func__, s->name, cachep->name)) print_tracking(cachep, x); @@ -613,7 +503,7 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, if (memcg_kmem_enabled() && ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT))) - return memcg_slab_pre_alloc_hook(s, objcgp, size, flags); + *objcgp = memcg_slab_pre_alloc_hook(s, size, flags); return s; } @@ -632,8 +522,8 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, s->flags, flags); } - if (memcg_kmem_enabled() && !is_root_cache(s)) - memcg_slab_post_alloc_hook(s, objcg, size, p); + if (memcg_kmem_enabled()) + memcg_slab_post_alloc_hook(s, objcg, flags, size, p); } #ifndef CONFIG_SLOB diff --git a/mm/slab_common.c b/mm/slab_common.c index ad67a03c592a..a513f3237155 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -130,36 +130,6 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, return i; } -#ifdef CONFIG_MEMCG_KMEM -static void memcg_kmem_cache_create_func(struct work_struct *work) -{ - struct kmem_cache *cachep = container_of(work, struct kmem_cache, - memcg_params.work); - memcg_create_kmem_cache(cachep); -} - -void slab_init_memcg_params(struct kmem_cache *s) -{ - s->memcg_params.root_cache = NULL; - s->memcg_params.memcg_cache = NULL; - INIT_WORK(&s->memcg_params.work, memcg_kmem_cache_create_func); -} - -static void init_memcg_params(struct kmem_cache *s, - struct kmem_cache *root_cache) -{ - if (root_cache) - s->memcg_params.root_cache = root_cache; - else - slab_init_memcg_params(s); -} -#else -static inline void init_memcg_params(struct kmem_cache *s, - struct kmem_cache *root_cache) -{ -} -#endif /* CONFIG_MEMCG_KMEM */ - /* * Figure out what the alignment of the objects will be given a set of * flags, a user specified alignment and the size of the objects. 
@@ -197,9 +167,6 @@ int slab_unmergeable(struct kmem_cache *s) if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE)) return 1; - if (!is_root_cache(s)) - return 1; - if (s->ctor) return 1; @@ -286,7 +253,6 @@ static struct kmem_cache *create_cache(const char *name, s->useroffset = useroffset; s->usersize = usersize; - init_memcg_params(s, root_cache); err = __kmem_cache_create(s, flags); if (err) goto out_free_cache; @@ -344,7 +310,6 @@ kmem_cache_create_usercopy(const char *name, get_online_cpus(); get_online_mems(); - memcg_get_cache_ids(); mutex_lock(&slab_mutex); @@ -394,7 +359,6 @@ kmem_cache_create_usercopy(const char *name, out_unlock: mutex_unlock(&slab_mutex); - memcg_put_cache_ids(); put_online_mems(); put_online_cpus(); @@ -507,87 +471,6 @@ static int shutdown_cache(struct kmem_cache *s) return 0; } -#ifdef CONFIG_MEMCG_KMEM -/* - * memcg_create_kmem_cache - Create a cache for non-root memory cgroups. - * @root_cache: The parent of the new cache. - * - * This function attempts to create a kmem cache that will serve allocation - * requests going all non-root memory cgroups to @root_cache. The new cache - * inherits properties from its parent. - */ -void memcg_create_kmem_cache(struct kmem_cache *root_cache) -{ - struct kmem_cache *s = NULL; - char *cache_name; - - get_online_cpus(); - get_online_mems(); - - mutex_lock(&slab_mutex); - - if (root_cache->memcg_params.memcg_cache) - goto out_unlock; - - cache_name = kasprintf(GFP_KERNEL, "%s-memcg", root_cache->name); - if (!cache_name) - goto out_unlock; - - s = create_cache(cache_name, root_cache->object_size, - root_cache->align, - root_cache->flags & CACHE_CREATE_MASK, - root_cache->useroffset, root_cache->usersize, - root_cache->ctor, root_cache); - /* - * If we could not create a memcg cache, do not complain, because - * that's not critical at all as we can always proceed with the root - * cache. - */ - if (IS_ERR(s)) { - kfree(cache_name); - goto out_unlock; - } - - /* - * Since readers won't lock (see memcg_slab_pre_alloc_hook()), we need a - * barrier here to ensure nobody will see the kmem_cache partially - * initialized. 
- */ - smp_wmb(); - root_cache->memcg_params.memcg_cache = s; - -out_unlock: - mutex_unlock(&slab_mutex); - - put_online_mems(); - put_online_cpus(); -} - -static int shutdown_memcg_caches(struct kmem_cache *s) -{ - BUG_ON(!is_root_cache(s)); - - if (s->memcg_params.memcg_cache) - WARN_ON(shutdown_cache(s->memcg_params.memcg_cache)); - - return 0; -} - -static void cancel_memcg_cache_creation(struct kmem_cache *s) -{ - cancel_work_sync(&s->memcg_params.work); -} -#else -static inline int shutdown_memcg_caches(struct kmem_cache *s) -{ - return 0; -} - -static inline void cancel_memcg_cache_creation(struct kmem_cache *s) -{ -} -#endif /* CONFIG_MEMCG_KMEM */ - void slab_kmem_cache_release(struct kmem_cache *s) { __kmem_cache_release(s); @@ -602,8 +485,6 @@ void kmem_cache_destroy(struct kmem_cache *s) if (unlikely(!s)) return; - cancel_memcg_cache_creation(s); - get_online_cpus(); get_online_mems(); @@ -613,10 +494,7 @@ void kmem_cache_destroy(struct kmem_cache *s) if (s->refcount) goto out_unlock; - err = shutdown_memcg_caches(s); - if (!err) - err = shutdown_cache(s); - + err = shutdown_cache(s); if (err) { pr_err("kmem_cache_destroy %s: Slab cache still has objects\n", s->name); @@ -653,33 +531,6 @@ int kmem_cache_shrink(struct kmem_cache *cachep) } EXPORT_SYMBOL(kmem_cache_shrink); -/** - * kmem_cache_shrink_all - shrink root and memcg caches - * @s: The cache pointer - */ -void kmem_cache_shrink_all(struct kmem_cache *s) -{ - struct kmem_cache *c; - - if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || !is_root_cache(s)) { - kmem_cache_shrink(s); - return; - } - - get_online_cpus(); - get_online_mems(); - kasan_cache_shrink(s); - __kmem_cache_shrink(s); - - c = memcg_cache(s); - if (c) { - kasan_cache_shrink(c); - __kmem_cache_shrink(c); - } - put_online_mems(); - put_online_cpus(); -} - bool slab_is_available(void) { return slab_state >= UP; @@ -708,8 +559,6 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, s->useroffset = useroffset; s->usersize = usersize; - slab_init_memcg_params(s); - err = __kmem_cache_create(s, flags); if (err) @@ -1098,25 +947,6 @@ void slab_stop(struct seq_file *m, void *p) mutex_unlock(&slab_mutex); } -static void -memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info) -{ - struct kmem_cache *c; - struct slabinfo sinfo; - - c = memcg_cache(s); - if (c) { - memset(&sinfo, 0, sizeof(sinfo)); - get_slabinfo(c, &sinfo); - - info->active_slabs += sinfo.active_slabs; - info->num_slabs += sinfo.num_slabs; - info->shared_avail += sinfo.shared_avail; - info->active_objs += sinfo.active_objs; - info->num_objs += sinfo.num_objs; - } -} - static void cache_show(struct kmem_cache *s, struct seq_file *m) { struct slabinfo sinfo; @@ -1124,10 +954,8 @@ static void cache_show(struct kmem_cache *s, struct seq_file *m) memset(&sinfo, 0, sizeof(sinfo)); get_slabinfo(s, &sinfo); - memcg_accumulate_slabinfo(s, &sinfo); - seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", - cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size, + s->name, sinfo.active_objs, sinfo.num_objs, s->size, sinfo.objects_per_slab, (1 << sinfo.cache_order)); seq_printf(m, " : tunables %4u %4u %4u", @@ -1144,8 +972,7 @@ static int slab_show(struct seq_file *m, void *p) if (p == slab_caches.next) print_slabinfo_header(m); - if (is_root_cache(s)) - cache_show(s, m); + cache_show(s, m); return 0; } @@ -1170,13 +997,13 @@ void dump_unreclaimable_slab(void) pr_info("Name Used Total\n"); list_for_each_entry_safe(s, s2, &slab_caches, list) { - if (!is_root_cache(s) || (s->flags & 
SLAB_RECLAIM_ACCOUNT)) + if (s->flags & SLAB_RECLAIM_ACCOUNT) continue; get_slabinfo(s, &sinfo); if (sinfo.num_objs > 0) - pr_info("%-17s %10luKB %10luKB\n", cache_name(s), + pr_info("%-17s %10luKB %10luKB\n", s->name, (sinfo.active_objs * s->size) / 1024, (sinfo.num_objs * s->size) / 1024); } @@ -1235,53 +1062,6 @@ static int __init slab_proc_init(void) } module_init(slab_proc_init); -#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MEMCG_KMEM) -/* - * Display information about kmem caches that have memcg cache. - */ -static int memcg_slabinfo_show(struct seq_file *m, void *unused) -{ - struct kmem_cache *s, *c; - struct slabinfo sinfo; - - mutex_lock(&slab_mutex); - seq_puts(m, "# "); - seq_puts(m, " \n"); - list_for_each_entry(s, &slab_caches, list) { - /* - * Skip kmem caches that don't have the memcg cache. - */ - if (!s->memcg_params.memcg_cache) - continue; - - memset(&sinfo, 0, sizeof(sinfo)); - get_slabinfo(s, &sinfo); - seq_printf(m, "%-17s root %6lu %6lu %6lu %6lu\n", - cache_name(s), sinfo.active_objs, sinfo.num_objs, - sinfo.active_slabs, sinfo.num_slabs); - - c = s->memcg_params.memcg_cache; - memset(&sinfo, 0, sizeof(sinfo)); - get_slabinfo(c, &sinfo); - seq_printf(m, "%-17s %4d %6lu %6lu %6lu %6lu\n", - cache_name(c), root_mem_cgroup->css.id, - sinfo.active_objs, sinfo.num_objs, - sinfo.active_slabs, sinfo.num_slabs); - } - mutex_unlock(&slab_mutex); - return 0; -} -DEFINE_SHOW_ATTRIBUTE(memcg_slabinfo); - -static int __init memcg_slabinfo_init(void) -{ - debugfs_create_file("memcg_slabinfo", S_IFREG | S_IRUGO, - NULL, NULL, &memcg_slabinfo_fops); - return 0; -} - -late_initcall(memcg_slabinfo_init); -#endif /* CONFIG_DEBUG_FS && CONFIG_MEMCG_KMEM */ #endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */ static __always_inline void *__do_krealloc(const void *p, size_t new_size, diff --git a/mm/slub.c b/mm/slub.c index 9cd724fe37d8..eba8f57d5734 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -218,14 +218,10 @@ enum track_item { TRACK_ALLOC, TRACK_FREE }; #ifdef CONFIG_SYSFS static int sysfs_slab_add(struct kmem_cache *); static int sysfs_slab_alias(struct kmem_cache *, const char *); -static void memcg_propagate_slab_attrs(struct kmem_cache *s); -static void sysfs_slab_remove(struct kmem_cache *s); #else static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; } -static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { } -static inline void sysfs_slab_remove(struct kmem_cache *s) { } #endif static inline void stat(const struct kmem_cache *s, enum stat_item si) @@ -1624,10 +1620,8 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s, else page = __alloc_pages_node(node, flags, order); - if (page && charge_slab_page(page, flags, order, s)) { - __free_pages(page, order); - page = NULL; - } + if (page) + charge_slab_page(page, flags, order, s); return page; } @@ -3920,7 +3914,6 @@ int __kmem_cache_shutdown(struct kmem_cache *s) if (n->nr_partial || slabs_node(s, node)) return 1; } - sysfs_slab_remove(s); return 0; } @@ -4358,7 +4351,6 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) p->slab_cache = s; #endif } - slab_init_memcg_params(s); list_add(&s->list, &slab_caches); return s; } @@ -4414,7 +4406,7 @@ struct kmem_cache * __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, slab_flags_t flags, void (*ctor)(void *)) { - struct kmem_cache *s, *c; + struct kmem_cache *s; s = find_mergeable(size, align, flags, name, 
ctor); if (s) { @@ -4427,12 +4419,6 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, s->object_size = max(s->object_size, size); s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); - c = memcg_cache(s); - if (c) { - c->object_size = s->object_size; - c->inuse = max(c->inuse, ALIGN(size, sizeof(void *))); - } - if (sysfs_slab_alias(s, name)) { s->refcount--; s = NULL; @@ -4454,7 +4440,6 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) if (slab_state <= UP) return 0; - memcg_propagate_slab_attrs(s); err = sysfs_slab_add(s); if (err) __kmem_cache_release(s); @@ -5312,7 +5297,7 @@ static ssize_t shrink_store(struct kmem_cache *s, const char *buf, size_t length) { if (buf[0] == '1') - kmem_cache_shrink_all(s); + kmem_cache_shrink(s); else return -EINVAL; return length; @@ -5536,99 +5521,9 @@ static ssize_t slab_attr_store(struct kobject *kobj, return -EIO; err = attribute->store(s, buf, len); -#ifdef CONFIG_MEMCG - if (slab_state >= FULL && err >= 0 && is_root_cache(s)) { - struct kmem_cache *c; - - mutex_lock(&slab_mutex); - if (s->max_attr_size < len) - s->max_attr_size = len; - - /* - * This is a best effort propagation, so this function's return - * value will be determined by the parent cache only. This is - * basically because not all attributes will have a well - * defined semantics for rollbacks - most of the actions will - * have permanent effects. - * - * Returning the error value of any of the children that fail - * is not 100 % defined, in the sense that users seeing the - * error code won't be able to know anything about the state of - * the cache. - * - * Only returning the error code for the parent cache at least - * has well defined semantics. The cache being written to - * directly either failed or succeeded, in which case we loop - * through the descendants with best-effort propagation. - */ - c = memcg_cache(s); - if (c) - attribute->store(c, buf, len); - mutex_unlock(&slab_mutex); - } -#endif return err; } -static void memcg_propagate_slab_attrs(struct kmem_cache *s) -{ -#ifdef CONFIG_MEMCG - int i; - char *buffer = NULL; - struct kmem_cache *root_cache; - - if (is_root_cache(s)) - return; - - root_cache = s->memcg_params.root_cache; - - /* - * This mean this cache had no attribute written. Therefore, no point - * in copying default values around - */ - if (!root_cache->max_attr_size) - return; - - for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) { - char mbuf[64]; - char *buf; - struct slab_attribute *attr = to_slab_attr(slab_attrs[i]); - ssize_t len; - - if (!attr || !attr->store || !attr->show) - continue; - - /* - * It is really bad that we have to allocate here, so we will - * do it only as a fallback. If we actually allocate, though, - * we can just use the allocated buffer until the end. - * - * Most of the slub attributes will tend to be very small in - * size, but sysfs allows buffers up to a page, so they can - * theoretically happen. 
- */ - if (buffer) - buf = buffer; - else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf) && - !IS_ENABLED(CONFIG_SLUB_STATS)) - buf = mbuf; - else { - buffer = (char *) get_zeroed_page(GFP_KERNEL); - if (WARN_ON(!buffer)) - continue; - buf = buffer; - } - - len = attr->show(root_cache, buf); - if (len > 0) - attr->store(s, buf, len); - } - - if (buffer) - free_page((unsigned long)buffer); -#endif /* CONFIG_MEMCG */ -} - static void kmem_cache_release(struct kobject *k) { slab_kmem_cache_release(to_slab(k)); @@ -5648,10 +5543,6 @@ static struct kset *slab_kset; static inline struct kset *cache_kset(struct kmem_cache *s) { -#ifdef CONFIG_MEMCG - if (!is_root_cache(s)) - return s->memcg_params.root_cache->memcg_kset; -#endif return slab_kset; } @@ -5694,27 +5585,6 @@ static char *create_unique_id(struct kmem_cache *s) return name; } -static void sysfs_slab_remove_workfn(struct work_struct *work) -{ - struct kmem_cache *s = - container_of(work, struct kmem_cache, kobj_remove_work); - - if (!s->kobj.state_in_sysfs) - /* - * For a memcg cache, this may be called during - * deactivation and again on shutdown. Remove only once. - * A cache is never shut down before deactivation is - * complete, so no need to worry about synchronization. - */ - goto out; - -#ifdef CONFIG_MEMCG - kset_unregister(s->memcg_kset); -#endif -out: - kobject_put(&s->kobj); -} - static int sysfs_slab_add(struct kmem_cache *s) { int err; @@ -5722,8 +5592,6 @@ static int sysfs_slab_add(struct kmem_cache *s) struct kset *kset = cache_kset(s); int unmergeable = slab_unmergeable(s); - INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn); - if (!kset) { kobject_init(&s->kobj, &slab_ktype); return 0; @@ -5760,16 +5628,6 @@ static int sysfs_slab_add(struct kmem_cache *s) if (err) goto out_del_kobj; -#ifdef CONFIG_MEMCG - if (is_root_cache(s) && memcg_sysfs_enabled) { - s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj); - if (!s->memcg_kset) { - err = -ENOMEM; - goto out_del_kobj; - } - } -#endif - if (!unmergeable) { /* Setup first alias */ sysfs_slab_alias(s, s->name); @@ -5783,19 +5641,6 @@ out_del_kobj: goto out; } -static void sysfs_slab_remove(struct kmem_cache *s) -{ - if (slab_state < FULL) - /* - * Sysfs has not been setup yet so no need to remove the - * cache from sysfs. - */ - return; - - kobject_get(&s->kobj); - schedule_work(&s->kobj_remove_work); -} - void sysfs_slab_unlink(struct kmem_cache *s) { if (slab_state >= FULL) -- cgit v1.2.3
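Taken together, the per-object accounting lifecycle after this last patch reduces to three steps, shown here in condensed form (fragments only; variable declarations, the early returns when objcg is NULL, and the vmstat updates are omitted — the real code is memcg_slab_pre_alloc_hook(), memcg_slab_post_alloc_hook() and memcg_slab_free_hook() in mm/slab.h above):

/* 1. before allocating: charge the current obj_cgroup up front */
objcg = get_obj_cgroup_from_current();
if (objcg && obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
    obj_cgroup_put(objcg);
    objcg = NULL;            /* charge failed: allocation stays unaccounted */
}

/* 2. after allocating: record the owner of every object */
for (i = 0; i < size; i++) {
    if (!p[i])
        continue;
    page = virt_to_head_page(p[i]);
    if (!page_has_obj_cgroups(page) &&
        memcg_alloc_page_obj_cgroups(page, s, flags & ~__GFP_ACCOUNT)) {
        obj_cgroup_uncharge(objcg, obj_full_size(s));
        continue;            /* no metadata: hand this object's charge back */
    }
    off = obj_to_index(s, page, p[i]);
    obj_cgroup_get(objcg);
    page_obj_cgroups(page)[off] = objcg;
}

/* 3. on free: uncharge and drop the per-object reference */
off = obj_to_index(s, page, p);
objcg = page_obj_cgroups(page)[off];
page_obj_cgroups(page)[off] = NULL;
if (objcg) {
    obj_cgroup_uncharge(objcg, obj_full_size(s));
    obj_cgroup_put(objcg);
}

All of this runs on the single set of kmem_caches, so the root/memcg cache distinction, and the machinery that maintained it, is no longer needed.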