author | Roman Gushchin <guro@fb.com> | 2020-08-06 23:21:20 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-08-07 11:33:25 -0700
commit | c7094406fcb7cdf4fe1de8893f0613b75349773d
tree | 17ae29ac1e0c8374227bb23e44be0ee5084e6052 /mm/slab_common.c
parent | 272911a4ad18c48f8bc449a5db945a54987dd687
download | linux-c7094406fcb7cdf4fe1de8893f0613b75349773d.tar.bz2
mm: memcg/slab: deprecate slab_root_caches
Currently there are two lists of kmem_caches:
1) slab_caches, which contains all kmem_caches,
2) slab_root_caches, which contains only root kmem_caches.
Some preprocessor magic collapses the two into a single list when
CONFIG_MEMCG_KMEM isn't enabled.
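For reference, the "magic" is a pair of defines that alias the root list
onto the global one when kmem cgroup accounting is compiled out. A
simplified sketch of the pre-patch arrangement in mm/slab.h (abridged
from memory of the pre-patch tree, not the verbatim source):

```c
/* Abridged sketch of the pre-patch aliasing in mm/slab.h. */
#ifdef CONFIG_MEMCG_KMEM
/* A second list, threaded through a dedicated node in each root cache. */
extern struct list_head slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node
#else
/* Without kmem cgroups every cache is a root cache, so the "root"
 * list is simply the global slab_caches list. */
#define slab_root_caches	slab_caches
#define root_caches_node	list
#endif
```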
The separate list was needed earlier because the number of non-root
kmem_caches was proportional to the number of memory cgroups and could
grow very large. Now that it cannot exceed the number of root
kmem_caches, there is no reason to maintain two lists.
We never iterate the slab_root_caches list on any hot path, so it is
perfectly fine to iterate over slab_caches and filter out non-root
kmem_caches, as the sketch below shows.
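The replacement pattern is the one visible in the slab_show() hunk of
the diff below: walk the single slab_caches list and skip the per-memcg
caches. A minimal sketch, using only helpers already present in the
patched code (slab_mutex, is_root_cache()):

```c
/* Minimal sketch of the filtering walk that replaces the second list:
 * iterate every cache and skip the non-root (per-memcg) ones. */
struct kmem_cache *s;

mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_caches, list) {
	if (!is_root_cache(s))
		continue;	/* non-root cache, filtered out */
	/* ... operate on the root cache here ... */
}
mutex_unlock(&slab_mutex);
```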
This allows removing a lot of config-dependent code and two pointers
from the kmem_cache structure.
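For scale, the two pointers are the embedded node for the second list;
a struct list_head is just a next/prev pair, so this saves 16 bytes per
cache on 64-bit:

```c
/* The removed per-cache node is a list_head: exactly two pointers. */
struct list_head {
	struct list_head *next, *prev;
};
```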
Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20200623174037.3951353-16-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r-- | mm/slab_common.c | 37
1 file changed, 8 insertions, 29 deletions
diff --git a/mm/slab_common.c b/mm/slab_common.c
index de0a46cf974a..a9f6ab452ce5 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -131,9 +131,6 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 }
 
 #ifdef CONFIG_MEMCG_KMEM
-
-LIST_HEAD(slab_root_caches);
-
 static void memcg_kmem_cache_create_func(struct work_struct *work)
 {
 	struct kmem_cache *cachep = container_of(work, struct kmem_cache,
@@ -156,27 +153,11 @@ static void init_memcg_params(struct kmem_cache *s,
 	else
 		slab_init_memcg_params(s);
 }
-
-void memcg_link_cache(struct kmem_cache *s)
-{
-	if (is_root_cache(s))
-		list_add(&s->root_caches_node, &slab_root_caches);
-}
-
-static void memcg_unlink_cache(struct kmem_cache *s)
-{
-	if (is_root_cache(s))
-		list_del(&s->root_caches_node);
-}
 #else
 static inline void init_memcg_params(struct kmem_cache *s,
 				     struct kmem_cache *root_cache)
 {
 }
-
-static inline void memcg_unlink_cache(struct kmem_cache *s)
-{
-}
 #endif /* CONFIG_MEMCG_KMEM */
 
 /*
@@ -253,7 +234,7 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
 	if (flags & SLAB_NEVER_MERGE)
 		return NULL;
 
-	list_for_each_entry_reverse(s, &slab_root_caches, root_caches_node) {
+	list_for_each_entry_reverse(s, &slab_caches, list) {
 		if (slab_unmergeable(s))
 			continue;
 
@@ -312,7 +293,6 @@ static struct kmem_cache *create_cache(const char *name,
 
 	s->refcount = 1;
 	list_add(&s->list, &slab_caches);
-	memcg_link_cache(s);
 out:
 	if (err)
 		return ERR_PTR(err);
@@ -507,7 +487,6 @@ static int shutdown_cache(struct kmem_cache *s)
 	if (__kmem_cache_shutdown(s) != 0)
 		return -EBUSY;
 
-	memcg_unlink_cache(s);
 	list_del(&s->list);
 
 	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
@@ -751,7 +730,6 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name,
 
 	create_boot_cache(s, name, size, flags, useroffset, usersize);
 	list_add(&s->list, &slab_caches);
-	memcg_link_cache(s);
 	s->refcount = 1;
 	return s;
 }
@@ -1107,12 +1085,12 @@ static void print_slabinfo_header(struct seq_file *m)
 void *slab_start(struct seq_file *m, loff_t *pos)
 {
 	mutex_lock(&slab_mutex);
-	return seq_list_start(&slab_root_caches, *pos);
+	return seq_list_start(&slab_caches, *pos);
 }
 
 void *slab_next(struct seq_file *m, void *p, loff_t *pos)
 {
-	return seq_list_next(p, &slab_root_caches, pos);
+	return seq_list_next(p, &slab_caches, pos);
 }
 
 void slab_stop(struct seq_file *m, void *p)
@@ -1165,11 +1143,12 @@ static void cache_show(struct kmem_cache *s, struct seq_file *m)
 
 static int slab_show(struct seq_file *m, void *p)
 {
-	struct kmem_cache *s = list_entry(p, struct kmem_cache, root_caches_node);
+	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
 
-	if (p == slab_root_caches.next)
+	if (p == slab_caches.next)
 		print_slabinfo_header(m);
-	cache_show(s, m);
+	if (is_root_cache(s))
+		cache_show(s, m);
 	return 0;
 }
 
@@ -1271,7 +1250,7 @@ static int memcg_slabinfo_show(struct seq_file *m, void *unused)
 	mutex_lock(&slab_mutex);
 	seq_puts(m, "# <name> <css_id[:dead|deact]> <active_objs> <num_objs>");
 	seq_puts(m, " <active_slabs> <num_slabs>\n");
-	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
+	list_for_each_entry(s, &slab_caches, list) {
 		/*
 		 * Skip kmem caches that don't have the memcg cache.
 		 */