 lib/Kconfig.debug |  2 +-
 mm/slub.c         | 30 +++++++++++++++---------------
 2 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0796c1a090c0..eef557dc46c3 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -211,7 +211,7 @@ config SLUB_DEBUG_ON
 config SLUB_STATS
 	default n
 	bool "Enable SLUB performance statistics"
-	depends on SLUB
+	depends on SLUB && SLUB_DEBUG && SYSFS
 	help
 	  SLUB statistics are useful to debug SLUBs allocation behavior
 	  in order find ways to optimize the allocator. This should never be
diff --git a/mm/slub.c b/mm/slub.c
index 3df6d5bdd711..3fcdcf7d77ba 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2688,21 +2688,6 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
-#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLABINFO)
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-#endif
-
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  * the remaining slabs by the number of items in use. The slabs with the
@@ -3181,6 +3166,21 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	return slab_alloc(s, gfpflags, node, caller);
 }
 
+#if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += page->inuse;
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+#endif
+
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
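In short: count_partial() is moved below __kmalloc_node_track_caller() and its preprocessor guard is widened from (CONFIG_SLUB_DEBUG || CONFIG_SLABINFO) to ((CONFIG_SYSFS && CONFIG_SLUB_DEBUG) || CONFIG_SLABINFO), so the helper is compiled exactly when one of its users exists: the sysfs debug/statistics files or /proc/slabinfo. The Kconfig change matches that, making SLUB_STATS depend on SLUB_DEBUG and SYSFS as well.

For illustration only, here is a minimal user-space C sketch of the pattern count_partial() implements: take the per-node list lock, walk the partial list, and sum the objects in use on each slab. The struct and field names, the pthread mutex, and the singly linked list are stand-ins assumed for this sketch; the kernel instead uses struct page, list_for_each_entry() over n->partial, and spin_lock_irqsave() on n->list_lock.

/*
 * User-space sketch of the count_partial() pattern (not kernel code).
 * fake_page/fake_node and the pthread mutex are illustrative stand-ins
 * for struct page, struct kmem_cache_node, and the node's list_lock.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_page {
	unsigned long inuse;       /* objects allocated from this slab */
	struct fake_page *next;    /* link in the "partial" list */
};

struct fake_node {
	pthread_mutex_t list_lock; /* stands in for n->list_lock */
	struct fake_page *partial; /* head of the partial-slab list */
};

/* Sum page->inuse over the partial list while holding the list lock. */
static unsigned long count_partial(struct fake_node *n)
{
	unsigned long x = 0;
	struct fake_page *page;

	pthread_mutex_lock(&n->list_lock);
	for (page = n->partial; page; page = page->next)
		x += page->inuse;
	pthread_mutex_unlock(&n->list_lock);
	return x;
}

int main(void)
{
	struct fake_page p2 = { .inuse = 3, .next = NULL };
	struct fake_page p1 = { .inuse = 5, .next = &p2 };
	struct fake_node node = {
		.list_lock = PTHREAD_MUTEX_INITIALIZER,
		.partial = &p1,
	};

	/* Prints 8: the two partial slabs hold 5 and 3 objects. */
	printf("objects in partial slabs: %lu\n", count_partial(&node));
	return 0;
}

The lock is held across the whole walk because slabs can move on and off the partial list concurrently; even so, the result is only a snapshot that may be stale as soon as the lock is released, which is acceptable for statistics reporting.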