author     Linus Torvalds <torvalds@linux-foundation.org>   2013-11-22 08:10:34 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-11-22 08:10:34 -0800
commit     24f971abbda045c24d5d6f2438a7785567d2fde9 (patch)
tree       a4df2b80eafa1199625b53464bcf34e786a03a28   /mm/slub.c
parent     3bab0bf045e1cc4880e2cfc9351e52cf7ec8e35e (diff)
parent     721ae22ae1a51c25871b7a0b543877aa94ff2a20 (diff)
Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull SLAB changes from Pekka Enberg:
 "The patches from Joonsoo Kim switch mm/slab.c to use 'struct page' for
  slab internals similar to mm/slub.c.  This reduces memory usage and
  improves performance:

    https://lkml.org/lkml/2013/10/16/155

  Rest of the changes are bug fixes from various people"

* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux: (21 commits)
  mm, slub: fix the typo in mm/slub.c
  mm, slub: fix the typo in include/linux/slub_def.h
  slub: Handle NULL parameter in kmem_cache_flags
  slab: replace non-existing 'struct freelist *' with 'void *'
  slab: fix to calm down kmemleak warning
  slub: proper kmemleak tracking if CONFIG_SLUB_DEBUG disabled
  slab: rename slab_bufctl to slab_freelist
  slab: remove useless statement for checking pfmemalloc
  slab: use struct page for slab management
  slab: replace free and inuse in struct slab with newly introduced active
  slab: remove SLAB_LIMIT
  slab: remove kmem_bufctl_t
  slab: change the management method of free objects of the slab
  slab: use __GFP_COMP flag for allocating slab pages
  slab: use well-defined macro, virt_to_slab()
  slab: overloading the RCU head over the LRU for RCU free
  slab: remove cachep in struct slab_rcu
  slab: remove nodeid in struct slab
  slab: remove colouroff in struct slab
  slab: change return type of kmem_getpages() to struct page
  ...
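The headline item here, "slab: use struct page for slab management", moves SLAB's per-slab bookkeeping out of the separate struct slab descriptor and into struct page itself, the way SLUB already manages it. A rough sketch of the resulting layout, for orientation only (field names follow the 3.13-era include/linux/mm_types.h from memory; the real definition packs them into unions shared with other users of the page):

/* Simplified sketch, not the real struct page definition. */
struct page {
	/* ... */
	void *freelist;		/* first free object; already used by SLUB */
	void *s_mem;		/* address of the slab's first object (SLAB) */
	unsigned int active;	/* objects in use on this slab (SLAB) */
	struct list_head lru;	/* slab list linkage; the RCU head is
				 * overlaid here for RCU-freed caches */
	/* ... */
};

Since a struct page already exists for every slab page, the separate descriptor no longer has to be allocated and managed, which is where the memory saving cited above comes from.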
Diffstat (limited to 'mm/slub.c')
-rw-r--r--   mm/slub.c   45
1 file changed, 36 insertions(+), 9 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 7e8bd8d828bc..545a170ebf9f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -155,7 +155,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
/*
* Maximum number of desirable partial slabs.
* The existence of more partial slabs makes kmem_cache_shrink
- * sort the partial list by the number of objects in the.
+ * sort the partial list by the number of objects in use.
*/
#define MAX_PARTIAL 10
@@ -933,6 +933,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
*/
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+ kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+ kmemleak_free(x);
+}
+
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{
flags &= gfp_allowed_mask;
@@ -1217,8 +1227,8 @@ static unsigned long kmem_cache_flags(unsigned long object_size,
/*
* Enable debugging if selected on the kernel commandline.
*/
- if (slub_debug && (!slub_debug_slabs ||
- !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
+ if (slub_debug && (!slub_debug_slabs || (name &&
+ !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
flags |= slub_debug;
return flags;
@@ -1260,13 +1270,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
static inline void dec_slabs_node(struct kmem_cache *s, int node,
int objects) {}
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+ kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+ kmemleak_free(x);
+}
+
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{ return 0; }
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
- void *object) {}
+ void *object)
+{
+ kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
+ flags & gfp_allowed_mask);
+}
-static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+ kmemleak_free_recursive(x, s->flags);
+}
#endif /* CONFIG_SLUB_DEBUG */
@@ -2829,8 +2856,8 @@ static struct kmem_cache *kmem_cache_node;
* slab on the node for this slabcache. There are no concurrent accesses
* possible.
*
- * Note that this function only works on the kmalloc_node_cache
- * when allocating for the kmalloc_node_cache. This is used for bootstrapping
+ * Note that this function only works on the kmem_cache_node
+ * when allocating for the kmem_cache_node. This is used for bootstrapping
* memory on a fresh node that has no slab structures yet.
*/
static void early_kmem_cache_node_alloc(int node)
@@ -3272,7 +3299,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
if (page)
ptr = page_address(page);
- kmemleak_alloc(ptr, size, 1, flags);
+ kmalloc_large_node_hook(ptr, size, flags);
return ptr;
}
@@ -3336,7 +3363,7 @@ void kfree(const void *x)
page = virt_to_head_page(x);
if (unlikely(!PageSlab(page))) {
BUG_ON(!PageCompound(page));
- kmemleak_free(x);
+ kfree_hook(x);
__free_memcg_kmem_pages(page, compound_order(page));
return;
}
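For reference, the kmemleak hunks above belong to "slub: proper kmemleak tracking if CONFIG_SLUB_DEBUG disabled": with slab debugging compiled out, slab_post_alloc_hook() and slab_free_hook() used to be empty stubs, so ordinary SLUB objects were invisible to kmemleak, while the raw kmemleak_alloc()/kmemleak_free() calls for large allocations sat directly in kmalloc_large_node() and kfree(). The change fills in the stubs and routes the large-allocation calls through kmalloc_large_node_hook()/kfree_hook(), defined in both configuration branches (hence the same two helpers appearing twice in the diff). A condensed sketch of the !CONFIG_SLUB_DEBUG side after this merge, simplified rather than verbatim:

#include <linux/kmemleak.h>

/* Large kmalloc allocations bypass the slab caches, so they get their
 * own tracking hooks. */
static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
	kmemleak_alloc(ptr, size, 1, flags);	/* min_count = 1 */
}

static inline void kfree_hook(const void *x)
{
	kmemleak_free(x);
}

/* Previously empty when CONFIG_SLUB_DEBUG was off; now kmemleak still
 * sees every slab object allocation and free. */
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					void *object)
{
	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
				 flags & gfp_allowed_mask);
}

static inline void slab_free_hook(struct kmem_cache *s, void *x)
{
	kmemleak_free_recursive(x, s->flags);
}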