From d0d04b78f403b0bcfe03315e16b50d196610720d Mon Sep 17 00:00:00 2001
From: Zhouping Liu
Date: Thu, 16 May 2013 11:36:23 +0800
Subject: mm, slab: moved kmem_cache_alloc_node comment to correct place

After several fixes to kmem_cache_alloc_node(), its kernel-doc comment
ended up split from the function it documents. This patch moves it back
on top of the kmem_cache_alloc_node() definition.

Signed-off-by: Zhouping Liu
Signed-off-by: Pekka Enberg
---
 mm/slab.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index a98f8db93670..273a5ac2ade3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3340,18 +3340,6 @@ done:
 	return obj;
 }
 
-/**
- * kmem_cache_alloc_node - Allocate an object on the specified node
- * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
- * @nodeid: node number of the target node.
- * @caller: return address of caller, used for debug information
- *
- * Identical to kmem_cache_alloc but it will allocate memory on the given
- * node, which can improve the performance for cpu bound structures.
- *
- * Fallback to other node is possible if __GFP_THISNODE is not set.
- */
 static __always_inline void *
 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 		   unsigned long caller)
@@ -3645,6 +3633,17 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
 #ifdef CONFIG_NUMA
+/**
+ * kmem_cache_alloc_node - Allocate an object on the specified node
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ * @nodeid: node number of the target node.
+ *
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
+ */
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
-- 
cgit v1.2.3

From 0fa8103be4c20f893486c533e4c6dfbc5ccddeb4 Mon Sep 17 00:00:00 2001
From: Wanpeng Li
Date: Thu, 4 Jul 2013 08:33:22 +0800
Subject: mm/slab: Fix drain freelist excessively

drain_freelist() is called to drain the slabs_free lists for cache reap,
cache shrink, the memory hotplug callback, etc. Its tofree parameter
should be the number of slabs to free, not the number of slab objects to
free. This patch fixes the callers that passed a count of objects, making
sure they pass a count of slabs.

Acked-by: Christoph Lameter
Signed-off-by: Wanpeng Li
Signed-off-by: Pekka Enberg
---
 mm/slab.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 273a5ac2ade3..c9b4da9a1fe5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1180,6 +1180,12 @@ static int init_cache_node_node(int node)
 	return 0;
 }
 
+static inline int slabs_tofree(struct kmem_cache *cachep,
+				struct kmem_cache_node *n)
+{
+	return (n->free_objects + cachep->num - 1) / cachep->num;
+}
+
 static void __cpuinit cpuup_canceled(long cpu)
 {
 	struct kmem_cache *cachep;
@@ -1241,7 +1247,7 @@ free_array_cache:
 		n = cachep->node[node];
 		if (!n)
 			continue;
-		drain_freelist(cachep, n, n->free_objects);
+		drain_freelist(cachep, n, slabs_tofree(cachep, n));
 	}
 }
 
@@ -1408,7 +1414,7 @@ static int __meminit drain_cache_node_node(int node)
 		if (!n)
 			continue;
 
-		drain_freelist(cachep, n, n->free_objects);
+		drain_freelist(cachep, n, slabs_tofree(cachep, n));
 
 		if (!list_empty(&n->slabs_full) ||
 		    !list_empty(&n->slabs_partial)) {
@@ -2534,7 +2540,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
 		if (!n)
 			continue;
 
-		drain_freelist(cachep, n, n->free_objects);
+		drain_freelist(cachep, n, slabs_tofree(cachep, n));
 
 		ret += !list_empty(&n->slabs_full) ||
 			!list_empty(&n->slabs_partial);
-- 
cgit v1.2.3
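The slabs_tofree() helper above is a plain ceiling division from objects to
slabs. As a quick illustration of why the old callers over-requested work,
here is a stand-alone sketch of the same arithmetic (user-space C; the two
kernel structs are cut down to the fields the helper reads, and the example
values are made up):

#include <stdio.h>

/* Cut-down stand-ins for the two kernel structs; only the fields
 * slabs_tofree() reads are kept. Values below are examples. */
struct kmem_cache { unsigned int num; };		/* objects per slab */
struct kmem_cache_node { unsigned long free_objects; };

/* Same arithmetic as the helper added by this patch: round the free
 * object count up to whole slabs. */
static inline unsigned long slabs_tofree(const struct kmem_cache *cachep,
					 const struct kmem_cache_node *n)
{
	return (n->free_objects + cachep->num - 1) / cachep->num;
}

int main(void)
{
	struct kmem_cache cache = { .num = 32 };		/* example */
	struct kmem_cache_node node = { .free_objects = 250 };	/* example */

	/* 250 free objects packed 32 to a slab occupy ceil(250/32) = 8
	 * free slabs; the old callers passed 250 to drain_freelist(),
	 * i.e. asked it to free 250 slabs where only 8 exist. */
	printf("slabs to free: %lu\n", slabs_tofree(&cache, &node));
	return 0;
}

With these numbers the sketch prints 8, the actual number of free slabs,
which is what drain_freelist()'s tofree parameter is meant to count.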
From e25839f67948ca54fa55a45686d72c266f65f099 Mon Sep 17 00:00:00 2001
From: Wanpeng Li
Date: Thu, 4 Jul 2013 08:33:23 +0800
Subject: mm/slab: Sharing s_next and s_stop between slab and slub

This patch shares s_next() and s_stop() between slab and slub: SLAB's
private copies are identical to the ones in slab_common.c, so drop them
and export the common implementations instead.

Acked-by: Christoph Lameter
Signed-off-by: Wanpeng Li
Signed-off-by: Pekka Enberg
---
 mm/slab.c        | 10 ----------
 mm/slab.h        |  3 +++
 mm/slab_common.c |  4 ++--
 3 files changed, 5 insertions(+), 12 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index c9b4da9a1fe5..4a907a072669 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4438,16 +4438,6 @@ static int leaks_show(struct seq_file *m, void *p)
 	return 0;
 }
 
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
-{
-	return seq_list_next(p, &slab_caches, pos);
-}
-
-static void s_stop(struct seq_file *m, void *p)
-{
-	mutex_unlock(&slab_mutex);
-}
-
 static const struct seq_operations slabstats_op = {
 	.start = leaks_start,
 	.next = s_next,
diff --git a/mm/slab.h b/mm/slab.h
index f96b49e4704e..95c88604aab7 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -271,3 +271,6 @@ struct kmem_cache_node {
 #endif
 
 };
+
+void *s_next(struct seq_file *m, void *p, loff_t *pos);
+void s_stop(struct seq_file *m, void *p);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index d2517b05d5bc..68518eb67229 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -529,12 +529,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 	return seq_list_start(&slab_caches, *pos);
 }
 
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+void *s_next(struct seq_file *m, void *p, loff_t *pos)
 {
 	return seq_list_next(p, &slab_caches, pos);
 }
 
-static void s_stop(struct seq_file *m, void *p)
+void s_stop(struct seq_file *m, void *p)
 {
 	mutex_unlock(&slab_mutex);
 }
-- 
cgit v1.2.3

From 0f8f8094d28eb53368ac09186ea6b3a324cc7d44 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Tue, 2 Jul 2013 12:12:10 -0700
Subject: slab: fix init_lock_keys

Some architectures (e.g. powerpc built with CONFIG_PPC_256K_PAGES=y
CONFIG_FORCE_MAX_ZONEORDER=11) get PAGE_SHIFT + MAX_ORDER > 26.

In 3.10 kernels, CONFIG_LOCKDEP=y with PAGE_SHIFT + MAX_ORDER > 26 makes
init_lock_keys() dereference beyond kmalloc_caches[26]. This leads to an
unbootable system (kernel panic while initializing SLAB) if one of
kmalloc_caches[26...PAGE_SHIFT+MAX_ORDER-1] is not NULL.

Fix this by making sure that init_lock_keys() does not dereference beyond
the end of the kmalloc_caches[] array.

Signed-off-by: Christoph Lameter
Reported-by: Tetsuo Handa
Cc: Pekka Enberg
Cc: [3.10.x]
Signed-off-by: Andrew Morton
Signed-off-by: Pekka Enberg
---
 mm/slab.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/slab.c b/mm/slab.c
index 4a907a072669..9bf225162fcb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -565,7 +565,7 @@ static void init_node_lock_keys(int q)
 	if (slab_state < UP)
 		return;
 
-	for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
+	for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
 		struct kmem_cache_node *n;
 		struct kmem_cache *cache = kmalloc_caches[i];
-- 
cgit v1.2.3
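To make the overrun concrete, here is a stand-alone sketch of the two loop
bounds (user-space C). The PAGE_SHIFT and MAX_ORDER values are taken from
the powerpc configuration cited in the message; the KMALLOC_SHIFT_HIGH
value of 25, giving a 26-entry kmalloc_caches[] array, is an assumption
based on the 3.10 SLAB definitions:

#include <stdio.h>

int main(void)
{
	/* Values from the powerpc configuration cited above (assumed). */
	int page_shift = 18;		/* CONFIG_PPC_256K_PAGES: 256K pages */
	int max_order = 11;		/* CONFIG_FORCE_MAX_ZONEORDER=11 */
	/* Assumption: 3.10 SLAB caps KMALLOC_SHIFT_HIGH at 25, so
	 * kmalloc_caches[] has 26 entries, valid indices 0..25. */
	int kmalloc_shift_high = 25;

	/* Old bound: for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) */
	printf("old loop: i = 1..%d\n", page_shift + max_order - 1);	/* 28 */
	/* New bound: for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) */
	printf("new loop: i = 1..%d\n", kmalloc_shift_high);		/* 25 */
	return 0;
}

Under these assumptions the old loop's last index, 28, is three entries
past the end of the array: exactly the kmalloc_caches[26...PAGE_SHIFT+
MAX_ORDER-1] range the message describes.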
From 276a2439ce7917b8c3043af7ad6bf17bbcc24030 Mon Sep 17 00:00:00 2001
From: Wanpeng Li
Date: Mon, 8 Jul 2013 08:08:28 +0800
Subject: mm/slab: Give s_next and s_stop slab-specific names

Now that s_next and s_stop are shared between slab and slub, give them
slab-specific names instead of exporting the generic "s_next" and
"s_stop".

Signed-off-by: Wanpeng Li
Signed-off-by: Pekka Enberg
---
 mm/slab.c        | 4 ++--
 mm/slab.h        | 4 ++--
 mm/slab_common.c | 8 ++++----
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 9bf225162fcb..57ab42297d96 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4440,8 +4440,8 @@ static int leaks_show(struct seq_file *m, void *p)
 
 static const struct seq_operations slabstats_op = {
 	.start = leaks_start,
-	.next = s_next,
-	.stop = s_stop,
+	.next = slab_next,
+	.stop = slab_stop,
 	.show = leaks_show,
 };
 
diff --git a/mm/slab.h b/mm/slab.h
index 95c88604aab7..620ceeddbe1a 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -272,5 +272,5 @@ struct kmem_cache_node {
 
 };
 
-void *s_next(struct seq_file *m, void *p, loff_t *pos);
-void s_stop(struct seq_file *m, void *p);
+void *slab_next(struct seq_file *m, void *p, loff_t *pos);
+void slab_stop(struct seq_file *m, void *p);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 13ae037c71d4..eacdffaf71c9 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -536,12 +536,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 	return seq_list_start(&slab_caches, *pos);
 }
 
-void *s_next(struct seq_file *m, void *p, loff_t *pos)
+void *slab_next(struct seq_file *m, void *p, loff_t *pos)
 {
 	return seq_list_next(p, &slab_caches, pos);
 }
 
-void s_stop(struct seq_file *m, void *p)
+void slab_stop(struct seq_file *m, void *p)
 {
 	mutex_unlock(&slab_mutex);
 }
@@ -618,8 +618,8 @@ static int s_show(struct seq_file *m, void *p)
  */
 static const struct seq_operations slabinfo_op = {
 	.start = s_start,
-	.next = s_next,
-	.stop = s_stop,
+	.next = slab_next,
+	.stop = slab_stop,
 	.show = s_show,
 };
 
-- 
cgit v1.2.3
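The slab_next()/slab_stop() pair now shared by /proc/slabinfo and SLAB's
leaks file implements the usual seq_file start/next/stop/show contract:
start takes slab_mutex and seeds the walk, next advances one entry in
slab_caches, stop drops the lock. A minimal user-space analogue of that
contract (names, list contents, and the elided locking are hypothetical
stand-ins, not kernel API) shows the pattern the two tables share:

#include <stdio.h>
#include <stddef.h>

/* A singly linked list standing in for slab_caches (contents made up). */
struct cache { const char *name; struct cache *next; };

static struct cache c2 = { "kmalloc-128", NULL };
static struct cache c1 = { "kmalloc-64", &c2 };
static struct cache *caches = &c1;

/* start: in the kernel this takes slab_mutex and seeds the walk. */
static void *demo_start(size_t *pos)
{
	(void)pos;
	return caches;
}

/* next: advance one node, as slab_next() does via seq_list_next(). */
static void *demo_next(void *p, size_t *pos)
{
	++*pos;
	return ((struct cache *)p)->next;
}

/* stop: release the lock, as slab_stop() does with mutex_unlock(). */
static void demo_stop(void *p)
{
	(void)p;
}

int main(void)
{
	size_t pos = 0;
	void *p;

	for (p = demo_start(&pos); p; p = demo_next(p, &pos))
		printf("%zu: %s\n", pos, ((struct cache *)p)->name); /* "show" */
	demo_stop(NULL);
	return 0;
}

The two seq_operations tables differ only in their .start and .show
callbacks (s_start/s_show versus leaks_start/leaks_show), which is why a
single .next/.stop pair can serve both.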