commit af4f8ba31a4e328677bec493ceeaf112ca193b65
tree d97a6dc3a8ddcd2bcebe124a4716e565a7868cdc
parent efff0471b0dd8b08ca3830b06a9083f6d6cef44e
parent c03f94ccbd67fbcf546e5a9fcfeb99ef0aca4ada
author Linus Torvalds <torvalds@linux-foundation.org> 2012-06-01 16:50:23 -0700
committer Linus Torvalds <torvalds@linux-foundation.org> 2012-06-01 16:50:23 -0700
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull slab updates from Pekka Enberg:
"Mainly a bunch of SLUB fixes from Joonsoo Kim"
* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
slub: use __SetPageSlab function to set PG_slab flag
slub: fix a memory leak in get_partial_node()
slub: remove unused argument of init_kmem_cache_node()
slub: fix a possible memory leak
Documentations: Fix slabinfo.c directory in vm/slub.txt
slub: fix incorrect return type of get_any_partial()
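
Note on the first fix in the list above: it replaces an open-coded flag store in
new_slab() with the generated page-flag helper. As a rough illustration of why
the two forms are equivalent, here is a minimal userspace sketch; the bit
positions and helper bodies below are simplified assumptions, not the real
<linux/page-flags.h> definitions.

#include <stdio.h>

struct page {
	unsigned long flags;
};

enum pageflags {
	PG_locked,
	PG_referenced,
	PG_slab,	/* illustrative bit position only */
};

/* Non-atomic bit set, mirroring the kernel's __set_bit(). */
static inline void __set_bit(unsigned int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

/* Roughly what the kernel's __SETPAGEFLAG(Slab, slab) macro generates. */
static inline void __SetPageSlab(struct page *page)
{
	__set_bit(PG_slab, &page->flags);
}

int main(void)
{
	struct page page = { .flags = 0 };

	/* Equivalent to the open-coded "page->flags |= 1 << PG_slab;". */
	__SetPageSlab(&page);
	printf("PG_slab set: %d\n", !!(page.flags & (1UL << PG_slab)));
	return 0;
}

Routing every flag update through the generated helpers also keeps the
non-atomic/atomic distinction (__SetPageSlab() vs. SetPageSlab()) explicit at
each call site.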
Diffstat (limited to 'mm')
 mm/slub.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 80848cd3901c..8c691fa1cf3c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1369,7 +1369,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
-	page->flags |= 1 << PG_slab;
+	__SetPageSlab(page);
 
 	start = page_address(page);
@@ -1514,15 +1514,19 @@ static inline void *acquire_slab(struct kmem_cache *s,
 		freelist = page->freelist;
 		counters = page->counters;
 		new.counters = counters;
-		if (mode)
+		if (mode) {
 			new.inuse = page->objects;
+			new.freelist = NULL;
+		} else {
+			new.freelist = freelist;
+		}
 
 		VM_BUG_ON(new.frozen);
 		new.frozen = 1;
 
 	} while (!__cmpxchg_double_slab(s, page,
 			freelist, counters,
-			NULL, new.counters,
+			new.freelist, new.counters,
 			"lock and freeze"));
 
 	remove_partial(n, page);
@@ -1564,7 +1568,6 @@ static void *get_partial_node(struct kmem_cache *s,
 			object = t;
 			available = page->objects - page->inuse;
 		} else {
-			page->freelist = t;
 			available = put_cpu_partial(s, page, 0);
 			stat(s, CPU_PARTIAL_NODE);
 		}
@@ -1579,7 +1582,7 @@ static void *get_partial_node(struct kmem_cache *s,
 /*
  * Get a page from somewhere. Search in increasing NUMA distances.
  */
-static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 		struct kmem_cache_cpu *c)
 {
 #ifdef CONFIG_NUMA
@@ -2766,7 +2769,7 @@ static unsigned long calculate_alignment(unsigned long flags,
 }
 
 static void
-init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
+init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
 	spin_lock_init(&n->list_lock);
@@ -2836,7 +2839,7 @@ static void early_kmem_cache_node_alloc(int node)
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
 	init_tracking(kmem_cache_node, n);
 #endif
-	init_kmem_cache_node(n, kmem_cache_node);
+	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
 	add_partial(n, page, DEACTIVATE_TO_HEAD);
@@ -2876,7 +2879,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
 		}
 
 		s->node[node] = n;
-		init_kmem_cache_node(n, s);
+		init_kmem_cache_node(n);
 	}
 	return 1;
 }
@@ -3625,7 +3628,7 @@ static int slab_mem_going_online_callback(void *arg)
 			ret = -ENOMEM;
 			goto out;
 		}
-		init_kmem_cache_node(n, s);
+		init_kmem_cache_node(n);
 		s->node[nid] = n;
 	}
 out:
@@ -3968,9 +3971,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 			}
 			return s;
 		}
-		kfree(n);
 		kfree(s);
 	}
+	kfree(n);
 err:
 	up_write(&slub_lock);
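
The acquire_slab() change above is the core of the get_partial_node() leak
fix: the slab's next freelist value is now chosen inside the cmpxchg retry
loop (NULL when the slab becomes the active cpu slab, the old list otherwise),
so the caller no longer has to write page->freelist back after the slab has
already been frozen. Below is a minimal C11 sketch of that
read/compute/compare-and-swap pattern; the names are hypothetical stand-ins
for the kernel structures, not kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic(void *) slab_freelist;	/* stand-in for page->freelist */

/*
 * mode != 0: freeze the slab for a cpu and take its whole freelist
 * (next value NULL); mode == 0: freeze it but leave the freelist in
 * place for later use (next value == old value).
 */
static void *acquire_freelist(bool mode)
{
	void *old = atomic_load(&slab_freelist);
	void *next;

	do {
		/* Recompute from 'old'; on CAS failure, 'old' is
		 * refreshed with the value another thread stored. */
		next = mode ? NULL : old;
	} while (!atomic_compare_exchange_weak(&slab_freelist, &old, next));

	return old;	/* caller now owns the objects that were listed */
}

int main(void)
{
	int objects[4];

	atomic_store(&slab_freelist, &objects[0]);
	void *taken = acquire_freelist(true);	/* freeze and take the list */
	printf("took %p, freelist now %p\n", taken,
	       atomic_load(&slab_freelist));
	return 0;
}

The point of the fix is that the decision and its publication happen in one
atomic step. The removed "page->freelist = t" store in get_partial_node()
happened only after the cmpxchg had already published a NULL freelist on a
frozen slab, leaving a window in which the objects on that list could be lost.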