author     Linus Torvalds <torvalds@linux-foundation.org>  2009-03-26 16:18:17 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-03-26 16:18:17 -0700
commit     be0ea69674ed95e1e98cb3687a241badc756d228 (patch)
tree       36d0db8fe389d65bbc8c7aa5be0e61b066f9536a /mm
parent     4496d937a518fde0d0e1980e4ab470cedb4b50cd (diff)
parent     15a5b0a4912d98a9615ef457c7bde8d08195a771 (diff)
download   linux-be0ea69674ed95e1e98cb3687a241badc756d228.tar.bz2
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slob: fix lockup in slob_free()
  slub: use get_track()
  slub: rename calculate_min_partial() to set_min_partial()
  slub: add min_partial sysfs tunable
  slub: move min_partial to struct kmem_cache
  SLUB: Fix default slab order for big object sizes
  SLUB: Do not pass 8k objects through to the page allocator
  SLUB: Introduce and use SLUB_MAX_SIZE and SLUB_PAGE_SHIFT constants
  slob: clean up the code
  SLUB: Use ->objsize from struct kmem_cache_cpu in slab_free()
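The SLUB_MAX_SIZE and SLUB_PAGE_SHIFT constants referenced throughout the mm/slub.c hunks below are introduced in include/linux/slub_def.h, which falls outside this mm-limited diffstat. For context, a sketch of that header change (reproduced here for reference, not part of the diff shown on this page):

/* include/linux/slub_def.h -- introduced by this series, shown for context only */
#define SLUB_MAX_SIZE (2 * PAGE_SIZE)       /* largest kmalloc() size still served by a slab cache */
#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)    /* kmalloc_caches[] slots needed to cover SLUB_MAX_SIZE */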
Diffstat (limited to 'mm')
-rw-r--r--  mm/slob.c  |  43
-rw-r--r--  mm/slub.c  |  82
2 files changed, 79 insertions, 46 deletions
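One visible consequence of "SLUB: Do not pass 8k objects through to the page allocator" is that __kmalloc() now falls back to kmalloc_large() only above SLUB_MAX_SIZE rather than above PAGE_SIZE, so on a 4 KiB-page system allocations between 4 KiB and 8 KiB come from the new kmalloc-8192 cache instead of the page allocator. A minimal illustration (hypothetical demo function, assuming 4 KiB pages):

#include <linux/slab.h>

static int slub_cutoff_demo(void)
{
	/* 6000 bytes: > PAGE_SIZE but <= SLUB_MAX_SIZE. Before this series the
	 * allocation was routed to kmalloc_large() (an order-1 page allocation);
	 * with the extended cache-creation loop in kmem_cache_init() it is
	 * served from the kmalloc-8192 slab cache instead. */
	void *p = kmalloc(6000, GFP_KERNEL);

	kfree(p);	/* kfree(NULL) is a no-op, so no NULL check is needed here */
	return 0;
}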
diff --git a/mm/slob.c b/mm/slob.c
index 52bc8a2bd9ef..0bfa680a8981 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -126,9 +126,9 @@ static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);
/*
- * slob_page: True for all slob pages (false for bigblock pages)
+ * is_slob_page: True for all slob pages (false for bigblock pages)
*/
-static inline int slob_page(struct slob_page *sp)
+static inline int is_slob_page(struct slob_page *sp)
{
return PageSlobPage((struct page *)sp);
}
@@ -143,6 +143,11 @@ static inline void clear_slob_page(struct slob_page *sp)
__ClearPageSlobPage((struct page *)sp);
}
+static inline struct slob_page *slob_page(const void *addr)
+{
+ return (struct slob_page *)virt_to_page(addr);
+}
+
/*
* slob_page_free: true for pages on free_slob_pages list.
*/
@@ -230,7 +235,7 @@ static int slob_last(slob_t *s)
return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
-static void *slob_new_page(gfp_t gfp, int order, int node)
+static void *slob_new_pages(gfp_t gfp, int order, int node)
{
void *page;
@@ -247,12 +252,17 @@ static void *slob_new_page(gfp_t gfp, int order, int node)
return page_address(page);
}
+static void slob_free_pages(void *b, int order)
+{
+ free_pages((unsigned long)b, order);
+}
+
/*
* Allocate a slob block within a given slob_page sp.
*/
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
- slob_t *prev, *cur, *aligned = 0;
+ slob_t *prev, *cur, *aligned = NULL;
int delta = 0, units = SLOB_UNITS(size);
for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
@@ -349,10 +359,10 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
/* Not enough space: must allocate a new page */
if (!b) {
- b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
+ b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
if (!b)
- return 0;
- sp = (struct slob_page *)virt_to_page(b);
+ return NULL;
+ sp = slob_page(b);
set_slob_page(sp);
spin_lock_irqsave(&slob_lock, flags);
@@ -384,7 +394,7 @@ static void slob_free(void *block, int size)
return;
BUG_ON(!size);
- sp = (struct slob_page *)virt_to_page(block);
+ sp = slob_page(block);
units = SLOB_UNITS(size);
spin_lock_irqsave(&slob_lock, flags);
@@ -393,10 +403,11 @@ static void slob_free(void *block, int size)
/* Go directly to page allocator. Do not pass slob allocator */
if (slob_page_free(sp))
clear_slob_page_free(sp);
+ spin_unlock_irqrestore(&slob_lock, flags);
clear_slob_page(sp);
free_slob_page(sp);
free_page((unsigned long)b);
- goto out;
+ return;
}
if (!slob_page_free(sp)) {
@@ -476,7 +487,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
} else {
void *ret;
- ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+ ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
if (ret) {
struct page *page;
page = virt_to_page(ret);
@@ -494,8 +505,8 @@ void kfree(const void *block)
if (unlikely(ZERO_OR_NULL_PTR(block)))
return;
- sp = (struct slob_page *)virt_to_page(block);
- if (slob_page(sp)) {
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
slob_free(m, *m + align);
@@ -513,8 +524,8 @@ size_t ksize(const void *block)
if (unlikely(block == ZERO_SIZE_PTR))
return 0;
- sp = (struct slob_page *)virt_to_page(block);
- if (slob_page(sp)) {
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
return SLOB_UNITS(*m) * SLOB_UNIT;
@@ -573,7 +584,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
if (c->size < PAGE_SIZE)
b = slob_alloc(c->size, flags, c->align, node);
else
- b = slob_new_page(flags, get_order(c->size), node);
+ b = slob_new_pages(flags, get_order(c->size), node);
if (c->ctor)
c->ctor(b);
@@ -587,7 +598,7 @@ static void __kmem_cache_free(void *b, int size)
if (size < PAGE_SIZE)
slob_free(b, size);
else
- free_pages((unsigned long)b, get_order(size));
+ slob_free_pages(b, get_order(size));
}
static void kmem_rcu_free(struct rcu_head *head)
diff --git a/mm/slub.c b/mm/slub.c
index 0280eee6cf37..c65a4edafc33 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -374,14 +374,8 @@ static struct track *get_track(struct kmem_cache *s, void *object,
static void set_track(struct kmem_cache *s, void *object,
enum track_item alloc, unsigned long addr)
{
- struct track *p;
-
- if (s->offset)
- p = object + s->offset + sizeof(void *);
- else
- p = object + s->inuse;
+ struct track *p = get_track(s, object, alloc);
- p += alloc;
if (addr) {
p->addr = addr;
p->cpu = smp_processor_id();
@@ -1335,7 +1329,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
n = get_node(s, zone_to_nid(zone));
if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
- n->nr_partial > n->min_partial) {
+ n->nr_partial > s->min_partial) {
page = get_partial_node(n);
if (page)
return page;
@@ -1387,7 +1381,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
slab_unlock(page);
} else {
stat(c, DEACTIVATE_EMPTY);
- if (n->nr_partial < n->min_partial) {
+ if (n->nr_partial < s->min_partial) {
/*
* Adding an empty slab to the partial slabs in order
* to avoid page allocator overhead. This slab needs
@@ -1724,7 +1718,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
c = get_cpu_slab(s, smp_processor_id());
debug_check_no_locks_freed(object, c->objsize);
if (!(s->flags & SLAB_DEBUG_OBJECTS))
- debug_check_no_obj_freed(object, s->objsize);
+ debug_check_no_obj_freed(object, c->objsize);
if (likely(page == c->page && c->node >= 0)) {
object[c->offset] = c->freelist;
c->freelist = object;
@@ -1844,6 +1838,7 @@ static inline int calculate_order(int size)
int order;
int min_objects;
int fraction;
+ int max_objects;
/*
* Attempt to find best configuration for a slab. This
@@ -1856,6 +1851,9 @@ static inline int calculate_order(int size)
min_objects = slub_min_objects;
if (!min_objects)
min_objects = 4 * (fls(nr_cpu_ids) + 1);
+ max_objects = (PAGE_SIZE << slub_max_order)/size;
+ min_objects = min(min_objects, max_objects);
+
while (min_objects > 1) {
fraction = 16;
while (fraction >= 4) {
@@ -1865,7 +1863,7 @@ static inline int calculate_order(int size)
return order;
fraction /= 2;
}
- min_objects /= 2;
+ min_objects --;
}
/*
@@ -1928,17 +1926,6 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
{
n->nr_partial = 0;
-
- /*
- * The larger the object size is, the more pages we want on the partial
- * list to avoid pounding the page allocator excessively.
- */
- n->min_partial = ilog2(s->size);
- if (n->min_partial < MIN_PARTIAL)
- n->min_partial = MIN_PARTIAL;
- else if (n->min_partial > MAX_PARTIAL)
- n->min_partial = MAX_PARTIAL;
-
spin_lock_init(&n->list_lock);
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
@@ -2181,6 +2168,15 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
}
#endif
+static void set_min_partial(struct kmem_cache *s, unsigned long min)
+{
+ if (min < MIN_PARTIAL)
+ min = MIN_PARTIAL;
+ else if (min > MAX_PARTIAL)
+ min = MAX_PARTIAL;
+ s->min_partial = min;
+}
+
/*
* calculate_sizes() determines the order and the distribution of data within
* a slab object.
@@ -2319,6 +2315,11 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
if (!calculate_sizes(s, -1))
goto error;
+ /*
+ * The larger the object size is, the more pages we want on the partial
+ * list to avoid pounding the page allocator excessively.
+ */
+ set_min_partial(s, ilog2(s->size));
s->refcount = 1;
#ifdef CONFIG_NUMA
s->remote_node_defrag_ratio = 1000;
@@ -2475,7 +2476,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
* Kmalloc subsystem
*******************************************************************/
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
EXPORT_SYMBOL(kmalloc_caches);
static int __init setup_slub_min_order(char *str)
@@ -2537,7 +2538,7 @@ panic:
}
#ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
static void sysfs_add_func(struct work_struct *w)
{
@@ -2658,7 +2659,7 @@ void *__kmalloc(size_t size, gfp_t flags)
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large(size, flags);
s = get_slab(size, flags);
@@ -2686,7 +2687,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large_node(size, flags, node);
s = get_slab(size, flags);
@@ -2986,7 +2987,7 @@ void __init kmem_cache_init(void)
caches++;
}
- for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+ for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
create_kmalloc_cache(&kmalloc_caches[i],
"kmalloc", 1 << i, GFP_KERNEL);
caches++;
@@ -3023,7 +3024,7 @@ void __init kmem_cache_init(void)
slab_state = UP;
/* Provide the correct kmalloc names now that the caches are up */
- for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+ for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
kmalloc_caches[i]. name =
kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
@@ -3223,7 +3224,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large(size, gfpflags);
s = get_slab(size, gfpflags);
@@ -3239,7 +3240,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large_node(size, gfpflags, node);
s = get_slab(size, gfpflags);
@@ -3836,6 +3837,26 @@ static ssize_t order_show(struct kmem_cache *s, char *buf)
}
SLAB_ATTR(order);
+static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%lu\n", s->min_partial);
+}
+
+static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
+ size_t length)
+{
+ unsigned long min;
+ int err;
+
+ err = strict_strtoul(buf, 10, &min);
+ if (err)
+ return err;
+
+ set_min_partial(s, min);
+ return length;
+}
+SLAB_ATTR(min_partial);
+
static ssize_t ctor_show(struct kmem_cache *s, char *buf)
{
if (s->ctor) {
@@ -4151,6 +4172,7 @@ static struct attribute *slab_attrs[] = {
&object_size_attr.attr,
&objs_per_slab_attr.attr,
&order_attr.attr,
+ &min_partial_attr.attr,
&objects_attr.attr,
&objects_partial_attr.attr,
&total_objects_attr.attr,