summary refs log tree commit diff stats
path: root/mm/slub.c
diff options
context:
space:
mode:
authorDavid Rientjes <rientjes@google.com>2009-07-27 18:30:35 -0700
committerPekka Enberg <penberg@cs.helsinki.fi>2009-07-28 10:53:09 +0300
commit3de472138a138008b534d9587593ba83390e330a (patch)
tree2f31ccb13c90dff68d8fd108575caa534c6c622d /mm/slub.c
parentfa5ec8a1f66f3c2a3af723abcf8085509c9ee682 (diff)
downloadlinux-3de472138a138008b534d9587593ba83390e330a.tar.bz2
slub: use size and objsize orders to disable debug flags
This patch moves the masking of debugging flags which increase a cache's min order due to metadata when `slub_debug=O' is used from kmem_cache_flags() to kmem_cache_open(). Instead of defining the maximum metadata size increase in a preprocessor macro, this approach uses the cache's ->size and ->objsize members to determine if the min order increased due to debugging options. If so, the flags specified in the more appropriately named DEBUG_METADATA_FLAGS are masked off. This approach was suggested by Christoph Lameter <cl@linux-foundation.org>. Cc: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: David Rientjes <rientjes@google.com> Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- mm/slub.c | 40
1 files changed, 19 insertions, 21 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 466089cd5deb..a465c0a09fb5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -142,11 +142,11 @@
SLAB_POISON | SLAB_STORE_USER)
/*
- * Debugging flags that require metadata to be stored in the slab, up to
- * DEBUG_SIZE in size.
+ * Debugging flags that require metadata to be stored in the slab. These get
+ * disabled when slub_debug=O is used and a cache's min order increases with
+ * metadata.
*/
-#define DEBUG_SIZE_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
-#define DEBUG_SIZE (3 * sizeof(void *) + 2 * sizeof(struct track))
+#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
/*
* Set of flags that will prevent slab merging
@@ -1040,27 +1040,13 @@ static unsigned long kmem_cache_flags(unsigned long objsize,
unsigned long flags, const char *name,
void (*ctor)(void *))
{
- int debug_flags = slub_debug;
-
/*
* Enable debugging if selected on the kernel commandline.
*/
- if (debug_flags) {
- if (slub_debug_slabs &&
- strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))
- goto out;
-
- /*
- * Disable debugging that increases slab size if the minimum
- * slab order would have increased as a result.
- */
- if (disable_higher_order_debug &&
- get_order(objsize + DEBUG_SIZE) > get_order(objsize))
- debug_flags &= ~DEBUG_SIZE_FLAGS;
+ if (slub_debug && (!slub_debug_slabs ||
+ !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
+ flags |= slub_debug;
- flags |= debug_flags;
- }
-out:
return flags;
}
#else
@@ -2488,6 +2474,18 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
if (!calculate_sizes(s, -1))
goto error;
+ if (disable_higher_order_debug) {
+ /*
+ * Disable debugging flags that store metadata if the min slab
+ * order increased.
+ */
+ if (get_order(s->size) > get_order(s->objsize)) {
+ s->flags &= ~DEBUG_METADATA_FLAGS;
+ s->offset = 0;
+ if (!calculate_sizes(s, -1))
+ goto error;
+ }
+ }
/*
* The larger the object size is, the more pages we want on the partial