-rw-r--r-- | fs/proc/proc_misc.c  |  37
-rw-r--r-- | include/linux/slab.h |   6
-rw-r--r-- | lib/Kconfig.debug    |   4
-rw-r--r-- | mm/slab.c            | 180
-rw-r--r-- | mm/util.c            |   4
-rw-r--r-- | net/core/skbuff.c    |   2
6 files changed, 222 insertions, 11 deletions
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 826c131994c3..1e9ea37d457e 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -485,6 +485,40 @@ static struct file_operations proc_slabinfo_operations = {
 	.llseek		= seq_lseek,
 	.release	= seq_release,
 };
+
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+extern struct seq_operations slabstats_op;
+static int slabstats_open(struct inode *inode, struct file *file)
+{
+	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	int ret = -ENOMEM;
+	if (n) {
+		ret = seq_open(file, &slabstats_op);
+		if (!ret) {
+			struct seq_file *m = file->private_data;
+			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
+			m->private = n;
+			n = NULL;
+		}
+		kfree(n);
+	}
+	return ret;
+}
+
+static int slabstats_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *m = file->private_data;
+	kfree(m->private);
+	return seq_release(inode, file);
+}
+
+static struct file_operations proc_slabstats_operations = {
+	.open		= slabstats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= slabstats_release,
+};
+#endif
 #endif
 
 static int show_stat(struct seq_file *p, void *v)
@@ -744,6 +778,9 @@ void __init proc_misc_init(void)
 	create_seq_entry("interrupts", 0, &proc_interrupts_operations);
 #ifdef CONFIG_SLAB
 	create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+	create_seq_entry("slab_allocators", 0, &proc_slabstats_operations);
+#endif
 #endif
 	create_seq_entry("buddyinfo",S_IRUGO, &fragmentation_file_operations);
 	create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index e2ee5b268797..f88e08a5802c 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -77,11 +77,12 @@ struct cache_sizes {
 };
 extern struct cache_sizes malloc_sizes[];
 
-#ifndef CONFIG_DEBUG_SLAB
 extern void *__kmalloc(size_t, gfp_t);
+#ifndef CONFIG_DEBUG_SLAB
+#define ____kmalloc(size, flags) __kmalloc(size, flags)
 #else
 extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
-#define __kmalloc(size, flags) \
+#define ____kmalloc(size, flags) \
 	__kmalloc_track_caller(size, flags, __builtin_return_address(0))
 #endif
 
@@ -173,6 +174,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 #define kmem_ptr_validate(a, b) (0)
 #define kmem_cache_alloc_node(c, f, n) kmem_cache_alloc(c, f)
 #define kmalloc_node(s, f, n) kmalloc(s, f)
+#define ____kmalloc kmalloc
 
 #endif /* CONFIG_SLOB */
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index f2618e1c2b93..1fe3f897145f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -85,6 +85,10 @@ config DEBUG_SLAB
 	  allocation as well as poisoning memory on free to catch use of freed
 	  memory. This can make kmalloc/kfree-intensive workloads much slower.
 
+config DEBUG_SLAB_LEAK
+	bool "Memory leak debugging"
+	depends on DEBUG_SLAB
+
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
 	depends on DEBUG_KERNEL && PREEMPT
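
The include/linux/slab.h hunk above is what makes caller tracking composable: __kmalloc is now declared as a real function in both configurations, and the tracking macro is renamed to ____kmalloc so that thin wrappers (kzalloc and kstrdup in mm/util.c, __alloc_skb in net/core/skbuff.c, converted at the end of this patch) expand __builtin_return_address(0) in their own bodies and thus charge the allocation to *their* caller rather than to themselves. A minimal userspace sketch of the idiom, with illustrative names only (my_alloc_track_caller, my_zalloc are not kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Takes the interesting program counter explicitly, like
 * __kmalloc_track_caller(). */
static void *my_alloc_track_caller(size_t size, void *caller)
{
	printf("%zu bytes allocated from %p\n", size, caller);
	return malloc(size);
}

/* Like ____kmalloc under CONFIG_DEBUG_SLAB: expands inside whatever
 * function uses it, so the recorded address is that function's own
 * return address. */
#define ____my_alloc(size) \
	my_alloc_track_caller(size, __builtin_return_address(0))

/* A kzalloc-style wrapper: because it uses the macro, the allocation
 * is attributed to my_zalloc()'s caller, not to my_zalloc() itself. */
__attribute__((noinline)) static void *my_zalloc(size_t size)
{
	return ____my_alloc(size);
}

int main(void)
{
	free(my_zalloc(32));	/* prints an address inside main() */
	return 0;
}

Had my_zalloc() called a plain tracking allocator instead, every allocation would have been attributed to my_zalloc itself, which is exactly why the patch cannot leave kzalloc() and friends calling kmalloc() directly.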
diff --git a/mm/slab.c b/mm/slab.c
index 26138c9f8f00..a5047161084e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -204,7 +204,8 @@ typedef unsigned int kmem_bufctl_t;
 
 #define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
 #define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
-#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-2)
+#define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
+#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
 
 /* Max number of objs-per-slab for caches which use off-slab slabs.
  * Needed to avoid a possible looping condition in cache_grow().
@@ -2399,7 +2400,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
 	/* Verify that the slab belongs to the intended node */
 	WARN_ON(slabp->nodeid != nodeid);
 
-	if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
+	if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
 		printk(KERN_ERR "slab: double free detected in cache "
 				"'%s', objp %p\n", cachep->name, objp);
 		BUG();
@@ -2605,6 +2606,9 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 		 */
 		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
 	}
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
+#endif
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
 		if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
@@ -2788,6 +2792,16 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
 	}
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+	{
+		struct slab *slabp;
+		unsigned objnr;
+
+		slabp = page_get_slab(virt_to_page(objp));
+		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
+		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
+	}
+#endif
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON) {
 		unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
@@ -3220,22 +3234,23 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	return __cache_alloc(cachep, flags, caller);
 }
 
-#ifndef CONFIG_DEBUG_SLAB
 void *__kmalloc(size_t size, gfp_t flags)
 {
+#ifndef CONFIG_DEBUG_SLAB
 	return __do_kmalloc(size, flags, NULL);
+#else
+	return __do_kmalloc(size, flags, __builtin_return_address(0));
+#endif
 }
 EXPORT_SYMBOL(__kmalloc);
 
-#else
-
+#ifdef CONFIG_DEBUG_SLAB
 void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
 {
 	return __do_kmalloc(size, flags, caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
-
 #endif
 
 #ifdef CONFIG_SMP
@@ -3899,6 +3914,159 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 		res = count;
 	return res;
 }
+
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+
+static void *leaks_start(struct seq_file *m, loff_t *pos)
+{
+	loff_t n = *pos;
+	struct list_head *p;
+
+	mutex_lock(&cache_chain_mutex);
+	p = cache_chain.next;
+	while (n--) {
+		p = p->next;
+		if (p == &cache_chain)
+			return NULL;
+	}
+	return list_entry(p, struct kmem_cache, next);
+}
+
+static inline int add_caller(unsigned long *n, unsigned long v)
+{
+	unsigned long *p;
+	int l;
+	if (!v)
+		return 1;
+	l = n[1];
+	p = n + 2;
+	while (l) {
+		int i = l/2;
+		unsigned long *q = p + 2 * i;
+		if (*q == v) {
+			q[1]++;
+			return 1;
+		}
+		if (*q > v) {
+			l = i;
+		} else {
+			p = q + 2;
+			l -= i + 1;
+		}
+	}
+	if (++n[1] == n[0])
+		return 0;
+	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
+	p[0] = v;
+	p[1] = 1;
+	return 1;
+}
+
+static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
+{
+	void *p;
+	int i;
+	if (n[0] == n[1])
+		return;
+	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
+		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
+			continue;
+		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
+			return;
+	}
+}
+
+static void show_symbol(struct seq_file *m, unsigned long address)
+{
+#ifdef CONFIG_KALLSYMS
+	char *modname;
+	const char *name;
+	unsigned long offset, size;
+	char namebuf[KSYM_NAME_LEN+1];
+
+	name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);
+
+	if (name) {
+		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
+		if (modname)
+			seq_printf(m, " [%s]", modname);
+		return;
+	}
+#endif
+	seq_printf(m, "%p", (void *)address);
+}
+
+static int leaks_show(struct seq_file *m, void *p)
+{
+	struct kmem_cache *cachep = p;
+	struct list_head *q;
+	struct slab *slabp;
+	struct kmem_list3 *l3;
+	const char *name;
+	unsigned long *n = m->private;
+	int node;
+	int i;
+
+	if (!(cachep->flags & SLAB_STORE_USER))
+		return 0;
+	if (!(cachep->flags & SLAB_RED_ZONE))
+		return 0;
+
+	/* OK, we can do it */
+
+	n[1] = 0;
+
+	for_each_online_node(node) {
+		l3 = cachep->nodelists[node];
+		if (!l3)
+			continue;
+
+		check_irq_on();
+		spin_lock_irq(&l3->list_lock);
+
+		list_for_each(q, &l3->slabs_full) {
+			slabp = list_entry(q, struct slab, list);
+			handle_slab(n, cachep, slabp);
+		}
+		list_for_each(q, &l3->slabs_partial) {
+			slabp = list_entry(q, struct slab, list);
+			handle_slab(n, cachep, slabp);
+		}
+		spin_unlock_irq(&l3->list_lock);
+	}
+	name = cachep->name;
+	if (n[0] == n[1]) {
+		/* Increase the buffer size */
+		mutex_unlock(&cache_chain_mutex);
+		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
+		if (!m->private) {
+			/* Too bad, we are really out */
+			m->private = n;
+			mutex_lock(&cache_chain_mutex);
+			return -ENOMEM;
+		}
+		*(unsigned long *)m->private = n[0] * 2;
+		kfree(n);
+		mutex_lock(&cache_chain_mutex);
+		/* Now make sure this entry will be retried */
+		m->count = m->size;
+		return 0;
+	}
+	for (i = 0; i < n[1]; i++) {
+		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
+		show_symbol(m, n[2*i+2]);
+		seq_putc(m, '\n');
+	}
+	return 0;
+}
+
+struct seq_operations slabstats_op = {
+	.start = leaks_start,
+	.next = s_next,
+	.stop = s_stop,
+	.show = leaks_show,
+};
+#endif
 #endif
 
 /**
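
The new BUFCTL_ACTIVE sentinel in the first mm/slab.c hunk is what lets the reader tell live objects apart: allocation marks a slot BUFCTL_ACTIVE in cache_alloc_debugcheck_after(), free resets it to BUFCTL_FREE in cache_free_debugcheck(), and handle_slab() reports only slots still marked active. The reworked double-free test in slab_put_obj() relies on unsigned wraparound so a single comparison flags a slot that still holds a free-chain value (a valid index, or BUFCTL_END) while letting BUFCTL_FREE and BUFCTL_ACTIVE pass. A standalone userspace sketch checking that arithmetic (assertions only, not kernel code):

#include <assert.h>
#include <stdio.h>

typedef unsigned int kmem_bufctl_t;

#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)	/* terminates the free chain */
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)	/* object known to be free   */
#define BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)	/* new: object handed out    */
#define SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)	/* largest valid free index  */

/* The patched test: true when the slot is still on the free chain,
 * i.e. holds a valid index or BUFCTL_END -- which means double free. */
static int looks_double_freed(kmem_bufctl_t v)
{
	return v + 1 <= SLAB_LIMIT + 1;	/* ~0U + 1 wraps to 0, folding END in */
}

int main(void)
{
	assert(looks_double_freed(0));			/* free-chain index  */
	assert(looks_double_freed(SLAB_LIMIT));		/* largest index     */
	assert(looks_double_freed(BUFCTL_END));		/* wraps to 0        */
	assert(!looks_double_freed(BUFCTL_FREE));	/* ordinary free, OK */
	assert(!looks_double_freed(BUFCTL_ACTIVE));	/* live object, OK   */
	printf("sentinel arithmetic behaves as expected\n");
	return 0;
}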
diff --git a/mm/util.c b/mm/util.c
index 49e29f751b50..b68d3d7d0359 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -11,7 +11,7 @@
  */
 void *kzalloc(size_t size, gfp_t flags)
 {
-	void *ret = kmalloc(size, flags);
+	void *ret = ____kmalloc(size, flags);
 	if (ret)
 		memset(ret, 0, size);
 	return ret;
@@ -33,7 +33,7 @@ char *kstrdup(const char *s, gfp_t gfp)
 		return NULL;
 
 	len = strlen(s) + 1;
-	buf = kmalloc(len, gfp);
+	buf = ____kmalloc(len, gfp);
 	if (buf)
 		memcpy(buf, s, len);
 	return buf;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c9f878454531..09464fa8d72f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -149,7 +149,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 
 	/* Get the DATA. Size must match skb_add_mtu(). */
 	size = SKB_DATA_ALIGN(size);
-	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+	data = ____kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
 	if (!data)
 		goto nodata;
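
The heart of the report is add_caller() in the mm/slab.c hunk above: m->private points at a buffer whose first two words hold the pair capacity and the number of pairs in use, followed by (caller address, hit count) pairs kept sorted by address so each lookup is a binary search; when the table fills, leaks_show() doubles it and forces the seq_file entry to be retried. A userspace re-creation of the accumulator, for illustration only (the table sizes and addresses in main() are made up):

#include <stdio.h>
#include <string.h>

/* Same layout as the kernel buffer: n[0] = capacity in pairs,
 * n[1] = pairs in use, pairs start at n + 2, sorted by address. */
static int add_caller(unsigned long *n, unsigned long v)
{
	unsigned long *p;
	int l;

	if (!v)
		return 1;
	l = n[1];
	p = n + 2;
	while (l) {
		int i = l / 2;
		unsigned long *q = p + 2 * i;

		if (*q == v) {		/* already seen: bump the count */
			q[1]++;
			return 1;
		}
		if (*q > v)
			l = i;
		else {
			p = q + 2;
			l -= i + 1;
		}
	}
	if (++n[1] == n[0])	/* table full: caller must grow it */
		return 0;
	/* shift the tail up by one pair and insert (v, 1) at p */
	memmove(p + 2, p,
		n[1] * 2 * sizeof(unsigned long) - ((char *)p - (char *)n));
	p[0] = v;
	p[1] = 1;
	return 1;
}

int main(void)
{
	unsigned long table[2 + 2 * 8] = { 8, 0 };	/* room for 8 pairs */
	unsigned long callers[] = { 0x30, 0x10, 0x30, 0x20, 0x10, 0x30 };
	unsigned long i;

	for (i = 0; i < sizeof(callers) / sizeof(callers[0]); i++)
		add_caller(table, callers[i]);
	for (i = 0; i < table[1]; i++)	/* prints in address order */
		printf("caller %#lx seen %lu times\n",
		       table[2 + 2 * i], table[3 + 2 * i]);
	return 0;
}

leaks_show() renders each accumulated pair as "<cache>: <count> <caller>", with the caller resolved through kallsyms, so a /proc/slab_allocators line has the general shape "size-64: 51 some_function+0x28/0x7c" (a hypothetical line, shown only to indicate the format). Note that only caches created with both SLAB_STORE_USER and SLAB_RED_ZONE are reported; leaks_show() skips the rest, since without a stored user word there is no caller to attribute.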