author		Eric Dumazet <eric.dumazet@gmail.com>	2011-11-22 16:02:02 +0100
committer	Pekka Enberg <penberg@kernel.org>	2011-11-24 08:44:19 +0200
commit		bc6697d8a506dedf09e8e9974ffa3a316183e608 (patch)
tree		b54d3d45349fb8a00ce8a086da55a1ebf3394646 /mm
parent		42d623a8cd08eb93ab221d22cee5a62618895bbf (diff)
download	linux-bc6697d8a506dedf09e8e9974ffa3a316183e608.tar.bz2
slub: avoid potential NULL dereference or corruption
show_slab_objects() can trigger NULL dereferences or memory corruption.

Another cpu can change its c->page to NULL or c->node to NUMA_NO_NODE while
we use them.

Use ACCESS_ONCE(c->page) and ACCESS_ONCE(c->node) to make sure this cannot
happen.

Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
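For readers unfamiliar with the idiom, here is a minimal standalone sketch of the single-read pattern the patch relies on. The struct definitions are simplified stand-ins, not the real kmem_cache_cpu or page layouts, and count_racy()/count_safe() are hypothetical helpers for illustration; the ACCESS_ONCE() macro is essentially the kernel's definition from this era.

/* Kernel-style ACCESS_ONCE(): force exactly one volatile read of x. */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct page {				/* stand-in: only the field used below */
	int objects;
};

struct kmem_cache_cpu {			/* stand-in: only the racy fields */
	struct page *page;
	int node;
};

/*
 * Racy version: c->page is read twice (once for the test, once for the
 * dereference) and c->node is read independently, so another CPU that
 * sets c->page to NULL or c->node to a negative value in between can
 * cause a NULL dereference or an out-of-bounds nodes[] index.
 */
static void count_racy(struct kmem_cache_cpu *c, unsigned long *nodes)
{
	if (c->page)
		nodes[c->node] += c->page->objects;
}

/*
 * Patched pattern: snapshot each field exactly once into a local, test
 * the locals, and use only the locals afterwards, so the test and the
 * use can never disagree.
 */
static void count_safe(struct kmem_cache_cpu *c, unsigned long *nodes)
{
	int node = ACCESS_ONCE(c->node);
	struct page *page = ACCESS_ONCE(c->page);

	if (node < 0 || !page)
		return;
	nodes[node] += page->objects;
}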
Diffstat (limited to 'mm')
-rw-r--r--	mm/slub.c	21
1 files changed, 11 insertions, 10 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 2a9cfd72a3d7..ed3334d9b6da 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4444,30 +4444,31 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		for_each_possible_cpu(cpu) {
 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+			int node = ACCESS_ONCE(c->node);
 			struct page *page;
-			if (!c || c->node < 0)
+			if (node < 0)
 				continue;
-
-			if (c->page) {
-				if (flags & SO_TOTAL)
-					x = c->page->objects;
+			page = ACCESS_ONCE(c->page);
+			if (page) {
+				if (flags & SO_TOTAL)
+					x = page->objects;
 				else if (flags & SO_OBJECTS)
-					x = c->page->inuse;
+					x = page->inuse;
 				else
 					x = 1;
 				total += x;
-				nodes[c->node] += x;
+				nodes[node] += x;
 			}
 			page = c->partial;
 			if (page) {
 				x = page->pobjects;
-				total += x;
-				nodes[c->node] += x;
+				total += x;
+				nodes[node] += x;
 			}
-			per_cpu[c->node]++;
 		}
 	}
+			per_cpu[node]++;
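Read as straight C rather than a diff, the per-cpu loop after this change looks roughly as follows. This is reconstructed from the hunk above; the surrounding declarations (cpu, x, total, nodes[], per_cpu[], s, flags) live in the unchanged parts of show_slab_objects() and are assumed here.

		for_each_possible_cpu(cpu) {
			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
			int node = ACCESS_ONCE(c->node);	/* read c->node once */
			struct page *page;

			if (node < 0)
				continue;
			page = ACCESS_ONCE(c->page);		/* read c->page once */
			if (page) {
				if (flags & SO_TOTAL)
					x = page->objects;
				else if (flags & SO_OBJECTS)
					x = page->inuse;
				else
					x = 1;

				total += x;
				nodes[node] += x;	/* index only with the snapshot */
			}
			page = c->partial;
			if (page) {
				x = page->pobjects;
				total += x;
				nodes[node] += x;
			}
			per_cpu[node]++;
		}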