author     Christoph Lameter <cl@linux.com>     2011-05-16 15:26:08 -0500
committer  Pekka Enberg <penberg@kernel.org>    2011-05-17 22:18:55 +0300
commit     1393d9a1857471f816d0be1ccc1d6433a86050f6 (patch)
tree       2bcad46d15f1dc7ea6e458f2ab4d7f17b766dd66 /mm/slub.c
parent     6332aa9d25e911cc97aa9cc09acee21afda07ea6 (diff)
download   linux-1393d9a1857471f816d0be1ccc1d6433a86050f6.tar.bz2
slub: Make CONFIG_DEBUG_PAGE_ALLOC work with new fastpath
Fastpath can do a speculative access to a page that CONFIG_DEBUG_PAGE_ALLOC
may have marked as invalid to retrieve the pointer to the next free object.

Use probe_kernel_read in that case in order not to cause a page fault.

Cc: <stable@kernel.org> # 38.x
Reported-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
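For illustration only, the idea in isolation looks roughly like the sketch below (the helper name read_next_free_sketch is hypothetical and not part of the patch): read the stored next-free pointer via probe_kernel_read() so that an unmapped page yields -EFAULT instead of an oops, and rely on the later cmpxchg to reject any stale value.

/*
 * Hypothetical sketch (not from the patch): a fault-tolerant read of the
 * freelist pointer stored inside an object.  probe_kernel_read() copies
 * from a kernel address and returns -EFAULT instead of faulting when the
 * backing page has been unmapped, e.g. by CONFIG_DEBUG_PAGEALLOC.
 */
static void *read_next_free_sketch(void *object, unsigned long offset)
{
	void *p = NULL;

	/*
	 * Even if this read fails or returns stale data, the value is only
	 * used as the proposed new freelist head in a cmpxchg that also
	 * compares the old head and the transaction id; any concurrent
	 * change makes that cmpxchg fail and the fastpath simply retries.
	 */
	probe_kernel_read(&p, (void **)(object + offset), sizeof(p));
	return p;
}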
Diffstat (limited to 'mm/slub.c')
-rw-r--r--   mm/slub.c   14
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/mm/slub.c b/mm/slub.c
index 8657ab838b82..97bb5b8d935f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -261,6 +261,18 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
 	return *(void **)(object + s->offset);
 }
 
+static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
+{
+	void *p;
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
+#else
+	p = get_freepointer(s, object);
+#endif
+	return p;
+}
+
 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 {
 	*(void **)(object + s->offset) = fp;
@@ -1933,7 +1945,7 @@ redo:
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
 				object, tid,
-				get_freepointer(s, object), next_tid(tid)))) {
+				get_freepointer_safe(s, object), next_tid(tid)))) {
 			note_cmpxchg_failure("slab_alloc", s, tid);
 			goto redo;
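For context, the hunk above sits in the lockless allocation fastpath around the redo: label. A rough, simplified sketch of the surrounding retry loop follows; local variable names and the exact code are assumed for illustration, not quoted from this diff.

redo:
	/* snapshot the per-cpu transaction id and freelist head */
	tid = c->tid;
	barrier();
	object = c->freelist;

	/*
	 * Speculatively read the next free object, then try to publish the
	 * new freelist head together with the next tid in one cmpxchg_double.
	 * If the freelist or tid changed since the snapshot (another
	 * allocation or free got in between), the compare fails and the
	 * whole sequence is redone, which is why even a garbage pointer
	 * returned by get_freepointer_safe() can never be installed.
	 */
	if (unlikely(!this_cpu_cmpxchg_double(
			s->cpu_slab->freelist, s->cpu_slab->tid,
			object, tid,
			get_freepointer_safe(s, object), next_tid(tid)))) {
		note_cmpxchg_failure("slab_alloc", s, tid);
		goto redo;
	}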