author     Alexander Potapenko <glider@google.com>          2016-03-25 14:22:02 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-03-25 16:37:42 -0700
commit     505f5dcb1c419e55a9621a01f83eb5745d8d7398 (patch)
tree       4d608fdad5254972f8bba02f437060764e30bc6f /mm/slab.c
parent     7ed2f9e663854db313f177a511145630e398b402 (diff)
download   linux-505f5dcb1c419e55a9621a01f83eb5745d8d7398.tar.bz2
mm, kasan: add GFP flags to KASAN API
Add GFP flags to KASAN hooks for future patches to use.
This patch is based on the "mm: kasan: unified support for SLUB and SLAB
allocators" patch originally prepared by Dmitry Chernenkov.
Signed-off-by: Alexander Potapenko <glider@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrey Konovalov <adech.fo@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Konstantin Serebryany <kcc@google.com>
Cc: Dmitry Chernenkov <dmitryc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 7515578471d8..17e2848979c5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3378,7 +3378,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *ret = slab_alloc(cachep, flags, _RET_IP_);
 
-	kasan_slab_alloc(cachep, ret);
+	kasan_slab_alloc(cachep, ret, flags);
 	trace_kmem_cache_alloc(_RET_IP_, ret,
 			       cachep->object_size, cachep->size, flags);
 
@@ -3444,7 +3444,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 
 	ret = slab_alloc(cachep, flags, _RET_IP_);
 
-	kasan_kmalloc(cachep, ret, size);
+	kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc(_RET_IP_, ret,
 		      size, cachep->size, flags);
 	return ret;
@@ -3468,7 +3468,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
-	kasan_slab_alloc(cachep, ret);
+	kasan_slab_alloc(cachep, ret, flags);
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    cachep->object_size, cachep->size,
 				    flags, nodeid);
@@ -3486,7 +3486,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 	void *ret;
 
 	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
-	kasan_kmalloc(cachep, ret, size);
+
+	kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, cachep->size,
 			   flags, nodeid);
@@ -3505,7 +3506,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
 	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
-	kasan_kmalloc(cachep, ret, size);
+	kasan_kmalloc(cachep, ret, size, flags);
 
 	return ret;
 }
@@ -3541,7 +3542,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 		return cachep;
 	ret = slab_alloc(cachep, flags, caller);
 
-	kasan_kmalloc(cachep, ret, size);
+	kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc(caller, ret, size,
 		      cachep->size, flags);
 
@@ -4323,7 +4324,7 @@ size_t ksize(const void *objp)
 	/* We assume that ksize callers could use the whole allocated area,
 	 * so we need to unpoison this area.
 	 */
-	kasan_krealloc(objp, size);
+	kasan_krealloc(objp, size, GFP_NOWAIT);
 
 	return size;
 }
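
Note on the last hunk: ksize() has no gfp mask of its own to forward, so GFP_NOWAIT is passed as a non-sleeping fallback that is safe from any calling context. A minimal sketch of the two caller-side patterns follows, written as if it sat inside mm/slab.c (so the static slab_alloc() helper and _RET_IP_ are in scope); my_cache_alloc() and my_report_size() are hypothetical names for illustration, not functions added by this patch:

	#include <linux/gfp.h>
	#include <linux/kasan.h>
	#include <linux/slab.h>

	/* Allocation path (mirrors the kmem_cache_alloc() hunk above):
	 * a gfp mask is available, so forward it to the KASAN hook.
	 */
	static void *my_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
	{
		void *ret = slab_alloc(cachep, flags, _RET_IP_);

		kasan_slab_alloc(cachep, ret, flags);
		return ret;
	}

	/* Introspection path (mirrors the ksize() hunk above): no caller
	 * gfp context exists, so the non-sleeping GFP_NOWAIT mask keeps
	 * the hook safe from any context.
	 */
	static size_t my_report_size(const void *objp, size_t size)
	{
		kasan_krealloc(objp, size, GFP_NOWAIT);
		return size;
	}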