From 342061ee4ef3d80001d1ae494378f3979c861dba Mon Sep 17 00:00:00 2001 From: Paul Lawrence Date: Tue, 6 Feb 2018 15:36:11 -0800 Subject: kasan: support alloca() poisoning clang's AddressSanitizer implementation adds redzones on either side of alloca()ed buffers. These redzones are 32-byte aligned and at least 32 bytes long. __asan_alloca_poison() is passed the size and address of the allocated buffer, *excluding* the redzones on either side. The left redzone will always be to the immediate left of this buffer; but AddressSanitizer may need to add padding between the end of the buffer and the right redzone. If there are any 8-byte chunks inside this padding, we should poison those too. __asan_allocas_unpoison() is just passed the top and bottom of the dynamic stack area, so unpoisoning is simpler. Link: http://lkml.kernel.org/r/20171204191735.132544-4-paullawrence@google.com Signed-off-by: Greg Hackmann Signed-off-by: Paul Lawrence Acked-by: Andrey Ryabinin Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: Masahiro Yamada Cc: Matthias Kaehlcke Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/kasan/kasan.c | 34 ++++++++++++++++++++++++++++++++++ mm/kasan/kasan.h | 8 ++++++++ mm/kasan/report.c | 4 ++++ 3 files changed, 46 insertions(+) (limited to 'mm') diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 405bba487df5..d96b36088b2f 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -736,6 +736,40 @@ void __asan_unpoison_stack_memory(const void *addr, size_t size) } EXPORT_SYMBOL(__asan_unpoison_stack_memory); +/* Emitted by compiler to poison alloca()ed objects. */ +void __asan_alloca_poison(unsigned long addr, size_t size) +{ + size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE); + size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) - + rounded_up_size; + size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE); + + const void *left_redzone = (const void *)(addr - + KASAN_ALLOCA_REDZONE_SIZE); + const void *right_redzone = (const void *)(addr + rounded_up_size); + + WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE)); + + kasan_unpoison_shadow((const void *)(addr + rounded_down_size), + size - rounded_down_size); + kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE, + KASAN_ALLOCA_LEFT); + kasan_poison_shadow(right_redzone, + padding_size + KASAN_ALLOCA_REDZONE_SIZE, + KASAN_ALLOCA_RIGHT); +} +EXPORT_SYMBOL(__asan_alloca_poison); + +/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. 
*/ +void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom) +{ + if (unlikely(!stack_top || stack_top > stack_bottom)) + return; + + kasan_unpoison_shadow(stack_top, stack_bottom - stack_top); +} +EXPORT_SYMBOL(__asan_allocas_unpoison); + #ifdef CONFIG_MEMORY_HOTPLUG static int __meminit kasan_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index c70851a9a6a4..7c0bcd1f4c0d 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -24,6 +24,14 @@ #define KASAN_STACK_PARTIAL 0xF4 #define KASAN_USE_AFTER_SCOPE 0xF8 +/* + * alloca redzone shadow values + */ +#define KASAN_ALLOCA_LEFT 0xCA +#define KASAN_ALLOCA_RIGHT 0xCB + +#define KASAN_ALLOCA_REDZONE_SIZE 32 + /* Don't break randconfig/all*config builds */ #ifndef KASAN_ABI_VERSION #define KASAN_ABI_VERSION 1 diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 410c8235e671..eff12e040498 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -102,6 +102,10 @@ static const char *get_shadow_bug_type(struct kasan_access_info *info) case KASAN_USE_AFTER_SCOPE: bug_type = "use-after-scope"; break; + case KASAN_ALLOCA_LEFT: + case KASAN_ALLOCA_RIGHT: + bug_type = "alloca-out-of-bounds"; + break; } return bug_type; -- cgit v1.2.3 From d321599cf6b861beefe92327476b617435c7fc4a Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Tue, 6 Feb 2018 15:36:20 -0800 Subject: kasan: add functions for unpoisoning stack variables As a code-size optimization, LLVM builds since r279383 may bulk-manipulate the shadow region when (un)poisoning large memory blocks. This requires new callbacks that simply do an uninstrumented memset(). This fixes linking the Clang-built kernel when using KASAN. [arnd@arndb.de: add declarations for internal functions] Link: http://lkml.kernel.org/r/20180105094112.2690475-1-arnd@arndb.de [fengguang.wu@intel.com: __asan_set_shadow_00 can be static] Link: http://lkml.kernel.org/r/20171223125943.GA74341@lkp-ib03 [ghackmann@google.com: fix memset() parameters, and tweak commit message to describe new callbacks] Link: http://lkml.kernel.org/r/20171204191735.132544-6-paullawrence@google.com Signed-off-by: Alexander Potapenko Signed-off-by: Greg Hackmann Signed-off-by: Paul Lawrence Signed-off-by: Fengguang Wu Signed-off-by: Arnd Bergmann Acked-by: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Masahiro Yamada Cc: Matthias Kaehlcke Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/kasan/kasan.c | 15 +++++++++++++++ mm/kasan/kasan.h | 44 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) (limited to 'mm') diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index d96b36088b2f..8aaee42fcfab 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -770,6 +770,21 @@ void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom) } EXPORT_SYMBOL(__asan_allocas_unpoison); +/* Emitted by the compiler to [un]poison local variables. 
*/ +#define DEFINE_ASAN_SET_SHADOW(byte) \ + void __asan_set_shadow_##byte(const void *addr, size_t size) \ + { \ + __memset((void *)addr, 0x##byte, size); \ + } \ + EXPORT_SYMBOL(__asan_set_shadow_##byte) + +DEFINE_ASAN_SET_SHADOW(00); +DEFINE_ASAN_SET_SHADOW(f1); +DEFINE_ASAN_SET_SHADOW(f2); +DEFINE_ASAN_SET_SHADOW(f3); +DEFINE_ASAN_SET_SHADOW(f5); +DEFINE_ASAN_SET_SHADOW(f8); + #ifdef CONFIG_MEMORY_HOTPLUG static int __meminit kasan_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 7c0bcd1f4c0d..9a768dd71c51 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -121,4 +121,48 @@ static inline void quarantine_reduce(void) { } static inline void quarantine_remove_cache(struct kmem_cache *cache) { } #endif +/* + * Exported functions for interfaces called from assembly or from generated + * code. Declarations here to avoid warning about missing declarations. + */ +asmlinkage void kasan_unpoison_task_stack_below(const void *watermark); +void __asan_register_globals(struct kasan_global *globals, size_t size); +void __asan_unregister_globals(struct kasan_global *globals, size_t size); +void __asan_loadN(unsigned long addr, size_t size); +void __asan_storeN(unsigned long addr, size_t size); +void __asan_handle_no_return(void); +void __asan_poison_stack_memory(const void *addr, size_t size); +void __asan_unpoison_stack_memory(const void *addr, size_t size); +void __asan_alloca_poison(unsigned long addr, size_t size); +void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom); + +void __asan_load1(unsigned long addr); +void __asan_store1(unsigned long addr); +void __asan_load2(unsigned long addr); +void __asan_store2(unsigned long addr); +void __asan_load4(unsigned long addr); +void __asan_store4(unsigned long addr); +void __asan_load8(unsigned long addr); +void __asan_store8(unsigned long addr); +void __asan_load16(unsigned long addr); +void __asan_store16(unsigned long addr); + +void __asan_load1_noabort(unsigned long addr); +void __asan_store1_noabort(unsigned long addr); +void __asan_load2_noabort(unsigned long addr); +void __asan_store2_noabort(unsigned long addr); +void __asan_load4_noabort(unsigned long addr); +void __asan_store4_noabort(unsigned long addr); +void __asan_load8_noabort(unsigned long addr); +void __asan_store8_noabort(unsigned long addr); +void __asan_load16_noabort(unsigned long addr); +void __asan_store16_noabort(unsigned long addr); + +void __asan_set_shadow_00(const void *addr, size_t size); +void __asan_set_shadow_f1(const void *addr, size_t size); +void __asan_set_shadow_f2(const void *addr, size_t size); +void __asan_set_shadow_f3(const void *addr, size_t size); +void __asan_set_shadow_f5(const void *addr, size_t size); +void __asan_set_shadow_f8(const void *addr, size_t size); + #endif -- cgit v1.2.3 From 47adccce3e8a31d315f47183ab1185862b2fc5d4 Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Tue, 6 Feb 2018 15:36:23 -0800 Subject: kasan: detect invalid frees for large objects Patch series "kasan: detect invalid frees". KASAN detects double-frees, but does not detect invalid-frees (when a pointer into a middle of heap object is passed to free). We recently had a very unpleasant case in crypto code which freed an inner object inside of a heap allocation. This left unnoticed during free, but totally corrupted heap and later lead to a bunch of random crashes all over kernel code. Detect invalid frees. 
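As an illustration of the bug class only (hypothetical code with made-up names, not the crypto code mentioned above), an invalid free looks like this:

#include <linux/slab.h>

/* Hypothetical example of an invalid free; the struct and names are made up. */
struct outer {
        int refcount;
        struct inner {
                char buf[64];
        } payload;              /* lives inside the same kmalloc() allocation */
};

static void broken_teardown(struct outer *o)
{
        /*
         * Bug: &o->payload points into the middle of the heap object, not at
         * the pointer kmalloc() returned, so this kfree() corrupts allocator
         * state.  KASAN already caught double-frees; with this series it also
         * reports this case as an invalid-free.
         */
        kfree(&o->payload);
}
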
This patch (of 5): Detect frees of pointers into middle of large heap objects. I dropped const from kasan_kfree_large() because it starts propagating through a bunch of functions in kasan_report.c, slab/slub nearest_obj(), all of their local variables, fixup_red_left(), etc. Link: http://lkml.kernel.org/r/1b45b4fe1d20fc0de1329aab674c1dd973fee723.1514378558.git.dvyukov@google.com Signed-off-by: Dmitry Vyukov Cc: Andrey Ryabinin a Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 4 ++-- lib/test_kasan.c | 33 +++++++++++++++++++++++++++++++++ mm/kasan/kasan.c | 12 +++++------- mm/kasan/kasan.h | 3 +-- mm/kasan/report.c | 3 +-- mm/slub.c | 4 ++-- 6 files changed, 44 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index e3eb834c9a35..fc9e642533f8 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -56,7 +56,7 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object); void kasan_init_slab_obj(struct kmem_cache *cache, const void *object); void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); -void kasan_kfree_large(const void *ptr); +void kasan_kfree_large(void *ptr); void kasan_poison_kfree(void *ptr); void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, gfp_t flags); @@ -108,7 +108,7 @@ static inline void kasan_init_slab_obj(struct kmem_cache *cache, const void *object) {} static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} -static inline void kasan_kfree_large(const void *ptr) {} +static inline void kasan_kfree_large(void *ptr) {} static inline void kasan_poison_kfree(void *ptr) {} static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, gfp_t flags) {} diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 2724f86c4cef..e9c5d765be66 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -94,6 +94,37 @@ static noinline void __init kmalloc_pagealloc_oob_right(void) ptr[size] = 0; kfree(ptr); } + +static noinline void __init kmalloc_pagealloc_uaf(void) +{ + char *ptr; + size_t size = KMALLOC_MAX_CACHE_SIZE + 10; + + pr_info("kmalloc pagealloc allocation: use-after-free\n"); + ptr = kmalloc(size, GFP_KERNEL); + if (!ptr) { + pr_err("Allocation failed\n"); + return; + } + + kfree(ptr); + ptr[0] = 0; +} + +static noinline void __init kmalloc_pagealloc_invalid_free(void) +{ + char *ptr; + size_t size = KMALLOC_MAX_CACHE_SIZE + 10; + + pr_info("kmalloc pagealloc allocation: invalid-free\n"); + ptr = kmalloc(size, GFP_KERNEL); + if (!ptr) { + pr_err("Allocation failed\n"); + return; + } + + kfree(ptr + 1); +} #endif static noinline void __init kmalloc_large_oob_right(void) @@ -505,6 +536,8 @@ static int __init kmalloc_tests_init(void) kmalloc_node_oob_right(); #ifdef CONFIG_SLUB kmalloc_pagealloc_oob_right(); + kmalloc_pagealloc_uaf(); + kmalloc_pagealloc_invalid_free(); #endif kmalloc_large_oob_right(); kmalloc_oob_krealloc_more(); diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 8aaee42fcfab..ecb64fda79e6 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -511,8 +511,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object) shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object)); if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) { - kasan_report_double_free(cache, object, - __builtin_return_address(1)); + kasan_report_invalid_free(object, __builtin_return_address(1)); return true; } @@ -602,12 +601,11 @@ void kasan_poison_kfree(void *ptr) 
kasan_poison_slab_free(page->slab_cache, ptr); } -void kasan_kfree_large(const void *ptr) +void kasan_kfree_large(void *ptr) { - struct page *page = virt_to_page(ptr); - - kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page), - KASAN_FREE_PAGE); + if (ptr != page_address(virt_to_head_page(ptr))) + kasan_report_invalid_free(ptr, __builtin_return_address(1)); + /* The object will be poisoned by page_alloc. */ } int kasan_module_alloc(void *addr, size_t size) diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 9a768dd71c51..bf353a18c908 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -107,8 +107,7 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr) void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip); -void kasan_report_double_free(struct kmem_cache *cache, void *object, - void *ip); +void kasan_report_invalid_free(void *object, void *ip); #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB) void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache); diff --git a/mm/kasan/report.c b/mm/kasan/report.c index eff12e040498..55916ad21722 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -326,8 +326,7 @@ static void print_shadow_for_address(const void *addr) } } -void kasan_report_double_free(struct kmem_cache *cache, void *object, - void *ip) +void kasan_report_invalid_free(void *object, void *ip) { unsigned long flags; diff --git a/mm/slub.c b/mm/slub.c index cc71176c6eef..b54f8787c674 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1356,7 +1356,7 @@ static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) kasan_kmalloc_large(ptr, size, flags); } -static inline void kfree_hook(const void *x) +static inline void kfree_hook(void *x) { kmemleak_free(x); kasan_kfree_large(x); @@ -3910,7 +3910,7 @@ void kfree(const void *x) page = virt_to_head_page(x); if (unlikely(!PageSlab(page))) { BUG_ON(!PageCompound(page)); - kfree_hook(x); + kfree_hook(object); __free_pages(page, compound_order(page)); return; } -- cgit v1.2.3 From ee3ce779b58c31acacdfab0ad6c86d428ba2c2e3 Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Tue, 6 Feb 2018 15:36:27 -0800 Subject: kasan: don't use __builtin_return_address(1) __builtin_return_address(1) is unreliable without frame pointers. With defconfig on kmalloc_pagealloc_invalid_free test I am getting: BUG: KASAN: double-free or invalid-free in (null) Pass caller PC from callers explicitly. 
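In a simplified sketch (free_hook() and report_free() are illustrative stand-ins, not the real hooks changed below), the pattern is:

#include <linux/kernel.h>       /* _RET_IP_ */

void report_free(void *ptr, unsigned long caller_ip);   /* illustrative stand-in */

static __always_inline void free_hook(void *ptr)
{
        /*
         * _RET_IP_ expands to __builtin_return_address(0).  Because free_hook()
         * is always inlined into its caller (e.g. kfree()), it evaluates to
         * kfree()'s own return address, i.e. the PC of the code that called
         * kfree().  Unlike __builtin_return_address(1) it needs no stack
         * walking, so it stays correct without frame pointers.
         */
        report_free(ptr, _RET_IP_);
}
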
Link: http://lkml.kernel.org/r/9b01bc2d237a4df74ff8472a3bf6b7635908de01.1514378558.git.dvyukov@google.com Signed-off-by: Dmitry Vyukov Cc: Andrey Ryabinin a Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 9 +++++---- mm/kasan/kasan.c | 8 ++++---- mm/kasan/kasan.h | 2 +- mm/kasan/report.c | 4 ++-- mm/slab.c | 6 +++--- mm/slub.c | 8 ++++---- 6 files changed, 19 insertions(+), 18 deletions(-) (limited to 'mm') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index fc9e642533f8..f0d13c30acc6 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -56,14 +56,14 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object); void kasan_init_slab_obj(struct kmem_cache *cache, const void *object); void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); -void kasan_kfree_large(void *ptr); +void kasan_kfree_large(void *ptr, unsigned long ip); void kasan_poison_kfree(void *ptr); void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, gfp_t flags); void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); -bool kasan_slab_free(struct kmem_cache *s, void *object); +bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); struct kasan_cache { int alloc_meta_offset; @@ -108,7 +108,7 @@ static inline void kasan_init_slab_obj(struct kmem_cache *cache, const void *object) {} static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} -static inline void kasan_kfree_large(void *ptr) {} +static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} static inline void kasan_poison_kfree(void *ptr) {} static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, gfp_t flags) {} @@ -117,7 +117,8 @@ static inline void kasan_krealloc(const void *object, size_t new_size, static inline void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags) {} -static inline bool kasan_slab_free(struct kmem_cache *s, void *object) +static inline bool kasan_slab_free(struct kmem_cache *s, void *object, + unsigned long ip) { return false; } diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index ecb64fda79e6..32f555ded938 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -501,7 +501,7 @@ static void kasan_poison_slab_free(struct kmem_cache *cache, void *object) kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE); } -bool kasan_slab_free(struct kmem_cache *cache, void *object) +bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) { s8 shadow_byte; @@ -511,7 +511,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object) shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object)); if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) { - kasan_report_invalid_free(object, __builtin_return_address(1)); + kasan_report_invalid_free(object, ip); return true; } @@ -601,10 +601,10 @@ void kasan_poison_kfree(void *ptr) kasan_poison_slab_free(page->slab_cache, ptr); } -void kasan_kfree_large(void *ptr) +void kasan_kfree_large(void *ptr, unsigned long ip) { if (ptr != page_address(virt_to_head_page(ptr))) - kasan_report_invalid_free(ptr, __builtin_return_address(1)); + kasan_report_invalid_free(ptr, ip); /* The object will be poisoned by page_alloc. 
*/ } diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index bf353a18c908..c12dcfde2ebd 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -107,7 +107,7 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr) void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip); -void kasan_report_invalid_free(void *object, void *ip); +void kasan_report_invalid_free(void *object, unsigned long ip); #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB) void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache); diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 55916ad21722..75206991ece0 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -326,12 +326,12 @@ static void print_shadow_for_address(const void *addr) } } -void kasan_report_invalid_free(void *object, void *ip) +void kasan_report_invalid_free(void *object, unsigned long ip) { unsigned long flags; kasan_start_report(&flags); - pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", ip); + pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip); pr_err("\n"); print_address_description(object); pr_err("\n"); diff --git a/mm/slab.c b/mm/slab.c index cd86f15071ad..324446621b3e 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3478,11 +3478,11 @@ free_done: * Release an obj back to its cache. If the obj has a constructed state, it must * be in this state _before_ it is released. Called with disabled ints. */ -static inline void __cache_free(struct kmem_cache *cachep, void *objp, - unsigned long caller) +static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp, + unsigned long caller) { /* Put the object into the quarantine, don't touch it for now. */ - if (kasan_slab_free(cachep, objp)) + if (kasan_slab_free(cachep, objp, _RET_IP_)) return; ___cache_free(cachep, objp, caller); diff --git a/mm/slub.c b/mm/slub.c index b54f8787c674..e381728a3751 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1356,13 +1356,13 @@ static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) kasan_kmalloc_large(ptr, size, flags); } -static inline void kfree_hook(void *x) +static __always_inline void kfree_hook(void *x) { kmemleak_free(x); - kasan_kfree_large(x); + kasan_kfree_large(x, _RET_IP_); } -static inline void *slab_free_hook(struct kmem_cache *s, void *x) +static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x) { void *freeptr; @@ -1390,7 +1390,7 @@ static inline void *slab_free_hook(struct kmem_cache *s, void *x) * kasan_slab_free() may put x into memory quarantine, delaying its * reuse. In this case the object's freelist pointer is changed. */ - kasan_slab_free(s, x); + kasan_slab_free(s, x, _RET_IP_); return freeptr; } -- cgit v1.2.3 From 6860f6340c0918cddcd3c9fcf8c36401c8184268 Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Tue, 6 Feb 2018 15:36:30 -0800 Subject: kasan: detect invalid frees for large mempool objects Detect frees of pointers into middle of mempool objects. I did a one-off test, but it turned out to be very tricky, so I reverted it. First, mempool does not call kasan_poison_kfree() unless allocation function fails. I stubbed an allocation function to fail on second and subsequent allocations. But then mempool stopped to call kasan_poison_kfree() at all, because it does it only when allocation function is mempool_kmalloc(). We could support this special failing test allocation function in mempool, but it also can't live with kasan tests, because these are in a module. 
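The bug class itself is easy to sketch, though; the snippet below is purely illustrative and, for the reasons above, not a test shipped with this series:

#include <linux/mempool.h>
#include <linux/gfp.h>

/* Hypothetical illustration of an invalid free of a mempool element. */
static void mempool_invalid_free_example(void)
{
        mempool_t *pool = mempool_create_kmalloc_pool(4, 1024);
        char *elem;

        if (!pool)
                return;

        elem = mempool_alloc(pool, GFP_KERNEL);
        if (elem)
                /* Bug: the pointer is offset into the middle of the element. */
                mempool_free(elem + 16, pool);

        mempool_destroy(pool);
}
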
Link: http://lkml.kernel.org/r/bf7a7d035d7a5ed62d2dd0e3d2e8a4fcdf456aa7.1514378558.git.dvyukov@google.com Signed-off-by: Dmitry Vyukov Cc: Andrey Ryabinin a Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 4 ++-- mm/kasan/kasan.c | 11 ++++++++--- mm/mempool.c | 6 +++--- 3 files changed, 13 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index f0d13c30acc6..fc45f8952d1e 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -57,7 +57,7 @@ void kasan_init_slab_obj(struct kmem_cache *cache, const void *object); void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); void kasan_kfree_large(void *ptr, unsigned long ip); -void kasan_poison_kfree(void *ptr); +void kasan_poison_kfree(void *ptr, unsigned long ip); void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, gfp_t flags); void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); @@ -109,7 +109,7 @@ static inline void kasan_init_slab_obj(struct kmem_cache *cache, static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} -static inline void kasan_poison_kfree(void *ptr) {} +static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {} static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, gfp_t flags) {} static inline void kasan_krealloc(const void *object, size_t new_size, diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 32f555ded938..77c103748728 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -588,17 +588,22 @@ void kasan_krealloc(const void *object, size_t size, gfp_t flags) kasan_kmalloc(page->slab_cache, object, size, flags); } -void kasan_poison_kfree(void *ptr) +void kasan_poison_kfree(void *ptr, unsigned long ip) { struct page *page; page = virt_to_head_page(ptr); - if (unlikely(!PageSlab(page))) + if (unlikely(!PageSlab(page))) { + if (ptr != page_address(page)) { + kasan_report_invalid_free(ptr, ip); + return; + } kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page), KASAN_FREE_PAGE); - else + } else { kasan_poison_slab_free(page->slab_cache, ptr); + } } void kasan_kfree_large(void *ptr, unsigned long ip) diff --git a/mm/mempool.c b/mm/mempool.c index 7d8c5a0010a2..5c9dce34719b 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -103,10 +103,10 @@ static inline void poison_element(mempool_t *pool, void *element) } #endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */ -static void kasan_poison_element(mempool_t *pool, void *element) +static __always_inline void kasan_poison_element(mempool_t *pool, void *element) { if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) - kasan_poison_kfree(element); + kasan_poison_kfree(element, _RET_IP_); if (pool->alloc == mempool_alloc_pages) kasan_free_pages(element, (unsigned long)pool->pool_data); } @@ -119,7 +119,7 @@ static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags) kasan_alloc_pages(element, (unsigned long)pool->pool_data); } -static void add_element(mempool_t *pool, void *element) +static __always_inline void add_element(mempool_t *pool, void *element) { BUG_ON(pool->curr_nr >= pool->min_nr); poison_element(pool, element); -- cgit v1.2.3 From 1db0e0f9dd9816e5d018518f933066d7b9bc0e33 Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Tue, 6 Feb 2018 15:36:34 -0800 Subject: kasan: unify code between kasan_slab_free() and kasan_poison_kfree() Both 
of these functions deal with freeing of slab objects. However, kasan_poison_kfree() mishandles SLAB_TYPESAFE_BY_RCU (must also not poison such objects) and does not detect double-frees. Unify code between these functions. This solves both of the problems and allows to add more common code (e.g. detection of invalid frees). Link: http://lkml.kernel.org/r/385493d863acf60408be219a021c3c8e27daa96f.1514378558.git.dvyukov@google.com Signed-off-by: Dmitry Vyukov Cc: Andrey Ryabinin a Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/kasan/kasan.c | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 77c103748728..578843fab5dc 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -489,21 +489,11 @@ void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags) kasan_kmalloc(cache, object, cache->object_size, flags); } -static void kasan_poison_slab_free(struct kmem_cache *cache, void *object) -{ - unsigned long size = cache->object_size; - unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE); - - /* RCU slabs could be legally used after free within the RCU period */ - if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU)) - return; - - kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE); -} - -bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) +static bool __kasan_slab_free(struct kmem_cache *cache, void *object, + unsigned long ip, bool quarantine) { s8 shadow_byte; + unsigned long rounded_up_size; /* RCU slabs could be legally used after free within the RCU period */ if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU)) @@ -515,9 +505,10 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) return true; } - kasan_poison_slab_free(cache, object); + rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE); + kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE); - if (unlikely(!(cache->flags & SLAB_KASAN))) + if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN))) return false; set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT); @@ -525,6 +516,11 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) return true; } +bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) +{ + return __kasan_slab_free(cache, object, ip, true); +} + void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, gfp_t flags) { @@ -602,7 +598,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip) kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page), KASAN_FREE_PAGE); } else { - kasan_poison_slab_free(page->slab_cache, ptr); + __kasan_slab_free(page->slab_cache, ptr, ip, false); } } -- cgit v1.2.3 From b1d5728939ebe01a773a75a72e7161408ec9805e Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Tue, 6 Feb 2018 15:36:37 -0800 Subject: kasan: detect invalid frees Detect frees of pointers into middle of heap objects. 
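The new test below exercises this with a SLAB_TYPESAFE_BY_RCU cache, the case the previous patch singled out: objects in such caches may legally be read after kmem_cache_free() until the RCU grace period ends, so KASAN must not poison them at free time. A rough sketch of that legal pattern, with made-up names:

#include <linux/rcupdate.h>

/* Illustrative reader for a SLAB_TYPESAFE_BY_RCU cache; all names are made up. */
struct node {
        int key;
};

static struct node __rcu *slot;

static bool key_matches(int key)
{
        struct node *obj;
        bool ret = false;

        rcu_read_lock();
        obj = rcu_dereference(slot);
        /*
         * A concurrent writer may already have kmem_cache_free()d obj.  With
         * SLAB_TYPESAFE_BY_RCU the memory stays a valid 'struct node' until
         * the RCU grace period ends, so this read is legal (real users then
         * revalidate the object, e.g. via a reference count).  That is why
         * KASAN must not poison such objects when they are freed.
         */
        if (obj)
                ret = (obj->key == key);
        rcu_read_unlock();

        return ret;
}
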
Link: http://lkml.kernel.org/r/cb569193190356beb018a03bb8d6fbae67e7adbc.1514378558.git.dvyukov@google.com Signed-off-by: Dmitry Vyukov Cc: Andrey Ryabinin a Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_kasan.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ mm/kasan/kasan.c | 6 ++++++ 2 files changed, 56 insertions(+) (limited to 'mm') diff --git a/lib/test_kasan.c b/lib/test_kasan.c index e9c5d765be66..a808d81b409d 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -523,6 +523,54 @@ static noinline void __init kasan_alloca_oob_right(void) *(volatile char *)p; } +static noinline void __init kmem_cache_double_free(void) +{ + char *p; + size_t size = 200; + struct kmem_cache *cache; + + cache = kmem_cache_create("test_cache", size, 0, 0, NULL); + if (!cache) { + pr_err("Cache allocation failed\n"); + return; + } + pr_info("double-free on heap object\n"); + p = kmem_cache_alloc(cache, GFP_KERNEL); + if (!p) { + pr_err("Allocation failed\n"); + kmem_cache_destroy(cache); + return; + } + + kmem_cache_free(cache, p); + kmem_cache_free(cache, p); + kmem_cache_destroy(cache); +} + +static noinline void __init kmem_cache_invalid_free(void) +{ + char *p; + size_t size = 200; + struct kmem_cache *cache; + + cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU, + NULL); + if (!cache) { + pr_err("Cache allocation failed\n"); + return; + } + pr_info("invalid-free of heap object\n"); + p = kmem_cache_alloc(cache, GFP_KERNEL); + if (!p) { + pr_err("Allocation failed\n"); + kmem_cache_destroy(cache); + return; + } + + kmem_cache_free(cache, p + 1); + kmem_cache_destroy(cache); +} + static int __init kmalloc_tests_init(void) { /* @@ -560,6 +608,8 @@ static int __init kmalloc_tests_init(void) ksize_unpoisons_memory(); copy_user_test(); use_after_scope_test(); + kmem_cache_double_free(); + kmem_cache_invalid_free(); kasan_restore_multi_shot(multishot); diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 578843fab5dc..3fb497d4fbf8 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -495,6 +495,12 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object, s8 shadow_byte; unsigned long rounded_up_size; + if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) != + object)) { + kasan_report_invalid_free(object, ip); + return true; + } + /* RCU slabs could be legally used after free within the RCU period */ if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU)) return false; -- cgit v1.2.3 From 5f21f3a8f4dcba77792d60bcc711131470a689bb Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 6 Feb 2018 15:36:41 -0800 Subject: kasan: fix prototype author email address Use the new one. 
Link: http://lkml.kernel.org/r/de3b7ffc30a55178913a7d3865216aa7accf6c40.1515775666.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Andrey Ryabinin Cc: Dmitry Vyukov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/kasan/kasan.c | 2 +- mm/kasan/report.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 3fb497d4fbf8..e13d911251e7 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -5,7 +5,7 @@ * Author: Andrey Ryabinin * * Some code borrowed from https://github.com/xairy/kasan-prototype by - * Andrey Konovalov + * Andrey Konovalov * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 75206991ece0..5c169aa688fd 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -5,7 +5,7 @@ * Author: Andrey Ryabinin * * Some code borrowed from https://github.com/xairy/kasan-prototype by - * Andrey Konovalov + * Andrey Konovalov * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as -- cgit v1.2.3 From 2ee0826085d1c0281cb60c1f4bc3e0c27efeedc3 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Tue, 6 Feb 2018 15:40:17 -0800 Subject: pids: introduce find_get_task_by_vpid() helper There are several functions that do find_task_by_vpid() followed by get_task_struct(). We can use a helper function instead. Link: http://lkml.kernel.org/r/1509602027-11337-1-git-send-email-rppt@linux.vnet.ibm.com Signed-off-by: Mike Rapoport Acked-by: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/kernel/perfmon.c | 13 +++---------- include/linux/sched.h | 5 +++++ kernel/futex.c | 20 +------------------- kernel/pid.c | 13 +++++++++++++ kernel/ptrace.c | 27 ++++++--------------------- kernel/taskstats.c | 6 +----- mm/process_vm_access.c | 6 +----- security/yama/yama_lsm.c | 11 +++-------- 8 files changed, 33 insertions(+), 68 deletions(-) (limited to 'mm') diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index c44f002e8f6b..858602494096 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -2610,17 +2610,10 @@ pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task) if (pid < 2) return -EPERM; if (pid != task_pid_vnr(current)) { - - read_lock(&tasklist_lock); - - p = find_task_by_vpid(pid); - /* make sure task cannot go away while we operate on it */ - if (p) get_task_struct(p); - - read_unlock(&tasklist_lock); - - if (p == NULL) return -ESRCH; + p = find_get_task_by_vpid(pid); + if (!p) + return -ESRCH; } ret = pfm_task_incompatible(ctx, p); diff --git a/include/linux/sched.h b/include/linux/sched.h index 166144c04ef6..ce5a27304b03 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1489,6 +1489,11 @@ static inline struct thread_info *task_thread_info(struct task_struct *task) extern struct task_struct *find_task_by_vpid(pid_t nr); extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); +/* + * find a task by its virtual pid and get the task struct + */ +extern struct task_struct *find_get_task_by_vpid(pid_t nr); + extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); extern void wake_up_new_task(struct task_struct *tsk); diff --git a/kernel/futex.c b/kernel/futex.c index 
7f719d110908..1f450e092c74 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -862,24 +862,6 @@ static void put_pi_state(struct futex_pi_state *pi_state) } } -/* - * Look up the task based on what TID userspace gave us. - * We dont trust it. - */ -static struct task_struct *futex_find_get_task(pid_t pid) -{ - struct task_struct *p; - - rcu_read_lock(); - p = find_task_by_vpid(pid); - if (p) - get_task_struct(p); - - rcu_read_unlock(); - - return p; -} - #ifdef CONFIG_FUTEX_PI /* @@ -1183,7 +1165,7 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key, */ if (!pid) return -ESRCH; - p = futex_find_get_task(pid); + p = find_get_task_by_vpid(pid); if (!p) return -ESRCH; diff --git a/kernel/pid.c b/kernel/pid.c index 5d30c87e3c42..ed6c343fe50d 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -343,6 +343,19 @@ struct task_struct *find_task_by_vpid(pid_t vnr) return find_task_by_pid_ns(vnr, task_active_pid_ns(current)); } +struct task_struct *find_get_task_by_vpid(pid_t nr) +{ + struct task_struct *task; + + rcu_read_lock(); + task = find_task_by_vpid(nr); + if (task) + get_task_struct(task); + rcu_read_unlock(); + + return task; +} + struct pid *get_task_pid(struct task_struct *task, enum pid_type type) { struct pid *pid; diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 5e1d713c8e61..21fec73d45d4 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -1103,21 +1103,6 @@ int ptrace_request(struct task_struct *child, long request, return ret; } -static struct task_struct *ptrace_get_task_struct(pid_t pid) -{ - struct task_struct *child; - - rcu_read_lock(); - child = find_task_by_vpid(pid); - if (child) - get_task_struct(child); - rcu_read_unlock(); - - if (!child) - return ERR_PTR(-ESRCH); - return child; -} - #ifndef arch_ptrace_attach #define arch_ptrace_attach(child) do { } while (0) #endif @@ -1135,9 +1120,9 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, goto out; } - child = ptrace_get_task_struct(pid); - if (IS_ERR(child)) { - ret = PTR_ERR(child); + child = find_get_task_by_vpid(pid); + if (!child) { + ret = -ESRCH; goto out; } @@ -1281,9 +1266,9 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid, goto out; } - child = ptrace_get_task_struct(pid); - if (IS_ERR(child)) { - ret = PTR_ERR(child); + child = find_get_task_by_vpid(pid); + if (!child) { + ret = -ESRCH; goto out; } diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 4559e914452b..4e62a4a8fa91 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -194,11 +194,7 @@ static int fill_stats_for_pid(pid_t pid, struct taskstats *stats) { struct task_struct *tsk; - rcu_read_lock(); - tsk = find_task_by_vpid(pid); - if (tsk) - get_task_struct(tsk); - rcu_read_unlock(); + tsk = find_get_task_by_vpid(pid); if (!tsk) return -ESRCH; fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats); diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c index 8973cd231ece..16424b9ae424 100644 --- a/mm/process_vm_access.c +++ b/mm/process_vm_access.c @@ -197,11 +197,7 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter, } /* Get process information */ - rcu_read_lock(); - task = find_task_by_vpid(pid); - if (task) - get_task_struct(task); - rcu_read_unlock(); + task = find_get_task_by_vpid(pid); if (!task) { rc = -ESRCH; goto free_proc_pages; diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index 8298e094f4f7..ffda91a4a1aa 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c @@ -250,15 +250,10 @@ int 
yama_task_prctl(int option, unsigned long arg2, unsigned long arg3, } else { struct task_struct *tracer; - rcu_read_lock(); - tracer = find_task_by_vpid(arg2); - if (tracer) - get_task_struct(tracer); - else + tracer = find_get_task_by_vpid(arg2); + if (!tracer) { rc = -EINVAL; - rcu_read_unlock(); - - if (tracer) { + } else { rc = yama_ptracer_add(tracer, myself); put_task_struct(tracer); } -- cgit v1.2.3 From 4fd39c23fe78c903c6e62a506d08bc2003d8eaed Mon Sep 17 00:00:00 2001 From: Pravin Shedge Date: Tue, 6 Feb 2018 15:41:03 -0800 Subject: mm/userfaultfd.c: remove duplicate include These duplicate includes have been found with scripts/checkincludes.pl but they have been removed manually to avoid removing false positives. Link: http://lkml.kernel.org/r/1512580957-6071-1-git-send-email-pravin.shedge4linux@gmail.com Signed-off-by: Pravin Shedge Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/userfaultfd.c | 1 - 1 file changed, 1 deletion(-) (limited to 'mm') diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 81192701964d..39791b81ede7 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include "internal.h" -- cgit v1.2.3 From e7c98df5981e0ee54f153bb79f117e382a7b2887 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 6 Feb 2018 15:41:06 -0800 Subject: mm: remove unneeded kallsyms include The file was converted from print_symbol() to %pSR a while ago in commit 071361d3473e ("mm: Convert print_symbol to %pSR"). kallsyms does not seem to be needed anymore. Link: http://lkml.kernel.org/r/20171208025616.16267-3-sergey.senozhatsky@gmail.com Signed-off-by: Sergey Senozhatsky Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 53373b7a1512..5b744b30a195 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -59,7 +59,6 @@ #include #include #include -#include #include #include #include @@ -767,9 +766,6 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, dump_page(page, "bad pte"); pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); - /* - * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y - */ pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n", vma->vm_file, vma->vm_ops ? vma->vm_ops->fault : NULL, -- cgit v1.2.3 From 937f0c2675a1ad6f94e0768dbb5379954d9953ab Mon Sep 17 00:00:00 2001 From: Yaowei Bai Date: Tue, 6 Feb 2018 15:41:18 -0800 Subject: mm/memblock: memblock_is_map/region_memory can be boolean Make memblock_is_map/region_memory return bool due to these two functions only using either true or false as its return value. No functional change. 
Link: http://lkml.kernel.org/r/1513266622-15860-2-git-send-email-baiyaowei@cmss.chinamobile.com Signed-off-by: Yaowei Bai Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memblock.h | 4 ++-- mm/memblock.c | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 7ed0f7782d16..8be5077efb5f 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -332,8 +332,8 @@ void memblock_enforce_memory_limit(phys_addr_t memory_limit); void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size); void memblock_mem_limit_remove_map(phys_addr_t limit); bool memblock_is_memory(phys_addr_t addr); -int memblock_is_map_memory(phys_addr_t addr); -int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); +bool memblock_is_map_memory(phys_addr_t addr); +bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size); bool memblock_is_reserved(phys_addr_t addr); bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); diff --git a/mm/memblock.c b/mm/memblock.c index 46aacdfa4f4d..5a9ca2a1751b 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1654,7 +1654,7 @@ bool __init_memblock memblock_is_memory(phys_addr_t addr) return memblock_search(&memblock.memory, addr) != -1; } -int __init_memblock memblock_is_map_memory(phys_addr_t addr) +bool __init_memblock memblock_is_map_memory(phys_addr_t addr) { int i = memblock_search(&memblock.memory, addr); @@ -1690,13 +1690,13 @@ int __init_memblock memblock_search_pfn_nid(unsigned long pfn, * RETURNS: * 0 if false, non-zero if true */ -int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) +bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) { int idx = memblock_search(&memblock.memory, base); phys_addr_t end = base + memblock_cap_size(base, &size); if (idx == -1) - return 0; + return false; return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size) >= end; } -- cgit v1.2.3 From b7701a5f2ee8e64244d5ccbb90cbe81b940c546e Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Tue, 6 Feb 2018 15:42:13 -0800 Subject: mm: docs: fixup punctuation so that kernel-doc will properly recognize the parameter and function descriptions. Link: http://lkml.kernel.org/r/1516700871-22279-2-git-send-email-rppt@linux.vnet.ibm.com Signed-off-by: Mike Rapoport Cc: Jonathan Corbet Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/ksm.c | 2 +- mm/memcontrol.c | 4 ++-- mm/mlock.c | 2 +- mm/nommu.c | 2 +- mm/zpool.c | 44 ++++++++++++++++++++++---------------------- 5 files changed, 27 insertions(+), 27 deletions(-) (limited to 'mm') diff --git a/mm/ksm.c b/mm/ksm.c index c406f75957ad..293721f5da70 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2302,7 +2302,7 @@ next_mm: /** * ksm_do_scan - the ksm scanner main worker function. - * @scan_npages - number of pages we want to scan before we return. + * @scan_npages: number of pages we want to scan before we return. 
*/ static void ksm_do_scan(unsigned int scan_npages) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0937f2c52c7d..3793e22977c0 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5818,8 +5818,8 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) /** * mem_cgroup_uncharge_skmem - uncharge socket memory - * @memcg - memcg to uncharge - * @nr_pages - number of pages to uncharge + * @memcg: memcg to uncharge + * @nr_pages: number of pages to uncharge */ void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) { diff --git a/mm/mlock.c b/mm/mlock.c index f7f54fd2e13f..79398200e423 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -157,7 +157,7 @@ static void __munlock_isolation_failed(struct page *page) /** * munlock_vma_page - munlock a vma page - * @page - page to be unlocked, either a normal page or THP page head + * @page: page to be unlocked, either a normal page or THP page head * * returns the size of the page as a page mask (0 for normal page, * HPAGE_PMD_NR - 1 for THP head page) diff --git a/mm/nommu.c b/mm/nommu.c index 4b9864b17cb0..ebb6e618dade 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1836,7 +1836,7 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, } /** - * @access_remote_vm - access another process' address space + * access_remote_vm - access another process' address space * @mm: the mm_struct of the target address space * @addr: start address to access * @buf: source or destination buffer diff --git a/mm/zpool.c b/mm/zpool.c index e1e7aa6d1d06..be67bcffb9ef 100644 --- a/mm/zpool.c +++ b/mm/zpool.c @@ -101,7 +101,7 @@ static void zpool_put_driver(struct zpool_driver *driver) /** * zpool_has_pool() - Check if the pool driver is available - * @type The type of the zpool to check (e.g. zbud, zsmalloc) + * @type: The type of the zpool to check (e.g. zbud, zsmalloc) * * This checks if the @type pool driver is available. This will try to load * the requested module, if needed, but there is no guarantee the module will @@ -136,10 +136,10 @@ EXPORT_SYMBOL(zpool_has_pool); /** * zpool_create_pool() - Create a new zpool - * @type The type of the zpool to create (e.g. zbud, zsmalloc) - * @name The name of the zpool (e.g. zram0, zswap) - * @gfp The GFP flags to use when allocating the pool. - * @ops The optional ops callback. + * @type: The type of the zpool to create (e.g. zbud, zsmalloc) + * @name: The name of the zpool (e.g. zram0, zswap) + * @gfp: The GFP flags to use when allocating the pool. + * @ops: The optional ops callback. * * This creates a new zpool of the specified type. The gfp flags will be * used when allocating memory, if the implementation supports it. If the @@ -201,7 +201,7 @@ struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp, /** * zpool_destroy_pool() - Destroy a zpool - * @pool The zpool to destroy. + * @pool: The zpool to destroy. * * Implementations must guarantee this to be thread-safe, * however only when destroying different pools. The same @@ -224,7 +224,7 @@ void zpool_destroy_pool(struct zpool *zpool) /** * zpool_get_type() - Get the type of the zpool - * @pool The zpool to check + * @pool: The zpool to check * * This returns the type of the pool. * @@ -239,10 +239,10 @@ const char *zpool_get_type(struct zpool *zpool) /** * zpool_malloc() - Allocate memory - * @pool The zpool to allocate from. - * @size The amount of memory to allocate. - * @gfp The GFP flags to use when allocating memory. 
- * @handle Pointer to the handle to set + * @pool: The zpool to allocate from. + * @size: The amount of memory to allocate. + * @gfp: The GFP flags to use when allocating memory. + * @handle: Pointer to the handle to set * * This allocates the requested amount of memory from the pool. * The gfp flags will be used when allocating memory, if the @@ -261,8 +261,8 @@ int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp, /** * zpool_free() - Free previously allocated memory - * @pool The zpool that allocated the memory. - * @handle The handle to the memory to free. + * @pool: The zpool that allocated the memory. + * @handle: The handle to the memory to free. * * This frees previously allocated memory. This does not guarantee * that the pool will actually free memory, only that the memory @@ -280,9 +280,9 @@ void zpool_free(struct zpool *zpool, unsigned long handle) /** * zpool_shrink() - Shrink the pool size - * @pool The zpool to shrink. - * @pages The number of pages to shrink the pool. - * @reclaimed The number of pages successfully evicted. + * @pool: The zpool to shrink. + * @pages: The number of pages to shrink the pool. + * @reclaimed: The number of pages successfully evicted. * * This attempts to shrink the actual memory size of the pool * by evicting currently used handle(s). If the pool was @@ -304,9 +304,9 @@ int zpool_shrink(struct zpool *zpool, unsigned int pages, /** * zpool_map_handle() - Map a previously allocated handle into memory - * @pool The zpool that the handle was allocated from - * @handle The handle to map - * @mm How the memory should be mapped + * @pool: The zpool that the handle was allocated from + * @handle: The handle to map + * @mm: How the memory should be mapped * * This maps a previously allocated handle into memory. The @mm * param indicates to the implementation how the memory will be @@ -332,8 +332,8 @@ void *zpool_map_handle(struct zpool *zpool, unsigned long handle, /** * zpool_unmap_handle() - Unmap a previously mapped handle - * @pool The zpool that the handle was allocated from - * @handle The handle to unmap + * @pool: The zpool that the handle was allocated from + * @handle: The handle to unmap * * This unmaps a previously mapped handle. Any locks or other * actions that the implementation took in zpool_map_handle() @@ -347,7 +347,7 @@ void zpool_unmap_handle(struct zpool *zpool, unsigned long handle) /** * zpool_get_total_size() - The total size of the pool - * @pool The zpool to check + * @pool: The zpool to check * * This returns the total size in bytes of the pool. * -- cgit v1.2.3 From f144c390f9059d9efafe54c4eb22bb13a2cb5534 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Tue, 6 Feb 2018 15:42:16 -0800 Subject: mm: docs: fix parameter names mismatch There are several places where parameter descriptions do no match the actual code. Fix it. 
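The rule this patch and the punctuation fixup above enforce, shown on a made-up prototype: kernel-doc only picks up a description when the @-line uses a colon and the name matches the parameter in the code:

/**
 * shrink_pool() - release unused pages back from a pool
 * @pool: the pool to shrink (note the colon; "@pool -" is not recognized,
 *        and a name that does not match the prototype is dropped with a
 *        kernel-doc warning)
 * @nr_pages: how many pages to try to release
 *
 * Return: the number of pages actually released.
 */
int shrink_pool(struct example_pool *pool, unsigned int nr_pages);
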
Link: http://lkml.kernel.org/r/1516700871-22279-3-git-send-email-rppt@linux.vnet.ibm.com Signed-off-by: Mike Rapoport Cc: Jonathan Corbet Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/bootmem.c | 2 +- mm/maccess.c | 2 +- mm/memcontrol.c | 2 +- mm/process_vm_access.c | 2 +- mm/swap.c | 4 ++-- mm/z3fold.c | 4 ++-- mm/zbud.c | 4 ++-- mm/zpool.c | 20 ++++++++++---------- 8 files changed, 20 insertions(+), 20 deletions(-) (limited to 'mm') diff --git a/mm/bootmem.c b/mm/bootmem.c index 6aef64254203..9e197987b67d 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -410,7 +410,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, /** * free_bootmem - mark a page range as usable - * @addr: starting physical address of the range + * @physaddr: starting physical address of the range * @size: size of the range in bytes * * Partial pages will be considered reserved and left as they are. diff --git a/mm/maccess.c b/mm/maccess.c index 78f9274dd49d..ec00be51a24f 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -70,7 +70,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_write); * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address. * @dst: Destination address, in kernel space. This buffer must be at * least @count bytes long. - * @src: Unsafe address. + * @unsafe_addr: Unsafe address. * @count: Maximum number of bytes to copy, including the trailing NUL. * * Copies a NUL-terminated string from unsafe address to kernel buffer. diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3793e22977c0..13b35ffa021e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -917,7 +917,7 @@ int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, /** * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page * @page: the page - * @zone: zone of the page + * @pgdat: pgdat of the page * * This function is only safe when following the LRU page isolation * and putback protocol: the LRU lock must be held, and the page must diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c index 16424b9ae424..f24c297dba6f 100644 --- a/mm/process_vm_access.c +++ b/mm/process_vm_access.c @@ -25,7 +25,7 @@ /** * process_vm_rw_pages - read/write pages from task specified * @pages: array of pointers to pages we want to copy - * @start_offset: offset in page to start copying from/to + * @offset: offset in page to start copying from/to * @len: number of bytes to copy * @iter: where to copy to/from locally * @vm_write: 0 means copy from, 1 means copy to diff --git a/mm/swap.c b/mm/swap.c index 10568b1548d4..567a7b96e41d 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -913,11 +913,11 @@ EXPORT_SYMBOL(__pagevec_lru_add); * @pvec: Where the resulting entries are placed * @mapping: The address_space to search * @start: The starting entry index - * @nr_entries: The maximum number of entries + * @nr_pages: The maximum number of pages * @indices: The cache indices corresponding to the entries in @pvec * * pagevec_lookup_entries() will search for and return a group of up - * to @nr_entries pages and shadow entries in the mapping. All + * to @nr_pages pages and shadow entries in the mapping. All * entries are placed in @pvec. pagevec_lookup_entries() takes a * reference against actual pages in @pvec. 
* diff --git a/mm/z3fold.c b/mm/z3fold.c index 39e19125d6a0..d589d318727f 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -769,7 +769,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) /** * z3fold_reclaim_page() - evicts allocations from a pool page and frees it * @pool: pool from which a page will attempt to be evicted - * @retires: number of pages on the LRU list for which eviction will + * @retries: number of pages on the LRU list for which eviction will * be attempted before failing * * z3fold reclaim is different from normal system reclaim in that it is done @@ -779,7 +779,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) * z3fold and the user, however. * * To avoid these, this is how z3fold_reclaim_page() should be called: - + * * The user detects a page should be reclaimed and calls z3fold_reclaim_page(). * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and * call the user-defined eviction handler with the pool and handle as diff --git a/mm/zbud.c b/mm/zbud.c index b42322e50f63..28458f7d1e84 100644 --- a/mm/zbud.c +++ b/mm/zbud.c @@ -466,7 +466,7 @@ void zbud_free(struct zbud_pool *pool, unsigned long handle) /** * zbud_reclaim_page() - evicts allocations from a pool page and frees it * @pool: pool from which a page will attempt to be evicted - * @retires: number of pages on the LRU list for which eviction will + * @retries: number of pages on the LRU list for which eviction will * be attempted before failing * * zbud reclaim is different from normal system reclaim in that the reclaim is @@ -476,7 +476,7 @@ void zbud_free(struct zbud_pool *pool, unsigned long handle) * the user, however. * * To avoid these, this is how zbud_reclaim_page() should be called: - + * * The user detects a page should be reclaimed and calls zbud_reclaim_page(). * zbud_reclaim_page() will remove a zbud page from the pool LRU list and call * the user-defined eviction handler with the pool and handle as arguments. diff --git a/mm/zpool.c b/mm/zpool.c index be67bcffb9ef..f8cb83e7699b 100644 --- a/mm/zpool.c +++ b/mm/zpool.c @@ -201,7 +201,7 @@ struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp, /** * zpool_destroy_pool() - Destroy a zpool - * @pool: The zpool to destroy. + * @zpool: The zpool to destroy. * * Implementations must guarantee this to be thread-safe, * however only when destroying different pools. The same @@ -224,7 +224,7 @@ void zpool_destroy_pool(struct zpool *zpool) /** * zpool_get_type() - Get the type of the zpool - * @pool: The zpool to check + * @zpool: The zpool to check * * This returns the type of the pool. * @@ -239,7 +239,7 @@ const char *zpool_get_type(struct zpool *zpool) /** * zpool_malloc() - Allocate memory - * @pool: The zpool to allocate from. + * @zpool: The zpool to allocate from. * @size: The amount of memory to allocate. * @gfp: The GFP flags to use when allocating memory. * @handle: Pointer to the handle to set @@ -261,7 +261,7 @@ int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp, /** * zpool_free() - Free previously allocated memory - * @pool: The zpool that allocated the memory. + * @zpool: The zpool that allocated the memory. * @handle: The handle to the memory to free. * * This frees previously allocated memory. This does not guarantee @@ -280,7 +280,7 @@ void zpool_free(struct zpool *zpool, unsigned long handle) /** * zpool_shrink() - Shrink the pool size - * @pool: The zpool to shrink. + * @zpool: The zpool to shrink. 
* @pages: The number of pages to shrink the pool. * @reclaimed: The number of pages successfully evicted. * @@ -304,11 +304,11 @@ int zpool_shrink(struct zpool *zpool, unsigned int pages, /** * zpool_map_handle() - Map a previously allocated handle into memory - * @pool: The zpool that the handle was allocated from + * @zpool: The zpool that the handle was allocated from * @handle: The handle to map - * @mm: How the memory should be mapped + * @mapmode: How the memory should be mapped * - * This maps a previously allocated handle into memory. The @mm + * This maps a previously allocated handle into memory. The @mapmode * param indicates to the implementation how the memory will be * used, i.e. read-only, write-only, read-write. If the * implementation does not support it, the memory will be treated @@ -332,7 +332,7 @@ void *zpool_map_handle(struct zpool *zpool, unsigned long handle, /** * zpool_unmap_handle() - Unmap a previously mapped handle - * @pool: The zpool that the handle was allocated from + * @zpool: The zpool that the handle was allocated from * @handle: The handle to unmap * * This unmaps a previously mapped handle. Any locks or other @@ -347,7 +347,7 @@ void zpool_unmap_handle(struct zpool *zpool, unsigned long handle) /** * zpool_get_total_size() - The total size of the pool - * @pool: The zpool to check + * @zpool: The zpool to check * * This returns the total size in bytes of the pool. * -- cgit v1.2.3 From a5d09bed7ff7e463ad7328e8738deb551c6bbc1e Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Tue, 6 Feb 2018 15:42:19 -0800 Subject: mm: docs: add blank lines to silence sphinx "Unexpected indentation" errors Link: http://lkml.kernel.org/r/1516700871-22279-4-git-send-email-rppt@linux.vnet.ibm.com Signed-off-by: Mike Rapoport Cc: Jonathan Corbet Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/pagewalk.c | 1 + mm/process_vm_access.c | 2 ++ mm/vmscan.c | 1 + 3 files changed, 4 insertions(+) (limited to 'mm') diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 23a3e415ac2c..8d2da5dec1e0 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -265,6 +265,7 @@ static int __walk_page_range(unsigned long start, unsigned long end, * pte_entry(), and/or hugetlb_entry(). If you don't set up for some of these * callbacks, the associated entries/pages are just ignored. * The return values of these callbacks are commonly defined like below: + * * - 0 : succeeded to handle the current entry, and if you don't reach the * end address yet, continue to walk. * - >0 : succeeded to handle the current entry, and return to the caller diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c index f24c297dba6f..a447092d4635 100644 --- a/mm/process_vm_access.c +++ b/mm/process_vm_access.c @@ -147,6 +147,7 @@ static int process_vm_rw_single_vec(unsigned long addr, * @riovcnt: size of rvec array * @flags: currently unused * @vm_write: 0 if reading from other process, 1 if writing to other process + * * Returns the number of bytes read/written or error code. May * return less bytes than expected if an error occurs during the copying * process. @@ -249,6 +250,7 @@ free_proc_pages: * @riovcnt: size of rvec array * @flags: currently unused * @vm_write: 0 if reading from other process, 1 if writing to other process + * * Returns the number of bytes read/written or error code. May * return less bytes than expected if an error occurs during the copying * process. 
diff --git a/mm/vmscan.c b/mm/vmscan.c index fdd3fc6be862..444749669187 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1595,6 +1595,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, * found will be decremented. * * Restrictions: + * * (1) Must be called with an elevated refcount on the page. This is a * fundamentnal difference from isolate_lru_pages (which is called * without a stable reference). -- cgit v1.2.3
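For reference, the Sphinx rule behind the last patch, shown on a made-up kernel-doc comment: a list must be preceded by a blank line, otherwise "Unexpected indentation" is reported when the docs are built.

/* Before: Sphinx complains because the list starts right after "Locking:". */
/**
 * example_walk - walk some structure
 * @arg: opaque cookie passed to the callbacks
 *
 * Locking:
 * - the caller must hold the relevant lock
 * - callbacks must not sleep
 */

/* After: a blank comment line before the list keeps Sphinx happy. */
/**
 * example_walk - walk some structure
 * @arg: opaque cookie passed to the callbacks
 *
 * Locking:
 *
 * - the caller must hold the relevant lock
 * - callbacks must not sleep
 */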