author		David S. Miller <davem@davemloft.net>	2021-07-23 15:59:46 +0100
committer	David S. Miller <davem@davemloft.net>	2021-07-23 16:13:06 +0100
commit		5af84df962dd6699e3972fda7a0c8b579fb3ab04 (patch)
tree		0a66f54c99c0c0d22588304d030ecb752487dfa1 /mm
parent		090597b4a9c1b81b03fd7cfb4ba458a0e7a78b31 (diff)
parent		9f42f674a89200d4f465a7db6070e079f3c6145f (diff)
download	linux-5af84df962dd6699e3972fda7a0c8b579fb3ab04.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Conflicts are simple overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c		 5
-rw-r--r--	mm/kasan/kasan.h	12
-rw-r--r--	mm/page_alloc.c		28
-rw-r--r--	mm/slab.h		15
-rw-r--r--	mm/slub.c		93
5 files changed, 77 insertions, 76 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 924553aa8f78..dfc940d5221d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5440,8 +5440,9 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
continue;
}
- refs = min3(pages_per_huge_page(h) - pfn_offset,
- (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);
+ /* vaddr may not be aligned to PAGE_SIZE */
+ refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
+ (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
if (pages || vmas)
record_subpages_vmas(mem_map_offset(page, pfn_offset),
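
The hunk above reorders the min3() arguments and aligns vaddr down before converting the remaining VMA span to pages. A standalone illustration (not part of the patch; the addresses and counts below are made up) of how the unaligned form can undercount by one page:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

static unsigned long min3(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a < b ? a : b;
	return m < c ? m : c;
}

int main(void)
{
	/* Made-up values: last 4K page of a 2MB huge page, vaddr not page aligned. */
	unsigned long vm_end = 0x200000;
	unsigned long vaddr  = 0x1ff080;
	unsigned long pages_per_huge_page = 512, pfn_offset = 511, remainder = 64;

	unsigned long old_refs = min3(pages_per_huge_page - pfn_offset,
				      (vm_end - vaddr) >> PAGE_SHIFT, remainder);
	unsigned long new_refs = min3(pages_per_huge_page - pfn_offset, remainder,
				      (vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);

	/* old_refs = 0 (one page lost to rounding down), new_refs = 1 */
	printf("old refs = %lu, fixed refs = %lu\n", old_refs, new_refs);
	return 0;
}
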
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 98e3059bfea4..d739cdd1621a 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -9,6 +9,7 @@
#ifdef CONFIG_KASAN_HW_TAGS
#include <linux/static_key.h>
+#include "../slab.h"
DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
extern bool kasan_flag_async __ro_after_init;
@@ -387,6 +388,17 @@ static inline void kasan_unpoison(const void *addr, size_t size, bool init)
if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
return;
+ /*
+ * Explicitly initialize the memory with the precise object size to
+ * avoid overwriting the SLAB redzone. This disables initialization in
+ * the arch code and may thus lead to performance penalty. The penalty
+ * is accepted since SLAB redzones aren't enabled in production builds.
+ */
+ if (__slub_debug_enabled() &&
+ init && ((unsigned long)size & KASAN_GRANULE_MASK)) {
+ init = false;
+ memzero_explicit((void *)addr, size);
+ }
size = round_up(size, KASAN_GRANULE_SIZE);
hw_set_mem_tag_range((void *)addr, size, tag, init);
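
The check added above only matters when the object size is not a multiple of the KASAN granule: hardware tag-based init can only cover whole granules, so rounding the size up would also initialize the adjacent slub_debug redzone. A small userspace sketch of that arithmetic, assuming the 16-byte MTE granule (the object size is illustrative):

#include <stdio.h>

#define KASAN_GRANULE_SIZE	16UL
#define KASAN_GRANULE_MASK	(KASAN_GRANULE_SIZE - 1)

int main(void)
{
	unsigned long size = 24;	/* illustrative object size, not granule aligned */

	if (size & KASAN_GRANULE_MASK) {
		unsigned long rounded = (size + KASAN_GRANULE_MASK) & ~KASAN_GRANULE_MASK;

		/* Hardware init of whole granules would touch bytes past the object. */
		printf("object %lu bytes, granule init covers %lu (%lu redzone bytes)\n",
		       size, rounded, rounded - size);
	}
	return 0;
}
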
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3b97e17806be..3e97e68aef7a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3820,7 +3820,7 @@ static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
#endif /* CONFIG_FAIL_PAGE_ALLOC */
-static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
return __should_fail_alloc_page(gfp_mask, order);
}
@@ -5221,9 +5221,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
unsigned int alloc_flags = ALLOC_WMARK_LOW;
int nr_populated = 0, nr_account = 0;
- if (unlikely(nr_pages <= 0))
- return 0;
-
/*
* Skip populated array elements to determine if any pages need
* to be allocated before disabling IRQs.
@@ -5231,19 +5228,35 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
while (page_array && nr_populated < nr_pages && page_array[nr_populated])
nr_populated++;
+ /* No pages requested? */
+ if (unlikely(nr_pages <= 0))
+ goto out;
+
/* Already populated array? */
if (unlikely(page_array && nr_pages - nr_populated == 0))
- return nr_populated;
+ goto out;
/* Use the single page allocator for one page. */
if (nr_pages - nr_populated == 1)
goto failed;
+#ifdef CONFIG_PAGE_OWNER
+ /*
+ * PAGE_OWNER may recurse into the allocator to allocate space to
+ * save the stack with pagesets.lock held. Releasing/reacquiring
+ * removes much of the performance benefit of bulk allocation so
+ * force the caller to allocate one page at a time as it'll have
+ * similar performance to added complexity to the bulk allocator.
+ */
+ if (static_branch_unlikely(&page_owner_inited))
+ goto failed;
+#endif
+
/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
gfp &= gfp_allowed_mask;
alloc_gfp = gfp;
if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
- return 0;
+ goto out;
gfp = alloc_gfp;
/* Find an allowed local zone that meets the low watermark. */
@@ -5311,6 +5324,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
+out:
return nr_populated;
failed_irq:
@@ -5326,7 +5340,7 @@ failed:
nr_populated++;
}
- return nr_populated;
+ goto out;
}
EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
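
The page_alloc.c changes funnel every early exit of __alloc_pages_bulk() through the new out: label, so the caller is always told how many array slots actually hold pages, and they bail out to the single-page path when page_owner is enabled to avoid recursing into the allocator under pagesets.lock. The control flow can be sketched in a standalone program (allocation and prepare_alloc_pages() are stubbed out; the _sketch names and prepare_fails flag are invented for illustration):

#include <stdio.h>

static unsigned long bulk_sketch(int nr_pages, void **page_array, int prepare_fails)
{
	int nr_populated = 0;

	/* Skip slots the caller already filled (same scan as in the hunk above). */
	while (page_array && nr_populated < nr_pages && page_array[nr_populated])
		nr_populated++;

	/* No pages requested? Still report what was already there. */
	if (nr_pages <= 0)
		goto out;

	/* Array already fully populated? */
	if (page_array && nr_pages - nr_populated == 0)
		goto out;

	/* Stand-in for prepare_alloc_pages() failing. */
	if (prepare_fails)
		goto out;

	/* ... bulk allocation of the remaining slots would go here ... */
out:
	return nr_populated;	/* never 0 when the caller already had pages */
}

int main(void)
{
	void *array[4] = { (void *)1, (void *)1, NULL, NULL };

	/* A plain "return 0" on the failure path would hide the 2 existing pages. */
	printf("populated = %lu\n", bulk_sketch(4, array, 1));
	return 0;
}
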
diff --git a/mm/slab.h b/mm/slab.h
index 67e06637ff2e..f997fd5e42c8 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -216,10 +216,18 @@ DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
+static inline bool __slub_debug_enabled(void)
+{
+ return static_branch_unlikely(&slub_debug_enabled);
+}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
+static inline bool __slub_debug_enabled(void)
+{
+ return false;
+}
#endif
/*
@@ -229,11 +237,10 @@ static inline void print_tracking(struct kmem_cache *s, void *object)
*/
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
-#ifdef CONFIG_SLUB_DEBUG
- VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
- if (static_branch_unlikely(&slub_debug_enabled))
+ if (IS_ENABLED(CONFIG_SLUB_DEBUG))
+ VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
+ if (__slub_debug_enabled())
return s->flags & flags;
-#endif
return false;
}
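
Moving __slub_debug_enabled() into slab.h and replacing the #ifdef in kmem_cache_debug_flags() with IS_ENABLED(CONFIG_SLUB_DEBUG) keeps the debug branch visible to the compiler in every configuration while still letting it be folded away when debugging is compiled out. A minimal userspace sketch of the same pattern (the SLUB_DEBUG_SKETCH macro and *_sketch helpers are stand-ins, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Compile-time switch standing in for CONFIG_SLUB_DEBUG / IS_ENABLED(). */
#ifndef SLUB_DEBUG_SKETCH
#define SLUB_DEBUG_SKETCH 0
#endif

static bool slub_debug_key;	/* stand-in for the slub_debug_enabled static key */

static inline bool __slub_debug_enabled_sketch(void)
{
	if (!SLUB_DEBUG_SKETCH)		/* compile-time constant, branch folds away */
		return false;
	return slub_debug_key;
}

static bool debug_flags_sketch(unsigned int cache_flags, unsigned int query)
{
	/* Both branches always compile; only one survives optimization. */
	if (__slub_debug_enabled_sketch())
		return cache_flags & query;
	return false;
}

int main(void)
{
	slub_debug_key = true;
	printf("%d\n", debug_flags_sketch(0x4, 0x4));	/* 1 only with -DSLUB_DEBUG_SKETCH=1 */
	return 0;
}
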
diff --git a/mm/slub.c b/mm/slub.c
index dc863c1ea324..090fa14628f9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -26,7 +26,6 @@
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
-#include <linux/stackdepot.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
@@ -120,25 +119,11 @@
*/
#ifdef CONFIG_SLUB_DEBUG
-
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
-
-static inline bool __slub_debug_enabled(void)
-{
- return static_branch_unlikely(&slub_debug_enabled);
-}
-
-#else /* CONFIG_SLUB_DEBUG */
-
-static inline bool __slub_debug_enabled(void)
-{
- return false;
-}
-
#endif /* CONFIG_SLUB_DEBUG */
static inline bool kmem_cache_debug(struct kmem_cache *s)
@@ -221,8 +206,8 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
#define TRACK_ADDRS_COUNT 16
struct track {
unsigned long addr; /* Called from address */
-#ifdef CONFIG_STACKDEPOT
- depot_stack_handle_t handle;
+#ifdef CONFIG_STACKTRACE
+ unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
#endif
int cpu; /* Was running on cpu */
int pid; /* Pid context */
@@ -626,27 +611,22 @@ static struct track *get_track(struct kmem_cache *s, void *object,
return kasan_reset_tag(p + alloc);
}
-#ifdef CONFIG_STACKDEPOT
-static depot_stack_handle_t save_stack_depot_trace(gfp_t flags)
-{
- unsigned long entries[TRACK_ADDRS_COUNT];
- depot_stack_handle_t handle;
- unsigned int nr_entries;
-
- nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 4);
- handle = stack_depot_save(entries, nr_entries, flags);
- return handle;
-}
-#endif
-
static void set_track(struct kmem_cache *s, void *object,
enum track_item alloc, unsigned long addr)
{
struct track *p = get_track(s, object, alloc);
if (addr) {
-#ifdef CONFIG_STACKDEPOT
- p->handle = save_stack_depot_trace(GFP_NOWAIT);
+#ifdef CONFIG_STACKTRACE
+ unsigned int nr_entries;
+
+ metadata_access_enable();
+ nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
+ TRACK_ADDRS_COUNT, 3);
+ metadata_access_disable();
+
+ if (nr_entries < TRACK_ADDRS_COUNT)
+ p->addrs[nr_entries] = 0;
#endif
p->addr = addr;
p->cpu = smp_processor_id();
@@ -673,19 +653,14 @@ static void print_track(const char *s, struct track *t, unsigned long pr_time)
pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
-#ifdef CONFIG_STACKDEPOT
+#ifdef CONFIG_STACKTRACE
{
- depot_stack_handle_t handle;
- unsigned long *entries;
- unsigned int nr_entries;
-
- handle = READ_ONCE(t->handle);
- if (!handle) {
- pr_err("object allocation/free stack trace missing\n");
- } else {
- nr_entries = stack_depot_fetch(handle, &entries);
- stack_trace_print(entries, nr_entries, 0);
- }
+ int i;
+ for (i = 0; i < TRACK_ADDRS_COUNT; i++)
+ if (t->addrs[i])
+ pr_err("\t%pS\n", (void *)t->addrs[i]);
+ else
+ break;
}
#endif
}
@@ -4059,26 +4034,18 @@ void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
objp = fixup_red_left(s, objp);
trackp = get_track(s, objp, TRACK_ALLOC);
kpp->kp_ret = (void *)trackp->addr;
-#ifdef CONFIG_STACKDEPOT
- {
- depot_stack_handle_t handle;
- unsigned long *entries;
- unsigned int nr_entries;
-
- handle = READ_ONCE(trackp->handle);
- if (handle) {
- nr_entries = stack_depot_fetch(handle, &entries);
- for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
- kpp->kp_stack[i] = (void *)entries[i];
- }
+#ifdef CONFIG_STACKTRACE
+ for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
+ kpp->kp_stack[i] = (void *)trackp->addrs[i];
+ if (!kpp->kp_stack[i])
+ break;
+ }
- trackp = get_track(s, objp, TRACK_FREE);
- handle = READ_ONCE(trackp->handle);
- if (handle) {
- nr_entries = stack_depot_fetch(handle, &entries);
- for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
- kpp->kp_free_stack[i] = (void *)entries[i];
- }
+ trackp = get_track(s, objp, TRACK_FREE);
+ for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
+ kpp->kp_free_stack[i] = (void *)trackp->addrs[i];
+ if (!kpp->kp_free_stack[i])
+ break;
}
#endif
#endif
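
The slub.c hunks revert the stack-depot based storage back to a fixed, zero-terminated array of TRACK_ADDRS_COUNT return addresses embedded in struct track, which print_track() and kmem_obj_info() then walk until the first zero entry. A userspace sketch of that storage convention (the capture is faked and the _sketch names are invented):

#include <stdio.h>
#include <string.h>

#define TRACK_ADDRS_COUNT 16

struct track_sketch {
	unsigned long addrs[TRACK_ADDRS_COUNT];
};

static void store_trace(struct track_sketch *t, const unsigned long *entries,
			unsigned int nr_entries)
{
	if (nr_entries > TRACK_ADDRS_COUNT)
		nr_entries = TRACK_ADDRS_COUNT;
	memcpy(t->addrs, entries, nr_entries * sizeof(entries[0]));
	if (nr_entries < TRACK_ADDRS_COUNT)
		t->addrs[nr_entries] = 0;	/* terminate short traces */
}

static void print_trace(const struct track_sketch *t)
{
	/* Walk until the array is exhausted or the zero terminator is hit. */
	for (int i = 0; i < TRACK_ADDRS_COUNT && t->addrs[i]; i++)
		printf("\t%#lx\n", t->addrs[i]);
}

int main(void)
{
	struct track_sketch t = { { 0 } };
	unsigned long fake[] = { 0xffffffff81123456UL, 0xffffffff81abcdefUL };

	store_trace(&t, fake, 2);
	print_trace(&t);
	return 0;
}
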