Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig            |   5
-rw-r--r--  mm/backing-dev.c      |   7
-rw-r--r--  mm/damon/core.c       |  20
-rw-r--r--  mm/damon/dbgfs.c      |  24
-rw-r--r--  mm/damon/vaddr-test.h |  79
-rw-r--r--  mm/damon/vaddr.c      |   2
-rw-r--r--  mm/filemap.c          |   2
-rw-r--r--  mm/highmem.c          |  34
-rw-r--r--  mm/hugetlb.c          |  38
-rw-r--r--  mm/memcontrol.c       | 108
-rw-r--r--  mm/memory_hotplug.c   |   1
-rw-r--r--  mm/shmem.c            |   3
-rw-r--r--  mm/slab.c             |   3
-rw-r--r--  mm/slab.h             |   2
-rw-r--r--  mm/slob.c             |   3
-rw-r--r--  mm/slub.c             |  17
-rw-r--r--  mm/swap.c             |   1
-rw-r--r--  mm/swap_slots.c       |   1
-rw-r--r--  mm/util.c             |   2
19 files changed, 192 insertions, 160 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 068ce591a13a..356f4f2c779e 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -428,7 +428,7 @@ config THP_SWAP
 # UP and nommu archs use km based percpu allocator
 #
 config NEED_PER_CPU_KM
-	depends on !SMP
+	depends on !SMP || !MMU
 	bool
 	default y
 
@@ -890,6 +890,9 @@ config MAPPING_DIRTY_HELPERS
 config KMAP_LOCAL
 	bool
 
+config KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
+	bool
+
 # struct io_mapping based helper. Selected by drivers that need them
 config IO_MAPPING
 	bool
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 1eead4761011..eae96dfe0261 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -945,6 +945,13 @@ void bdi_unregister(struct backing_dev_info *bdi)
 	wb_shutdown(&bdi->wb);
 	cgwb_bdi_unregister(bdi);
 
+	/*
+	 * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
+	 * update the global bdi_min_ratio.
+	 */
+	if (bdi->min_ratio)
+		bdi_set_min_ratio(bdi, 0);
+
 	if (bdi->dev) {
 		bdi_debug_unregister(bdi);
 		device_unregister(bdi->dev);
diff --git a/mm/damon/core.c b/mm/damon/core.c
index c381b3c525d0..e92497895202 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -282,7 +282,6 @@ int damon_set_targets(struct damon_ctx *ctx,
 	for (i = 0; i < nr_ids; i++) {
 		t = damon_new_target(ids[i]);
 		if (!t) {
-			pr_err("Failed to alloc damon_target\n");
 			/* The caller should do cleanup of the ids itself */
 			damon_for_each_target_safe(t, next, ctx)
 				damon_destroy_target(t);
@@ -312,16 +311,10 @@ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
 		unsigned long aggr_int, unsigned long primitive_upd_int,
 		unsigned long min_nr_reg, unsigned long max_nr_reg)
 {
-	if (min_nr_reg < 3) {
-		pr_err("min_nr_regions (%lu) must be at least 3\n",
-				min_nr_reg);
+	if (min_nr_reg < 3)
 		return -EINVAL;
-	}
-	if (min_nr_reg > max_nr_reg) {
-		pr_err("invalid nr_regions.  min (%lu) > max (%lu)\n",
-				min_nr_reg, max_nr_reg);
+	if (min_nr_reg > max_nr_reg)
 		return -EINVAL;
-	}
 
 	ctx->sample_interval = sample_int;
 	ctx->aggr_interval = aggr_int;
@@ -980,10 +973,11 @@ static unsigned long damos_wmark_wait_us(struct damos *scheme)
 
 static void kdamond_usleep(unsigned long usecs)
 {
-	if (usecs > 100 * 1000)
-		schedule_timeout_interruptible(usecs_to_jiffies(usecs));
+	/* See Documentation/timers/timers-howto.rst for the thresholds */
+	if (usecs > 20 * USEC_PER_MSEC)
+		schedule_timeout_idle(usecs_to_jiffies(usecs));
 	else
-		usleep_range(usecs, usecs + 1);
+		usleep_idle_range(usecs, usecs + 1);
 }
 
 /* Returns negative error code if it's not activated but should return */
@@ -1038,7 +1032,7 @@ static int kdamond_fn(void *data)
 				ctx->callback.after_sampling(ctx))
 			done = true;
 
-		usleep_range(ctx->sample_interval, ctx->sample_interval + 1);
+		kdamond_usleep(ctx->sample_interval);
 
 		if (ctx->primitive.check_accesses)
 			max_nr_accesses = ctx->primitive.check_accesses(ctx);
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index eccc14b34901..1efac0022e9a 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -32,7 +32,7 @@ static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos)
 	if (*ppos)
 		return ERR_PTR(-EINVAL);
 
-	kbuf = kmalloc(count + 1, GFP_KERNEL);
+	kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN);
 	if (!kbuf)
 		return ERR_PTR(-ENOMEM);
 
@@ -133,7 +133,7 @@ static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf,
 	char *kbuf;
 	ssize_t len;
 
-	kbuf = kmalloc(count, GFP_KERNEL);
+	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
 	if (!kbuf)
 		return -ENOMEM;
 
@@ -210,10 +210,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
 				&wmarks.low, &parsed);
 		if (ret != 18)
 			break;
-		if (!damos_action_valid(action)) {
-			pr_err("wrong action %d\n", action);
+		if (!damos_action_valid(action))
 			goto fail;
-		}
 
 		pos += parsed;
 		scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a,
@@ -452,7 +450,7 @@ static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf,
 	char *kbuf;
 	ssize_t len;
 
-	kbuf = kmalloc(count, GFP_KERNEL);
+	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
 	if (!kbuf)
 		return -ENOMEM;
 
@@ -578,7 +576,7 @@ static ssize_t dbgfs_kdamond_pid_read(struct file *file,
 	char *kbuf;
 	ssize_t len;
 
-	kbuf = kmalloc(count, GFP_KERNEL);
+	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
 	if (!kbuf)
 		return -ENOMEM;
 
@@ -877,12 +875,14 @@ static ssize_t dbgfs_monitor_on_write(struct file *file,
 		return -EINVAL;
 	}
 
+	mutex_lock(&damon_dbgfs_lock);
 	if (!strncmp(kbuf, "on", count)) {
 		int i;
 
 		for (i = 0; i < dbgfs_nr_ctxs; i++) {
 			if (damon_targets_empty(dbgfs_ctxs[i])) {
 				kfree(kbuf);
+				mutex_unlock(&damon_dbgfs_lock);
 				return -EINVAL;
 			}
 		}
@@ -892,6 +892,7 @@ static ssize_t dbgfs_monitor_on_write(struct file *file,
 	} else {
 		ret = -EINVAL;
 	}
+	mutex_unlock(&damon_dbgfs_lock);
 
 	if (!ret)
 		ret = count;
@@ -944,15 +945,16 @@ static int __init __damon_dbgfs_init(void)
 
 static int __init damon_dbgfs_init(void)
 {
-	int rc;
+	int rc = -ENOMEM;
 
+	mutex_lock(&damon_dbgfs_lock);
 	dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL);
 	if (!dbgfs_ctxs)
-		return -ENOMEM;
+		goto out;
 	dbgfs_ctxs[0] = dbgfs_new_ctx();
 	if (!dbgfs_ctxs[0]) {
 		kfree(dbgfs_ctxs);
-		return -ENOMEM;
+		goto out;
 	}
 	dbgfs_nr_ctxs = 1;
 
@@ -963,6 +965,8 @@ static int __init damon_dbgfs_init(void)
 		pr_err("%s: dbgfs init failed\n", __func__);
 	}
 
+out:
+	mutex_unlock(&damon_dbgfs_lock);
 	return rc;
 }
 
diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h
index ecfd0b2ed222..6a1b9272ea12 100644
--- a/mm/damon/vaddr-test.h
+++ b/mm/damon/vaddr-test.h
@@ -135,7 +135,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
 		struct damon_addr_range *three_regions,
 		unsigned long *expected, int nr_expected)
 {
-	struct damon_ctx *ctx = damon_new_ctx();
 	struct damon_target *t;
 	struct damon_region *r;
 	int i;
@@ -145,7 +144,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
 		r = damon_new_region(regions[i * 2], regions[i * 2 + 1]);
 		damon_add_region(r, t);
 	}
-	damon_add_target(ctx, t);
 
 	damon_va_apply_three_regions(t, three_regions);
 
@@ -154,8 +152,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
 		KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
 		KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
 	}
-
-	damon_destroy_ctx(ctx);
 }
 
 /*
@@ -252,60 +248,59 @@ static void damon_test_apply_three_regions4(struct kunit *test)
 			new_three_regions, expected, ARRAY_SIZE(expected));
 }
 
-static void damon_test_split_evenly(struct kunit *test)
+static void damon_test_split_evenly_fail(struct kunit *test,
+		unsigned long start, unsigned long end, unsigned int nr_pieces)
 {
-	struct damon_ctx *c = damon_new_ctx();
-	struct damon_target *t;
-	struct damon_region *r;
-	unsigned long i;
-
-	KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
-			-EINVAL);
-
-	t = damon_new_target(42);
-	r = damon_new_region(0, 100);
-	KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 0), -EINVAL);
+	struct damon_target *t = damon_new_target(42);
+	struct damon_region *r = damon_new_region(start, end);
 
 	damon_add_region(r, t);
-	KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 10), 0);
-	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 10u);
+	KUNIT_EXPECT_EQ(test,
+			damon_va_evenly_split_region(t, r, nr_pieces), -EINVAL);
+	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
 
-	i = 0;
 	damon_for_each_region(r, t) {
-		KUNIT_EXPECT_EQ(test, r->ar.start, i++ * 10);
-		KUNIT_EXPECT_EQ(test, r->ar.end, i * 10);
+		KUNIT_EXPECT_EQ(test, r->ar.start, start);
+		KUNIT_EXPECT_EQ(test, r->ar.end, end);
 	}
+
 	damon_free_target(t);
+}
+
+static void damon_test_split_evenly_succ(struct kunit *test,
+		unsigned long start, unsigned long end, unsigned int nr_pieces)
+{
+	struct damon_target *t = damon_new_target(42);
+	struct damon_region *r = damon_new_region(start, end);
+	unsigned long expected_width = (end - start) / nr_pieces;
+	unsigned long i = 0;
 
-	t = damon_new_target(42);
-	r = damon_new_region(5, 59);
 	damon_add_region(r, t);
-	KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 5), 0);
-	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
+	KUNIT_EXPECT_EQ(test,
+			damon_va_evenly_split_region(t, r, nr_pieces), 0);
+	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), nr_pieces);
 
-	i = 0;
 	damon_for_each_region(r, t) {
-		if (i == 4)
+		if (i == nr_pieces - 1)
 			break;
-		KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i++);
-		KUNIT_EXPECT_EQ(test, r->ar.end, 5 + 10 * i);
+		KUNIT_EXPECT_EQ(test,
+				r->ar.start, start + i++ * expected_width);
+		KUNIT_EXPECT_EQ(test, r->ar.end, start + i * expected_width);
 	}
-	KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i);
-	KUNIT_EXPECT_EQ(test, r->ar.end, 59ul);
+	KUNIT_EXPECT_EQ(test, r->ar.start, start + i * expected_width);
+	KUNIT_EXPECT_EQ(test, r->ar.end, end);
 	damon_free_target(t);
+}
 
-	t = damon_new_target(42);
-	r = damon_new_region(5, 6);
-	damon_add_region(r, t);
-	KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 2), -EINVAL);
-	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
+static void damon_test_split_evenly(struct kunit *test)
+{
+	KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
+			-EINVAL);
 
-	damon_for_each_region(r, t) {
-		KUNIT_EXPECT_EQ(test, r->ar.start, 5ul);
-		KUNIT_EXPECT_EQ(test, r->ar.end, 6ul);
-	}
-	damon_free_target(t);
-	damon_destroy_ctx(c);
+	damon_test_split_evenly_fail(test, 0, 100, 0);
+	damon_test_split_evenly_succ(test, 0, 100, 10);
+	damon_test_split_evenly_succ(test, 5, 59, 5);
+	damon_test_split_evenly_fail(test, 5, 6, 2);
 }
 
 static struct kunit_case damon_test_cases[] = {
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 35fe49080ee9..20a9a9d69eb1 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -13,6 +13,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
 #include <linux/pagewalk.h>
+#include <linux/sched/mm.h>
 
 #include "prmtv-common.h"
 
@@ -626,7 +627,6 @@ int damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t,
 	case DAMOS_STAT:
 		return 0;
 	default:
-		pr_warn("Wrong action %d\n", scheme->action);
 		return -EINVAL;
 	}
 
diff --git a/mm/filemap.c b/mm/filemap.c
index daa0e23a6ee6..39c4c46c6133 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3253,8 +3253,6 @@ static struct page *next_uptodate_page(struct page *page,
 			goto skip;
 		if (!PageUptodate(page) || PageReadahead(page))
 			goto skip;
-		if (PageHWPoison(page))
-			goto skip;
 		if (!trylock_page(page))
 			goto skip;
 		if (page->mapping != mapping)
diff --git a/mm/highmem.c b/mm/highmem.c
index 88f65f155845..762679050c9a 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -359,7 +359,6 @@ void kunmap_high(struct page *page)
 }
 EXPORT_SYMBOL(kunmap_high);
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
 		unsigned start2, unsigned end2)
 {
@@ -416,7 +415,6 @@ void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
 	BUG_ON((start1 | start2 | end1 | end2) != 0);
 }
 EXPORT_SYMBOL(zero_user_segments);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif /* CONFIG_HIGHMEM */
 
 #ifdef CONFIG_KMAP_LOCAL
@@ -503,16 +501,22 @@ static inline int kmap_local_calc_idx(int idx)
 
 static pte_t *__kmap_pte;
 
-static pte_t *kmap_get_pte(void)
+static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
 {
+	if (IS_ENABLED(CONFIG_KMAP_LOCAL_NON_LINEAR_PTE_ARRAY))
+		/*
+		 * Set by the arch if __kmap_pte[-idx] does not produce
+		 * the correct entry.
+		 */
+		return virt_to_kpte(vaddr);
 	if (!__kmap_pte)
 		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
-	return __kmap_pte;
+	return &__kmap_pte[-idx];
 }
 
 void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
-	pte_t pteval, *kmap_pte = kmap_get_pte();
+	pte_t pteval, *kmap_pte;
 	unsigned long vaddr;
 	int idx;
 
@@ -524,9 +528,10 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
 	preempt_disable();
 	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	BUG_ON(!pte_none(*(kmap_pte - idx)));
+	kmap_pte = kmap_get_pte(vaddr, idx);
+	BUG_ON(!pte_none(*kmap_pte));
 	pteval = pfn_pte(pfn, prot);
-	arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte - idx, pteval);
+	arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte, pteval);
 	arch_kmap_local_post_map(vaddr, pteval);
 	current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
 	preempt_enable();
@@ -559,7 +564,7 @@ EXPORT_SYMBOL(__kmap_local_page_prot);
 void kunmap_local_indexed(void *vaddr)
 {
 	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
-	pte_t *kmap_pte = kmap_get_pte();
+	pte_t *kmap_pte;
 	int idx;
 
 	if (addr < __fix_to_virt(FIX_KMAP_END) ||
@@ -584,8 +589,9 @@ void kunmap_local_indexed(void *vaddr)
 
 	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
 	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+	kmap_pte = kmap_get_pte(addr, idx);
 	arch_kmap_local_pre_unmap(addr);
-	pte_clear(&init_mm, addr, kmap_pte - idx);
+	pte_clear(&init_mm, addr, kmap_pte);
 	arch_kmap_local_post_unmap(addr);
 	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
 	kmap_local_idx_pop();
@@ -607,7 +613,7 @@ EXPORT_SYMBOL(kunmap_local_indexed);
 void __kmap_local_sched_out(void)
 {
 	struct task_struct *tsk = current;
-	pte_t *kmap_pte = kmap_get_pte();
+	pte_t *kmap_pte;
 	int i;
 
 	/* Clear kmaps */
@@ -634,8 +640,9 @@ void __kmap_local_sched_out(void)
 
 		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
 		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+		kmap_pte = kmap_get_pte(addr, idx);
 		arch_kmap_local_pre_unmap(addr);
-		pte_clear(&init_mm, addr, kmap_pte - idx);
+		pte_clear(&init_mm, addr, kmap_pte);
 		arch_kmap_local_post_unmap(addr);
 	}
 }
@@ -643,7 +650,7 @@ void __kmap_local_sched_out(void)
 void __kmap_local_sched_in(void)
 {
 	struct task_struct *tsk = current;
-	pte_t *kmap_pte = kmap_get_pte();
+	pte_t *kmap_pte;
 	int i;
 
 	/* Restore kmaps */
@@ -663,7 +670,8 @@ void __kmap_local_sched_in(void)
 		/* See comment in __kmap_local_sched_out() */
 		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
 		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-		set_pte_at(&init_mm, addr, kmap_pte - idx, pteval);
+		kmap_pte = kmap_get_pte(addr, idx);
+		set_pte_at(&init_mm, addr, kmap_pte, pteval);
 		arch_kmap_local_post_map(addr, pteval);
 	}
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e09159c957e3..a1baa198519a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1037,8 +1037,10 @@ void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
 	 */
 	struct resv_map *reservations = vma_resv_map(vma);
 
-	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
 		kref_put(&reservations->refs, resv_map_release);
+	}
 
 	reset_vma_resv_huge_pages(vma);
 }
@@ -2971,7 +2973,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
 	struct huge_bootmem_page *m = NULL; /* initialize for clang */
 	int nr_nodes, node;
 
-	if (nid >= nr_online_nodes)
+	if (nid != NUMA_NO_NODE && nid >= nr_online_nodes)
 		return 0;
 	/* do node specific alloc */
 	if (nid != NUMA_NO_NODE) {
@@ -4917,9 +4919,9 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 		move_huge_pte(vma, old_addr, new_addr, src_pte);
 	}
 
-	i_mmap_unlock_write(mapping);
 	flush_tlb_range(vma, old_end - len, old_end);
 	mmu_notifier_invalidate_range_end(&range);
+	i_mmap_unlock_write(mapping);
 
 	return len + old_addr - old_end;
 }
@@ -4937,6 +4939,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
 	struct mmu_notifier_range range;
+	bool force_flush = false;
 
 	WARN_ON(!is_vm_hugetlb_page(vma));
 	BUG_ON(start & ~huge_page_mask(h));
@@ -4965,10 +4968,8 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 		ptl = huge_pte_lock(h, mm, ptep);
 		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
 			spin_unlock(ptl);
-			/*
-			 * We just unmapped a page of PMDs by clearing a PUD.
-			 * The caller's TLB flush range should cover this area.
-			 */
+			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
+			force_flush = true;
 			continue;
 		}
 
@@ -5025,6 +5026,22 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 	}
 	mmu_notifier_invalidate_range_end(&range);
 	tlb_end_vma(tlb, vma);
+
+	/*
+	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
+	 * could defer the flush until now, since by holding i_mmap_rwsem we
+	 * guaranteed that the last refernece would not be dropped. But we must
+	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
+	 * dropped and the last reference to the shared PMDs page might be
+	 * dropped as well.
+	 *
+	 * In theory we could defer the freeing of the PMD pages as well, but
+	 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
+	 * detect sharing, so we cannot defer the release of the page either.
+	 * Instead, do flush now.
+	 */
+	if (force_flush)
+		tlb_flush_mmu_tlbonly(tlb);
 }
 
 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
@@ -5734,13 +5751,14 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	int ret = -ENOMEM;
 	struct page *page;
 	int writable;
-	bool new_pagecache_page = false;
+	bool page_in_pagecache = false;
 
 	if (is_continue) {
 		ret = -EFAULT;
 		page = find_lock_page(mapping, idx);
 		if (!page)
 			goto out;
+		page_in_pagecache = true;
 	} else if (!*pagep) {
 		/* If a page already exists, then it's UFFDIO_COPY for
 		 * a non-missing case. Return -EEXIST.
@@ -5828,7 +5846,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 		ret = huge_add_to_page_cache(page, mapping, idx);
 		if (ret)
 			goto out_release_nounlock;
-		new_pagecache_page = true;
+		page_in_pagecache = true;
 	}
 
 	ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
@@ -5892,7 +5910,7 @@ out_release_unlock:
 	if (vm_shared || is_continue)
 		unlock_page(page);
 out_release_nounlock:
-	if (!new_pagecache_page)
+	if (!page_in_pagecache)
 		restore_reserve_on_error(h, dst_vma, dst_addr, page);
 	put_page(page);
 	goto out;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 781605e92015..2ed5f2a0879d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -776,24 +776,6 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 	rcu_read_unlock();
 }
 
-/*
- * mod_objcg_mlstate() may be called with irq enabled, so
- * mod_memcg_lruvec_state() should be used.
- */
-static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
-				     struct pglist_data *pgdat,
-				     enum node_stat_item idx, int nr)
-{
-	struct mem_cgroup *memcg;
-	struct lruvec *lruvec;
-
-	rcu_read_lock();
-	memcg = obj_cgroup_memcg(objcg);
-	lruvec = mem_cgroup_lruvec(memcg, pgdat);
-	mod_memcg_lruvec_state(lruvec, idx, nr);
-	rcu_read_unlock();
-}
-
 /**
  * __count_memcg_events - account VM events in a cgroup
  * @memcg: the memory cgroup
@@ -2137,41 +2119,6 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
 }
 #endif
 
-/*
- * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
- * sequence used in this case to access content from object stock is slow.
- * To optimize for user context access, there are now two object stocks for
- * task context and interrupt context access respectively.
- *
- * The task context object stock can be accessed by disabling preemption only
- * which is cheap in non-preempt kernel. The interrupt context object stock
- * can only be accessed after disabling interrupt. User context code can
- * access interrupt object stock, but not vice versa.
- */
-static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
-{
-	struct memcg_stock_pcp *stock;
-
-	if (likely(in_task())) {
-		*pflags = 0UL;
-		preempt_disable();
-		stock = this_cpu_ptr(&memcg_stock);
-		return &stock->task_obj;
-	}
-
-	local_irq_save(*pflags);
-	stock = this_cpu_ptr(&memcg_stock);
-	return &stock->irq_obj;
-}
-
-static inline void put_obj_stock(unsigned long flags)
-{
-	if (likely(in_task()))
-		preempt_enable();
-	else
-		local_irq_restore(flags);
-}
-
 /**
  * consume_stock: Try to consume stocked charge on this cpu.
  * @memcg: memcg to consume from.
@@ -2816,6 +2763,59 @@ retry:
  */
 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
 
+/*
+ * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
+ * sequence used in this case to access content from object stock is slow.
+ * To optimize for user context access, there are now two object stocks for
+ * task context and interrupt context access respectively.
+ *
+ * The task context object stock can be accessed by disabling preemption only
+ * which is cheap in non-preempt kernel. The interrupt context object stock
+ * can only be accessed after disabling interrupt. User context code can
+ * access interrupt object stock, but not vice versa.
+ */
+static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
+{
+	struct memcg_stock_pcp *stock;
+
+	if (likely(in_task())) {
+		*pflags = 0UL;
+		preempt_disable();
+		stock = this_cpu_ptr(&memcg_stock);
+		return &stock->task_obj;
+	}
+
+	local_irq_save(*pflags);
+	stock = this_cpu_ptr(&memcg_stock);
+	return &stock->irq_obj;
+}
+
+static inline void put_obj_stock(unsigned long flags)
+{
+	if (likely(in_task()))
+		preempt_enable();
+	else
+		local_irq_restore(flags);
+}
+
+/*
+ * mod_objcg_mlstate() may be called with irq enabled, so
+ * mod_memcg_lruvec_state() should be used.
+ */
+static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
+				     struct pglist_data *pgdat,
+				     enum node_stat_item idx, int nr)
+{
+	struct mem_cgroup *memcg;
+	struct lruvec *lruvec;
+
+	rcu_read_lock();
+	memcg = obj_cgroup_memcg(objcg);
+	lruvec = mem_cgroup_lruvec(memcg, pgdat);
+	mod_memcg_lruvec_state(lruvec, idx, nr);
+	rcu_read_unlock();
+}
+
 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
 				 gfp_t gfp, bool new_page)
 {
@@ -5558,7 +5558,7 @@ static int mem_cgroup_move_account(struct page *page,
 
 	VM_BUG_ON(from == to);
 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
-	VM_BUG_ON(compound && !folio_test_multi(folio));
+	VM_BUG_ON(compound && !folio_test_large(folio));
 
 	/*
 	 * Prevent mem_cgroup_migrate() from looking at
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 852041f6be41..2a9627dc784c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -35,6 +35,7 @@
 #include <linux/memblock.h>
 #include <linux/compaction.h>
 #include <linux/rmap.h>
+#include <linux/module.h>
 
 #include <asm/tlbflush.h>
 
diff --git a/mm/shmem.c b/mm/shmem.c
index dc038ce78700..18f93c2d68f1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2303,6 +2303,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
 		INIT_LIST_HEAD(&info->swaplist);
 		simple_xattrs_init(&info->xattrs);
 		cache_no_acl(inode);
+		mapping_set_large_folios(inode->i_mapping);
 
 		switch (mode & S_IFMT) {
 		default:
@@ -3870,7 +3871,7 @@ static struct file_system_type shmem_fs_type = {
 	.parameters	= shmem_fs_parameters,
 #endif
 	.kill_sb	= kill_litter_super,
-	.fs_flags	= FS_USERNS_MOUNT | FS_THP_SUPPORT,
+	.fs_flags	= FS_USERNS_MOUNT,
 };
 
 int __init shmem_init(void)
diff --git a/mm/slab.c b/mm/slab.c
index da132a9ae6f8..ca4822f6b2b6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3733,14 +3733,13 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	if (!cachep)
 		return;
 
+	trace_kmem_cache_free(_RET_IP_, objp, cachep->name);
 	local_irq_save(flags);
 	debug_check_no_locks_freed(objp, cachep->object_size);
 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(objp, cachep->object_size);
 	__cache_free(cachep, objp, _RET_IP_);
 	local_irq_restore(flags);
-
-	trace_kmem_cache_free(_RET_IP_, objp, cachep->name);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
diff --git a/mm/slab.h b/mm/slab.h
index 58c01a34e5b8..56ad7eea3ddf 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -147,7 +147,7 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
 			  SLAB_TEMPORARY | SLAB_ACCOUNT)
 #else
-#define SLAB_CACHE_FLAGS (0)
+#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
 #endif
 
 /* Common flags available with current configuration */
diff --git a/mm/slob.c b/mm/slob.c
index 74d3f6e60666..03deee1e6a94 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -666,6 +666,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
 	kmemleak_free_recursive(b, c->flags);
+	trace_kmem_cache_free(_RET_IP_, b, c->name);
 	if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
 		struct slob_rcu *slob_rcu;
 		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
@@ -674,8 +675,6 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	} else {
 		__kmem_cache_free(b, c->size);
 	}
-
-	trace_kmem_cache_free(_RET_IP_, b, c->name);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
diff --git a/mm/slub.c b/mm/slub.c
index f7368bfffb7a..abe7db581d68 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3526,8 +3526,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	s = cache_from_obj(s, x);
 	if (!s)
 		return;
-	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
 	trace_kmem_cache_free(_RET_IP_, x, s->name);
+	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -5081,6 +5081,7 @@ struct loc_track {
 	unsigned long max;
 	unsigned long count;
 	struct location *loc;
+	loff_t idx;
 };
 
 static struct dentry *slab_debugfs_root;
@@ -6052,11 +6053,11 @@ __initcall(slab_sysfs_init);
 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
 static int slab_debugfs_show(struct seq_file *seq, void *v)
 {
-
-	struct location *l;
-	unsigned int idx = *(unsigned int *)v;
 	struct loc_track *t = seq->private;
+	struct location *l;
+	unsigned long idx;
 
+	idx = (unsigned long) t->idx;
 	if (idx < t->count) {
 		l = &t->loc[idx];
 
@@ -6105,16 +6106,18 @@ static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
 {
 	struct loc_track *t = seq->private;
 
-	v = ppos;
-	++*ppos;
+	t->idx = ++(*ppos);
 	if (*ppos <= t->count)
-		return v;
+		return ppos;
 
 	return NULL;
 }
 
 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
 {
+	struct loc_track *t = seq->private;
+
+	t->idx = *ppos;
 	return ppos;
 }
 
diff --git a/mm/swap.c b/mm/swap.c
index 1841c24682f8..e8c9dc6d0377 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -156,6 +156,7 @@ void put_pages_list(struct list_head *pages)
 	}
 
 	free_unref_page_list(pages);
+	INIT_LIST_HEAD(pages);
 }
 EXPORT_SYMBOL(put_pages_list);
 
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 16f706c55d92..2b5531840583 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -30,6 +30,7 @@
 #include <linux/swap_slots.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mutex.h>
 #include <linux/mm.h>
diff --git a/mm/util.c b/mm/util.c
index e58151a61255..741ba32a43ac 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -670,7 +670,7 @@ bool folio_mapped(struct folio *folio)
 {
 	long i, nr;
 
-	if (folio_test_single(folio))
+	if (!folio_test_large(folio))
 		return atomic_read(&folio->_mapcount) >= 0;
 	if (atomic_read(folio_mapcount_ptr(folio)) >= 0)
 		return true;