-rw-r--r--   Documentation/vm/hmm.rst                    2
-rw-r--r--   arch/arm64/mm/kasan_init.c                  2
-rw-r--r--   arch/powerpc/kvm/book3s_hv_uvmem.c          4
-rw-r--r--   drivers/gpu/drm/amd/amdkfd/kfd_migrate.c    2
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_dmem.c      4
-rw-r--r--   include/linux/migrate.h                     1
-rw-r--r--   include/linux/page_owner.h                 12
-rw-r--r--   init/Kconfig                                2
-rw-r--r--   lib/test_hmm.c                              5
-rw-r--r--   mm/kasan/hw_tags.c                         14
-rw-r--r--   mm/kasan/sw_tags.c                          2
-rw-r--r--   mm/memcontrol.c                             4
-rw-r--r--   mm/migrate.c                              151
-rw-r--r--   mm/page_owner.c                             6
14 files changed, 61 insertions, 150 deletions
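The common thread in the mm/migrate.c and driver changes below is the removal of MIGRATE_PFN_LOCKED: migrate_vma_collect_pmd() now only keeps source pages whose lock it can take on the spot, so every collected page is locked by definition and the flag carries no information. Device drivers therefore stop OR-ing it into the dst array; destination pages must still be locked with lock_page(). A minimal sketch of the resulting driver-side convention, assuming a hypothetical helper named example_alloc_dst (identifiers follow the Documentation/vm/hmm.rst example; error handling trimmed):

        /* Allocate and lock a destination page, then publish it in dst[]. */
        static int example_alloc_dst(struct vm_area_struct *vma, unsigned long addr,
                                     unsigned long *src, unsigned long *dst,
                                     unsigned long i)
        {
                struct page *dpage;

                dpage = alloc_page_vma(GFP_HIGHUSER, vma, addr);
                if (!dpage)
                        return -ENOMEM;
                lock_page(dpage);

                dst[i] = migrate_pfn(page_to_pfn(dpage));  /* no MIGRATE_PFN_LOCKED */
                if (src[i] & MIGRATE_PFN_WRITE)
                        dst[i] |= MIGRATE_PFN_WRITE;
                return 0;
        }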
diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index a14c2938e7af..f2a59ed82ed3 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -360,7 +360,7 @@ between device driver specific code and shared common code:
    system memory page, locks the page with ``lock_page()``, and fills in the
    ``dst`` array entry with::
 
-     dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+     dst[i] = migrate_pfn(page_to_pfn(dpage));
 
    Now that the driver knows that this page is being migrated, it can
    invalidate device private MMU mappings and copy device private memory
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index ec276f75fa05..c12cd700598f 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -310,7 +310,7 @@ void __init kasan_init(void)
 	kasan_init_depth();
 #if defined(CONFIG_KASAN_GENERIC)
 	/* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
-	pr_info("KernelAddressSanitizer initialized\n");
+	pr_info("KernelAddressSanitizer initialized (generic)\n");
 #endif
 }
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index a7061ee3b157..28c436df9935 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -560,7 +560,7 @@ static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
 			  gpa, 0, page_shift);
 
 	if (ret == U_SUCCESS)
-		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
+		*mig.dst = migrate_pfn(pfn);
 	else {
 		unlock_page(dpage);
 		__free_page(dpage);
@@ -774,7 +774,7 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma,
 		}
 	}
 
-	*mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	*mig.dst = migrate_pfn(page_to_pfn(dpage));
 	migrate_vma_pages(&mig);
 out_finalize:
 	migrate_vma_finalize(&mig);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 6d8634e40b3b..d43bfd8b35ae 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -317,7 +317,6 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
 		svm_migrate_get_vram_page(prange, migrate->dst[i]);
 		migrate->dst[i] = migrate_pfn(migrate->dst[i]);
-		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
 		src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
 				      DMA_TO_DEVICE);
 		r = dma_mapping_error(dev, src[i]);
@@ -610,7 +609,6 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 			 dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
 
 		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
-		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
 		j++;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 92987daa5e17..3828aafd3ac4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -166,7 +166,7 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
 		goto error_dma_unmap;
 	mutex_unlock(&svmm->mutex);
 
-	args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	args->dst[0] = migrate_pfn(page_to_pfn(dpage));
 	return 0;
 
 error_dma_unmap:
@@ -602,7 +602,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
 		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
 	if (src & MIGRATE_PFN_WRITE)
 		*pfn |= NVIF_VMM_PFNMAP_V0_W;
-	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	return migrate_pfn(page_to_pfn(dpage));
 
 out_dma_unmap:
 	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
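For reference, the mpfn values all four drivers fill in pack a pfn and flag bits into one unsigned long. The helpers involved are the real ones from include/linux/migrate.h, quoted here for context (modulo version drift):

        static inline unsigned long migrate_pfn(unsigned long pfn)
        {
                return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
        }

        static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
        {
                if (!(mpfn & MIGRATE_PFN_VALID))
                        return NULL;
                return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
        }

With bit 2 (the old MIGRATE_PFN_LOCKED) retired, only VALID, MIGRATE and WRITE remain below MIGRATE_PFN_SHIFT, as the next hunk shows.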
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index eeb818c4fc78..4850cc5bf813 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -110,7 +110,6 @@ static inline int migrate_misplaced_page(struct page *page,
  */
 #define MIGRATE_PFN_VALID	(1UL << 0)
 #define MIGRATE_PFN_MIGRATE	(1UL << 1)
-#define MIGRATE_PFN_LOCKED	(1UL << 2)
 #define MIGRATE_PFN_WRITE	(1UL << 3)
 #define MIGRATE_PFN_SHIFT	6
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 43c638c51c1f..119a0c9d2a8b 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -8,9 +8,9 @@
 extern struct static_key_false page_owner_inited;
 extern struct page_ext_operations page_owner_ops;
 
-extern void __reset_page_owner(struct page *page, unsigned int order);
+extern void __reset_page_owner(struct page *page, unsigned short order);
 extern void __set_page_owner(struct page *page,
-			unsigned int order, gfp_t gfp_mask);
+			unsigned short order, gfp_t gfp_mask);
 extern void __split_page_owner(struct page *page, unsigned int nr);
 extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
@@ -18,14 +18,14 @@ extern void __dump_page_owner(const struct page *page);
 extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 					pg_data_t *pgdat, struct zone *zone);
 
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
 {
 	if (static_branch_unlikely(&page_owner_inited))
 		__reset_page_owner(page, order);
 }
 
 static inline void set_page_owner(struct page *page,
-			unsigned int order, gfp_t gfp_mask)
+			unsigned short order, gfp_t gfp_mask)
 {
 	if (static_branch_unlikely(&page_owner_inited))
 		__set_page_owner(page, order, gfp_mask);
@@ -52,7 +52,7 @@ static inline void dump_page_owner(const struct page *page)
 		__dump_page_owner(page);
 }
 #else
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
 {
 }
 static inline void set_page_owner(struct page *page,
@@ -60,7 +60,7 @@ static inline void set_page_owner(struct page *page,
 {
 }
 static inline void split_page_owner(struct page *page,
-			unsigned int order)
+			unsigned short order)
 {
 }
 static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
diff --git a/init/Kconfig b/init/Kconfig
index 21b1f4870c80..45bcaa8e7481 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1896,6 +1896,7 @@ choice
 
 config SLAB
 	bool "SLAB"
+	depends on !PREEMPT_RT
 	select HAVE_HARDENED_USERCOPY_ALLOCATOR
 	help
 	  The regular slab allocator that is established and known to work
@@ -1916,6 +1917,7 @@ config SLUB
 config SLOB
 	depends on EXPERT
 	bool "SLOB (Simple Allocator)"
+	depends on !PREEMPT_RT
 	help
 	  SLOB replaces the stock allocator with a drastically simpler
 	  allocator. SLOB is generally more space efficient but
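Two independent cleanups sit here. The Kconfig hunks restrict PREEMPT_RT kernels to SLUB, since only SLUB has been adapted to RT's locking rules. The page_owner hunks narrow order from unsigned int to unsigned short: an order is bounded by MAX_ORDER (11 by default) and comfortably fits in 16 bits, which lets it share a word with the migrate reason. An abridged, illustrative view of the tracked record (the real definition lives in mm/page_owner.c and carries more fields, such as free_handle, timestamps and pid):

        struct page_owner {
                unsigned short order;           /* was unsigned int */
                short last_migrate_reason;      /* now packs beside order */
                gfp_t gfp_mask;
                depot_stack_handle_t handle;    /* allocation stack trace */
        };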
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index c259842f6d44..e2ce8f9b7605 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -613,8 +613,7 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
 		 */
 		rpage->zone_device_data = dmirror;
 
-		*dst = migrate_pfn(page_to_pfn(dpage)) |
-			    MIGRATE_PFN_LOCKED;
+		*dst = migrate_pfn(page_to_pfn(dpage));
 		if ((*src & MIGRATE_PFN_WRITE) ||
 		    (!spage && args->vma->vm_flags & VM_WRITE))
 			*dst |= MIGRATE_PFN_WRITE;
@@ -1137,7 +1136,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
 		lock_page(dpage);
 		xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
 		copy_highpage(dpage, spage);
-		*dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+		*dst = migrate_pfn(page_to_pfn(dpage));
 		if (*src & MIGRATE_PFN_WRITE)
 			*dst |= MIGRATE_PFN_WRITE;
 	}
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index dc892119e88f..7355cb534e4f 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -106,6 +106,16 @@ static int __init early_kasan_flag_stacktrace(char *arg)
 }
 early_param("kasan.stacktrace", early_kasan_flag_stacktrace);
 
+static inline const char *kasan_mode_info(void)
+{
+	if (kasan_mode == KASAN_MODE_ASYNC)
+		return "async";
+	else if (kasan_mode == KASAN_MODE_ASYMM)
+		return "asymm";
+	else
+		return "sync";
+}
+
 /* kasan_init_hw_tags_cpu() is called for each CPU. */
 void kasan_init_hw_tags_cpu(void)
 {
@@ -177,7 +187,9 @@ void __init kasan_init_hw_tags(void)
 		break;
 	}
 
-	pr_info("KernelAddressSanitizer initialized\n");
+	pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, stacktrace=%s)\n",
+		kasan_mode_info(),
+		kasan_stack_collection_enabled() ? "on" : "off");
 }
 
 void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index bd3f540feb47..77f13f391b57 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -42,7 +42,7 @@ void __init kasan_init_sw_tags(void)
 	for_each_possible_cpu(cpu)
 		per_cpu(prng_state, cpu) = (u32)get_cycles();
 
-	pr_info("KernelAddressSanitizer initialized\n");
+	pr_info("KernelAddressSanitizer initialized (sw-tags)\n");
 }
 
 /*
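With these hunks, the KASAN boot banner now identifies which flavor and mode are active instead of printing one generic line. Rendering the three format strings above, the possible banners would read (the hw-tags fields depend on the kasan.mode and kasan.stacktrace boot parameters; mode=sync, stacktrace=on shown as the defaults):

        KernelAddressSanitizer initialized (generic)
        KernelAddressSanitizer initialized (sw-tags)
        KernelAddressSanitizer initialized (hw-tags, mode=sync, stacktrace=on)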
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 508bcea7df56..781605e92015 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2058,13 +2058,11 @@ again:
 	memcg->move_lock_task = current;
 	memcg->move_lock_flags = flags;
 }
-EXPORT_SYMBOL(folio_memcg_lock);
 
 void lock_page_memcg(struct page *page)
 {
 	folio_memcg_lock(page_folio(page));
 }
-EXPORT_SYMBOL(lock_page_memcg);
 
 static void __folio_memcg_unlock(struct mem_cgroup *memcg)
 {
@@ -2092,13 +2090,11 @@ void folio_memcg_unlock(struct folio *folio)
 {
 	__folio_memcg_unlock(folio_memcg(folio));
 }
-EXPORT_SYMBOL(folio_memcg_unlock);
 
 void unlock_page_memcg(struct page *page)
 {
 	folio_memcg_unlock(page_folio(page));
 }
-EXPORT_SYMBOL(unlock_page_memcg);
 
 struct obj_stock {
 #ifdef CONFIG_MEMCG_KMEM
diff --git a/mm/migrate.c b/mm/migrate.c
index a11e948593df..cf25b00f03c8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -404,12 +404,6 @@ int folio_migrate_mapping(struct address_space *mapping,
 	newzone = folio_zone(newfolio);
 
 	xas_lock_irq(&xas);
-	if (folio_ref_count(folio) != expected_count ||
-	    xas_load(&xas) != folio) {
-		xas_unlock_irq(&xas);
-		return -EAGAIN;
-	}
-
 	if (!folio_ref_freeze(folio, expected_count)) {
 		xas_unlock_irq(&xas);
 		return -EAGAIN;
@@ -2368,7 +2362,6 @@ again:
 		 * can't be dropped from it).
 		 */
 		get_page(page);
-		migrate->cpages++;
 
 		/*
 		 * Optimize for the common case where page is only mapped once
@@ -2378,7 +2371,7 @@ again:
 		if (trylock_page(page)) {
 			pte_t swp_pte;
 
-			mpfn |= MIGRATE_PFN_LOCKED;
+			migrate->cpages++;
 			ptep_get_and_clear(mm, addr, ptep);
 
 			/* Setup special migration page table entry */
@@ -2412,6 +2405,9 @@ again:
 
 			if (pte_present(pte))
 				unmapped++;
+		} else {
+			put_page(page);
+			mpfn = 0;
 		}
 
 next:
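The behavioural change in migrate_vma_collect_pmd(): instead of counting every candidate and letting a later pass try to lock it, collection now succeeds only for pages locked on the spot, and an unlockable page is dropped immediately rather than left for best-effort handling. Condensed from the hunks above (a fragment, not compilable on its own):

        get_page(page);
        if (trylock_page(page)) {
                /* Locked on the spot: this page is a migration candidate. */
                migrate->cpages++;
                ptep_get_and_clear(mm, addr, ptep);
                /* ... install the special migration pte ... */
        } else {
                /* Lock contended: drop the reference and skip this pte. */
                put_page(page);
                mpfn = 0;
        }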
@@ -2516,15 +2512,17 @@ static bool migrate_vma_check_page(struct page *page)
 }
 
 /*
- * migrate_vma_prepare() - lock pages and isolate them from the lru
+ * migrate_vma_unmap() - replace page mapping with special migration pte entry
  * @migrate: migrate struct containing all migration information
  *
- * This locks pages that have been collected by migrate_vma_collect(). Once each
- * page is locked it is isolated from the lru (for non-device pages). Finally,
- * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
- * migrated by concurrent kernel threads.
+ * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
+ * special migration pte entry and check if it has been pinned. Pinned pages are
+ * restored because we cannot migrate them.
+ *
+ * This is the last step before we call the device driver callback to allocate
+ * destination memory and copy contents of original page over to new page.
  */
-static void migrate_vma_prepare(struct migrate_vma *migrate)
+static void migrate_vma_unmap(struct migrate_vma *migrate)
 {
 	const unsigned long npages = migrate->npages;
 	const unsigned long start = migrate->start;
@@ -2533,32 +2531,12 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
 
 	lru_add_drain();
 
-	for (i = 0; (i < npages) && migrate->cpages; i++) {
+	for (i = 0; i < npages; i++) {
 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
-		bool remap = true;
 
 		if (!page)
 			continue;
 
-		if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
-			/*
-			 * Because we are migrating several pages there can be
-			 * a deadlock between 2 concurrent migration where each
-			 * are waiting on each other page lock.
-			 *
-			 * Make migrate_vma() a best effort thing and backoff
-			 * for any page we can not lock right away.
-			 */
-			if (!trylock_page(page)) {
-				migrate->src[i] = 0;
-				migrate->cpages--;
-				put_page(page);
-				continue;
-			}
-			remap = false;
-			migrate->src[i] |= MIGRATE_PFN_LOCKED;
-		}
-
 		/* ZONE_DEVICE pages are not on LRU */
 		if (!is_zone_device_page(page)) {
 			if (!PageLRU(page) && allow_drain) {
@@ -2568,16 +2546,9 @@
 			}
 
 			if (isolate_lru_page(page)) {
-				if (remap) {
-					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-					migrate->cpages--;
-					restore++;
-				} else {
-					migrate->src[i] = 0;
-					unlock_page(page);
-					migrate->cpages--;
-					put_page(page);
-				}
+				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+				migrate->cpages--;
+				restore++;
 				continue;
 			}
 
@@ -2585,80 +2556,20 @@
 			put_page(page);
 		}
 
-		if (!migrate_vma_check_page(page)) {
-			if (remap) {
-				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-				migrate->cpages--;
-				restore++;
-
-				if (!is_zone_device_page(page)) {
-					get_page(page);
-					putback_lru_page(page);
-				}
-			} else {
-				migrate->src[i] = 0;
-				unlock_page(page);
-				migrate->cpages--;
+		if (page_mapped(page))
+			try_to_migrate(page, 0);
 
-				if (!is_zone_device_page(page))
-					putback_lru_page(page);
-				else
-					put_page(page);
+		if (page_mapped(page) || !migrate_vma_check_page(page)) {
+			if (!is_zone_device_page(page)) {
+				get_page(page);
+				putback_lru_page(page);
 			}
-		}
-	}
-
-	for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
-		struct page *page = migrate_pfn_to_page(migrate->src[i]);
-
-		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
-			continue;
-
-		remove_migration_pte(page, migrate->vma, addr, page);
-
-		migrate->src[i] = 0;
-		unlock_page(page);
-		put_page(page);
-		restore--;
-	}
-}
-
-/*
- * migrate_vma_unmap() - replace page mapping with special migration pte entry
- * @migrate: migrate struct containing all migration information
- *
- * Replace page mapping (CPU page table pte) with a special migration pte entry
- * and check again if it has been pinned. Pinned pages are restored because we
- * cannot migrate them.
- *
- * This is the last step before we call the device driver callback to allocate
- * destination memory and copy contents of original page over to new page.
- */
-static void migrate_vma_unmap(struct migrate_vma *migrate)
-{
-	const unsigned long npages = migrate->npages;
-	const unsigned long start = migrate->start;
-	unsigned long addr, i, restore = 0;
-
-	for (i = 0; i < npages; i++) {
-		struct page *page = migrate_pfn_to_page(migrate->src[i]);
-		if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
+			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+			migrate->cpages--;
+			restore++;
 			continue;
-
-		if (page_mapped(page)) {
-			try_to_migrate(page, 0);
-			if (page_mapped(page))
-				goto restore;
 		}
-
-		if (migrate_vma_check_page(page))
-			continue;
-
-restore:
-		migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-		migrate->cpages--;
-		restore++;
 	}
 
 	for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
@@ -2671,12 +2582,8 @@ restore:
 
 		migrate->src[i] = 0;
 		unlock_page(page);
+		put_page(page);
 		restore--;
-
-		if (is_zone_device_page(page))
-			put_page(page);
-		else
-			putback_lru_page(page);
 	}
 }
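With migrate_vma_prepare() folded into migrate_vma_unmap(), a single pass now isolates, unmaps and validates each source page, and any page that is still mapped or pinned is handed straight back. A condensed sketch of the merged loop (a fragment; LRU isolation and the restore pass elided):

        for (i = 0; i < npages; i++) {
                struct page *page = migrate_pfn_to_page(migrate->src[i]);

                if (!page)
                        continue;
                /* LRU isolation for non-device pages happens first (not shown). */
                if (page_mapped(page))
                        try_to_migrate(page, 0);
                if (page_mapped(page) || !migrate_vma_check_page(page)) {
                        /* Still mapped or pinned: give the page back. */
                        migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
                        migrate->cpages--;
                        restore++;
                }
        }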
Once these are allocated and copied, the caller must update each * corresponding entry in the dst array with the pfn value of the destination - * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set - * (destination pages must have their struct pages locked, via lock_page()). + * page and with MIGRATE_PFN_VALID. Destination pages must be locked via + * lock_page(). * * Note that the caller does not have to migrate all the pages that are marked * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from @@ -2770,8 +2677,6 @@ int migrate_vma_setup(struct migrate_vma *args) migrate_vma_collect(args); if (args->cpages) - migrate_vma_prepare(args); - if (args->cpages) migrate_vma_unmap(args); /* diff --git a/mm/page_owner.c b/mm/page_owner.c index 79936db59859..4f924957ce7a 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -125,7 +125,7 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags) return handle; } -void __reset_page_owner(struct page *page, unsigned int order) +void __reset_page_owner(struct page *page, unsigned short order) { int i; struct page_ext *page_ext; @@ -149,7 +149,7 @@ void __reset_page_owner(struct page *page, unsigned int order) static inline void __set_page_owner_handle(struct page_ext *page_ext, depot_stack_handle_t handle, - unsigned int order, gfp_t gfp_mask) + unsigned short order, gfp_t gfp_mask) { struct page_owner *page_owner; int i; @@ -169,7 +169,7 @@ static inline void __set_page_owner_handle(struct page_ext *page_ext, } } -noinline void __set_page_owner(struct page *page, unsigned int order, +noinline void __set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask) { struct page_ext *page_ext = lookup_page_ext(page); |