author | Linus Torvalds <torvalds@linux-foundation.org> | 2021-11-11 14:31:47 -0800 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-11-11 14:31:47 -0800 |
commit | dbf49896187fd58c577fa1574a338e4f3672b4b2 (patch) | |
tree | e35ef2bb3cbb97d21547c8101419e33b5c5e9d80 /arch | |
parent | 6d76f6eb46cbf8334b37352f2c2908329d028286 (diff) | |
parent | b873e986816a0b8408c177b2c52a6915cca8713c (diff) | |
download | linux-dbf49896187fd58c577fa1574a338e4f3672b4b2.tar.bz2 | |
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
"The post-linux-next material.
7 patches.
Subsystems affected by this patch series (all mm): debug,
slab-generic, migration, memcg, and kasan"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
kasan: add kasan mode messages when kasan init
mm: unexport {,un}lock_page_memcg
mm: unexport folio_memcg_{,un}lock
mm/migrate.c: remove MIGRATE_PFN_LOCKED
mm: migrate: simplify the file-backed pages validation when migrating its mapping
mm: allow only SLUB on PREEMPT_RT
mm/page_owner.c: modify the type of argument "order" in some functions
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm64/mm/kasan_init.c | 2 |
-rw-r--r-- | arch/powerpc/kvm/book3s_hv_uvmem.c | 4 |
2 files changed, 3 insertions, 3 deletions
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index ec276f75fa05..c12cd700598f 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -310,7 +310,7 @@ void __init kasan_init(void)
 	kasan_init_depth();
 #if defined(CONFIG_KASAN_GENERIC)
 	/* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
-	pr_info("KernelAddressSanitizer initialized\n");
+	pr_info("KernelAddressSanitizer initialized (generic)\n");
 #endif
 }
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index a7061ee3b157..28c436df9935 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -560,7 +560,7 @@ static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
 			  gpa, 0, page_shift);
 
 	if (ret == U_SUCCESS)
-		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
+		*mig.dst = migrate_pfn(pfn);
 	else {
 		unlock_page(dpage);
 		__free_page(dpage);
@@ -774,7 +774,7 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma,
 		}
 	}
 
-	*mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	*mig.dst = migrate_pfn(page_to_pfn(dpage));
 	migrate_vma_pages(&mig);
 out_finalize:
 	migrate_vma_finalize(&mig);