author		Linus Torvalds <torvalds@linux-foundation.org>	2021-01-24 12:16:34 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-01-24 12:16:34 -0800
commit		51306806426d0ffa4f9b11e65447092ae7d57ee7 (patch)
tree		50fbdda3dbfd0747ece31fc7091ca4a7a28e9a6c /mm/migrate.c
parent		fdbc80bdc4365078a0f7d65631171cb80e3ffd6e (diff)
parent		e82d891a63afebefde5d26971768f5cb91627f73 (diff)
download	linux-51306806426d0ffa4f9b11e65447092ae7d57ee7.tar.bz2
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "18 patches.

  Subsystems affected by this patch series: mm (pagealloc, memcg, kasan,
  memory-failure, and highmem), ubsan, proc, and MAINTAINERS"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  MAINTAINERS: add a couple more files to the Clang/LLVM section
  proc_sysctl: fix oops caused by incorrect command parameters
  powerpc/mm/highmem: use __set_pte_at() for kmap_local()
  mips/mm/highmem: use set_pte() for kmap_local()
  mm/highmem: prepare for overriding set_pte_at()
  sparc/mm/highmem: flush cache and TLB
  mm: fix page reference leak in soft_offline_page()
  ubsan: disable unsigned-overflow check for i386
  kasan, mm: fix resetting page_alloc tags for HW_TAGS
  kasan, mm: fix conflicts with init_on_alloc/free
  kasan: fix HW_TAGS boot parameters
  kasan: fix incorrect arguments passing in kasan_add_zero_shadow
  kasan: fix unaligned address is unhandled in kasan_remove_zero_shadow
  mm: fix numa stats for thp migration
  mm: memcg: fix memcg file_dirty numa stat
  mm: memcg/slab: optimize objcg stock draining
  mm: fix initialization of struct page for holes in memory layout
  x86/setup: don't remove E820_TYPE_RAM for pfn 0
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--	mm/migrate.c	23
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index ee5e612b4cd8..c0efe921bca5 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -402,6 +402,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	struct zone *oldzone, *newzone;
 	int dirty;
 	int expected_count = expected_page_refs(mapping, page) + extra_count;
+	int nr = thp_nr_pages(page);
 
 	if (!mapping) {
 		/* Anonymous page without mapping */
@@ -437,7 +438,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
-	page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
+	page_ref_add(newpage, nr); /* add cache reference */
 	if (PageSwapBacked(page)) {
 		__SetPageSwapBacked(newpage);
 		if (PageSwapCache(page)) {
@@ -459,7 +460,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	if (PageTransHuge(page)) {
 		int i;
 
-		for (i = 1; i < HPAGE_PMD_NR; i++) {
+		for (i = 1; i < nr; i++) {
 			xas_next(&xas);
 			xas_store(&xas, newpage);
 		}
@@ -470,7 +471,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
-	page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
+	page_ref_unfreeze(page, expected_count - nr);
 
 	xas_unlock(&xas);
 	/* Leave irq disabled to prevent preemption while updating stats */
@@ -493,17 +494,17 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 
-		__dec_lruvec_state(old_lruvec, NR_FILE_PAGES);
-		__inc_lruvec_state(new_lruvec, NR_FILE_PAGES);
+		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
+		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
 		if (PageSwapBacked(page) && !PageSwapCache(page)) {
-			__dec_lruvec_state(old_lruvec, NR_SHMEM);
-			__inc_lruvec_state(new_lruvec, NR_SHMEM);
+			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
+			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
 		}
 		if (dirty && mapping_can_writeback(mapping)) {
-			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
-			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
-			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
-			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
+			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
+			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
+			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
+			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
 		}
 	}
 	local_irq_enable();
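
Of the patches in this series, the ones that touch mm/migrate.c are "mm: fix numa stats for thp migration" and "mm: memcg: fix memcg file_dirty numa stat". The idea behind the hunks above is that when a THP is migrated, the file/shmem/dirty counters must be transferred by nr = thp_nr_pages(page) subpages rather than by one, otherwise every THP migration leaves the old node's stats too high and the new node's too low by nr - 1 pages. The standalone C program below is only a toy model of that accounting, not kernel code; struct node_stats and the migrate_stats_* helpers are invented for this sketch, while the real counters are the lruvec/zone statistics updated in the diff.

/* Toy model of the stat transfer done in migrate_page_move_mapping().
 * Not kernel code: the names here are made up for illustration only.
 */
#include <stdio.h>

struct node_stats {
	long nr_file_pages;	/* stands in for NR_FILE_PAGES on one node */
};

/* Old behaviour: always move one page's worth of state, even for a THP. */
static void migrate_stats_by_one(struct node_stats *from, struct node_stats *to)
{
	from->nr_file_pages -= 1;
	to->nr_file_pages += 1;
}

/* Fixed behaviour: move thp_nr_pages(page) worth of state. */
static void migrate_stats_by_nr(struct node_stats *from, struct node_stats *to,
				int nr)
{
	from->nr_file_pages -= nr;
	to->nr_file_pages += nr;
}

int main(void)
{
	int nr = 512;	/* subpages of a 2 MiB THP with 4 KiB base pages */
	struct node_stats node0 = { .nr_file_pages = nr };
	struct node_stats node1 = { .nr_file_pages = 0 };

	migrate_stats_by_one(&node0, &node1);
	printf("by one: node0=%ld node1=%ld (skewed by %d pages)\n",
	       node0.nr_file_pages, node1.nr_file_pages, nr - 1);

	node0.nr_file_pages = nr;	/* reset, redo with the subpage count */
	node1.nr_file_pages = 0;
	migrate_stats_by_nr(&node0, &node1, nr);
	printf("by nr:  node0=%ld node1=%ld\n",
	       node0.nr_file_pages, node1.nr_file_pages);
	return 0;
}

In the sketch, transferring "by one" leaves node0 at 511 and node1 at 1 after migrating a 512-subpage THP, while transferring "by nr" moves the full 512; that mirrors the switch in the diff from paired __dec/__inc calls to __mod_lruvec_state()/__mod_zone_page_state() with a signed nr.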