-rw-r--r--  include/linux/vmstat.h |  1
-rw-r--r--  mm/vmscan.c            | 26
2 files changed, 21 insertions, 6 deletions
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 292485f3d24d..10cc932e209a 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -29,6 +29,7 @@ struct reclaim_stat {
 	unsigned nr_activate[2];
 	unsigned nr_ref_keep;
 	unsigned nr_unmap_fail;
+	unsigned nr_lazyfree_fail;
 };
 
 enum writeback_stat_item {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 197eba50e157..8be3d52548ca 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1295,11 +1295,15 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 */
 		if (page_mapped(page)) {
 			enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH;
+			bool was_swapbacked = PageSwapBacked(page);
 
 			if (unlikely(PageTransHuge(page)))
 				flags |= TTU_SPLIT_HUGE_PMD;
+
 			if (!try_to_unmap(page, flags)) {
 				stat->nr_unmap_fail += nr_pages;
+				if (!was_swapbacked && PageSwapBacked(page))
+					stat->nr_lazyfree_fail += nr_pages;
 				goto activate_locked;
 			}
 		}
@@ -1491,8 +1495,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 		.priority = DEF_PRIORITY,
 		.may_unmap = 1,
 	};
-	struct reclaim_stat dummy_stat;
-	unsigned long ret;
+	struct reclaim_stat stat;
+	unsigned long nr_reclaimed;
 	struct page *page, *next;
 	LIST_HEAD(clean_pages);
 
@@ -1504,11 +1508,21 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 		}
 	}
 
-	ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
-			TTU_IGNORE_ACCESS, &dummy_stat, true);
+	nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
+			TTU_IGNORE_ACCESS, &stat, true);
 	list_splice(&clean_pages, page_list);
-	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
-	return ret;
+	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -nr_reclaimed);
+	/*
+	 * Since lazyfree pages are isolated from the file LRU to begin with,
+	 * they rotate back to the anonymous LRU when the discard fails, so
+	 * the isolated counts would otherwise be mismatched.
+	 * Compensate the isolated count for both LRU lists.
+	 */
+	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
+			    stat.nr_lazyfree_fail);
+	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
+			    -stat.nr_lazyfree_fail);
+	return nr_reclaimed;
 }
 
 /*
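
For context, the "lazyfree" pages counted above are anonymous pages that userspace has marked with madvise(MADV_FREE): the kernel clears PageSwapBacked and keeps them on the file LRU so reclaim can discard them without swap I/O. If such a page is written to again, try_to_unmap() sets PageSwapBacked back and the page rotates to the anonymous LRU, which is the nr_lazyfree_fail case the patch detects. The following userspace sketch only illustrates how such pages come to exist; the mapping size and access pattern are illustrative and not part of the patch.

#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 64UL << 20;	/* illustrative 64 MiB mapping */

	/* Anonymous memory: pages land on the anon LRU once touched. */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 0xa5, len);		/* fault the pages in and dirty them */

	/*
	 * MADV_FREE marks the pages lazyfree: reclaim may simply discard
	 * them from the file LRU instead of writing them to swap.
	 */
	if (madvise(buf, len, MADV_FREE) != 0)
		return 1;

	/*
	 * Re-dirtying a lazyfree page cancels the hint for that page; if
	 * reclaim later tries to discard it, the attempt fails and the page
	 * goes back to the anonymous LRU, the case the patch accounts for.
	 */
	buf[0] = 1;

	pause();	/* keep the mapping alive for observation */
	return 0;
}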