path: root/mm/vmscan.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2017-05-13 09:49:35 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-05-13 09:49:35 -0700
commit    1251704a631b62591ad1d1b6ead252e9e597d5f5 (patch)
tree      bc394a069d3b8aef8c6dc147438c05cc9ba057aa /mm/vmscan.c
parent    0fcc3ab23d7395f58e8ab0834e7913e2e4314a83 (diff)
parent    b340959ea281dbac15344277094d0a294dbe8aca (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton: "15 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm, docs: update memory.stat description with workingset* entries
  mm: vmscan: scan until it finds eligible pages
  mm, thp: copying user pages must schedule on collapse
  dax: fix PMD data corruption when fault races with write
  dax: fix data corruption when fault races with write
  ext4: return to starting transaction in ext4_dax_huge_fault()
  mm: fix data corruption due to stale mmap reads
  dax: prevent invalidation of mapped DAX entries
  Tigran has moved
  mm, vmalloc: fix vmalloc users tracking properly
  mm/khugepaged: add missed tracepoint for collapse_huge_page_swapin
  gcov: support GCC 7.1
  mm, vmstat: Remove spurious WARN() during zoneinfo print
  time: delete current_fs_time()
  hwpoison, memcg: forcibly uncharge LRU pages
Diffstat (limited to 'mm/vmscan.c')
 -rw-r--r--  mm/vmscan.c  21
 1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2f45c0520f43..8ad39bbc79e6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1449,7 +1449,7 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
*
* Appropriate locks must be held before calling this function.
*
- * @nr_to_scan: The number of pages to look through on the list.
+ * @nr_to_scan: The number of eligible pages to look through on the list.
* @lruvec: The LRU vector to pull pages from.
* @dst: The temp list to put pages on to.
* @nr_scanned: The number of pages that were scanned.
@@ -1469,11 +1469,13 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
unsigned long skipped = 0;
- unsigned long scan, nr_pages;
+ unsigned long scan, total_scan, nr_pages;
LIST_HEAD(pages_skipped);
- for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan &&
- !list_empty(src); scan++) {
+ scan = 0;
+ for (total_scan = 0;
+ scan < nr_to_scan && nr_taken < nr_to_scan && !list_empty(src);
+ total_scan++) {
struct page *page;
page = lru_to_page(src);
@@ -1487,6 +1489,13 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
continue;
}
+ /*
+ * Do not count skipped pages because that makes the function
+ * return with no isolated pages if the LRU mostly contains
+ * ineligible pages. This causes the VM to not reclaim any
+ * pages, triggering a premature OOM.
+ */
+ scan++;
switch (__isolate_lru_page(page, mode)) {
case 0:
nr_pages = hpage_nr_pages(page);
@@ -1524,9 +1533,9 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
skipped += nr_skipped[zid];
}
}
- *nr_scanned = scan;
+ *nr_scanned = total_scan;
trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
- scan, skipped, nr_taken, mode, lru);
+ total_scan, skipped, nr_taken, mode, lru);
update_lru_sizes(lruvec, lru, nr_zone_taken);
return nr_taken;
}
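
The vmscan change above makes isolate_lru_pages() spend its nr_to_scan budget only on eligible pages: pages in a zone above the reclaim index are skipped without being counted toward "scan", while "total_scan" still counts every page examined and is what gets reported in *nr_scanned and in the tracepoint. The following is a minimal userspace sketch of that counting difference; it is not kernel code, and all names, sizes, and the list layout are illustrative assumptions.

/*
 * Sketch of the isolate_lru_pages() counting change: "scan" advances
 * only for eligible pages, "total_scan" counts every page examined.
 * Purely illustrative; not the kernel's data structures.
 */
#include <stdio.h>

#define NR_PAGES   64
#define NR_TO_SCAN 32

struct fake_page {
	int zone;	/* zone index of the page */
};

/* A page is "eligible" when its zone is at or below the reclaim index. */
static int eligible(const struct fake_page *p, int reclaim_idx)
{
	return p->zone <= reclaim_idx;
}

/* Old behaviour: every examined page, eligible or not, consumes the budget. */
static int isolate_old(const struct fake_page *lru, int reclaim_idx,
		       unsigned long *nr_scanned)
{
	int taken = 0;
	unsigned long scan;

	for (scan = 0; scan < NR_TO_SCAN && scan < NR_PAGES; scan++)
		if (eligible(&lru[scan], reclaim_idx))
			taken++;

	*nr_scanned = scan;
	return taken;
}

/* New behaviour: skipped pages no longer consume the budget. */
static int isolate_new(const struct fake_page *lru, int reclaim_idx,
		       unsigned long *nr_scanned)
{
	int taken = 0;
	unsigned long scan = 0, total_scan;

	for (total_scan = 0;
	     scan < NR_TO_SCAN && total_scan < NR_PAGES;
	     total_scan++) {
		if (!eligible(&lru[total_scan], reclaim_idx))
			continue;	/* skipped: counted only in total_scan */
		scan++;
		taken++;
	}

	*nr_scanned = total_scan;
	return taken;
}

int main(void)
{
	struct fake_page lru[NR_PAGES];
	unsigned long scanned;
	int i;

	/* First 40 pages sit in a high (ineligible) zone, the rest are eligible. */
	for (i = 0; i < NR_PAGES; i++)
		lru[i].zone = (i < 40) ? 3 : 0;

	printf("old: isolated %d (scanned %lu)\n",
	       isolate_old(lru, 0, &scanned), scanned);
	printf("new: isolated %d (scanned %lu)\n",
	       isolate_new(lru, 0, &scanned), scanned);
	return 0;
}

With these made-up numbers the old loop burns its entire budget on ineligible pages and isolates nothing, which is the "no reclaim progress, premature OOM" situation the in-code comment describes; the new loop keeps walking past the skipped pages and isolates the eligible tail of the list.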