author     Kirill Tkhai <ktkhai@virtuozzo.com>             2019-05-13 17:16:57 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-05-14 09:47:45 -0700
commit     f372d89e5dbbf2bc8e37089bacd526afd4e1d6c2 (patch)
tree       b8025428b6cae7c406ecef32e42e3a63feebe6d9 /mm/vmscan.c
parent     9851ac13592df77958ae7bac6ba39e71420c38ec (diff)
download   linux-f372d89e5dbbf2bc8e37089bacd526afd4e1d6c2.tar.bz2
mm: remove pages_to_free argument of move_active_pages_to_lru()
We can use the input list argument as the output argument too. This makes
the function more similar to putback_inactive_pages().

Link: http://lkml.kernel.org/r/155290129079.31489.16180612694090502942.stgit@localhost.localdomain
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
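For illustration only, a rough userspace sketch of the pattern this patch applies (not kernel code: the list helpers below are simplified stand-ins for <linux/list.h>, and struct item, drain_list() and free_all() are invented names). The callee collects the entries it wants freed on a list local to its own stack and splices them back into the caller's input list before returning, so the caller no longer has to pass in a dedicated pages_to_free list:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal circular doubly-linked list, a userspace stand-in for <linux/list.h>. */
struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }
#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static int list_empty(const struct list_head *head) { return head->next == head; }

static void list_add(struct list_head *new, struct list_head *head)
{
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
}

static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

/* Move everything on @list to the front of @head (simplified list_splice()). */
static void list_splice(struct list_head *list, struct list_head *head)
{
        struct list_head *first = list->next, *last = list->prev;

        if (list_empty(list))
                return;
        last->next = head->next;
        head->next->prev = last;
        head->next = first;
        first->prev = head;
}

struct item { int id; struct list_head lru; };

/*
 * Keep odd-id items on @kept and hand the rest back for freeing via the
 * *input* list: the callee collects them on its own stack and splices them
 * into @list before returning, so the caller needs no separate free list.
 */
static unsigned int drain_list(struct list_head *list, struct list_head *kept)
{
        LIST_HEAD(to_free);
        unsigned int nr_moved = 0;

        while (!list_empty(list)) {
                struct item *it = list_entry(list->next, struct item, lru);

                list_del(&it->lru);
                if (it->id & 1) {
                        list_add(&it->lru, kept);
                        nr_moved++;
                } else
                        list_add(&it->lru, &to_free);
        }

        list_splice(&to_free, list);    /* reuse the emptied input list */
        return nr_moved;
}

static void free_all(struct list_head *head)
{
        while (!list_empty(head)) {
                struct item *it = list_entry(head->next, struct item, lru);

                list_del(&it->lru);
                free(it);
        }
}

int main(void)
{
        LIST_HEAD(hold);
        LIST_HEAD(active);

        for (int i = 0; i < 6; i++) {
                struct item *it = malloc(sizeof(*it));

                it->id = i;
                list_add(&it->lru, &hold);
        }

        printf("kept %u items\n", drain_list(&hold, &active));

        free_all(&hold);        /* leftovers handed back on the input list */
        free_all(&active);
        return 0;
}

The caller-side effect shows up in the shrink_active_list() hunks below: l_hold is no longer passed in as the pages_to_free target; instead, whatever move_active_pages_to_lru() leaves on l_active and l_inactive is spliced into l_active and uncharged/freed from there.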
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 19 +++++++++++++------
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5002cc43e32f..4c5f4b862420 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2004,10 +2004,10 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
                                          struct list_head *list,
-                                         struct list_head *pages_to_free,
                                          enum lru_list lru)
 {
         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+        LIST_HEAD(pages_to_free);
         struct page *page;
         int nr_pages;
         int nr_moved = 0;
@@ -2034,12 +2034,17 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
                                 (*get_compound_page_dtor(page))(page);
                                 spin_lock_irq(&pgdat->lru_lock);
                         } else
-                                list_add(&page->lru, pages_to_free);
+                                list_add(&page->lru, &pages_to_free);
                 } else {
                         nr_moved += nr_pages;
                 }
         }
+        /*
+         * To save our caller's stack, now use input list for pages to free.
+         */
+        list_splice(&pages_to_free, list);
+
         return nr_moved;
 }
@@ -2129,8 +2134,10 @@ static void shrink_active_list(unsigned long nr_to_scan,
          */
         reclaim_stat->recent_rotated[file] += nr_rotated;
-        nr_activate = move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
-        nr_deactivate = move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
+        nr_activate = move_active_pages_to_lru(lruvec, &l_active, lru);
+        nr_deactivate = move_active_pages_to_lru(lruvec, &l_inactive, lru - LRU_ACTIVE);
+        /* Keep all free pages in l_active list */
+        list_splice(&l_inactive, &l_active);
         __count_vm_events(PGDEACTIVATE, nr_deactivate);
         __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
@@ -2138,8 +2145,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
         spin_unlock_irq(&pgdat->lru_lock);
-        mem_cgroup_uncharge_list(&l_hold);
-        free_unref_page_list(&l_hold);
+        mem_cgroup_uncharge_list(&l_active);
+        free_unref_page_list(&l_active);
         trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
                                           nr_deactivate, nr_rotated, sc->priority, file);
 }