path: root/mm/shmem.c
author     Tejun Heo <tj@kernel.org>                       2018-06-07 17:05:31 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-06-07 17:34:34 -0700
commit     bb98f2c5ac5db92d34908dbac81a8de7c47c8353 (patch)
tree       e8545ddd4cc053639171a106e958bb40f5f021bd  /mm/shmem.c
parent     88aa7cc688d48ddd84558b41d5905a0db9535c4b (diff)
download   linux-bb98f2c5ac5db92d34908dbac81a8de7c47c8353.tar.bz2
mm, memcontrol: move swap charge handling into get_swap_page()
Patch series "mm, memcontrol: Implement memory.swap.events", v2. This patchset implements memory.swap.events, which contains max and fail events so that userland can monitor and respond to swap running out.

This patch (of 2):

get_swap_page() is always followed by mem_cgroup_try_charge_swap(). This patch moves mem_cgroup_try_charge_swap() into get_swap_page() and makes get_swap_page() call the function even after swap allocation failure. This simplifies the callers, consolidates memcg-related logic, and will ease adding swap-related memcg events.

Link: http://lkml.kernel.org/r/20180416230934.GH1911913@devbig577.frc2.facebook.com
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
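For readers skimming the change: the commit message describes folding the memcg swap charge into the swap allocation path itself. The block below is a hand-written sketch of that pattern, not the verbatim mm/swap_slots.c code; only the charge-after-allocation structure is taken from the commit description, and the allocation step is left as a placeholder comment.

/*
 * Hand-written sketch of the consolidation this patch describes (not the
 * verbatim kernel code): get_swap_page() allocates a swap slot and then
 * charges it to the page's memcg in the same place, so individual callers
 * such as shmem_writepage() no longer need their own charge/cleanup path.
 */
swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;

	entry.val = 0;

	/* ... allocate a swap slot for @page, filling @entry on success ... */

	/*
	 * Charge the entry to the page's memcg here.  Keeping the call in
	 * one spot (and, per the series, issuing it even after allocation
	 * failure) is what lets later patches count memory.swap.events
	 * from a single location.
	 */
	if (mem_cgroup_try_charge_swap(page, entry)) {
		put_swap_page(page, entry);
		entry.val = 0;
	}

	return entry;
}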
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--   mm/shmem.c   |   4 ----
 1 file changed, 0 insertions(+), 4 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 9d6c7e595415..8c43e207cd3b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1322,9 +1322,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	if (!swap.val)
 		goto redirty;
 
-	if (mem_cgroup_try_charge_swap(page, swap))
-		goto free_swap;
-
 	/*
 	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
 	 * if it's not already there. Do it now before the page is
@@ -1353,7 +1350,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	}
 
 	mutex_unlock(&shmem_swaplist_mutex);
-free_swap:
 	put_swap_page(page, swap);
 redirty:
 	set_page_dirty(page);
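For completeness, here is a condensed view of how the shmem_writepage() swap-out path reads once the hunks above are applied. It is assembled from the remaining context lines, not copied from the tree; most of the function is elided and the comments are added for illustration.

	/*
	 * Condensed sketch of shmem_writepage()'s swap-out path after this
	 * patch (most of the function elided).  With the memcg charge done
	 * inside get_swap_page(), the caller only checks for an empty entry,
	 * and the separate free_swap cleanup label goes away.
	 */
	swap = get_swap_page(page);		/* allocation + memcg charge */
	if (!swap.val)
		goto redirty;

	/* ... add to the shmem swaplist and swap cache, then write out ... */

	mutex_unlock(&shmem_swaplist_mutex);
	put_swap_page(page, swap);		/* reached only if the steps above fail */
redirty:
	set_page_dirty(page);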