author | Sasha Levin <levinsasha928@gmail.com> | 2012-06-10 12:51:02 +0200
committer | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2012-06-11 15:30:57 -0400
commit | f116695a500cdd84cbeac68bc373e98ae729c24b (patch)
tree | e0af3b569d8b50a1a03612a57fec02f9846e1deb
parent | 96253444dbd90c6e9e9cfcb25315da5c412b058a (diff)
download | linux-f116695a500cdd84cbeac68bc373e98ae729c24b.tar.bz2
mm: frontswap: split out __frontswap_unuse_pages
An attempt at making frontswap_shrink() shorter and more readable. This patch
splits the walk through the swap list, which looks for an entry with enough
pages to unuse, out into a new helper, __frontswap_unuse_pages().
Also, assert that the internal __frontswap_unuse_pages() is called with the
swap lock held, since that code previously ran directly inside the locked
section.
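As an illustration (not part of the patch): a minimal, self-contained sketch of
the locking contract that assert_spin_locked() expresses, using hypothetical
names demo_lock, demo_helper and demo_caller in place of swap_lock and the
frontswap code. The assertion documents the "caller must hold the lock"
requirement in code and catches callers that forget to take it.

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);	/* stands in for swap_lock */

	/* Must only be called with demo_lock held, like __frontswap_unuse_pages(). */
	static int demo_helper(void)
	{
		assert_spin_locked(&demo_lock);	/* complains if the caller forgot the lock */
		/* ... walk data that demo_lock protects ... */
		return 0;
	}

	static void demo_caller(void)
	{
		spin_lock(&demo_lock);
		demo_helper();			/* correct: lock held across the call */
		spin_unlock(&demo_lock);
	}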
Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-rw-r--r-- | mm/frontswap.c | 59
1 file changed, 39 insertions(+), 20 deletions(-)
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 5faf840f8726..faa43b7eea6f 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -230,6 +230,41 @@ static unsigned long __frontswap_curr_pages(void)
 	return totalpages;
 }
 
+static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
+					int *swapid)
+{
+	int ret = -EINVAL;
+	struct swap_info_struct *si = NULL;
+	int si_frontswap_pages;
+	unsigned long total_pages_to_unuse = total;
+	unsigned long pages = 0, pages_to_unuse = 0;
+	int type;
+
+	assert_spin_locked(&swap_lock);
+	for (type = swap_list.head; type >= 0; type = si->next) {
+		si = swap_info[type];
+		si_frontswap_pages = atomic_read(&si->frontswap_pages);
+		if (total_pages_to_unuse < si_frontswap_pages) {
+			pages = pages_to_unuse = total_pages_to_unuse;
+		} else {
+			pages = si_frontswap_pages;
+			pages_to_unuse = 0; /* unuse all */
+		}
+		/* ensure there is enough RAM to fetch pages from frontswap */
+		if (security_vm_enough_memory_mm(current->mm, pages)) {
+			ret = -ENOMEM;
+			continue;
+		}
+		vm_unacct_memory(pages);
+		*unused = pages_to_unuse;
+		*swapid = type;
+		ret = 0;
+		break;
+	}
+
+	return ret;
+}
+
 /*
  * Frontswap, like a true swap device, may unnecessarily retain pages
  * under certain circumstances; "shrink" frontswap is essentially a
@@ -240,11 +275,9 @@ static unsigned long __frontswap_curr_pages(void)
  */
 void frontswap_shrink(unsigned long target_pages)
 {
-	struct swap_info_struct *si = NULL;
-	int si_frontswap_pages;
 	unsigned long total_pages = 0, total_pages_to_unuse;
-	unsigned long pages = 0, pages_to_unuse = 0;
-	int type;
+	unsigned long pages_to_unuse = 0;
+	int type, ret;
 	bool locked = false;
 
 	/*
@@ -258,22 +291,8 @@ void frontswap_shrink(unsigned long target_pages)
 	if (total_pages <= target_pages)
 		goto out;
 	total_pages_to_unuse = total_pages - target_pages;
-	for (type = swap_list.head; type >= 0; type = si->next) {
-		si = swap_info[type];
-		si_frontswap_pages = atomic_read(&si->frontswap_pages);
-		if (total_pages_to_unuse < si_frontswap_pages) {
-			pages = pages_to_unuse = total_pages_to_unuse;
-		} else {
-			pages = si_frontswap_pages;
-			pages_to_unuse = 0; /* unuse all */
-		}
-		/* ensure there is enough RAM to fetch pages from frontswap */
-		if (security_vm_enough_memory_mm(current->mm, pages))
-			continue;
-		vm_unacct_memory(pages);
-		break;
-	}
-	if (type < 0)
+	ret = __frontswap_unuse_pages(total_pages_to_unuse, &pages_to_unuse, &type);
+	if (ret < 0)
 		goto out;
 	locked = false;
 	spin_unlock(&swap_lock);
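For illustration only (hypothetical caller, not part of the patch): the
helper's calling convention visible in the hunks above is that it must be
entered with swap_lock held, it returns 0 only after accounting memory for a
swap entry, and *unused / *swapid are meaningful only in that case. The real
caller is frontswap_shrink() above.

	/* Hypothetical caller sketch, shown only to summarize the return contract. */
	static void example_shrink(unsigned long excess_pages)
	{
		unsigned long pages_to_unuse = 0;
		int type, ret;

		spin_lock(&swap_lock);
		ret = __frontswap_unuse_pages(excess_pages, &pages_to_unuse, &type);
		spin_unlock(&swap_lock);

		if (ret < 0)
			return;	/* -EINVAL: empty swap list, -ENOMEM: accounting failed */

		/* only here are pages_to_unuse and type valid */
	}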