Diffstat (limited to 'mm')
-rw-r--r--  mm/swap.c      24
-rw-r--r--  mm/swapfile.c  25
-rw-r--r--  mm/vmscan.c     7
3 files changed, 53 insertions, 3 deletions
diff --git a/mm/swap.c b/mm/swap.c
index e3045040dc3e..88a394872677 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -428,6 +428,30 @@ void pagevec_strip(struct pagevec *pvec)
 }
 
 /**
+ * pagevec_swap_free - try to free swap space from the pages in a pagevec
+ * @pvec: pagevec with swapcache pages to free the swap space of
+ *
+ * The caller needs to hold an extra reference to each page and
+ * not hold the page lock on the pages.  This function uses a
+ * trylock on the page lock so it may not always free the swap
+ * space associated with a page.
+ */
+void pagevec_swap_free(struct pagevec *pvec)
+{
+	int i;
+
+	for (i = 0; i < pagevec_count(pvec); i++) {
+		struct page *page = pvec->pages[i];
+
+		if (PageSwapCache(page) && trylock_page(page)) {
+			if (PageSwapCache(page))
+				remove_exclusive_swap_page_ref(page);
+			unlock_page(page);
+		}
+	}
+}
+
+/**
  * pagevec_lookup - gang pagecache lookup
  * @pvec:	Where the resulting pages are placed
  * @mapping:	The address_space to search
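
A minimal caller sketch for the new helper (hypothetical, not part of this patch; the function name drop_swap_space and the way the pagevec was filled are assumptions). It follows the contract in the kerneldoc above: the caller owns a reference to each page, holds none of the page locks, and accepts that pages whose trylock fails are simply skipped:

	#include <linux/pagevec.h>
	#include <linux/swap.h>

	/* Hypothetical caller: each page in pvec carries a reference we
	 * own (e.g. taken by pagevec_lookup()) and none of the page
	 * locks are held, so pagevec_swap_free() may trylock freely. */
	static void drop_swap_space(struct pagevec *pvec)
	{
		if (vm_swap_full())			/* swap over half used */
			pagevec_swap_free(pvec);	/* may skip locked pages */
		pagevec_release(pvec);			/* drop our references */
	}

shrink_active_list() below uses exactly this vm_swap_full() gating before releasing its pagevec.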
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1e330f2998fa..2a97fafa3d89 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -344,7 +344,7 @@ int can_share_swap_page(struct page *page)
  * Work out if there are any other processes sharing this
  * swap cache page. Free it if you can. Return success.
  */
-int remove_exclusive_swap_page(struct page *page)
+static int remove_exclusive_swap_page_count(struct page *page, int count)
 {
 	int retval;
 	struct swap_info_struct * p;
@@ -357,7 +357,7 @@ int remove_exclusive_swap_page(struct page *page)
 		return 0;
 	if (PageWriteback(page))
 		return 0;
-	if (page_count(page) != 2) /* 2: us + cache */
+	if (page_count(page) != count) /* us + cache + ptes */
 		return 0;
 
 	entry.val = page_private(page);
@@ -370,7 +370,7 @@ int remove_exclusive_swap_page(struct page *page)
 	if (p->swap_map[swp_offset(entry)] == 1) {
 		/* Recheck the page count with the swapcache lock held.. */
 		spin_lock_irq(&swapper_space.tree_lock);
-		if ((page_count(page) == 2) && !PageWriteback(page)) {
+		if ((page_count(page) == count) && !PageWriteback(page)) {
 			__delete_from_swap_cache(page);
 			SetPageDirty(page);
 			retval = 1;
@@ -388,6 +388,25 @@ int remove_exclusive_swap_page(struct page *page)
 }
 
 /*
+ * Most of the time the page should have two references: one for the
+ * process and one for the swap cache.
+ */
+int remove_exclusive_swap_page(struct page *page)
+{
+	return remove_exclusive_swap_page_count(page, 2);
+}
+
+/*
+ * The pageout code holds an extra reference to the page.  That raises
+ * the expected reference count to 2 for a page that is only in the
+ * swap cache, plus 1 for each process that maps the page.
+ */
+int remove_exclusive_swap_page_ref(struct page *page)
+{
+	return remove_exclusive_swap_page_count(page, 2 + page_mapcount(page));
+}
+
+/*
  * Free the swap entry like above, but also try to
  * free the page cache entry if it is the last user.
  */
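
The count the two wrappers pass can be tallied directly from the comments above (an illustrative comment block, not part of the patch):

	/*
	 * References held against a swap cache page when the pageout
	 * code inspects it:
	 *
	 *	1                        the caller's own reference
	 *	1                        the swap cache's reference
	 *	page_mapcount(page)      one per pte that maps the page
	 *	-------------------------------------------------------
	 *	2 + page_mapcount(page)  what _ref() passes as 'count'
	 *
	 * remove_exclusive_swap_page() keeps the historical count of 2,
	 * i.e. it only succeeds once no pte maps the page any more.
	 */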
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 46fdaa546b8d..e656035d3406 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -647,6 +647,9 @@ free_it:
 		continue;
 
 activate_locked:
+		/* Not a candidate for swapping, so reclaim swap space. */
+		if (PageSwapCache(page) && vm_swap_full())
+			remove_exclusive_swap_page_ref(page);
 		SetPageActive(page);
 		pgactivate++;
 keep_locked:
@@ -1228,6 +1231,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 			__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
 			pgmoved = 0;
 			spin_unlock_irq(&zone->lru_lock);
+			if (vm_swap_full())
+				pagevec_swap_free(&pvec);
 			__pagevec_release(&pvec);
 			spin_lock_irq(&zone->lru_lock);
 		}
@@ -1237,6 +1242,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
 	__count_vm_events(PGDEACTIVATE, pgdeactivate);
 	spin_unlock_irq(&zone->lru_lock);
+	if (vm_swap_full())
+		pagevec_swap_free(&pvec);
 
 	pagevec_release(&pvec);
 }