author		Minchan Kim <minchan.kim@gmail.com>	2011-03-22 16:32:40 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-22 17:44:02 -0700
commit		4c73b1bc6bb14aab7888ebe6bffe957cf7c07fa0 (patch)
tree		152bc82ac08f59cdec1d7d3d4312f90e8aea124a /mm/shmem.c
parent		bd65cb86c98a79bc61afd0d80166005f125e9064 (diff)
download	linux-4c73b1bc6bb14aab7888ebe6bffe957cf7c07fa0.tar.bz2
mm: shmem: change remove_from_page_cache
This patch series changes remove_from_page_cache()'s page ref counting rule: the page cache reference is now dropped inside delete_from_page_cache(), so callers no longer need to decrease the page reference themselves.

Signed-off-by: Minchan Kim <minchan.kim@gmail.com>
Acked-by: Hugh Dickins <hughd@google.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
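As a rough, illustrative sketch of the rule change (not taken from the patch itself, and omitting the locking a real caller holds), the caller-side pattern becomes:

	/* Old rule: the caller had to drop the page cache's reference itself. */
	remove_from_page_cache(page);
	page_cache_release(page);	/* pagecache ref */

	/* New rule: delete_from_page_cache() drops that reference internally. */
	delete_from_page_cache(page);

This is exactly the substitution made in shmem_writepage() in the diff below.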
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--	mm/shmem.c	3
1 file changed, 1 insertion, 2 deletions
diff --git a/mm/shmem.c b/mm/shmem.c
index 048a95a5244d..88593586bdb7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1081,7 +1081,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	shmem_recalc_inode(inode);
 
 	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
-		remove_from_page_cache(page);
+		delete_from_page_cache(page);
 		shmem_swp_set(info, entry, swap.val);
 		shmem_swp_unmap(entry);
 		if (list_empty(&info->swaplist))
@@ -1091,7 +1091,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 		spin_unlock(&info->lock);
 		swap_shmem_alloc(swap);
 		BUG_ON(page_mapped(page));
-		page_cache_release(page);	/* pagecache ref */
 		swap_writepage(page, wbc);
 		if (inode) {
 			mutex_lock(&shmem_swaplist_mutex);