author    Linus Torvalds <torvalds@linux-foundation.org>  2012-07-31 19:25:39 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-07-31 19:25:39 -0700
commit    ac694dbdbc403c00e2c14d10bc7b8412cc378259 (patch)
tree      e37328cfbeaf43716dd5914cad9179e57e84df76 /fs
parent    a40a1d3d0a2fd613fdec6d89d3c053268ced76ed (diff)
parent    437ea90cc3afdca5229b41c6b1d38c4842756cb9 (diff)
download  linux-ac694dbdbc403c00e2c14d10bc7b8412cc378259.tar.bz2
Merge branch 'akpm' (Andrew's patch-bomb)
Merge Andrew's second set of patches:
 - MM
 - a few random fixes
 - a couple of RTC leftovers

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (120 commits)
  rtc/rtc-88pm80x: remove unneed devm_kfree
  rtc/rtc-88pm80x: assign ret only when rtc_register_driver fails
  mm: hugetlbfs: close race during teardown of hugetlbfs shared page tables
  tmpfs: distribute interleave better across nodes
  mm: remove redundant initialization
  mm: warn if pg_data_t isn't initialized with zero
  mips: zero out pg_data_t when it's allocated
  memcg: gix memory accounting scalability in shrink_page_list
  mm/sparse: remove index_init_lock
  mm/sparse: more checks on mem_section number
  mm/sparse: optimize sparse_index_alloc
  memcg: add mem_cgroup_from_css() helper
  memcg: further prevent OOM with too many dirty pages
  memcg: prevent OOM with too many dirty pages
  mm: mmu_notifier: fix freed page still mapped in secondary MMU
  mm: memcg: only check anon swapin page charges for swap cache
  mm: memcg: only check swap cache pages for repeated charging
  mm: memcg: split swapin charge function into private and public part
  mm: memcg: remove needless !mm fixup to init_mm when charging
  mm: memcg: remove unneeded shmem charge type
  ...
Diffstat (limited to 'fs')
-rw-r--r--  fs/fs-writeback.c    |  5
-rw-r--r--  fs/hugetlbfs/inode.c |  4
-rw-r--r--  fs/nfs/Kconfig       |  8
-rw-r--r--  fs/nfs/direct.c      | 82
-rw-r--r--  fs/nfs/file.c        | 28
-rw-r--r--  fs/nfs/inode.c       |  4
-rw-r--r--  fs/nfs/internal.h    |  7
-rw-r--r--  fs/nfs/pagelist.c    |  4
-rw-r--r--  fs/nfs/read.c        |  6
-rw-r--r--  fs/nfs/write.c       | 89
-rw-r--r--  fs/super.c           |  2
11 files changed, 156 insertions(+), 83 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 50d0b78130a1..be3efc4f64f4 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -52,11 +52,6 @@ struct wb_writeback_work {
struct completion *done; /* set if the caller waits */
};
-/*
- * We don't actually have pdflush, but this one is exported though /proc...
- */
-int nr_pdflush_threads;
-
/**
* writeback_in_progress - determine whether there is writeback in progress
* @bdi: the device's backing_dev_info structure.
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index e13e9bdb0bf5..8349a899912e 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -416,8 +416,8 @@ hugetlb_vmtruncate_list(struct prio_tree_root *root, pgoff_t pgoff)
else
v_offset = 0;
- __unmap_hugepage_range(vma,
- vma->vm_start + v_offset, vma->vm_end, NULL);
+ unmap_hugepage_range(vma, vma->vm_start + v_offset,
+ vma->vm_end, NULL);
}
}
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 195c1ea6151a..db7ad719628a 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -86,6 +86,14 @@ config NFS_V4
If unsure, say Y.
+config NFS_SWAP
+ bool "Provide swap over NFS support"
+ default n
+ depends on NFS_FS
+ select SUNRPC_SWAP
+ help
+ This option enables swapon to work on files located on NFS mounts.
+
config NFS_V4_1
bool "NFS client support for NFSv4.1 (EXPERIMENTAL)"
depends on NFS_V4 && EXPERIMENTAL
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index b7b4f80968b5..1ba385b7c90d 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -115,17 +115,28 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
* @nr_segs: size of iovec array
*
* The presence of this routine in the address space ops vector means
- * the NFS client supports direct I/O. However, we shunt off direct
- * read and write requests before the VFS gets them, so this method
- * should never be called.
+ * the NFS client supports direct I/O. However, for most direct IO, we
+ * shunt off direct read and write requests before the VFS gets them,
+ * so this method is only ever called for swap.
*/
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
+#ifndef CONFIG_NFS_SWAP
dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
iocb->ki_filp->f_path.dentry->d_name.name,
(long long) pos, nr_segs);
return -EINVAL;
+#else
+ VM_BUG_ON(iocb->ki_left != PAGE_SIZE);
+ VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE);
+
+ if (rw == READ || rw == KERNEL_READ)
+ return nfs_file_direct_read(iocb, iov, nr_segs, pos,
+ rw == READ ? true : false);
+ return nfs_file_direct_write(iocb, iov, nr_segs, pos,
+ rw == WRITE ? true : false);
+#endif /* CONFIG_NFS_SWAP */
}
static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
@@ -303,7 +314,7 @@ static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
*/
static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
const struct iovec *iov,
- loff_t pos)
+ loff_t pos, bool uio)
{
struct nfs_direct_req *dreq = desc->pg_dreq;
struct nfs_open_context *ctx = dreq->ctx;
@@ -331,12 +342,20 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *de
GFP_KERNEL);
if (!pagevec)
break;
- down_read(&current->mm->mmap_sem);
- result = get_user_pages(current, current->mm, user_addr,
+ if (uio) {
+ down_read(&current->mm->mmap_sem);
+ result = get_user_pages(current, current->mm, user_addr,
npages, 1, 0, pagevec, NULL);
- up_read(&current->mm->mmap_sem);
- if (result < 0)
- break;
+ up_read(&current->mm->mmap_sem);
+ if (result < 0)
+ break;
+ } else {
+ WARN_ON(npages != 1);
+ result = get_kernel_page(user_addr, 1, pagevec);
+ if (WARN_ON(result != 1))
+ break;
+ }
+
if ((unsigned)result < npages) {
bytes = result * PAGE_SIZE;
if (bytes <= pgbase) {
@@ -386,7 +405,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *de
static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
const struct iovec *iov,
unsigned long nr_segs,
- loff_t pos)
+ loff_t pos, bool uio)
{
struct nfs_pageio_descriptor desc;
ssize_t result = -EINVAL;
@@ -400,7 +419,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
for (seg = 0; seg < nr_segs; seg++) {
const struct iovec *vec = &iov[seg];
- result = nfs_direct_read_schedule_segment(&desc, vec, pos);
+ result = nfs_direct_read_schedule_segment(&desc, vec, pos, uio);
if (result < 0)
break;
requested_bytes += result;
@@ -426,7 +445,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
}
static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+ unsigned long nr_segs, loff_t pos, bool uio)
{
ssize_t result = -ENOMEM;
struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -444,7 +463,7 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
- result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
+ result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos, uio);
if (!result)
result = nfs_direct_wait(dreq);
NFS_I(inode)->read_io += result;
@@ -610,7 +629,7 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
*/
static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
const struct iovec *iov,
- loff_t pos)
+ loff_t pos, bool uio)
{
struct nfs_direct_req *dreq = desc->pg_dreq;
struct nfs_open_context *ctx = dreq->ctx;
@@ -638,12 +657,19 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *d
if (!pagevec)
break;
- down_read(&current->mm->mmap_sem);
- result = get_user_pages(current, current->mm, user_addr,
- npages, 0, 0, pagevec, NULL);
- up_read(&current->mm->mmap_sem);
- if (result < 0)
- break;
+ if (uio) {
+ down_read(&current->mm->mmap_sem);
+ result = get_user_pages(current, current->mm, user_addr,
+ npages, 0, 0, pagevec, NULL);
+ up_read(&current->mm->mmap_sem);
+ if (result < 0)
+ break;
+ } else {
+ WARN_ON(npages != 1);
+ result = get_kernel_page(user_addr, 0, pagevec);
+ if (WARN_ON(result != 1))
+ break;
+ }
if ((unsigned)result < npages) {
bytes = result * PAGE_SIZE;
@@ -774,7 +800,7 @@ static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
const struct iovec *iov,
unsigned long nr_segs,
- loff_t pos)
+ loff_t pos, bool uio)
{
struct nfs_pageio_descriptor desc;
struct inode *inode = dreq->inode;
@@ -790,7 +816,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
for (seg = 0; seg < nr_segs; seg++) {
const struct iovec *vec = &iov[seg];
- result = nfs_direct_write_schedule_segment(&desc, vec, pos);
+ result = nfs_direct_write_schedule_segment(&desc, vec, pos, uio);
if (result < 0)
break;
requested_bytes += result;
@@ -818,7 +844,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos,
- size_t count)
+ size_t count, bool uio)
{
ssize_t result = -ENOMEM;
struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -836,7 +862,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
- result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos);
+ result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, uio);
if (!result)
result = nfs_direct_wait(dreq);
out_release:
@@ -867,7 +893,7 @@ out:
* cache.
*/
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+ unsigned long nr_segs, loff_t pos, bool uio)
{
ssize_t retval = -EINVAL;
struct file *file = iocb->ki_filp;
@@ -892,7 +918,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
task_io_account_read(count);
- retval = nfs_direct_read(iocb, iov, nr_segs, pos);
+ retval = nfs_direct_read(iocb, iov, nr_segs, pos, uio);
if (retval > 0)
iocb->ki_pos = pos + retval;
@@ -923,7 +949,7 @@ out:
* is no atomic O_APPEND write facility in the NFS protocol.
*/
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+ unsigned long nr_segs, loff_t pos, bool uio)
{
ssize_t retval = -EINVAL;
struct file *file = iocb->ki_filp;
@@ -955,7 +981,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
task_io_account_write(count);
- retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
+ retval = nfs_direct_write(iocb, iov, nr_segs, pos, count, uio);
if (retval > 0) {
struct inode *inode = mapping->host;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index b039a17ee941..75d6d0a3d32e 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -180,7 +180,7 @@ nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
ssize_t result;
if (iocb->ki_filp->f_flags & O_DIRECT)
- return nfs_file_direct_read(iocb, iov, nr_segs, pos);
+ return nfs_file_direct_read(iocb, iov, nr_segs, pos, true);
dprintk("NFS: read(%s/%s, %lu@%lu)\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
@@ -439,7 +439,7 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset)
if (offset != 0)
return;
/* Cancel any unstarted writes on this page */
- nfs_wb_page_cancel(page->mapping->host, page);
+ nfs_wb_page_cancel(page_file_mapping(page)->host, page);
nfs_fscache_invalidate_page(page, page->mapping->host);
}
@@ -484,7 +484,7 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
*/
static int nfs_launder_page(struct page *page)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
struct nfs_inode *nfsi = NFS_I(inode);
dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
@@ -494,6 +494,20 @@ static int nfs_launder_page(struct page *page)
return nfs_wb_page(inode, page);
}
+#ifdef CONFIG_NFS_SWAP
+static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ sector_t *span)
+{
+ *span = sis->pages;
+ return xs_swapper(NFS_CLIENT(file->f_mapping->host)->cl_xprt, 1);
+}
+
+static void nfs_swap_deactivate(struct file *file)
+{
+ xs_swapper(NFS_CLIENT(file->f_mapping->host)->cl_xprt, 0);
+}
+#endif
+
const struct address_space_operations nfs_file_aops = {
.readpage = nfs_readpage,
.readpages = nfs_readpages,
@@ -508,6 +522,10 @@ const struct address_space_operations nfs_file_aops = {
.migratepage = nfs_migrate_page,
.launder_page = nfs_launder_page,
.error_remove_page = generic_error_remove_page,
+#ifdef CONFIG_NFS_SWAP
+ .swap_activate = nfs_swap_activate,
+ .swap_deactivate = nfs_swap_deactivate,
+#endif
};
/*
@@ -533,7 +551,7 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
nfs_fscache_wait_on_page_write(NFS_I(dentry->d_inode), page);
lock_page(page);
- mapping = page->mapping;
+ mapping = page_file_mapping(page);
if (mapping != dentry->d_inode->i_mapping)
goto out_unlock;
@@ -582,7 +600,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
size_t count = iov_length(iov, nr_segs);
if (iocb->ki_filp->f_flags & O_DIRECT)
- return nfs_file_direct_write(iocb, iov, nr_segs, pos);
+ return nfs_file_direct_write(iocb, iov, nr_segs, pos, true);
dprintk("NFS: write(%s/%s, %lu@%Ld)\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 2ed6138f32ad..c6e895f0fbf3 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -897,6 +897,10 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
struct nfs_inode *nfsi = NFS_I(inode);
int ret = 0;
+ /* swapfiles are not supposed to be shared. */
+ if (IS_SWAPFILE(inode))
+ goto out;
+
if (nfs_mapping_need_revalidate_inode(inode)) {
ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
if (ret < 0)
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 8865538b26b6..31fdb03225cd 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -554,13 +554,14 @@ void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize)
static inline
unsigned int nfs_page_length(struct page *page)
{
- loff_t i_size = i_size_read(page->mapping->host);
+ loff_t i_size = i_size_read(page_file_mapping(page)->host);
if (i_size > 0) {
+ pgoff_t page_index = page_file_index(page);
pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
- if (page->index < end_index)
+ if (page_index < end_index)
return PAGE_CACHE_SIZE;
- if (page->index == end_index)
+ if (page_index == end_index)
return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1;
}
return 0;
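[Illustration, not part of the patch: the nfs_page_length() hunk above only swaps page->index for page_file_index(); the length arithmetic itself is unchanged. A minimal standalone C sketch of that arithmetic, assuming 4 KiB pages (i.e. PAGE_CACHE_SIZE == 4096), for checking the tail-page case by hand:]

#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12
#define SKETCH_PAGE_SIZE  (1UL << SKETCH_PAGE_SHIFT)

/* Same computation as nfs_page_length(): how many bytes of the page at
 * 'index' lie below i_size.  Full pages return SKETCH_PAGE_SIZE, the
 * tail page returns the remainder, pages past EOF return 0. */
static unsigned int sketch_page_length(long long i_size, unsigned long index)
{
	if (i_size > 0) {
		unsigned long end_index = (i_size - 1) >> SKETCH_PAGE_SHIFT;

		if (index < end_index)
			return SKETCH_PAGE_SIZE;
		if (index == end_index)
			return ((i_size - 1) & (SKETCH_PAGE_SIZE - 1)) + 1;
	}
	return 0;
}

int main(void)
{
	/* 10000-byte file: pages 0 and 1 are full, page 2 holds 1808 bytes,
	 * page 3 is entirely beyond EOF.  Prints: 4096 4096 1808 0 */
	printf("%u %u %u %u\n",
	       sketch_page_length(10000, 0), sketch_page_length(10000, 1),
	       sketch_page_length(10000, 2), sketch_page_length(10000, 3));
	return 0;
}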
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 1e7d8879dae6..1a6732ed04a4 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -71,7 +71,7 @@ void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
static inline struct nfs_page *
nfs_page_alloc(void)
{
- struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
+ struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
if (p)
INIT_LIST_HEAD(&p->wb_list);
return p;
@@ -118,7 +118,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
* long write-back delay. This will be adjusted in
* update_nfs_request below if the region is not locked. */
req->wb_page = page;
- req->wb_index = page->index;
+ req->wb_index = page_file_index(page);
page_cache_get(page);
req->wb_offset = offset;
req->wb_pgbase = offset;
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 6935e401ad76..b6bdb18e892c 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -527,11 +527,11 @@ static const struct rpc_call_ops nfs_read_common_ops = {
int nfs_readpage(struct file *file, struct page *page)
{
struct nfs_open_context *ctx;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
int error;
dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
- page, PAGE_CACHE_SIZE, page->index);
+ page, PAGE_CACHE_SIZE, page_file_index(page));
nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
nfs_add_stats(inode, NFSIOS_READPAGES, 1);
@@ -585,7 +585,7 @@ static int
readpage_async_filler(void *data, struct page *page)
{
struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
struct nfs_page *new;
unsigned int len;
int error;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index e4a2ad2059bd..5829d0ce7cfb 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -52,7 +52,7 @@ static mempool_t *nfs_commit_mempool;
struct nfs_commit_data *nfs_commitdata_alloc(void)
{
- struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
+ struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
if (p) {
memset(p, 0, sizeof(*p));
@@ -70,7 +70,7 @@ EXPORT_SYMBOL_GPL(nfs_commit_free);
struct nfs_write_header *nfs_writehdr_alloc(void)
{
- struct nfs_write_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
+ struct nfs_write_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO);
if (p) {
struct nfs_pgio_header *hdr = &p->header;
@@ -142,25 +142,38 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}
-static struct nfs_page *nfs_page_find_request_locked(struct page *page)
+static struct nfs_page *
+nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
{
struct nfs_page *req = NULL;
- if (PagePrivate(page)) {
+ if (PagePrivate(page))
req = (struct nfs_page *)page_private(page);
- if (req != NULL)
- kref_get(&req->wb_kref);
+ else if (unlikely(PageSwapCache(page))) {
+ struct nfs_page *freq, *t;
+
+ /* Linearly search the commit list for the correct req */
+ list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
+ if (freq->wb_page == page) {
+ req = freq;
+ break;
+ }
+ }
}
+
+ if (req)
+ kref_get(&req->wb_kref);
+
return req;
}
static struct nfs_page *nfs_page_find_request(struct page *page)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
struct nfs_page *req = NULL;
spin_lock(&inode->i_lock);
- req = nfs_page_find_request_locked(page);
+ req = nfs_page_find_request_locked(NFS_I(inode), page);
spin_unlock(&inode->i_lock);
return req;
}
@@ -168,16 +181,16 @@ static struct nfs_page *nfs_page_find_request(struct page *page)
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
loff_t end, i_size;
pgoff_t end_index;
spin_lock(&inode->i_lock);
i_size = i_size_read(inode);
end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
- if (i_size > 0 && page->index < end_index)
+ if (i_size > 0 && page_file_index(page) < end_index)
goto out;
- end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
+ end = page_file_offset(page) + ((loff_t)offset+count);
if (i_size >= end)
goto out;
i_size_write(inode, end);
@@ -190,7 +203,7 @@ out:
static void nfs_set_pageerror(struct page *page)
{
SetPageError(page);
- nfs_zap_mapping(page->mapping->host, page->mapping);
+ nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
}
/* We can set the PG_uptodate flag if we see that a write request
@@ -231,7 +244,7 @@ static int nfs_set_page_writeback(struct page *page)
int ret = test_set_page_writeback(page);
if (!ret) {
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
struct nfs_server *nfss = NFS_SERVER(inode);
if (atomic_long_inc_return(&nfss->writeback) >
@@ -245,7 +258,7 @@ static int nfs_set_page_writeback(struct page *page)
static void nfs_end_page_writeback(struct page *page)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
struct nfs_server *nfss = NFS_SERVER(inode);
end_page_writeback(page);
@@ -255,13 +268,13 @@ static void nfs_end_page_writeback(struct page *page)
static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
struct nfs_page *req;
int ret;
spin_lock(&inode->i_lock);
for (;;) {
- req = nfs_page_find_request_locked(page);
+ req = nfs_page_find_request_locked(NFS_I(inode), page);
if (req == NULL)
break;
if (nfs_lock_request(req))
@@ -316,13 +329,13 @@ out:
static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
int ret;
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
- nfs_pageio_cond_complete(pgio, page->index);
+ nfs_pageio_cond_complete(pgio, page_file_index(page));
ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
if (ret == -EAGAIN) {
redirty_page_for_writepage(wbc, page);
@@ -339,7 +352,7 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc
struct nfs_pageio_descriptor pgio;
int err;
- NFS_PROTO(page->mapping->host)->write_pageio_init(&pgio,
+ NFS_PROTO(page_file_mapping(page)->host)->write_pageio_init(&pgio,
page->mapping->host,
wb_priority(wbc),
&nfs_async_write_completion_ops);
@@ -416,9 +429,15 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
spin_lock(&inode->i_lock);
if (!nfsi->npages && NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
inode->i_version++;
- set_bit(PG_MAPPED, &req->wb_flags);
- SetPagePrivate(req->wb_page);
- set_page_private(req->wb_page, (unsigned long)req);
+ /*
+ * Swap-space should not get truncated. Hence no need to plug the race
+ * with invalidate/truncate.
+ */
+ if (likely(!PageSwapCache(req->wb_page))) {
+ set_bit(PG_MAPPED, &req->wb_flags);
+ SetPagePrivate(req->wb_page);
+ set_page_private(req->wb_page, (unsigned long)req);
+ }
nfsi->npages++;
kref_get(&req->wb_kref);
spin_unlock(&inode->i_lock);
@@ -435,9 +454,11 @@ static void nfs_inode_remove_request(struct nfs_page *req)
BUG_ON (!NFS_WBACK_BUSY(req));
spin_lock(&inode->i_lock);
- set_page_private(req->wb_page, 0);
- ClearPagePrivate(req->wb_page);
- clear_bit(PG_MAPPED, &req->wb_flags);
+ if (likely(!PageSwapCache(req->wb_page))) {
+ set_page_private(req->wb_page, 0);
+ ClearPagePrivate(req->wb_page);
+ clear_bit(PG_MAPPED, &req->wb_flags);
+ }
nfsi->npages--;
spin_unlock(&inode->i_lock);
nfs_release_request(req);
@@ -474,7 +495,7 @@ nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
spin_unlock(cinfo->lock);
if (!cinfo->dreq) {
inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
- inc_bdi_stat(req->wb_page->mapping->backing_dev_info,
+ inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
BDI_RECLAIMABLE);
__mark_inode_dirty(req->wb_context->dentry->d_inode,
I_DIRTY_DATASYNC);
@@ -541,7 +562,7 @@ static void
nfs_clear_page_commit(struct page *page)
{
dec_zone_page_state(page, NR_UNSTABLE_NFS);
- dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
+ dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE);
}
static void
@@ -733,7 +754,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
spin_lock(&inode->i_lock);
for (;;) {
- req = nfs_page_find_request_locked(page);
+ req = nfs_page_find_request_locked(NFS_I(inode), page);
if (req == NULL)
goto out_unlock;
@@ -792,7 +813,7 @@ out_err:
static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
struct page *page, unsigned int offset, unsigned int bytes)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
struct nfs_page *req;
req = nfs_try_to_update_request(inode, page, offset, bytes);
@@ -845,7 +866,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
nfs_release_request(req);
if (!do_flush)
return 0;
- status = nfs_wb_page(page->mapping->host, page);
+ status = nfs_wb_page(page_file_mapping(page)->host, page);
} while (status == 0);
return status;
}
@@ -875,7 +896,7 @@ int nfs_updatepage(struct file *file, struct page *page,
unsigned int offset, unsigned int count)
{
struct nfs_open_context *ctx = nfs_file_open_context(file);
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
int status = 0;
nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
@@ -883,7 +904,7 @@ int nfs_updatepage(struct file *file, struct page *page,
dprintk("NFS: nfs_updatepage(%s/%s %d@%lld)\n",
file->f_path.dentry->d_parent->d_name.name,
file->f_path.dentry->d_name.name, count,
- (long long)(page_offset(page) + offset));
+ (long long)(page_file_offset(page) + offset));
/* If we're not using byte range locks, and we know the page
* is up to date, it may be more efficient to extend the write
@@ -1474,7 +1495,7 @@ void nfs_retry_commit(struct list_head *page_list,
nfs_mark_request_commit(req, lseg, cinfo);
if (!cinfo->dreq) {
dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
- dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
+ dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
BDI_RECLAIMABLE);
}
nfs_unlock_and_release_request(req);
@@ -1731,7 +1752,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
*/
int nfs_wb_page(struct inode *inode, struct page *page)
{
- loff_t range_start = page_offset(page);
+ loff_t range_start = page_file_offset(page);
loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
diff --git a/fs/super.c b/fs/super.c
index 4c5d82f56ec4..4bf714459a4b 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -62,7 +62,7 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
return -1;
if (!grab_super_passive(sb))
- return !sc->nr_to_scan ? 0 : -1;
+ return -1;
if (sb->s_op && sb->s_op->nr_cached_objects)
fs_objects = sb->s_op->nr_cached_objects(sb);