author:    Trond Myklebust <trond.myklebust@hammerspace.com>  2022-03-31 09:54:01 -0400
committer: Chuck Lever <chuck.lever@oracle.com>  2022-03-31 10:39:57 -0400
commit:    6b8a94332ee4f7d9a8ae0cbac7609f79c212f06c (patch)
tree:      cbc91ab71ce43b7bf6a05a7da19bd634573a2233 /fs/nfsd
parent:    5f7b839d47dbc74cf4a07beeab5191f93678673e (diff)
download:  linux-6b8a94332ee4f7d9a8ae0cbac7609f79c212f06c.tar.bz2
nfsd: Fix a write performance regression
The call to filemap_flush() in nfsd_file_put() is there to ensure that we clear out any writes belonging to an NFSv3 client relatively quickly and avoid situations where the file can't be evicted by the garbage collector. It also ensures that we detect write errors quickly.

The problem is that this causes a performance regression for some workloads.

So try to improve matters by deferring writeback until we're ready to close the file and need to detect errors so that we can force the client to resend.

Tested-by: Jan Kara <jack@suse.cz>
Fixes: b6669305d35a ("nfsd: Reduce the number of calls to nfsd_file_gc()")
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Link: https://lore.kernel.org/all/20220330103457.r4xrhy2d6nhtouzk@quack3.lan
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
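The idea behind the patch, sketched below as plain userspace C rather than kernel code: writeback is no longer forced on every put of a cached file; it is deferred until the file is actually being dropped from the cache, and any error from that final sync is turned into a write-verifier bump so NFSv3 clients resend their unstable writes. All names here (cached_file, cached_file_flush, reset_write_verifier) are illustrative stand-ins, not the functions in the diff; fdatasync() plays the role of vfs_fsync(nf->nf_file, 1).

    /* Userspace sketch of the deferred-flush pattern; illustrative only. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    struct cached_file {
            int  fd;        /* open file descriptor, -1 if none */
            bool hashed;    /* still reachable through the cache? */
    };

    /* Stand-in for nfsd_reset_write_verifier(): force clients to resend. */
    static void reset_write_verifier(void)
    {
            fprintf(stderr, "write error detected, bumping write verifier\n");
    }

    /* Mirrors nfsd_file_flush(): sync the data and fold any error into
     * a verifier change instead of silently losing it. */
    static void cached_file_flush(struct cached_file *cf)
    {
            if (cf->fd >= 0 && fdatasync(cf->fd) != 0)
                    reset_write_verifier();
    }

    /* Mirrors the new nfsd_file_put(): flush only when the file is
     * unhashed, i.e. about to fall out of the cache; a put of a file
     * that stays cached no longer triggers writeback at all. */
    static void cached_file_put(struct cached_file *cf)
    {
            if (!cf->hashed)
                    cached_file_flush(cf);
            /* ...then drop the reference, schedule GC, etc. */
    }

    int main(void)
    {
            FILE *tmp = tmpfile();
            struct cached_file cf = { .fd = tmp ? fileno(tmp) : -1,
                                      .hashed = false };

            cached_file_put(&cf);   /* unhashed: flushed before the final put */
            return 0;
    }

The design choice this models is the one described in the commit message: keep error detection (and the forced client resend it triggers) but pay the writeback cost once, at close/dispose time, instead of on every nfsd_file_put().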
Diffstat (limited to 'fs/nfsd')
 -rw-r--r--  fs/nfsd/filecache.c | 18
 1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index cc2831cec669..496f7b3f7523 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -236,6 +236,13 @@ nfsd_file_check_write_error(struct nfsd_file *nf)
}
static void
+nfsd_file_flush(struct nfsd_file *nf)
+{
+ if (nf->nf_file && vfs_fsync(nf->nf_file, 1) != 0)
+ nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
+}
+
+static void
nfsd_file_do_unhash(struct nfsd_file *nf)
{
lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
@@ -302,11 +309,14 @@ nfsd_file_put(struct nfsd_file *nf)
return;
}
- filemap_flush(nf->nf_file->f_mapping);
is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
- nfsd_file_put_noref(nf);
- if (is_hashed)
+ if (!is_hashed) {
+ nfsd_file_flush(nf);
+ nfsd_file_put_noref(nf);
+ } else {
+ nfsd_file_put_noref(nf);
nfsd_file_schedule_laundrette();
+ }
if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
nfsd_file_gc();
}
@@ -327,6 +337,7 @@ nfsd_file_dispose_list(struct list_head *dispose)
while(!list_empty(dispose)) {
nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
list_del(&nf->nf_lru);
+ nfsd_file_flush(nf);
nfsd_file_put_noref(nf);
}
}
@@ -340,6 +351,7 @@ nfsd_file_dispose_list_sync(struct list_head *dispose)
while(!list_empty(dispose)) {
nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
list_del(&nf->nf_lru);
+ nfsd_file_flush(nf);
if (!refcount_dec_and_test(&nf->nf_ref))
continue;
if (nfsd_file_free(nf))