From 98c27f276be85a73f0babc61c9f8128b7ef593c6 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Mon, 17 Jan 2022 09:50:10 -0800 Subject: NFS: simplify check for freeing cn_resp nfs42_files_from_same_server() is called to check if freeing cn_resp is required, just do the free. Signed-off-by: Tom Rix Signed-off-by: Trond Myklebust --- fs/nfs/nfs4file.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index e79ae4cbc395..ba117592a95b 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -180,8 +180,8 @@ retry: ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count, nss, cnrs, sync); out: - if (!nfs42_files_from_same_server(file_in, file_out)) - kfree(cn_resp); + kfree(cn_resp); + if (ret == -EAGAIN) goto retry; return ret; -- cgit v1.2.3 From 8786fde8421ce755a842051f9528674a1b1f0b9a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sat, 22 Jan 2022 20:54:52 +0000 Subject: Convert NFS from readpages to readahead NFS is one of the last two users of the deprecated ->readpages aop. This conversion looks straightforward, but I have only compile-tested it. Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Trond Myklebust --- fs/nfs/file.c | 2 +- fs/nfs/nfstrace.h | 6 +++--- fs/nfs/read.c | 21 +++++++++++++-------- include/linux/nfs_fs.h | 3 +-- 4 files changed, 18 insertions(+), 14 deletions(-) diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 76d76acbc594..4d681683d13c 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -514,7 +514,7 @@ static void nfs_swap_deactivate(struct file *file) const struct address_space_operations nfs_file_aops = { .readpage = nfs_readpage, - .readpages = nfs_readpages, + .readahead = nfs_readahead, .set_page_dirty = __set_page_dirty_nobuffers, .writepage = nfs_writepage, .writepages = nfs_writepages, diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 317ce27bdc4b..4611aa3a21a4 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -889,11 +889,11 @@ TRACE_EVENT(nfs_aop_readpage_done, TRACE_EVENT(nfs_aop_readahead, TP_PROTO( const struct inode *inode, - struct page *page, + loff_t pos, unsigned int nr_pages ), - TP_ARGS(inode, page, nr_pages), + TP_ARGS(inode, pos, nr_pages), TP_STRUCT__entry( __field(dev_t, dev) @@ -911,7 +911,7 @@ TRACE_EVENT(nfs_aop_readahead, __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(&nfsi->fh); __entry->version = inode_peek_iversion_raw(inode); - __entry->offset = page_index(page) << PAGE_SHIFT; + __entry->offset = pos; __entry->nr_pages = nr_pages; ), diff --git a/fs/nfs/read.c b/fs/nfs/read.c index eb00229c1a50..2472f962a9a2 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -290,9 +290,8 @@ static void nfs_readpage_result(struct rpc_task *task, } static int -readpage_async_filler(void *data, struct page *page) +readpage_async_filler(struct nfs_readdesc *desc, struct page *page) { - struct nfs_readdesc *desc = data; struct inode *inode = page_file_mapping(page)->host; unsigned int rsize = NFS_SERVER(inode)->rsize; struct nfs_page *new; @@ -397,14 +396,16 @@ out_unlock: return ret; } -int nfs_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +void nfs_readahead(struct readahead_control *ractl) { + unsigned int nr_pages = readahead_count(ractl); + struct file *file = ractl->file; struct nfs_readdesc desc; - struct inode *inode = mapping->host; + struct inode *inode = ractl->mapping->host; + struct page *page; int ret; - trace_nfs_aop_readahead(inode, lru_to_page(pages), nr_pages); 
+ trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages); nfs_inc_stats(inode, NFSIOS_VFSREADPAGES); ret = -ESTALE; @@ -422,14 +423,18 @@ int nfs_readpages(struct file *file, struct address_space *mapping, nfs_pageio_init_read(&desc.pgio, inode, false, &nfs_async_read_completion_ops); - ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc); + while ((page = readahead_page(ractl)) != NULL) { + ret = readpage_async_filler(&desc, page); + put_page(page); + if (ret) + break; + } nfs_pageio_complete_read(&desc.pgio); put_nfs_open_context(desc.ctx); out: trace_nfs_aop_readahead_done(inode, nr_pages, ret); - return ret; } int __init nfs_init_readpagecache(void) diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 68f81d8d36de..333ea05e2531 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -601,8 +601,7 @@ nfs_have_writebacks(struct inode *inode) * linux/fs/nfs/read.c */ extern int nfs_readpage(struct file *, struct page *); -extern int nfs_readpages(struct file *, struct address_space *, - struct list_head *, unsigned); +void nfs_readahead(struct readahead_control *); /* * inline functions -- cgit v1.2.3 From b7f114edd54326f730a754547e7cfb197b5bc132 Mon Sep 17 00:00:00 2001 From: Xin Xiong Date: Tue, 25 Jan 2022 21:10:45 +0800 Subject: NFSv4.2: fix reference count leaks in _nfs42_proc_copy_notify() [You don't often get email from xiongx18@fudan.edu.cn. Learn why this is important at http://aka.ms/LearnAboutSenderIdentification.] The reference counting issue happens in two error paths in the function _nfs42_proc_copy_notify(). In both error paths, the function simply returns the error code and forgets to balance the refcount of object `ctx`, bumped by get_nfs_open_context() earlier, which may cause refcount leaks. Fix it by balancing refcount of the `ctx` object before the function returns in both error paths. Signed-off-by: Xin Xiong Signed-off-by: Xiyu Yang Signed-off-by: Xin Tan Signed-off-by: Trond Myklebust --- fs/nfs/nfs42proc.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 32129446beca..ca878d021fab 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -591,8 +591,10 @@ static int _nfs42_proc_copy_notify(struct file *src, struct file *dst, ctx = get_nfs_open_context(nfs_file_open_context(src)); l_ctx = nfs_get_lock_context(ctx); - if (IS_ERR(l_ctx)) - return PTR_ERR(l_ctx); + if (IS_ERR(l_ctx)) { + status = PTR_ERR(l_ctx); + goto out; + } status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx, FMODE_READ); @@ -600,7 +602,7 @@ static int _nfs42_proc_copy_notify(struct file *src, struct file *dst, if (status) { if (status == -EAGAIN) status = -NFS4ERR_BAD_STATEID; - return status; + goto out; } status = nfs4_call_sync(src_server->client, src_server, &msg, @@ -609,6 +611,7 @@ static int _nfs42_proc_copy_notify(struct file *src, struct file *dst, if (status == -ENOTSUPP) src_server->caps &= ~NFS_CAP_COPY_NOTIFY; +out: put_nfs_open_context(nfs_file_open_context(src)); return status; } -- cgit v1.2.3 From ab22e2cbbccbcf65bed9524605ac5dacb89471e8 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sun, 30 Jan 2022 22:44:10 +0000 Subject: SUNRPC: remove redundant pointer plainhdr [You don't often get email from colin.i.king@gmail.com. Learn why this is important at http://aka.ms/LearnAboutSenderIdentification.] Pointer plainhdr is being assigned a value that is never read, the pointer is redundant and can be removed. 
Signed-off-by: Colin Ian King Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_wrap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index e95c009bb869..5f96e75f9eec 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -409,7 +409,7 @@ static u32 gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, struct page **pages) { - u8 *ptr, *plainhdr; + u8 *ptr; time64_t now; u8 flags = 0x00; __be16 *be16ptr; @@ -426,7 +426,7 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset, return GSS_S_FAILURE; /* construct gss token header */ - ptr = plainhdr = buf->head[0].iov_base + offset; + ptr = buf->head[0].iov_base + offset; *ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff); *ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff); -- cgit v1.2.3 From 3e17898aca293a24dae757a440a50aa63ca29671 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 29 Jan 2022 13:32:45 -0500 Subject: NFSv4: Protect the state recovery thread against direct reclaim If memory allocation triggers a direct reclaim from the state recovery thread, then we can deadlock. Use memalloc_nofs_save/restore to ensure that doesn't happen. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4state.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index f5a62c0d999b..0f4818627ef0 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -49,6 +49,7 @@ #include #include #include +#include #include @@ -2560,9 +2561,17 @@ static void nfs4_layoutreturn_any_run(struct nfs_client *clp) static void nfs4_state_manager(struct nfs_client *clp) { + unsigned int memflags; int status = 0; const char *section = "", *section_sep = ""; + /* + * State recovery can deadlock if the direct reclaim code tries + * start NFS writeback. So ensure memory allocations are all + * GFP_NOFS. + */ + memflags = memalloc_nofs_save(); + /* Ensure exclusive access to NFSv4 state */ do { trace_nfs4_state_mgr(clp); @@ -2657,6 +2666,7 @@ static void nfs4_state_manager(struct nfs_client *clp) clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state); } + memalloc_nofs_restore(memflags); nfs4_end_drain_session(clp); nfs4_clear_state_manager_bit(clp); @@ -2674,6 +2684,7 @@ static void nfs4_state_manager(struct nfs_client *clp) return; if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) return; + memflags = memalloc_nofs_save(); } while (refcount_read(&clp->cl_count) > 1 && !signalled()); goto out_drain; @@ -2686,6 +2697,7 @@ out_error: clp->cl_hostname, -status); ssleep(1); out_drain: + memalloc_nofs_restore(memflags); nfs4_end_drain_session(clp); nfs4_clear_state_manager_bit(clp); } -- cgit v1.2.3 From d7867712d81c05680beff2c9645ff1e8fa59a41d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 29 Jan 2022 13:04:10 -0500 Subject: NFS: Charge open/lock file contexts to kmemcg Allow kmemcg to limit the number of open/lock file contexts, in the same way that it limits the parent file descriptors. 
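For reference, GFP_KERNEL_ACCOUNT is GFP_KERNEL with __GFP_ACCOUNT set, so the allocation is charged to the calling task's memory cgroup and kmemcg limits apply to it just as they do to the file descriptors that reference these contexts. A minimal sketch of the allocation pattern (illustrative, matching the hunks below):

    /* __GFP_ACCOUNT charges the object to current's memcg */
    ctx = kmalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
    if (ctx == NULL)
            return ERR_PTR(-ENOMEM);
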
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 2 +- fs/nfs/inode.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 75cb1cbe4cde..fbb4a522d716 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -73,7 +73,7 @@ static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir { struct nfs_inode *nfsi = NFS_I(dir); struct nfs_open_dir_context *ctx; - ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT); if (ctx != NULL) { ctx->duped = 0; ctx->attr_gencount = nfsi->attr_gencount; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index d96baa4450e3..34209abe16c5 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -952,7 +952,7 @@ struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx) res = __nfs_find_lock_context(ctx); rcu_read_unlock(); if (res == NULL) { - new = kmalloc(sizeof(*new), GFP_KERNEL); + new = kmalloc(sizeof(*new), GFP_KERNEL_ACCOUNT); if (new == NULL) return ERR_PTR(-ENOMEM); nfs_init_lock_context(new); @@ -1030,7 +1030,7 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, { struct nfs_open_context *ctx; - ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT); if (!ctx) return ERR_PTR(-ENOMEM); nfs_sb_active(dentry->d_sb); -- cgit v1.2.3 From 9c00fd9acba81070e9413a468df57cf89d6ee960 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 29 Jan 2022 13:42:01 -0500 Subject: NFSv4: Charge NFSv4 open state trackers to kmemcg Allow kmemcg to limit the number of NFSv4 delegation, lock and open state trackers. Signed-off-by: Trond Myklebust --- fs/nfs/delegation.c | 2 +- fs/nfs/nfs4state.c | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 7c9eb679dbdb..5c97cad741a7 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -439,7 +439,7 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred, struct nfs_delegation *freeme = NULL; int status = 0; - delegation = kmalloc(sizeof(*delegation), GFP_NOFS); + delegation = kmalloc(sizeof(*delegation), GFP_KERNEL_ACCOUNT); if (delegation == NULL) return -ENOMEM; nfs4_stateid_copy(&delegation->stateid, stateid); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 0f4818627ef0..87cb864a1ba2 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -667,7 +667,7 @@ nfs4_alloc_open_state(void) { struct nfs4_state *state; - state = kzalloc(sizeof(*state), GFP_NOFS); + state = kzalloc(sizeof(*state), GFP_KERNEL_ACCOUNT); if (!state) return NULL; refcount_set(&state->count, 1); @@ -870,14 +870,15 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f struct nfs4_lock_state *lsp; struct nfs_server *server = state->owner->so_server; - lsp = kzalloc(sizeof(*lsp), GFP_NOFS); + lsp = kzalloc(sizeof(*lsp), GFP_KERNEL_ACCOUNT); if (lsp == NULL) return NULL; nfs4_init_seqid_counter(&lsp->ls_seqid); refcount_set(&lsp->ls_count, 1); lsp->ls_state = state; lsp->ls_owner = fl_owner; - lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS); + lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, + 0, 0, GFP_KERNEL_ACCOUNT); if (lsp->ls_seqid.owner_id < 0) goto out_free; INIT_LIST_HEAD(&lsp->ls_locks); -- cgit v1.2.3 From 5c60e89e71f864033a268ed66933dad0d92f1550 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 29 Jan 2022 14:03:06 -0500 Subject: NFSv4.2: Fix up an invalid combination of memory allocation flags 
We should use either GFP_KERNEL or GFP_NOFS, but not both. Also strip GFP_KERNEL_ACCOUNT down to GFP_KERNEL. This memory is shrinkable, so does not need to be limited by kmemcg. Signed-off-by: Trond Myklebust --- fs/nfs/nfs42xattr.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/fs/nfs/nfs42xattr.c b/fs/nfs/nfs42xattr.c index 1c4d2a05b401..ad3405c64b9e 100644 --- a/fs/nfs/nfs42xattr.c +++ b/fs/nfs/nfs42xattr.c @@ -199,7 +199,7 @@ nfs4_xattr_alloc_entry(const char *name, const void *value, flags = NFS4_XATTR_ENTRY_EXTVAL; } - buf = kmalloc(alloclen, GFP_KERNEL_ACCOUNT | GFP_NOFS); + buf = kmalloc(alloclen, GFP_KERNEL); if (buf == NULL) return NULL; entry = (struct nfs4_xattr_entry *)buf; @@ -213,7 +213,7 @@ nfs4_xattr_alloc_entry(const char *name, const void *value, if (flags & NFS4_XATTR_ENTRY_EXTVAL) { - valp = kvmalloc(len, GFP_KERNEL_ACCOUNT | GFP_NOFS); + valp = kvmalloc(len, GFP_KERNEL); if (valp == NULL) { kfree(buf); return NULL; @@ -289,8 +289,7 @@ nfs4_xattr_alloc_cache(void) { struct nfs4_xattr_cache *cache; - cache = kmem_cache_alloc(nfs4_xattr_cache_cachep, - GFP_KERNEL_ACCOUNT | GFP_NOFS); + cache = kmem_cache_alloc(nfs4_xattr_cache_cachep, GFP_KERNEL); if (cache == NULL) return NULL; -- cgit v1.2.3 From da48f267f90d9dc9f930fd9a67753643657b404f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 29 Jan 2022 13:57:38 -0500 Subject: NFS: Convert GFP_NOFS to GFP_KERNEL Assume that sections that should not re-enter the filesystem are already protected with memalloc_nofs_save/restore call, so relax those GFP_NOFS instances which might be used by other contexts. Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 6 +++--- fs/nfs/nfs4proc.c | 15 +++++++-------- fs/nfs/nfs4state.c | 2 +- fs/nfs/pnfs.c | 4 ++-- 4 files changed, 13 insertions(+), 14 deletions(-) diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 34209abe16c5..8cf29c6cd9f9 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1583,7 +1583,7 @@ struct nfs_fattr *nfs_alloc_fattr(void) { struct nfs_fattr *fattr; - fattr = kmalloc(sizeof(*fattr), GFP_NOFS); + fattr = kmalloc(sizeof(*fattr), GFP_KERNEL); if (fattr != NULL) { nfs_fattr_init(fattr); fattr->label = NULL; @@ -1599,7 +1599,7 @@ struct nfs_fattr *nfs_alloc_fattr_with_label(struct nfs_server *server) if (!fattr) return NULL; - fattr->label = nfs4_label_alloc(server, GFP_NOFS); + fattr->label = nfs4_label_alloc(server, GFP_KERNEL); if (IS_ERR(fattr->label)) { kfree(fattr); return NULL; @@ -1613,7 +1613,7 @@ struct nfs_fh *nfs_alloc_fhandle(void) { struct nfs_fh *fh; - fh = kmalloc(sizeof(struct nfs_fh), GFP_NOFS); + fh = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL); if (fh != NULL) fh->size = 0; return fh; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 0e0db6c27619..b3793b82a5e7 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5904,7 +5904,7 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu buflen = server->rsize; npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1; - pages = kmalloc_array(npages, sizeof(struct page *), GFP_NOFS); + pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); if (!pages) return -ENOMEM; @@ -6609,7 +6609,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, }; int status = 0; - data = kzalloc(sizeof(*data), GFP_NOFS); + data = kzalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) return -ENOMEM; @@ -6797,7 +6797,7 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, struct nfs4_state *state = 
lsp->ls_state; struct inode *inode = state->inode; - p = kzalloc(sizeof(*p), GFP_NOFS); + p = kzalloc(sizeof(*p), GFP_KERNEL); if (p == NULL) return NULL; p->arg.fh = NFS_FH(inode); @@ -7202,8 +7202,7 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f task_setup_data.flags |= RPC_TASK_MOVEABLE; data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file), - fl->fl_u.nfs4_fl.owner, - recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS); + fl->fl_u.nfs4_fl.owner, GFP_KERNEL); if (data == NULL) return -ENOMEM; if (IS_SETLKW(cmd)) @@ -7626,7 +7625,7 @@ nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) if (server->nfs_client->cl_mvops->minor_version != 0) return; - data = kmalloc(sizeof(*data), GFP_NOFS); + data = kmalloc(sizeof(*data), GFP_KERNEL); if (!data) return; data->lsp = lsp; @@ -9291,7 +9290,7 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, goto out_err; ret = ERR_PTR(-ENOMEM); - calldata = kzalloc(sizeof(*calldata), GFP_NOFS); + calldata = kzalloc(sizeof(*calldata), GFP_KERNEL); if (calldata == NULL) goto out_put_clp; nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged); @@ -10222,7 +10221,7 @@ static int nfs41_free_stateid(struct nfs_server *server, &task_setup.rpc_client, &msg); dprintk("NFS call free_stateid %p\n", stateid); - data = kmalloc(sizeof(*data), GFP_NOFS); + data = kmalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->server = server; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 87cb864a1ba2..58054dfdf2b0 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -821,7 +821,7 @@ static void __nfs4_close(struct nfs4_state *state, void nfs4_close_state(struct nfs4_state *state, fmode_t fmode) { - __nfs4_close(state, fmode, GFP_NOFS, 0); + __nfs4_close(state, fmode, GFP_KERNEL, 0); } void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode) diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 7c9090a28e5c..f089e11fd001 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1233,7 +1233,7 @@ pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, int status = 0; *pcred = NULL; - lrp = kzalloc(sizeof(*lrp), GFP_NOFS); + lrp = kzalloc(sizeof(*lrp), GFP_KERNEL); if (unlikely(lrp == NULL)) { status = -ENOMEM; spin_lock(&ino->i_lock); @@ -3250,7 +3250,7 @@ struct nfs4_threshold *pnfs_mdsthreshold_alloc(void) { struct nfs4_threshold *thp; - thp = kzalloc(sizeof(*thp), GFP_NOFS); + thp = kzalloc(sizeof(*thp), GFP_KERNEL); if (!thp) { dprintk("%s mdsthreshold allocation failed\n", __func__); return NULL; -- cgit v1.2.3 From 61345a42a2ff8b0539faf545a5ce17b6c339db38 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 29 Jan 2022 14:16:55 -0500 Subject: NFSv4/flexfiles: Convert GFP_NOFS to GFP_KERNEL Assume that the higher layers will have set memalloc_nofs_save/restore as appropriate. 
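The pattern assumed here is the scoped memalloc_nofs_save()/memalloc_nofs_restore() API: within such a section the allocator clears __GFP_FS itself, so a GFP_KERNEL request behaves exactly like GFP_NOFS and cannot recurse into filesystem writeback via direct reclaim. A minimal sketch (illustrative only; the kzalloc() call stands in for any allocation in the protected section):

    unsigned int memflags;

    memflags = memalloc_nofs_save();
    /* GFP_KERNEL is implicitly degraded to GFP_NOFS here */
    p = kzalloc(sizeof(*p), GFP_KERNEL);
    memalloc_nofs_restore(memflags);
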
Signed-off-by: Trond Myklebust --- fs/nfs/flexfilelayout/flexfilelayout.c | 17 +++++++++-------- fs/nfs/nfs42proc.c | 2 +- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index a553d59afa8b..e28f2177afb7 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -694,7 +694,7 @@ nfs4_ff_layout_stat_io_start_write(struct inode *inode, spin_unlock(&mirror->lock); if (report) - pnfs_report_layoutstat(inode, GFP_NOIO); + pnfs_report_layoutstat(inode, GFP_KERNEL); } static void @@ -900,7 +900,7 @@ retry: req->wb_bytes, IOMODE_RW, false, - GFP_NOFS); + GFP_KERNEL); if (IS_ERR(pgio->pg_lseg)) { pgio->pg_error = PTR_ERR(pgio->pg_lseg); pgio->pg_lseg = NULL; @@ -959,7 +959,7 @@ ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio, req->wb_bytes, IOMODE_RW, false, - GFP_NOFS); + GFP_KERNEL); if (IS_ERR(pgio->pg_lseg)) { pgio->pg_error = PTR_ERR(pgio->pg_lseg); pgio->pg_lseg = NULL; @@ -1258,7 +1258,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg, mirror = FF_LAYOUT_COMP(lseg, idx); err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout), mirror, offset, length, status, opnum, - GFP_NOIO); + GFP_KERNEL); switch (status) { case NFS4ERR_DELAY: @@ -1973,7 +1973,7 @@ ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo, struct inode *inode = lseg->pls_layout->plh_inode; struct pnfs_commit_array *array, *new; - new = pnfs_alloc_commit_array(flseg->mirror_array_cnt, GFP_NOIO); + new = pnfs_alloc_commit_array(flseg->mirror_array_cnt, GFP_KERNEL); if (new) { spin_lock(&inode->i_lock); array = pnfs_add_commit_array(fl_cinfo, new, lseg); @@ -2192,8 +2192,8 @@ ff_layout_send_layouterror(struct pnfs_layout_segment *lseg) if (list_empty(&head)) return; - errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, - sizeof(*errors), GFP_NOFS); + errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, sizeof(*errors), + GFP_KERNEL); if (errors != NULL) { const struct nfs4_ff_layout_ds_err *pos; size_t n = 0; @@ -2444,7 +2444,8 @@ ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args) const int dev_count = PNFS_LAYOUTSTATS_MAXDEV; /* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */ - args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO); + args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), + GFP_KERNEL); if (!args->devinfo) return -ENOMEM; diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index ca878d021fab..30ea1cbd305b 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -1017,7 +1017,7 @@ int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg, return -EOPNOTSUPP; if (n > NFS42_LAYOUTERROR_MAX) return -EINVAL; - data = nfs42_alloc_layouterror_data(lseg, GFP_NOFS); + data = nfs42_alloc_layouterror_data(lseg, GFP_KERNEL); if (!data) return -ENOMEM; for (i = 0; i < n; i++) { -- cgit v1.2.3 From 4fb547be355d4af349681ba4c3bab81d99f4f774 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 29 Jan 2022 14:19:15 -0500 Subject: NFSv4.2/copyoffload: Convert GFP_NOFS to GFP_KERNEL There doesn't seem to be any reason why the copy offload code can't use GFP_KERNEL. It can't get called by direct reclaim. 
Signed-off-by: Trond Myklebust --- fs/nfs/callback_proc.c | 2 +- fs/nfs/nfs42proc.c | 10 +++++----- fs/nfs/nfs4file.c | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index c343666d9a42..39d1ec870d90 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -710,7 +710,7 @@ __be32 nfs4_callback_offload(void *data, void *dummy, struct nfs4_copy_state *copy, *tmp_copy; bool found = false; - copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS); + copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL); if (!copy) return htonl(NFS4ERR_SERVERFAULT); diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 30ea1cbd305b..882bf84484ac 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -181,7 +181,7 @@ static int handle_async_copy(struct nfs42_copy_res *res, struct nfs_open_context *dst_ctx = nfs_file_open_context(dst); struct nfs_open_context *src_ctx = nfs_file_open_context(src); - copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS); + copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL); if (!copy) return -ENOMEM; @@ -254,7 +254,7 @@ static int process_copy_commit(struct file *dst, loff_t pos_dst, struct nfs_commitres cres; int status = -ENOMEM; - cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS); + cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_KERNEL); if (!cres.verf) goto out; @@ -357,7 +357,7 @@ static ssize_t _nfs42_proc_copy(struct file *src, res->commit_res.verf = NULL; if (args->sync) { res->commit_res.verf = - kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS); + kzalloc(sizeof(struct nfs_writeverf), GFP_KERNEL); if (!res->commit_res.verf) return -ENOMEM; } @@ -552,7 +552,7 @@ static int nfs42_do_offload_cancel_async(struct file *dst, if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL)) return -EOPNOTSUPP; - data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_NOFS); + data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_KERNEL); if (data == NULL) return -ENOMEM; @@ -629,7 +629,7 @@ int nfs42_proc_copy_notify(struct file *src, struct file *dst, if (!(src_server->caps & NFS_CAP_COPY_NOTIFY)) return -EOPNOTSUPP; - args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_NOFS); + args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_KERNEL); if (args == NULL) return -ENOMEM; diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index ba117592a95b..d258933cf8c8 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -165,7 +165,7 @@ retry: if (sync) return -EOPNOTSUPP; cn_resp = kzalloc(sizeof(struct nfs42_copy_notify_res), - GFP_NOFS); + GFP_KERNEL); if (unlikely(cn_resp == NULL)) return -ENOMEM; @@ -339,7 +339,7 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt, res = ERR_PTR(-ENOMEM); len = strlen(SSC_READ_NAME_BODY) + 16; - read_name = kzalloc(len, GFP_NOFS); + read_name = kzalloc(len, GFP_KERNEL); if (read_name == NULL) goto out; snprintf(read_name, len, SSC_READ_NAME_BODY, read_name_gen++); -- cgit v1.2.3 From 0adc87940618648b3dcccc819c20068bd6b4ec93 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 29 Jan 2022 12:49:44 -0500 Subject: SUNRPC: Convert GFP_NOFS to GFP_KERNEL The sections which should not re-enter the filesystem are already protected with memalloc_nofs_save/restore calls, so it is better to use GFP_KERNEL in these calls to allow better performance for synchronous RPC calls. 
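The masking is applied from the task context at allocation time, so only callers that are not already inside a nofs section see the relaxed behaviour. Roughly, in terms of the helper from linux/sched/mm.h (sketch):

    /* Inside a memalloc_nofs_save() section: */
    gfp_t effective = current_gfp_context(GFP_KERNEL);
    /* __GFP_FS is cleared, i.e. effective == GFP_NOFS */
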
Signed-off-by: Trond Myklebust --- net/sunrpc/auth_unix.c | 2 +- net/sunrpc/clnt.c | 2 +- net/sunrpc/rpcb_clnt.c | 4 ++-- net/sunrpc/sched.c | 4 ++-- net/sunrpc/xprt.c | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index e7df1f782b2e..3600d8641644 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c @@ -43,7 +43,7 @@ unx_destroy(struct rpc_auth *auth) static struct rpc_cred * unx_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) { - struct rpc_cred *ret = mempool_alloc(unix_pool, GFP_NOFS); + struct rpc_cred *ret = mempool_alloc(unix_pool, GFP_KERNEL); rpcauth_init_cred(ret, acred, auth, &unix_credops); ret->cr_flags = 1UL << RPCAUTH_CRED_UPTODATE; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index c83fe618767c..97165a545cb3 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -2793,7 +2793,7 @@ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt, return -EINVAL; } - data = kmalloc(sizeof(*data), GFP_NOFS); + data = kmalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->xps = xprt_switch_get(xps); diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 647b323cc1d5..0fdeb8666bfd 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -714,7 +714,7 @@ void rpcb_getport_async(struct rpc_task *task) goto bailout_nofree; } - map = kzalloc(sizeof(struct rpcbind_args), GFP_NOFS); + map = kzalloc(sizeof(struct rpcbind_args), GFP_KERNEL); if (!map) { status = -ENOMEM; goto bailout_release_client; @@ -730,7 +730,7 @@ void rpcb_getport_async(struct rpc_task *task) case RPCBVERS_4: case RPCBVERS_3: map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID]; - map->r_addr = rpc_sockaddr2uaddr(sap, GFP_NOFS); + map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL); if (!map->r_addr) { status = -ENOMEM; goto bailout_free_args; diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index e2c835482791..52769b883c0a 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -1021,7 +1021,7 @@ int rpc_malloc(struct rpc_task *task) struct rpc_rqst *rqst = task->tk_rqstp; size_t size = rqst->rq_callsize + rqst->rq_rcvsize; struct rpc_buffer *buf; - gfp_t gfp = GFP_NOFS; + gfp_t gfp = GFP_KERNEL; if (RPC_IS_SWAPPER(task)) gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN; @@ -1095,7 +1095,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta static struct rpc_task * rpc_alloc_task(void) { - return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS); + return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_KERNEL); } /* diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index a02de2bddb28..9f0025e0742c 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1692,7 +1692,7 @@ static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt) goto out; ++xprt->num_reqs; spin_unlock(&xprt->reserve_lock); - req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS); + req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL); spin_lock(&xprt->reserve_lock); if (req != NULL) goto out; -- cgit v1.2.3 From 4c2883e77c5f30d34d04d3c9731988767eb9e898 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 29 Jan 2022 14:43:09 -0500 Subject: SUNRPC/auth_gss: Convert GFP_NOFS to GFP_KERNEL Assume that the upper layers have set memalloc_nofs_save/restore as appropriate. 
Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/auth_gss.c | 22 +++++++++++----------- net/sunrpc/auth_gss/auth_gss_internal.h | 2 +- net/sunrpc/auth_gss/gss_krb5_crypto.c | 10 +++++----- net/sunrpc/auth_gss/gss_krb5_seqnum.c | 4 ++-- 4 files changed, 19 insertions(+), 19 deletions(-) diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 5f42aa5fc612..affd64a54f02 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -145,7 +145,7 @@ gss_alloc_context(void) { struct gss_cl_ctx *ctx; - ctx = kzalloc(sizeof(*ctx), GFP_NOFS); + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (ctx != NULL) { ctx->gc_proc = RPC_GSS_PROC_DATA; ctx->gc_seq = 1; /* NetApp 6.4R1 doesn't accept seq. no. 0 */ @@ -208,7 +208,7 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct p = ERR_PTR(-EFAULT); goto err; } - ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS); + ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_KERNEL); if (ret < 0) { trace_rpcgss_import_ctx(ret); p = ERR_PTR(ret); @@ -510,7 +510,7 @@ gss_alloc_msg(struct gss_auth *gss_auth, int vers; int err = -ENOMEM; - gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS); + gss_msg = kzalloc(sizeof(*gss_msg), GFP_KERNEL); if (gss_msg == NULL) goto err; vers = get_pipe_version(gss_auth->net); @@ -526,7 +526,7 @@ gss_alloc_msg(struct gss_auth *gss_auth, gss_msg->auth = gss_auth; kref_get(&gss_auth->kref); if (service_name) { - gss_msg->service_name = kstrdup_const(service_name, GFP_NOFS); + gss_msg->service_name = kstrdup_const(service_name, GFP_KERNEL); if (!gss_msg->service_name) { err = -ENOMEM; goto err_put_pipe_version; @@ -702,7 +702,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) if (mlen > MSG_BUF_MAXSIZE) goto out; err = -ENOMEM; - buf = kmalloc(mlen, GFP_NOFS); + buf = kmalloc(mlen, GFP_KERNEL); if (!buf) goto out; @@ -1218,7 +1218,7 @@ gss_dup_cred(struct gss_auth *gss_auth, struct gss_cred *gss_cred) struct gss_cred *new; /* Make a copy of the cred so that we can reference count it */ - new = kzalloc(sizeof(*gss_cred), GFP_NOFS); + new = kzalloc(sizeof(*gss_cred), GFP_KERNEL); if (new) { struct auth_cred acred = { .cred = gss_cred->gc_base.cr_cred, @@ -1341,7 +1341,7 @@ gss_hash_cred(struct auth_cred *acred, unsigned int hashbits) static struct rpc_cred * gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) { - return rpcauth_lookup_credcache(auth, acred, flags, GFP_NOFS); + return rpcauth_lookup_credcache(auth, acred, flags, GFP_KERNEL); } static struct rpc_cred * @@ -1667,7 +1667,7 @@ gss_validate(struct rpc_task *task, struct xdr_stream *xdr) if (!p) goto validate_failed; - seq = kmalloc(4, GFP_NOFS); + seq = kmalloc(4, GFP_KERNEL); if (!seq) goto validate_failed; *seq = cpu_to_be32(task->tk_rqstp->rq_seqno); @@ -1777,11 +1777,11 @@ alloc_enc_pages(struct rpc_rqst *rqstp) rqstp->rq_enc_pages = kmalloc_array(rqstp->rq_enc_pages_num, sizeof(struct page *), - GFP_NOFS); + GFP_KERNEL); if (!rqstp->rq_enc_pages) goto out; for (i=0; i < rqstp->rq_enc_pages_num; i++) { - rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS); + rqstp->rq_enc_pages[i] = alloc_page(GFP_KERNEL); if (rqstp->rq_enc_pages[i] == NULL) goto out_free; } @@ -1985,7 +1985,7 @@ gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred, if (offset + len > rcv_buf->len) goto unwrap_failed; mic.len = len; - mic.data = kmalloc(len, GFP_NOFS); + mic.data = kmalloc(len, GFP_KERNEL); if (!mic.data) goto 
unwrap_failed; if (read_bytes_from_xdr_buf(rcv_buf, offset, mic.data, mic.len)) diff --git a/net/sunrpc/auth_gss/auth_gss_internal.h b/net/sunrpc/auth_gss/auth_gss_internal.h index f6d9631bd9d0..c53b329092d4 100644 --- a/net/sunrpc/auth_gss/auth_gss_internal.h +++ b/net/sunrpc/auth_gss/auth_gss_internal.h @@ -35,7 +35,7 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) if (unlikely(q > end || q < p)) return ERR_PTR(-EFAULT); if (len) { - dest->data = kmemdup(p, len, GFP_NOFS); + dest->data = kmemdup(p, len, GFP_KERNEL); if (unlikely(dest->data == NULL)) return ERR_PTR(-ENOMEM); } else diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index 634b6c6e0dcb..3ea58175e159 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -161,7 +161,7 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen, return GSS_S_FAILURE; } - checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS); + checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_KERNEL); if (checksumdata == NULL) return GSS_S_FAILURE; @@ -169,7 +169,7 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen, if (IS_ERR(tfm)) goto out_free_cksum; - req = ahash_request_alloc(tfm, GFP_NOFS); + req = ahash_request_alloc(tfm, GFP_KERNEL); if (!req) goto out_free_ahash; @@ -257,7 +257,7 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen, return GSS_S_FAILURE; } - checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS); + checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_KERNEL); if (!checksumdata) return GSS_S_FAILURE; @@ -265,7 +265,7 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen, if (IS_ERR(tfm)) goto out_free_cksum; - req = ahash_request_alloc(tfm, GFP_NOFS); + req = ahash_request_alloc(tfm, GFP_KERNEL); if (!req) goto out_free_ahash; @@ -554,7 +554,7 @@ gss_krb5_cts_crypt(struct crypto_sync_skcipher *cipher, struct xdr_buf *buf, WARN_ON(0); return -ENOMEM; } - data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_NOFS); + data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c index fb117817ff5d..3200b971a814 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c +++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c @@ -49,7 +49,7 @@ krb5_make_seq_num(struct krb5_ctx *kctx, unsigned char *plain; s32 code; - plain = kmalloc(8, GFP_NOFS); + plain = kmalloc(8, GFP_KERNEL); if (!plain) return -ENOMEM; @@ -80,7 +80,7 @@ krb5_get_seq_num(struct krb5_ctx *kctx, dprintk("RPC: krb5_get_seq_num:\n"); - plain = kmalloc(8, GFP_NOFS); + plain = kmalloc(8, GFP_KERNEL); if (!plain) return -ENOMEM; -- cgit v1.2.3 From 46442b850e5b3d846c8c82d251e47990dbd6457d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 29 Jan 2022 14:44:38 -0500 Subject: SUNRPC/xprtrdma: Convert GFP_NOFS to GFP_KERNEL Assume that the upper layers have set memalloc_nofs_save/restore as appropriate. 
Signed-off-by: Trond Myklebust --- net/sunrpc/xprtrdma/frwr_ops.c | 2 +- net/sunrpc/xprtrdma/verbs.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index 515dd7a66a04..3fcd8e1b2550 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -130,7 +130,7 @@ int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr) if (IS_ERR(frmr)) goto out_mr_err; - sg = kmalloc_array(depth, sizeof(*sg), GFP_NOFS); + sg = kmalloc_array(depth, sizeof(*sg), GFP_KERNEL); if (!sg) goto out_list_err; diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 7b5fce2faa10..2fbe9aaeec34 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -373,7 +373,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt) struct rpcrdma_ep *ep; int rc; - ep = kzalloc(sizeof(*ep), GFP_NOFS); + ep = kzalloc(sizeof(*ep), GFP_KERNEL); if (!ep) return -ENOTCONN; ep->re_xprt = &r_xprt->rx_xprt; @@ -746,7 +746,7 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt) struct rpcrdma_mr *mr; int rc; - mr = kzalloc(sizeof(*mr), GFP_NOFS); + mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) break; -- cgit v1.2.3 From 43245eca6e670ebf65908b549641c1460a9cc944 Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Wed, 2 Feb 2022 17:55:02 -0500 Subject: NFSv4.1 support for NFS4_RESULT_PRESERVER_UNLINKED In 4.1+, the server is allowed to set a flag NFS4_RESULT_PRESERVE_UNLINKED in reply to the OPEN, that tells the client that it does not need to do a silly rename of an opened file when it's being removed. Signed-off-by: Olga Kornievskaia Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 10 ++++++++-- fs/nfs/nfs4proc.c | 2 ++ include/linux/nfs_fs.h | 1 + include/uapi/linux/nfs4.h | 1 + 4 files changed, 12 insertions(+), 2 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index fbb4a522d716..8b190c8e4a45 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1419,7 +1419,12 @@ int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags) if (flags & LOOKUP_REVAL) goto out_force; out: - return (inode->i_nlink == 0) ? 
-ESTALE : 0; + if (inode->i_nlink > 0 || + (inode->i_nlink == 0 && + test_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(inode)->flags))) + return 0; + else + return -ESTALE; out_force: if (flags & LOOKUP_RCU) return -ECHILD; @@ -2330,7 +2335,8 @@ int nfs_unlink(struct inode *dir, struct dentry *dentry) trace_nfs_unlink_enter(dir, dentry); spin_lock(&dentry->d_lock); - if (d_count(dentry) > 1) { + if (d_count(dentry) > 1 && !test_bit(NFS_INO_PRESERVE_UNLINKED, + &NFS_I(d_inode(dentry))->flags)) { spin_unlock(&dentry->d_lock); /* Start asynchronous writeout of the inode */ write_inode_now(d_inode(dentry), 0); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index b3793b82a5e7..73a9b6de666c 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3050,6 +3050,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK) set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags); + if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED) + set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags); dentry = opendata->dentry; if (d_really_is_negative(dentry)) { diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 333ea05e2531..0e79dbbc759a 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -277,6 +277,7 @@ struct nfs4_copy_state { #define NFS_INO_STALE (1) /* possible stale inode */ #define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ #define NFS_INO_INVALIDATING (3) /* inode is being invalidated */ +#define NFS_INO_PRESERVE_UNLINKED (4) /* preserve file if removed while open */ #define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ #define NFS_INO_FORCE_READDIR (7) /* force readdirplus */ #define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */ diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h index 800bb0ffa6e6..1d2043708bf1 100644 --- a/include/uapi/linux/nfs4.h +++ b/include/uapi/linux/nfs4.h @@ -45,6 +45,7 @@ #define NFS4_OPEN_RESULT_CONFIRM 0x0002 #define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004 +#define NFS4_OPEN_RESULT_PRESERVE_UNLINKED 0x0008 #define NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK 0x0020 #define NFS4_SHARE_ACCESS_MASK 0x000F -- cgit v1.2.3 From 50c790a0b69bdc420f00f30bdf348d6c90194c78 Mon Sep 17 00:00:00 2001 From: Benjamin Coddington Date: Wed, 9 Feb 2022 09:07:01 -0500 Subject: NFSv4: use unique client identifiers in network namespaces In order to differentiate client state, assign a random uuid to the uniquifing portion of the client identifier when a network namespace is created. Containers may still override this value if they wish to maintain stable client identifiers by writing to /sys/fs/nfs/net/client/identifier, either by udev rules or other means. 
Signed-off-by: Benjamin Coddington Signed-off-by: Trond Myklebust --- fs/nfs/sysfs.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c index a6f740366963..886ed1eec2e5 100644 --- a/fs/nfs/sysfs.c +++ b/fs/nfs/sysfs.c @@ -151,6 +151,18 @@ static struct kobj_type nfs_netns_client_type = { .namespace = nfs_netns_client_namespace, }; +static void assign_unique_clientid(struct nfs_netns_client *clp) +{ + unsigned char client_uuid[16]; + char *uuid_str = kmalloc(UUID_STRING_LEN + 1, GFP_KERNEL); + + if (uuid_str) { + generate_random_uuid(client_uuid); + sprintf(uuid_str, "%pU", client_uuid); + rcu_assign_pointer(clp->identifier, uuid_str); + } +} + static struct nfs_netns_client *nfs_netns_client_alloc(struct kobject *parent, struct net *net) { @@ -158,6 +170,8 @@ static struct nfs_netns_client *nfs_netns_client_alloc(struct kobject *parent, p = kzalloc(sizeof(*p), GFP_KERNEL); if (p) { + if (net != &init_net) + assign_unique_clientid(p); p->net = net; p->kobject.kset = nfs_client_kset; if (kobject_init_and_add(&p->kobject, &nfs_netns_client_type, -- cgit v1.2.3 From 88a6099fc3274a27814d26dd688fdc5cd7a480ee Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 9 Feb 2022 13:22:48 -0500 Subject: NFS: Replace last uses of NFS_INO_REVAL_PAGECACHE Now that we have more fine grained attribute revalidation, let's just get rid of NFS_INO_REVAL_PAGECACHE. Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 24 +++++++++++------------- fs/nfs/write.c | 2 +- include/linux/nfs_fs.h | 8 +++----- 3 files changed, 15 insertions(+), 19 deletions(-) diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 8cf29c6cd9f9..3adf8b4a0079 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -236,19 +236,17 @@ static void nfs_zap_caches_locked(struct inode *inode) nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); nfsi->attrtimeo_timestamp = jiffies; - if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) { - nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR - | NFS_INO_INVALID_DATA - | NFS_INO_INVALID_ACCESS - | NFS_INO_INVALID_ACL - | NFS_INO_INVALID_XATTR - | NFS_INO_REVAL_PAGECACHE); - } else - nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR - | NFS_INO_INVALID_ACCESS - | NFS_INO_INVALID_ACL - | NFS_INO_INVALID_XATTR - | NFS_INO_REVAL_PAGECACHE); + if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) + nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR | + NFS_INO_INVALID_DATA | + NFS_INO_INVALID_ACCESS | + NFS_INO_INVALID_ACL | + NFS_INO_INVALID_XATTR); + else + nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR | + NFS_INO_INVALID_ACCESS | + NFS_INO_INVALID_ACL | + NFS_INO_INVALID_XATTR); nfs_zap_label_cache_locked(nfsi); } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 987a187bd39a..f88b0eb9b18e 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -306,7 +306,7 @@ static void nfs_set_pageerror(struct address_space *mapping) /* Force file size revalidation */ spin_lock(&inode->i_lock); nfs_set_cache_invalid(inode, NFS_INO_REVAL_FORCED | - NFS_INO_REVAL_PAGECACHE | + NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE); spin_unlock(&inode->i_lock); } diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 0e79dbbc759a..ce3128e4bffa 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -356,11 +356,9 @@ static inline void nfs_mark_for_revalidate(struct inode *inode) struct nfs_inode *nfsi = NFS_I(inode); spin_lock(&inode->i_lock); - nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE - | NFS_INO_INVALID_ACCESS - | NFS_INO_INVALID_ACL - | NFS_INO_INVALID_CHANGE - 
| NFS_INO_INVALID_CTIME; + nfsi->cache_validity |= NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL | + NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME | + NFS_INO_INVALID_SIZE; if (S_ISDIR(inode->i_mode)) nfsi->cache_validity |= NFS_INO_INVALID_DATA; spin_unlock(&inode->i_lock); -- cgit v1.2.3 From 41e97b7f8a15d15da03ca15e6ff7b9b7ab7f588c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 9 Feb 2022 13:26:19 -0500 Subject: NFS: Remove unused flag NFS_INO_REVAL_PAGECACHE Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 5 ++--- fs/nfs/nfstrace.h | 1 - include/linux/nfs_fs.h | 1 - 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 3adf8b4a0079..f9fc506ebb29 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -203,14 +203,13 @@ void nfs_set_cache_invalid(struct inode *inode, unsigned long flags) NFS_INO_INVALID_OTHER | NFS_INO_INVALID_XATTR); flags &= ~(NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE); - } else if (flags & NFS_INO_REVAL_PAGECACHE) - flags |= NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE; + } if (!nfs_has_xattr_cache(nfsi)) flags &= ~NFS_INO_INVALID_XATTR; if (flags & NFS_INO_INVALID_DATA) nfs_fscache_invalidate(inode, 0); - flags &= ~(NFS_INO_REVAL_PAGECACHE | NFS_INO_REVAL_FORCED); + flags &= ~NFS_INO_REVAL_FORCED; nfsi->cache_validity |= flags; diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 4611aa3a21a4..45a310b586ce 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -21,7 +21,6 @@ { NFS_INO_INVALID_ATIME, "INVALID_ATIME" }, \ { NFS_INO_INVALID_ACCESS, "INVALID_ACCESS" }, \ { NFS_INO_INVALID_ACL, "INVALID_ACL" }, \ - { NFS_INO_REVAL_PAGECACHE, "REVAL_PAGECACHE" }, \ { NFS_INO_REVAL_FORCED, "REVAL_FORCED" }, \ { NFS_INO_INVALID_LABEL, "INVALID_LABEL" }, \ { NFS_INO_INVALID_CHANGE, "INVALID_CHANGE" }, \ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index ce3128e4bffa..72a732a5103c 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -247,7 +247,6 @@ struct nfs4_copy_state { #define NFS_INO_INVALID_ATIME BIT(2) /* cached atime is invalid */ #define NFS_INO_INVALID_ACCESS BIT(3) /* cached access cred invalid */ #define NFS_INO_INVALID_ACL BIT(4) /* cached acls are invalid */ -#define NFS_INO_REVAL_PAGECACHE BIT(5) /* must revalidate pagecache */ #define NFS_INO_REVAL_FORCED BIT(6) /* force revalidation ignoring a delegation */ #define NFS_INO_INVALID_LABEL BIT(7) /* cached label is invalid */ #define NFS_INO_INVALID_CHANGE BIT(8) /* cached change is invalid */ -- cgit v1.2.3 From b622ffe1d9ecbac71f0cddb52ff0831efdf8fb83 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 22 Feb 2022 18:20:38 -0500 Subject: NFS: NFSv2/v3 clients should never be setting NFS_CAP_XATTR Ensure that we always initialise the 'xattr_support' field in struct nfs_fsinfo, so that nfs_server_set_fsinfo() doesn't declare our NFSv2/v3 client to be capable of supporting the NFSv4.2 xattr protocol by setting the NFS_CAP_XATTR capability. This configuration can cause nfs_do_access() to set access mode bits that are unsupported by the NFSv3 ACCESS call, which may confuse spec-compliant servers. 
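For reference, the capability being guarded against is derived in nfs_server_set_fsinfo() roughly as follows (paraphrased, not verbatim):

    if (fsinfo->xattr_support)
            server->caps |= NFS_CAP_XATTR;

so an uninitialised 'xattr_support' field is enough to turn the capability on.
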
Reported-by: Olga Kornievskaia Fixes: b78ef845c35d ("NFSv4.2: query the server for extended attribute support") Cc: stable@vger.kernel.org Signed-off-by: Trond Myklebust --- fs/nfs/nfs3xdr.c | 1 + fs/nfs/proc.c | 1 + 2 files changed, 2 insertions(+) diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 9274c9c5efea..54a1d21cbcc6 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -2228,6 +2228,7 @@ static int decode_fsinfo3resok(struct xdr_stream *xdr, /* ignore properties */ result->lease_time = 0; result->change_attr_type = NFS4_CHANGE_TYPE_IS_UNDEFINED; + result->xattr_support = 0; return 0; } diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index 73dcaa99fa9b..e3570c656b0f 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -92,6 +92,7 @@ nfs_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, info->maxfilesize = 0x7FFFFFFF; info->lease_time = 0; info->change_attr_type = NFS4_CHANGE_TYPE_IS_UNDEFINED; + info->xattr_support = 0; return 0; } -- cgit v1.2.3 From f1ec501d08b78fd52e56124519e3e6cdecbfc16f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 23 Feb 2022 15:46:20 -0500 Subject: NFS: Remove unnecessary XATTR cache invalidation in nfs_fhget() We should never expect the 'xattr_cache' to be non-null in that case, hence nfs_set_cache_invalid() is just going to optimise it away. Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index f9fc506ebb29..7cecabf57b95 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -561,8 +561,6 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr) inode->i_gid = fattr->gid; else if (fattr_supported & NFS_ATTR_FATTR_GROUP) nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER); - if (nfs_server_capable(inode, NFS_CAP_XATTR)) - nfs_set_cache_invalid(inode, NFS_INO_INVALID_XATTR); if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED) inode->i_blocks = fattr->du.nfs2.blocks; else if (fattr_supported & NFS_ATTR_FATTR_BLOCKS_USED && -- cgit v1.2.3 From 84631f84ac95b6ff6f08a41ffba1f93eaab4e9c7 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 23 Feb 2022 15:43:26 -0500 Subject: NFS: Clean up NFSv4.2 xattrs Add a helper for the xattr mask so that we can get rid of the inlined ifdefs. Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 7 ++----- fs/nfs/internal.h | 14 ++++++++++++++ fs/nfs/nfs4proc.c | 9 ++------- 3 files changed, 18 insertions(+), 12 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 8b190c8e4a45..ebddc736eac2 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2995,11 +2995,8 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask) /* * Determine which access bits we want to ask for... 
*/ - cache.mask = NFS_ACCESS_READ | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND; - if (nfs_server_capable(inode, NFS_CAP_XATTR)) { - cache.mask |= NFS_ACCESS_XAREAD | NFS_ACCESS_XAWRITE | - NFS_ACCESS_XALIST; - } + cache.mask = NFS_ACCESS_READ | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | + nfs_access_xattr_mask(NFS_SERVER(inode)); if (S_ISDIR(inode->i_mode)) cache.mask |= NFS_ACCESS_DELETE | NFS_ACCESS_LOOKUP; else diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 2de7c56a1fbe..b5398af53c7f 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -388,6 +388,20 @@ int nfs_mknod(struct user_namespace *, struct inode *, struct dentry *, umode_t, int nfs_rename(struct user_namespace *, struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); +#ifdef CONFIG_NFS_V4_2 +static inline __u32 nfs_access_xattr_mask(const struct nfs_server *server) +{ + if (!(server->caps & NFS_CAP_XATTR)) + return 0; + return NFS4_ACCESS_XAREAD | NFS4_ACCESS_XAWRITE | NFS4_ACCESS_XALIST; +} +#else +static inline __u32 nfs_access_xattr_mask(const struct nfs_server *server) +{ + return 0; +} +#endif + /* file.c */ int nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync); loff_t nfs_file_llseek(struct file *, loff_t, int); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 73a9b6de666c..8b875355824b 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1392,13 +1392,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, case NFS4_OPEN_CLAIM_FH: p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE | - NFS4_ACCESS_EXECUTE; -#ifdef CONFIG_NFS_V4_2 - if (!(server->caps & NFS_CAP_XATTR)) - break; - p->o_arg.access |= NFS4_ACCESS_XAREAD | NFS4_ACCESS_XAWRITE | - NFS4_ACCESS_XALIST; -#endif + NFS4_ACCESS_EXECUTE | + nfs_access_xattr_mask(server); } p->o_arg.clientid = server->nfs_client->cl_clientid; p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time); -- cgit v1.2.3 From 6c984083ec2453dfd3fcf98f392f34500c73e3f2 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 15 Feb 2022 15:58:38 -0500 Subject: NFS: Use of mapping_set_error() results in spurious errors The use of mapping_set_error() in conjunction with calls to filemap_check_errors() is problematic because every error gets reported as either an EIO or an ENOSPC by filemap_check_errors() in functions such as filemap_write_and_wait() or filemap_write_and_wait_range(). In almost all cases, we prefer to use the more nuanced wb errors. Fixes: b8946d7bfb94 ("NFS: Revalidate the file mapping on all fatal writeback errors") Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index f88b0eb9b18e..74d258781205 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -316,7 +316,10 @@ static void nfs_mapping_set_error(struct page *page, int error) struct address_space *mapping = page_file_mapping(page); SetPageError(page); - mapping_set_error(mapping, error); + filemap_set_wb_err(mapping, error); + if (mapping->host) + errseq_set(&mapping->host->i_sb->s_wb_err, + error == -ENOSPC ? -ENOSPC : -EIO); nfs_set_pageerror(mapping); } -- cgit v1.2.3 From b38e09b9b613ee608c87bc00979db891ee9f0538 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 28 Feb 2022 10:09:23 -0500 Subject: Revert "NFSv4: use unique client identifiers in network namespaces" This reverts commit 50c790a0b69bdc420f00f30bdf348d6c90194c78. 
The functionality is believed to be capable of causing regressions in existing setups, so the author has requested that it be reverted. Signed-off-by: Trond Myklebust --- fs/nfs/sysfs.c | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c index 886ed1eec2e5..a6f740366963 100644 --- a/fs/nfs/sysfs.c +++ b/fs/nfs/sysfs.c @@ -151,18 +151,6 @@ static struct kobj_type nfs_netns_client_type = { .namespace = nfs_netns_client_namespace, }; -static void assign_unique_clientid(struct nfs_netns_client *clp) -{ - unsigned char client_uuid[16]; - char *uuid_str = kmalloc(UUID_STRING_LEN + 1, GFP_KERNEL); - - if (uuid_str) { - generate_random_uuid(client_uuid); - sprintf(uuid_str, "%pU", client_uuid); - rcu_assign_pointer(clp->identifier, uuid_str); - } -} - static struct nfs_netns_client *nfs_netns_client_alloc(struct kobject *parent, struct net *net) { @@ -170,8 +158,6 @@ static struct nfs_netns_client *nfs_netns_client_alloc(struct kobject *parent, p = kzalloc(sizeof(*p), GFP_KERNEL); if (p) { - if (net != &init_net) - assign_unique_clientid(p); p->net = net; p->kobject.kset = nfs_client_kset; if (kobject_init_and_add(&p->kobject, &nfs_netns_client_type, -- cgit v1.2.3 From 64cfca85bacde54caa64e0ab855c48734894fa37 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 24 Feb 2022 10:59:37 -0500 Subject: NFS: Return valid errors from nfs2/3_decode_dirent() Valid return values for decode_dirent() callback functions are: 0: Success -EBADCOOKIE: End of directory -EAGAIN: End of xdr_stream All errors need to map into one of those three values. Fixes: 573c4e1ef53a ("NFS: Simplify ->decode_dirent() calling sequence") Signed-off-by: Trond Myklebust --- fs/nfs/nfs2xdr.c | 2 +- fs/nfs/nfs3xdr.c | 21 ++++++--------------- 2 files changed, 7 insertions(+), 16 deletions(-) diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 7fba7711e6b3..3d5ba43f44bb 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -949,7 +949,7 @@ int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, error = decode_filename_inline(xdr, &entry->name, &entry->len); if (unlikely(error)) - return error; + return -EAGAIN; /* * The type (size and byte order) of nfscookie isn't defined in diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 54a1d21cbcc6..7ab60ad98776 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -1967,7 +1967,6 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, bool plus) { struct user_namespace *userns = rpc_userns(entry->server->client); - struct nfs_entry old = *entry; __be32 *p; int error; u64 new_cookie; @@ -1987,15 +1986,15 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, error = decode_fileid3(xdr, &entry->ino); if (unlikely(error)) - return error; + return -EAGAIN; error = decode_inline_filename3(xdr, &entry->name, &entry->len); if (unlikely(error)) - return error; + return -EAGAIN; error = decode_cookie3(xdr, &new_cookie); if (unlikely(error)) - return error; + return -EAGAIN; entry->d_type = DT_UNKNOWN; @@ -2003,7 +2002,7 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, entry->fattr->valid = 0; error = decode_post_op_attr(xdr, entry->fattr, userns); if (unlikely(error)) - return error; + return -EAGAIN; if (entry->fattr->valid & NFS_ATTR_FATTR_V3) entry->d_type = nfs_umode_to_dtype(entry->fattr->mode); @@ -2018,11 +2017,8 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, return -EAGAIN; if (*p != xdr_zero) { error = decode_nfs_fh3(xdr, 
entry->fh); - if (unlikely(error)) { - if (error == -E2BIG) - goto out_truncated; - return error; - } + if (unlikely(error)) + return -EAGAIN; } else zero_nfs_fh3(entry->fh); } @@ -2031,11 +2027,6 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, entry->cookie = new_cookie; return 0; - -out_truncated: - dprintk("NFS: directory entry contains invalid file handle\n"); - *entry = old; - return -EAGAIN; } /* -- cgit v1.2.3 From 1a93b82c59ab45f2d9f14c47f09d2e341ff02381 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 18 Feb 2022 07:07:08 -0500 Subject: NFS: constify nfs_server_capable() and nfs_have_writebacks() Signed-off-by: Trond Myklebust --- include/linux/nfs_fs.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 72a732a5103c..6e10725887d1 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -363,7 +363,7 @@ static inline void nfs_mark_for_revalidate(struct inode *inode) spin_unlock(&inode->i_lock); } -static inline int nfs_server_capable(struct inode *inode, int cap) +static inline int nfs_server_capable(const struct inode *inode, int cap) { return NFS_SERVER(inode)->caps & cap; } @@ -587,12 +587,11 @@ extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail); extern void nfs_commit_free(struct nfs_commit_data *data); bool nfs_commit_end(struct nfs_mds_commit_info *cinfo); -static inline int -nfs_have_writebacks(struct inode *inode) +static inline bool nfs_have_writebacks(const struct inode *inode) { if (S_ISREG(inode->i_mode)) return atomic_long_read(&NFS_I(inode)->nrequests) != 0; - return 0; + return false; } /* -- cgit v1.2.3 From 2eef8a31112262f6f8e5d2b3076b9f288473eaf2 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 19 Feb 2022 20:38:19 -0500 Subject: NFS: Trace lookup revalidation failure Enable tracing of lookup revalidation failures. Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index ebddc736eac2..1aa55cac9d9a 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1474,9 +1474,7 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry, { switch (error) { case 1: - dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n", - __func__, dentry); - return 1; + break; case 0: /* * We can't d_drop the root of a disconnected tree: @@ -1485,13 +1483,10 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry, * inodes on unmount and further oopses. 
*/ if (inode && IS_ROOT(dentry)) - return 1; - dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n", - __func__, dentry); - return 0; + error = 1; + break; } - dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n", - __func__, dentry, error); + trace_nfs_lookup_revalidate_exit(dir, dentry, 0, error); return error; } @@ -1623,9 +1618,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry, goto out_bad; trace_nfs_lookup_revalidate_enter(dir, dentry, flags); - error = nfs_lookup_revalidate_dentry(dir, dentry, inode); - trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error); - return error; + return nfs_lookup_revalidate_dentry(dir, dentry, inode); out_valid: return nfs_lookup_revalidate_done(dir, dentry, inode, 1); out_bad: -- cgit v1.2.3 From d1e32ea35502bcbf9241c54338882f18b6fa6452 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 25 Feb 2022 10:22:30 -0500 Subject: NFS: Initialise the readdir verifier as best we can in nfs_opendir() For the purpose of ensuring that opendir() followed by seekdir() work as correctly as possible, try to initialise the readdir verifier in nfs_opendir(). Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 1aa55cac9d9a..1dfbd05081ad 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -89,6 +89,7 @@ static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir NFS_INO_REVAL_FORCED); list_add(&ctx->list, &nfsi->open_files); clear_bit(NFS_INO_FORCE_READDIR, &nfsi->flags); + memcpy(ctx->verf, nfsi->cookieverf, sizeof(ctx->verf)); spin_unlock(&dir->i_lock); return ctx; } -- cgit v1.2.3 From 281f31b2e5a2f1c5ae82dbe0b14c9d57401e0967 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 22 Feb 2022 12:10:36 -0500 Subject: NFS: Use kzalloc() to avoid initialising the nfs_open_dir_context Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 1dfbd05081ad..379f88b158fb 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -69,18 +69,15 @@ const struct address_space_operations nfs_dir_aops = { .freepage = nfs_readdir_clear_array, }; -static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir) +static struct nfs_open_dir_context * +alloc_nfs_open_dir_context(struct inode *dir) { struct nfs_inode *nfsi = NFS_I(dir); struct nfs_open_dir_context *ctx; - ctx = kmalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT); + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT); if (ctx != NULL) { - ctx->duped = 0; ctx->attr_gencount = nfsi->attr_gencount; - ctx->dir_cookie = 0; - ctx->dup_cookie = 0; - ctx->page_index = 0; - ctx->eof = false; spin_lock(&dir->i_lock); if (list_empty(&nfsi->open_files) && (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER)) -- cgit v1.2.3 From 0b2662b7e7fddaaf6d6c055763270765cdb97a0d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 22 Feb 2022 10:39:26 -0500 Subject: NFS: Calculate page offsets algorithmically Instead of relying on counting the page offsets as we walk through the page cache, switch to calculating them algorithmically. 
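The page offset arithmetic is simple enough to show in isolation. The following is a rough userspace sketch of the idea, with an illustrative struct layout and PAGE_SIZE rather than the kernel's exact definitions:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

/* illustrative stand-ins for struct nfs_cache_array{,_entry} */
struct cache_array_entry {
	uint64_t cookie;
	uint64_t ino;
	const char *name;
	unsigned int len;
};

struct cache_array {
	uint64_t last_cookie;
	unsigned int size;
	unsigned char flags;
	struct cache_array_entry array[];
};

/* how many entries fit in one page after the array header */
static size_t array_maxentries(void)
{
	return (PAGE_SIZE - sizeof(struct cache_array)) /
	       sizeof(struct cache_array_entry);
}

/* directory position of the first entry cached on a given page */
static long long page_offset(unsigned long page_index)
{
	return (long long)page_index * (long long)array_maxentries();
}

int main(void)
{
	printf("entries per page: %zu, offset of page 3: %lld\n",
	       array_maxentries(), page_offset(3));
	return 0;
}

Because every page holds the same maximum number of entries, the offset of any page can be derived from its index alone instead of being accumulated while walking the cache.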
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 379f88b158fb..6f0a38db6c37 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -249,17 +249,20 @@ static const char *nfs_readdir_copy_name(const char *name, unsigned int len) return ret; } +static size_t nfs_readdir_array_maxentries(void) +{ + return (PAGE_SIZE - sizeof(struct nfs_cache_array)) / + sizeof(struct nfs_cache_array_entry); +} + /* * Check that the next array entry lies entirely within the page bounds */ static int nfs_readdir_array_can_expand(struct nfs_cache_array *array) { - struct nfs_cache_array_entry *cache_entry; - if (array->page_full) return -ENOSPC; - cache_entry = &array->array[array->size + 1]; - if ((char *)cache_entry - (char *)array > PAGE_SIZE) { + if (array->size == nfs_readdir_array_maxentries()) { array->page_full = 1; return -ENOSPC; } @@ -318,6 +321,11 @@ static struct page *nfs_readdir_page_get_locked(struct address_space *mapping, return page; } +static loff_t nfs_readdir_page_offset(struct page *page) +{ + return (loff_t)page->index * (loff_t)nfs_readdir_array_maxentries(); +} + static u64 nfs_readdir_page_last_cookie(struct page *page) { struct nfs_cache_array *array; @@ -448,7 +456,7 @@ static int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, if (array->array[i].cookie == desc->dir_cookie) { struct nfs_inode *nfsi = NFS_I(file_inode(desc->file)); - new_pos = desc->current_index + i; + new_pos = nfs_readdir_page_offset(desc->page) + i; if (desc->attr_gencount != nfsi->attr_gencount || !nfs_readdir_inode_mapping_valid(nfsi)) { desc->duped = 0; -- cgit v1.2.3 From d09e673f497164442ad7976a870d7cc857783fd4 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 22 Feb 2022 08:31:28 -0500 Subject: NFS: Store the change attribute in the directory page cache Use the change attribute and the first cookie in a directory page cache entry to validate that the page is up to date. 
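A minimal model of that validation rule, with simplified field names standing in for the page-private nfs_cache_array, looks like this:

#include <stdbool.h>
#include <stdint.h>

struct dir_page {
	uint64_t change_attr;	/* directory change attribute when the page was filled */
	uint64_t first_cookie;	/* cookie of the first entry stored in the page */
	unsigned int size;	/* number of entries currently stored */
};

bool dir_page_is_valid(const struct dir_page *p,
		       uint64_t wanted_first_cookie,
		       uint64_t current_change_attr)
{
	if (p->change_attr != current_change_attr)
		return false;
	if (p->size > 0 && p->first_cookie != wanted_first_cookie)
		return false;
	return true;
}

If either check fails, the cached contents are discarded and the page is reinitialised with the current change attribute before being refilled.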
Suggested-by: Benjamin Coddington Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 68 +++++++++++++++++++++++++++++++++--------------------------- 1 file changed, 37 insertions(+), 31 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 6f0a38db6c37..a1767f755460 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -140,6 +140,7 @@ struct nfs_cache_array_entry { }; struct nfs_cache_array { + u64 change_attr; u64 last_cookie; unsigned int size; unsigned char page_full : 1, @@ -176,12 +177,14 @@ static void nfs_readdir_array_init(struct nfs_cache_array *array) memset(array, 0, sizeof(struct nfs_cache_array)); } -static void nfs_readdir_page_init_array(struct page *page, u64 last_cookie) +static void nfs_readdir_page_init_array(struct page *page, u64 last_cookie, + u64 change_attr) { struct nfs_cache_array *array; array = kmap_atomic(page); nfs_readdir_array_init(array); + array->change_attr = change_attr; array->last_cookie = last_cookie; array->cookies_are_ordered = 1; kunmap_atomic(array); @@ -208,7 +211,7 @@ nfs_readdir_page_array_alloc(u64 last_cookie, gfp_t gfp_flags) { struct page *page = alloc_page(gfp_flags); if (page) - nfs_readdir_page_init_array(page, last_cookie); + nfs_readdir_page_init_array(page, last_cookie, 0); return page; } @@ -305,19 +308,43 @@ out: return ret; } +static bool nfs_readdir_page_validate(struct page *page, u64 last_cookie, + u64 change_attr) +{ + struct nfs_cache_array *array = kmap_atomic(page); + int ret = true; + + if (array->change_attr != change_attr) + ret = false; + if (array->size > 0 && array->array[0].cookie != last_cookie) + ret = false; + kunmap_atomic(array); + return ret; +} + +static void nfs_readdir_page_unlock_and_put(struct page *page) +{ + unlock_page(page); + put_page(page); +} + static struct page *nfs_readdir_page_get_locked(struct address_space *mapping, pgoff_t index, u64 last_cookie) { struct page *page; + u64 change_attr; page = grab_cache_page(mapping, index); - if (page && !PageUptodate(page)) { - nfs_readdir_page_init_array(page, last_cookie); - if (invalidate_inode_pages2_range(mapping, index + 1, -1) < 0) - nfs_zap_mapping(mapping->host, mapping); - SetPageUptodate(page); + if (!page) + return NULL; + change_attr = inode_peek_iversion_raw(mapping->host); + if (PageUptodate(page)) { + if (nfs_readdir_page_validate(page, last_cookie, change_attr)) + return page; + nfs_readdir_clear_array(page); } - + nfs_readdir_page_init_array(page, last_cookie, change_attr); + SetPageUptodate(page); return page; } @@ -357,12 +384,6 @@ static void nfs_readdir_page_set_eof(struct page *page) kunmap_atomic(array); } -static void nfs_readdir_page_unlock_and_put(struct page *page) -{ - unlock_page(page); - put_page(page); -} - static struct page *nfs_readdir_page_get_next(struct address_space *mapping, pgoff_t index, u64 cookie) { @@ -419,16 +440,6 @@ out_eof: return -EBADCOOKIE; } -static bool -nfs_readdir_inode_mapping_valid(struct nfs_inode *nfsi) -{ - if (nfsi->cache_validity & (NFS_INO_INVALID_CHANGE | - NFS_INO_INVALID_DATA)) - return false; - smp_rmb(); - return !test_bit(NFS_INO_INVALIDATING, &nfsi->flags); -} - static bool nfs_readdir_array_cookie_in_range(struct nfs_cache_array *array, u64 cookie) { @@ -457,8 +468,7 @@ static int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, struct nfs_inode *nfsi = NFS_I(file_inode(desc->file)); new_pos = nfs_readdir_page_offset(desc->page) + i; - if (desc->attr_gencount != nfsi->attr_gencount || - !nfs_readdir_inode_mapping_valid(nfsi)) { + if (desc->attr_gencount != 
nfsi->attr_gencount) { desc->duped = 0; desc->attr_gencount = nfsi->attr_gencount; } else if (new_pos < desc->prev_index) { @@ -1095,11 +1105,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) * to either find the entry with the appropriate number or * revalidate the cookie. */ - if (ctx->pos == 0 || nfs_attribute_cache_expired(inode)) { - res = nfs_revalidate_mapping(inode, file->f_mapping); - if (res < 0) - goto out; - } + nfs_revalidate_mapping(inode, file->f_mapping); res = -ENOMEM; desc = kzalloc(sizeof(*desc), GFP_KERNEL); -- cgit v1.2.3 From 728dd0ab37421396927749fc8cec9c2009c526c8 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 22 Feb 2022 08:59:33 -0500 Subject: NFS: Don't re-read the entire page cache to find the next cookie If the page cache entry that was last read gets invalidated for some reason, then make sure we can re-create it on the next call to readdir. This, combined with the cache page validation, allows us to reuse the cached value of page-index on successive calls to nfs_readdir. Credit is due to Benjamin Coddington for showing that the concept works, and that it allows for improved cache sharing between processes even in the case where pages are lost due to LRU or active invalidation. Suggested-by: Benjamin Coddington Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 10 +++++++--- include/linux/nfs_fs.h | 1 + 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index a1767f755460..93f70698e401 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1120,6 +1120,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) desc->dup_cookie = dir_ctx->dup_cookie; desc->duped = dir_ctx->duped; page_index = dir_ctx->page_index; + desc->page_index = page_index; + desc->last_cookie = dir_ctx->last_cookie; desc->attr_gencount = dir_ctx->attr_gencount; desc->eof = dir_ctx->eof; memcpy(desc->verf, dir_ctx->verf, sizeof(desc->verf)); @@ -1168,6 +1170,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) spin_lock(&file->f_lock); dir_ctx->dir_cookie = desc->dir_cookie; dir_ctx->dup_cookie = desc->dup_cookie; + dir_ctx->last_cookie = desc->last_cookie; dir_ctx->duped = desc->duped; dir_ctx->attr_gencount = desc->attr_gencount; dir_ctx->page_index = desc->page_index; @@ -1209,10 +1212,11 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence) } if (offset != filp->f_pos) { filp->f_pos = offset; - if (nfs_readdir_use_cookie(filp)) - dir_ctx->dir_cookie = offset; - else + if (!nfs_readdir_use_cookie(filp)) { dir_ctx->dir_cookie = 0; + dir_ctx->page_index = 0; + } else + dir_ctx->dir_cookie = offset; if (offset == 0) memset(dir_ctx->verf, 0, sizeof(dir_ctx->verf)); dir_ctx->duped = 0; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 6e10725887d1..1c533f2c1f36 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -105,6 +105,7 @@ struct nfs_open_dir_context { __be32 verf[NFS_DIR_VERIFIER_SIZE]; __u64 dir_cookie; __u64 dup_cookie; + __u64 last_cookie; pgoff_t page_index; signed char duped; bool eof; -- cgit v1.2.3 From c8f0523ba398b72ffdb6e41930c089b75a6e2acf Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 26 Feb 2022 09:38:19 -0500 Subject: NFS: Don't advance the page pointer unless the page is full When we hit the end of the data in the readdir page, we don't want to start filling a new page, unless this one is full. 
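The rule can be sketched on its own; the types and field names below are simplified from the patch:

#include <stdint.h>

struct cache_page {
	uint64_t first_cookie;	/* cookie of the first entry in this page */
	uint64_t last_cookie;	/* cookie to resume the READDIR from */
	unsigned int size;	/* entries stored so far */
	int page_full;
};

struct readdir_pos {
	uint64_t last_cookie;
	unsigned long page_index;
	unsigned int entry_index;
	long long current_index;
};

void seek_next_array(const struct cache_page *page, struct readdir_pos *pos)
{
	if (page->page_full) {
		/* page is complete: move on to the next page in the cache */
		pos->last_cookie = page->last_cookie;
		pos->current_index += page->size;
		pos->entry_index = 0;
		pos->page_index++;
	} else {
		/* page is only partly filled: keep filling it from its first cookie */
		pos->last_cookie = page->first_cookie;
	}
}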
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 93f70698e401..60f7feee0a16 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -417,6 +417,18 @@ bool nfs_readdir_use_cookie(const struct file *filp) return true; } +static void nfs_readdir_seek_next_array(struct nfs_cache_array *array, + struct nfs_readdir_descriptor *desc) +{ + if (array->page_full) { + desc->last_cookie = array->last_cookie; + desc->current_index += array->size; + desc->cache_entry_index = 0; + desc->page_index++; + } else + desc->last_cookie = array->array[0].cookie; +} + static int nfs_readdir_search_for_pos(struct nfs_cache_array *array, struct nfs_readdir_descriptor *desc) { @@ -428,6 +440,7 @@ static int nfs_readdir_search_for_pos(struct nfs_cache_array *array, if (diff >= array->size) { if (array->page_is_eof) goto out_eof; + nfs_readdir_seek_next_array(array, desc); return -EAGAIN; } @@ -500,7 +513,8 @@ check_eof: status = -EBADCOOKIE; if (desc->dir_cookie == array->last_cookie) desc->eof = true; - } + } else + nfs_readdir_seek_next_array(array, desc); out: return status; } @@ -517,11 +531,6 @@ static int nfs_readdir_search_array(struct nfs_readdir_descriptor *desc) else status = nfs_readdir_search_for_cookie(array, desc); - if (status == -EAGAIN) { - desc->last_cookie = array->last_cookie; - desc->current_index += array->size; - desc->page_index++; - } kunmap_atomic(array); return status; } @@ -998,7 +1007,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc, { struct file *file = desc->file; struct nfs_cache_array *array; - unsigned int i = 0; + unsigned int i; array = kmap(desc->page); for (i = desc->cache_entry_index; i < array->size; i++) { @@ -1011,10 +1020,13 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc, break; } memcpy(desc->verf, verf, sizeof(desc->verf)); - if (i < (array->size-1)) - desc->dir_cookie = array->array[i+1].cookie; - else + if (i == array->size - 1) { desc->dir_cookie = array->last_cookie; + nfs_readdir_seek_next_array(array, desc); + } else { + desc->dir_cookie = array->array[i + 1].cookie; + desc->last_cookie = array->array[0].cookie; + } if (nfs_readdir_use_cookie(file)) desc->ctx->pos = desc->dir_cookie; else -- cgit v1.2.3 From 580f236737d13ee25d5b0b1d124f50014fe6833b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 7 Feb 2022 13:37:00 -0500 Subject: NFS: Adjust the amount of readahead performed by NFS readdir The current NFS readdir code will always try to maximise the amount of readahead it performs on the assumption that we can cache anything that isn't immediately read by the process. There are several cases where this assumption breaks down, including when the 'ls -l' heuristic kicks in to try to force use of readdirplus as a batch replacement for lookup/getattr. This patch therefore tries to tone down the amount of readahead we perform, and adjust it to try to match the amount of data being requested by user space. 
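A simplified userspace model of the adjustment follows; the constants are illustrative stand-ins for NFS_MIN_FILE_IO_SIZE, the server-negotiated dtsize and NFS_INIT_DTSIZE:

#include <stdio.h>

#define MIN_IO_SIZE	1024u	/* stand-in for NFS_MIN_FILE_IO_SIZE */
#define SERVER_DTSIZE	65536u	/* stand-in for the server's dtsize */
#define INIT_DTSIZE	4096u	/* stand-in for NFS_INIT_DTSIZE (one page) */

static unsigned int clamp_dtsize(unsigned int sz)
{
	if (sz > SERVER_DTSIZE)
		sz = SERVER_DTSIZE;
	if (sz < MIN_IO_SIZE)
		sz = MIN_IO_SIZE;
	return sz;
}

static unsigned int grow_dtsize(unsigned int sz)
{
	return clamp_dtsize(sz << 1);	/* the reader keeps coming back for more */
}

static unsigned int shrink_dtsize(unsigned int sz)
{
	return clamp_dtsize(sz >> 1);	/* a single fill overshot what was read */
}

int main(void)
{
	unsigned int dtsize = INIT_DTSIZE;

	dtsize = grow_dtsize(dtsize);
	dtsize = shrink_dtsize(dtsize);
	printf("final dtsize: %u\n", dtsize);
	return 0;
}

Doubling and halving keeps the transfer size within a factor of two of what the reader actually consumes, while the clamp keeps it inside the server's limits.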
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++- include/linux/nfs_fs.h | 1 + 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 60f7feee0a16..520dc3ec4aef 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -69,6 +69,8 @@ const struct address_space_operations nfs_dir_aops = { .freepage = nfs_readdir_clear_array, }; +#define NFS_INIT_DTSIZE PAGE_SIZE + static struct nfs_open_dir_context * alloc_nfs_open_dir_context(struct inode *dir) { @@ -78,6 +80,7 @@ alloc_nfs_open_dir_context(struct inode *dir) ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT); if (ctx != NULL) { ctx->attr_gencount = nfsi->attr_gencount; + ctx->dtsize = NFS_INIT_DTSIZE; spin_lock(&dir->i_lock); if (list_empty(&nfsi->open_files) && (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER)) @@ -154,6 +157,7 @@ struct nfs_readdir_descriptor { struct page *page; struct dir_context *ctx; pgoff_t page_index; + pgoff_t page_index_max; u64 dir_cookie; u64 last_cookie; u64 dup_cookie; @@ -166,12 +170,36 @@ struct nfs_readdir_descriptor { unsigned long gencount; unsigned long attr_gencount; unsigned int cache_entry_index; + unsigned int buffer_fills; + unsigned int dtsize; signed char duped; bool plus; bool eob; bool eof; }; +static void nfs_set_dtsize(struct nfs_readdir_descriptor *desc, unsigned int sz) +{ + struct nfs_server *server = NFS_SERVER(file_inode(desc->file)); + unsigned int maxsize = server->dtsize; + + if (sz > maxsize) + sz = maxsize; + if (sz < NFS_MIN_FILE_IO_SIZE) + sz = NFS_MIN_FILE_IO_SIZE; + desc->dtsize = sz; +} + +static void nfs_shrink_dtsize(struct nfs_readdir_descriptor *desc) +{ + nfs_set_dtsize(desc, desc->dtsize >> 1); +} + +static void nfs_grow_dtsize(struct nfs_readdir_descriptor *desc) +{ + nfs_set_dtsize(desc, desc->dtsize << 1); +} + static void nfs_readdir_array_init(struct nfs_cache_array *array) { memset(array, 0, sizeof(struct nfs_cache_array)); @@ -784,6 +812,7 @@ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc, break; arrays++; *arrays = page = new; + desc->page_index_max++; } else { new = nfs_readdir_page_get_next(mapping, page->index + 1, @@ -793,6 +822,7 @@ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc, if (page != *arrays) nfs_readdir_page_unlock_and_put(page); page = new; + desc->page_index_max = new->index; } status = nfs_readdir_add_to_array(entry, page); } while (!status && !entry->eof); @@ -858,7 +888,7 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc, struct nfs_entry *entry; size_t array_size; struct inode *inode = file_inode(desc->file); - size_t dtsize = NFS_SERVER(inode)->dtsize; + unsigned int dtsize = desc->dtsize; int status = -ENOMEM; entry = kzalloc(sizeof(*entry), GFP_KERNEL); @@ -894,6 +924,7 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc, status = nfs_readdir_page_filler(desc, entry, pages, pglen, arrays, narrays); + desc->buffer_fills++; } while (!status && nfs_readdir_page_needs_filling(page) && page_mapping(page)); @@ -941,6 +972,10 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc) if (!desc->page) return -ENOMEM; if (nfs_readdir_page_needs_filling(desc->page)) { + /* Grow the dtsize if we had to go back for more pages */ + if (desc->page_index == desc->page_index_max) + nfs_grow_dtsize(desc); + desc->page_index_max = desc->page_index; res = nfs_readdir_xdr_to_array(desc, nfsi->cookieverf, verf, &desc->page, 1); if (res < 0) { @@ -1075,6 +1110,7 @@ 
static int uncached_readdir(struct nfs_readdir_descriptor *desc) desc->cache_entry_index = 0; desc->last_cookie = desc->dir_cookie; desc->duped = 0; + desc->page_index_max = 0; status = nfs_readdir_xdr_to_array(desc, desc->verf, verf, arrays, sz); @@ -1084,10 +1120,22 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc) } desc->page = NULL; + /* + * Grow the dtsize if we have to go back for more pages, + * or shrink it if we're reading too many. + */ + if (!desc->eof) { + if (!desc->eob) + nfs_grow_dtsize(desc); + else if (desc->buffer_fills == 1 && + i < (desc->page_index_max >> 1)) + nfs_shrink_dtsize(desc); + } for (i = 0; i < sz && arrays[i]; i++) nfs_readdir_page_array_free(arrays[i]); out: + desc->page_index_max = -1; kfree(arrays); dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __func__, status); return status; @@ -1126,6 +1174,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) desc->file = file; desc->ctx = ctx; desc->plus = nfs_use_readdirplus(inode, ctx); + desc->page_index_max = -1; spin_lock(&file->f_lock); desc->dir_cookie = dir_ctx->dir_cookie; @@ -1136,6 +1185,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) desc->last_cookie = dir_ctx->last_cookie; desc->attr_gencount = dir_ctx->attr_gencount; desc->eof = dir_ctx->eof; + nfs_set_dtsize(desc, dir_ctx->dtsize); memcpy(desc->verf, dir_ctx->verf, sizeof(desc->verf)); spin_unlock(&file->f_lock); @@ -1187,6 +1237,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) dir_ctx->attr_gencount = desc->attr_gencount; dir_ctx->page_index = desc->page_index; dir_ctx->eof = desc->eof; + dir_ctx->dtsize = desc->dtsize; memcpy(dir_ctx->verf, desc->verf, sizeof(dir_ctx->verf)); spin_unlock(&file->f_lock); out_free: diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 1c533f2c1f36..691a27936849 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -107,6 +107,7 @@ struct nfs_open_dir_context { __u64 dup_cookie; __u64 last_cookie; pgoff_t page_index; + unsigned int dtsize; signed char duped; bool eof; }; -- cgit v1.2.3 From 6c34f05b754622f473b546fd19ad3a89bd65bd89 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 22 Feb 2022 16:23:12 -0500 Subject: NFS: If the cookie verifier changes, we must invalidate the page cache Ensure that if the cookie verifier changes when we use the zero-valued cookie, then we invalidate any cached pages. Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 520dc3ec4aef..9998d7d17367 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -990,9 +990,14 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc) /* * Set the cookie verifier if the page cache was empty */ - if (desc->page_index == 0) + if (desc->last_cookie == 0 && + memcmp(nfsi->cookieverf, verf, sizeof(nfsi->cookieverf))) { memcpy(nfsi->cookieverf, verf, sizeof(nfsi->cookieverf)); + invalidate_inode_pages2_range(desc->file->f_mapping, + desc->page_index_max + 1, + -1); + } } res = nfs_readdir_search_array(desc); if (res == 0) -- cgit v1.2.3 From 9ff89c25d8addeee8eea84fa828f1d2ad659cc54 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 7 Feb 2022 15:07:01 -0500 Subject: NFS: Simplify nfs_readdir_xdr_to_array() Recent changes to readdir mean that we can cope with partially filled page cache entries, so we no longer need to rely on looping in nfs_readdir_xdr_to_array(). 
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 9998d7d17367..9d086ab4f889 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -889,6 +889,7 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc, size_t array_size; struct inode *inode = file_inode(desc->file); unsigned int dtsize = desc->dtsize; + unsigned int pglen; int status = -ENOMEM; entry = kzalloc(sizeof(*entry), GFP_KERNEL); @@ -906,28 +907,20 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc, if (!pages) goto out; - do { - unsigned int pglen; - status = nfs_readdir_xdr_filler(desc, verf_arg, entry->cookie, - pages, dtsize, - verf_res); - if (status < 0) - break; - - pglen = status; - if (pglen == 0) { - nfs_readdir_page_set_eof(page); - break; - } - - verf_arg = verf_res; + status = nfs_readdir_xdr_filler(desc, verf_arg, entry->cookie, pages, + dtsize, verf_res); + if (status < 0) + goto free_pages; + pglen = status; + if (pglen != 0) status = nfs_readdir_page_filler(desc, entry, pages, pglen, arrays, narrays); - desc->buffer_fills++; - } while (!status && nfs_readdir_page_needs_filling(page) && - page_mapping(page)); + else + nfs_readdir_page_set_eof(page); + desc->buffer_fills++; +free_pages: nfs_readdir_free_pages(pages, array_size); out: nfs_free_fattr(entry->fattr); -- cgit v1.2.3 From 9c3f4d988c23d099095c8b75cbd449e0466fa102 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 17 Feb 2022 13:02:37 -0500 Subject: NFS: Reduce use of uncached readdir When reading a very large directory, we want to try to keep the page cache up to date if doing so is inexpensive. With the change to allow readdir to continue reading even when the cache is incomplete, we no longer need to fall back to uncached readdir in order to scale to large directories. Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 23 +++-------------------- 1 file changed, 3 insertions(+), 20 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 9d086ab4f889..dc6acfd14fc7 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -999,28 +999,11 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc) return res; } -static bool nfs_readdir_dont_search_cache(struct nfs_readdir_descriptor *desc) -{ - struct address_space *mapping = desc->file->f_mapping; - struct inode *dir = file_inode(desc->file); - unsigned int dtsize = NFS_SERVER(dir)->dtsize; - loff_t size = i_size_read(dir); - - /* - * Default to uncached readdir if the page cache is empty, and - * we're looking for a non-zero cookie in a large directory. 
- */ - return desc->dir_cookie != 0 && mapping->nrpages == 0 && size > dtsize; -} - /* Search for desc->dir_cookie from the beginning of the page cache */ static int readdir_search_pagecache(struct nfs_readdir_descriptor *desc) { int res; - if (nfs_readdir_dont_search_cache(desc)) - return -EBADCOOKIE; - do { if (desc->page_index == 0) { desc->current_index = 0; @@ -1273,10 +1256,10 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence) } if (offset != filp->f_pos) { filp->f_pos = offset; - if (!nfs_readdir_use_cookie(filp)) { + dir_ctx->page_index = 0; + if (!nfs_readdir_use_cookie(filp)) dir_ctx->dir_cookie = 0; - dir_ctx->page_index = 0; - } else + else dir_ctx->dir_cookie = offset; if (offset == 0) memset(dir_ctx->verf, 0, sizeof(dir_ctx->verf)); -- cgit v1.2.3 From 230bc98f7a2a49eb472d184bdec91fd3096384b3 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 17 Feb 2022 11:08:24 -0500 Subject: NFS: Improve heuristic for readdirplus The heuristic for readdirplus is designed to try to detect 'ls -l' and similar patterns. It does so by looking for cache hit/miss patterns in both the attribute cache and in the dcache of the files in a given directory, and then sets a flag for the readdirplus code to interpret. The problem with this approach is that a single attribute or dcache miss can cause the NFS code to force a refresh of the attributes for the entire set of files contained in the directory. To be able to make a more nuanced decision, let's sample the number of hits and misses in the set of open directory descriptors. That allows us to set thresholds at which we start preferring READDIRPLUS over regular READDIR, or at which we start to force a re-read of the remaining readdir cache using READDIRPLUS. Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 80 ++++++++++++++++++++++++++++++++------------------ fs/nfs/inode.c | 4 +-- fs/nfs/internal.h | 4 +-- fs/nfs/nfstrace.h | 1 - include/linux/nfs_fs.h | 5 ++-- 5 files changed, 58 insertions(+), 36 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index dc6acfd14fc7..098fc1bdaac8 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -87,8 +87,7 @@ alloc_nfs_open_dir_context(struct inode *dir) nfs_set_cache_invalid(dir, NFS_INO_INVALID_DATA | NFS_INO_REVAL_FORCED); - list_add(&ctx->list, &nfsi->open_files); - clear_bit(NFS_INO_FORCE_READDIR, &nfsi->flags); + list_add_tail_rcu(&ctx->list, &nfsi->open_files); memcpy(ctx->verf, nfsi->cookieverf, sizeof(ctx->verf)); spin_unlock(&dir->i_lock); return ctx; @@ -99,9 +98,9 @@ alloc_nfs_open_dir_context(struct inode *dir) static void put_nfs_open_dir_context(struct inode *dir, struct nfs_open_dir_context *ctx) { spin_lock(&dir->i_lock); - list_del(&ctx->list); + list_del_rcu(&ctx->list); spin_unlock(&dir->i_lock); - kfree(ctx); + kfree_rcu(ctx, rcu_head); } /* @@ -594,7 +593,6 @@ static int nfs_readdir_xdr_filler(struct nfs_readdir_descriptor *desc, /* We requested READDIRPLUS, but the server doesn't grok it */ if (error == -ENOTSUPP && desc->plus) { NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS; - clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags); desc->plus = arg.plus = false; goto again; } @@ -644,51 +642,63 @@ int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry) return 1; } -static -bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx) +#define NFS_READDIR_CACHE_USAGE_THRESHOLD (8UL) + +static bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx, + unsigned int cache_hits, + unsigned int cache_misses) { if 
(!nfs_server_capable(dir, NFS_CAP_READDIRPLUS)) return false; - if (test_and_clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags)) - return true; - if (ctx->pos == 0) + if (ctx->pos == 0 || + cache_hits + cache_misses > NFS_READDIR_CACHE_USAGE_THRESHOLD) return true; return false; } /* - * This function is called by the lookup and getattr code to request the + * This function is called by the getattr code to request the * use of readdirplus to accelerate any future lookups in the same * directory. */ -void nfs_advise_use_readdirplus(struct inode *dir) +void nfs_readdir_record_entry_cache_hit(struct inode *dir) { struct nfs_inode *nfsi = NFS_I(dir); + struct nfs_open_dir_context *ctx; if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) && - !list_empty(&nfsi->open_files)) - set_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags); + S_ISDIR(dir->i_mode)) { + rcu_read_lock(); + list_for_each_entry_rcu (ctx, &nfsi->open_files, list) + atomic_inc(&ctx->cache_hits); + rcu_read_unlock(); + } } /* * This function is mainly for use by nfs_getattr(). * * If this is an 'ls -l', we want to force use of readdirplus. - * Do this by checking if there is an active file descriptor - * and calling nfs_advise_use_readdirplus, then forcing a - * cache flush. */ -void nfs_force_use_readdirplus(struct inode *dir) +void nfs_readdir_record_entry_cache_miss(struct inode *dir) { struct nfs_inode *nfsi = NFS_I(dir); + struct nfs_open_dir_context *ctx; if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) && - !list_empty(&nfsi->open_files)) { - set_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags); - set_bit(NFS_INO_FORCE_READDIR, &nfsi->flags); + S_ISDIR(dir->i_mode)) { + rcu_read_lock(); + list_for_each_entry_rcu (ctx, &nfsi->open_files, list) + atomic_inc(&ctx->cache_misses); + rcu_read_unlock(); } } +static void nfs_lookup_advise_force_readdirplus(struct inode *dir) +{ + nfs_readdir_record_entry_cache_miss(dir); +} + static void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry, unsigned long dir_verifier) @@ -1122,6 +1132,19 @@ out: return status; } +#define NFS_READDIR_CACHE_MISS_THRESHOLD (16UL) + +static void nfs_readdir_handle_cache_misses(struct inode *inode, + struct nfs_readdir_descriptor *desc, + pgoff_t page_index, + unsigned int cache_misses) +{ + if (desc->ctx->pos == 0 || + cache_misses <= NFS_READDIR_CACHE_MISS_THRESHOLD) + return; + invalidate_mapping_pages(inode->i_mapping, page_index + 1, -1); +} + /* The file offset position represents the dirent entry number. A last cookie cache takes care of the common case of reading the whole directory. 
@@ -1133,6 +1156,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) struct nfs_inode *nfsi = NFS_I(inode); struct nfs_open_dir_context *dir_ctx = file->private_data; struct nfs_readdir_descriptor *desc; + unsigned int cache_hits, cache_misses; pgoff_t page_index; int res; @@ -1154,7 +1178,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) goto out; desc->file = file; desc->ctx = ctx; - desc->plus = nfs_use_readdirplus(inode, ctx); desc->page_index_max = -1; spin_lock(&file->f_lock); @@ -1168,6 +1191,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) desc->eof = dir_ctx->eof; nfs_set_dtsize(desc, dir_ctx->dtsize); memcpy(desc->verf, dir_ctx->verf, sizeof(desc->verf)); + cache_hits = atomic_xchg(&dir_ctx->cache_hits, 0); + cache_misses = atomic_xchg(&dir_ctx->cache_misses, 0); spin_unlock(&file->f_lock); if (desc->eof) { @@ -1175,9 +1200,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) goto out_free; } - if (test_and_clear_bit(NFS_INO_FORCE_READDIR, &nfsi->flags) && - list_is_singular(&nfsi->open_files)) - invalidate_mapping_pages(inode->i_mapping, page_index + 1, -1); + desc->plus = nfs_use_readdirplus(inode, ctx, cache_hits, cache_misses); + nfs_readdir_handle_cache_misses(inode, desc, page_index, cache_misses); do { res = readdir_search_pagecache(desc); @@ -1196,7 +1220,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) break; } if (res == -ETOOSMALL && desc->plus) { - clear_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags); nfs_zap_caches(inode); desc->page_index = 0; desc->plus = false; @@ -1610,7 +1633,7 @@ nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry, nfs_set_verifier(dentry, dir_verifier); /* set a readdirplus hint that we had a cache miss */ - nfs_force_use_readdirplus(dir); + nfs_lookup_advise_force_readdirplus(dir); ret = 1; out: nfs_free_fattr(fattr); @@ -1667,7 +1690,6 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry, nfs_mark_dir_for_revalidate(dir); goto out_bad; } - nfs_advise_use_readdirplus(dir); goto out_valid; } @@ -1872,7 +1894,7 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in goto out; /* Notify readdir to use READDIRPLUS */ - nfs_force_use_readdirplus(dir); + nfs_lookup_advise_force_readdirplus(dir); no_entry: res = d_splice_alias(inode, dentry); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 7cecabf57b95..bbf4357ff727 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -787,7 +787,7 @@ static void nfs_readdirplus_parent_cache_miss(struct dentry *dentry) if (!nfs_server_capable(d_inode(dentry), NFS_CAP_READDIRPLUS)) return; parent = dget_parent(dentry); - nfs_force_use_readdirplus(d_inode(parent)); + nfs_readdir_record_entry_cache_miss(d_inode(parent)); dput(parent); } @@ -798,7 +798,7 @@ static void nfs_readdirplus_parent_cache_hit(struct dentry *dentry) if (!nfs_server_capable(d_inode(dentry), NFS_CAP_READDIRPLUS)) return; parent = dget_parent(dentry); - nfs_advise_use_readdirplus(d_inode(parent)); + nfs_readdir_record_entry_cache_hit(d_inode(parent)); dput(parent); } diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index b5398af53c7f..194840a97e3a 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -366,8 +366,8 @@ extern struct nfs_client *nfs_init_client(struct nfs_client *clp, const struct nfs_client_initdata *); /* dir.c */ -extern void nfs_advise_use_readdirplus(struct inode *dir); -extern void nfs_force_use_readdirplus(struct inode *dir); +extern void 
nfs_readdir_record_entry_cache_hit(struct inode *dir); +extern void nfs_readdir_record_entry_cache_miss(struct inode *dir); extern unsigned long nfs_access_cache_count(struct shrinker *shrink, struct shrink_control *sc); extern unsigned long nfs_access_cache_scan(struct shrinker *shrink, diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 45a310b586ce..3672f6703ee7 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -36,7 +36,6 @@ #define nfs_show_nfsi_flags(v) \ __print_flags(v, "|", \ - { BIT(NFS_INO_ADVISE_RDPLUS), "ADVISE_RDPLUS" }, \ { BIT(NFS_INO_STALE), "STALE" }, \ { BIT(NFS_INO_ACL_LRU_SET), "ACL_LRU_SET" }, \ { BIT(NFS_INO_INVALIDATING), "INVALIDATING" }, \ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 691a27936849..20a4cf0acad2 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -101,6 +101,8 @@ struct nfs_open_context { struct nfs_open_dir_context { struct list_head list; + atomic_t cache_hits; + atomic_t cache_misses; unsigned long attr_gencount; __be32 verf[NFS_DIR_VERIFIER_SIZE]; __u64 dir_cookie; @@ -110,6 +112,7 @@ struct nfs_open_dir_context { unsigned int dtsize; signed char duped; bool eof; + struct rcu_head rcu_head; }; /* @@ -274,13 +277,11 @@ struct nfs4_copy_state { /* * Bit offsets in flags field */ -#define NFS_INO_ADVISE_RDPLUS (0) /* advise readdirplus */ #define NFS_INO_STALE (1) /* possible stale inode */ #define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ #define NFS_INO_INVALIDATING (3) /* inode is being invalidated */ #define NFS_INO_PRESERVE_UNLINKED (4) /* preserve file if removed while open */ #define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ -#define NFS_INO_FORCE_READDIR (7) /* force readdirplus */ #define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */ #define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */ #define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */ -- cgit v1.2.3 From ad1e109a4109ce0cbdfebfbe1958d0c333166d5c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 17 Feb 2022 15:46:23 -0500 Subject: NFS: Don't ask for readdirplus unless it can help nfs_getattr() If attribute caching is turned off, then use of readdirplus is not going to help stat() performance. Readdirplus also doesn't help if a file is being written to, since we will have to flush those writes in order to sync the mtime/ctime. 
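That decision reduces to a small predicate; the structure below is a simplified stand-in for the inode and server state the patch consults, and the five-second floor mirrors the '5 * HZ' comparison:

#include <stdbool.h>

struct inode_state {
	bool server_has_readdirplus;	/* NFS_CAP_READDIRPLUS */
	bool has_writebacks;		/* dirty data would force a flush anyway */
	unsigned int attr_timeo_max;	/* attribute cache lifetime, in seconds */
};

bool getattr_readdirplus_enable(const struct inode_state *st)
{
	return st->server_has_readdirplus &&
	       !st->has_writebacks &&
	       st->attr_timeo_max > 5;
}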
Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 45 +++++++++++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 20 deletions(-) diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index bbf4357ff727..e51d86707fca 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -780,26 +780,32 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, } EXPORT_SYMBOL_GPL(nfs_setattr_update_inode); -static void nfs_readdirplus_parent_cache_miss(struct dentry *dentry) +/* + * Don't request help from readdirplus if the file is being written to, + * or if attribute caching is turned off + */ +static bool nfs_getattr_readdirplus_enable(const struct inode *inode) { - struct dentry *parent; + return nfs_server_capable(inode, NFS_CAP_READDIRPLUS) && + !nfs_have_writebacks(inode) && NFS_MAXATTRTIMEO(inode) > 5 * HZ; +} - if (!nfs_server_capable(d_inode(dentry), NFS_CAP_READDIRPLUS)) - return; - parent = dget_parent(dentry); - nfs_readdir_record_entry_cache_miss(d_inode(parent)); - dput(parent); +static void nfs_readdirplus_parent_cache_miss(struct dentry *dentry) +{ + if (!IS_ROOT(dentry)) { + struct dentry *parent = dget_parent(dentry); + nfs_readdir_record_entry_cache_miss(d_inode(parent)); + dput(parent); + } } static void nfs_readdirplus_parent_cache_hit(struct dentry *dentry) { - struct dentry *parent; - - if (!nfs_server_capable(d_inode(dentry), NFS_CAP_READDIRPLUS)) - return; - parent = dget_parent(dentry); - nfs_readdir_record_entry_cache_hit(d_inode(parent)); - dput(parent); + if (!IS_ROOT(dentry)) { + struct dentry *parent = dget_parent(dentry); + nfs_readdir_record_entry_cache_hit(d_inode(parent)); + dput(parent); + } } static u32 nfs_get_valid_attrmask(struct inode *inode) @@ -835,6 +841,7 @@ int nfs_getattr(struct user_namespace *mnt_userns, const struct path *path, int err = 0; bool force_sync = query_flags & AT_STATX_FORCE_SYNC; bool do_update = false; + bool readdirplus_enabled = nfs_getattr_readdirplus_enable(inode); trace_nfs_getattr_enter(inode); @@ -843,7 +850,8 @@ int nfs_getattr(struct user_namespace *mnt_userns, const struct path *path, STATX_INO | STATX_SIZE | STATX_BLOCKS; if ((query_flags & AT_STATX_DONT_SYNC) && !force_sync) { - nfs_readdirplus_parent_cache_hit(path->dentry); + if (readdirplus_enabled) + nfs_readdirplus_parent_cache_hit(path->dentry); goto out_no_revalidate; } @@ -893,15 +901,12 @@ int nfs_getattr(struct user_namespace *mnt_userns, const struct path *path, do_update |= cache_validity & NFS_INO_INVALID_BLOCKS; if (do_update) { - /* Update the attribute cache */ - if (!(server->flags & NFS_MOUNT_NOAC)) + if (readdirplus_enabled) nfs_readdirplus_parent_cache_miss(path->dentry); - else - nfs_readdirplus_parent_cache_hit(path->dentry); err = __nfs_revalidate_inode(server, inode); if (err) goto out; - } else + } else if (readdirplus_enabled) nfs_readdirplus_parent_cache_hit(path->dentry); out_no_revalidate: /* Only return attributes that were revalidated. */ -- cgit v1.2.3 From c49c68944f2d4c3827cd8ac7f70da277674d11ad Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 18 Feb 2022 12:04:06 -0500 Subject: NFSv4: Ask for a full XDR buffer of readdir goodness Instead of pretending that we know the ratio of directory info vs readdirplus attribute info, just set the 'dircount' field to the same value as the 'maxcount' field. 
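Put differently, the two size fields in the READDIRPLUS arguments now carry the same value. A sketch of the encoding step, with htonl() standing in for cpu_to_be32() and an illustrative helper name:

#include <stdint.h>
#include <arpa/inet.h>	/* htonl() as a stand-in for cpu_to_be32() */

void encode_readdirplus_counts(uint32_t *p, uint32_t count)
{
	uint32_t dircount = count;	/* hint for directory-information bytes */
	uint32_t maxcount = count;	/* hard limit on the total reply size */

	*p++ = htonl(dircount);
	*p = htonl(maxcount);
}

Letting the server apply its own split avoids truncated replies when a guessed ratio between names and attributes turns out to be wrong.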
Signed-off-by: Trond Myklebust --- fs/nfs/nfs3xdr.c | 7 ++++--- fs/nfs/nfs4xdr.c | 6 +++--- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 7ab60ad98776..d6779ceeb39e 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -1261,6 +1261,8 @@ static void nfs3_xdr_enc_readdir3args(struct rpc_rqst *req, static void encode_readdirplus3args(struct xdr_stream *xdr, const struct nfs3_readdirargs *args) { + uint32_t dircount = args->count; + uint32_t maxcount = args->count; __be32 *p; encode_nfs_fh3(xdr, args->fh); @@ -1273,9 +1275,8 @@ static void encode_readdirplus3args(struct xdr_stream *xdr, * readdirplus: need dircount + buffer size. * We just make sure we make dircount big enough */ - *p++ = cpu_to_be32(args->count >> 3); - - *p = cpu_to_be32(args->count); + *p++ = cpu_to_be32(dircount); + *p = cpu_to_be32(maxcount); } static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req, diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 8e70b92df4cc..b7780b97dc4d 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1605,7 +1605,8 @@ static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg FATTR4_WORD0_RDATTR_ERROR, FATTR4_WORD1_MOUNTED_ON_FILEID, }; - uint32_t dircount = readdir->count >> 1; + uint32_t dircount = readdir->count; + uint32_t maxcount = readdir->count; __be32 *p, verf[2]; uint32_t attrlen = 0; unsigned int i; @@ -1618,7 +1619,6 @@ static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg FATTR4_WORD1_SPACE_USED|FATTR4_WORD1_TIME_ACCESS| FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; attrs[2] |= FATTR4_WORD2_SECURITY_LABEL; - dircount >>= 1; } /* Use mounted_on_fileid only if the server supports it */ if (!(readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)) @@ -1634,7 +1634,7 @@ static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg encode_nfs4_verifier(xdr, &readdir->verifier); p = reserve_space(xdr, 12 + (attrlen << 2)); *p++ = cpu_to_be32(dircount); - *p++ = cpu_to_be32(readdir->count); + *p++ = cpu_to_be32(maxcount); *p++ = cpu_to_be32(attrlen); for (i = 0; i < attrlen; i++) *p++ = cpu_to_be32(attrs[i]); -- cgit v1.2.3 From 2c2c336506e9bd4056fca25301b8a06fb7aefd32 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 19 Feb 2022 09:56:45 -0500 Subject: NFS: Readdirplus can't help lookup for case insensitive filesystems If the filesystem is case insensitive, then readdirplus can't help with cache misses, since it won't return case folded variants of the filename. Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 098fc1bdaac8..dcfc44411787 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -696,6 +696,8 @@ void nfs_readdir_record_entry_cache_miss(struct inode *dir) static void nfs_lookup_advise_force_readdirplus(struct inode *dir) { + if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE)) + return; nfs_readdir_record_entry_cache_miss(dir); } -- cgit v1.2.3 From 0b3cc71b5ab31ef90eb9b8b2d8ca580fbf88c590 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 19 Feb 2022 10:06:05 -0500 Subject: NFS: Don't request readdirplus when revalidation was forced If the revalidation was forced, due to the presence of a LOOKUP_EXCL or a LOOKUP_REVAL flag, then readdirplus won't help. It also can't help when we're doing a path component lookup. 
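Together with the case-insensitivity check above, the hint boils down to a small filter; a sketch with illustrative flag values (not the kernel's):

#include <stdbool.h>

#define LOOKUP_EXCL	0x0001	/* illustrative values only */
#define LOOKUP_PARENT	0x0002
#define LOOKUP_REVAL	0x0004

bool should_hint_readdirplus(bool case_insensitive_export, unsigned int flags)
{
	if (case_insensitive_export)
		return false;	/* READDIRPLUS cannot return case-folded names */
	if (flags & (LOOKUP_EXCL | LOOKUP_PARENT | LOOKUP_REVAL))
		return false;	/* forced revalidation or path-component lookup */
	return true;
}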
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index dcfc44411787..cf7974642a19 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -694,10 +694,13 @@ void nfs_readdir_record_entry_cache_miss(struct inode *dir) } } -static void nfs_lookup_advise_force_readdirplus(struct inode *dir) +static void nfs_lookup_advise_force_readdirplus(struct inode *dir, + unsigned int flags) { if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE)) return; + if (flags & (LOOKUP_EXCL | LOOKUP_PARENT | LOOKUP_REVAL)) + return; nfs_readdir_record_entry_cache_miss(dir); } @@ -1596,15 +1599,17 @@ nfs_lookup_revalidate_delegated(struct inode *dir, struct dentry *dentry, return nfs_lookup_revalidate_done(dir, dentry, inode, 1); } -static int -nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry, - struct inode *inode) +static int nfs_lookup_revalidate_dentry(struct inode *dir, + struct dentry *dentry, + struct inode *inode, unsigned int flags) { struct nfs_fh *fhandle; struct nfs_fattr *fattr; unsigned long dir_verifier; int ret; + trace_nfs_lookup_revalidate_enter(dir, dentry, flags); + ret = -ENOMEM; fhandle = nfs_alloc_fhandle(); fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode)); @@ -1625,6 +1630,10 @@ nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry, } goto out; } + + /* Request help from readdirplus */ + nfs_lookup_advise_force_readdirplus(dir, flags); + ret = 0; if (nfs_compare_fh(NFS_FH(inode), fhandle)) goto out; @@ -1634,8 +1643,6 @@ nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry, nfs_setsecurity(inode, fattr); nfs_set_verifier(dentry, dir_verifier); - /* set a readdirplus hint that we had a cache miss */ - nfs_lookup_advise_force_readdirplus(dir); ret = 1; out: nfs_free_fattr(fattr); @@ -1701,8 +1708,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry, if (NFS_STALE(inode)) goto out_bad; - trace_nfs_lookup_revalidate_enter(dir, dentry, flags); - return nfs_lookup_revalidate_dentry(dir, dentry, inode); + return nfs_lookup_revalidate_dentry(dir, dentry, inode, flags); out_valid: return nfs_lookup_revalidate_done(dir, dentry, inode, 1); out_bad: @@ -1896,7 +1902,7 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in goto out; /* Notify readdir to use READDIRPLUS */ - nfs_lookup_advise_force_readdirplus(dir); + nfs_lookup_advise_force_readdirplus(dir, flags); no_entry: res = d_splice_alias(inode, dentry); @@ -2159,7 +2165,7 @@ nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry, reval_dentry: if (flags & LOOKUP_RCU) return -ECHILD; - return nfs_lookup_revalidate_dentry(dir, dentry, inode); + return nfs_lookup_revalidate_dentry(dir, dentry, inode, flags); full_reval: return nfs_do_lookup_revalidate(dir, dentry, flags); -- cgit v1.2.3 From 310e3187450db2ca5699758296d23fc2ce3f37f0 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 19 Feb 2022 19:24:38 -0500 Subject: NFS: Add basic readdir tracing Add tracing to track how often the client goes to the server for updated readdir information. 
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 13 ++++++++++- fs/nfs/nfstrace.h | 68 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+), 1 deletion(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index cf7974642a19..d591d20f7534 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -984,10 +984,14 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc) if (desc->page_index == desc->page_index_max) nfs_grow_dtsize(desc); desc->page_index_max = desc->page_index; + trace_nfs_readdir_cache_fill(desc->file, nfsi->cookieverf, + desc->last_cookie, + desc->page->index, desc->dtsize); res = nfs_readdir_xdr_to_array(desc, nfsi->cookieverf, verf, &desc->page, 1); if (res < 0) { nfs_readdir_page_unlock_and_put_cached(desc); + trace_nfs_readdir_cache_fill_done(inode, res); if (res == -EBADCOOKIE || res == -ENOTSYNC) { invalidate_inode_pages2(desc->file->f_mapping); desc->page_index = 0; @@ -1108,7 +1112,14 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc) desc->duped = 0; desc->page_index_max = 0; + trace_nfs_readdir_uncached(desc->file, desc->verf, desc->last_cookie, + -1, desc->dtsize); + status = nfs_readdir_xdr_to_array(desc, desc->verf, verf, arrays, sz); + if (status < 0) { + trace_nfs_readdir_uncached_done(file_inode(desc->file), status); + goto out_free; + } for (i = 0; !desc->eob && i < sz && arrays[i]; i++) { desc->page = arrays[i]; @@ -1127,7 +1138,7 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc) i < (desc->page_index_max >> 1)) nfs_shrink_dtsize(desc); } - +out_free: for (i = 0; i < sz && arrays[i]; i++) nfs_readdir_page_array_free(arrays[i]); out: diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 3672f6703ee7..c2d0543ecb2d 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -160,6 +160,8 @@ DEFINE_NFS_INODE_EVENT(nfs_fsync_enter); DEFINE_NFS_INODE_EVENT_DONE(nfs_fsync_exit); DEFINE_NFS_INODE_EVENT(nfs_access_enter); DEFINE_NFS_INODE_EVENT_DONE(nfs_set_cache_invalid); +DEFINE_NFS_INODE_EVENT_DONE(nfs_readdir_cache_fill_done); +DEFINE_NFS_INODE_EVENT_DONE(nfs_readdir_uncached_done); TRACE_EVENT(nfs_access_exit, TP_PROTO( @@ -271,6 +273,72 @@ DEFINE_NFS_UPDATE_SIZE_EVENT(wcc); DEFINE_NFS_UPDATE_SIZE_EVENT(update); DEFINE_NFS_UPDATE_SIZE_EVENT(grow); +DECLARE_EVENT_CLASS(nfs_readdir_event, + TP_PROTO( + const struct file *file, + const __be32 *verifier, + u64 cookie, + pgoff_t page_index, + unsigned int dtsize + ), + + TP_ARGS(file, verifier, cookie, page_index, dtsize), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(u32, fhandle) + __field(u64, fileid) + __field(u64, version) + __array(char, verifier, NFS4_VERIFIER_SIZE) + __field(u64, cookie) + __field(pgoff_t, index) + __field(unsigned int, dtsize) + ), + + TP_fast_assign( + const struct inode *dir = file_inode(file); + const struct nfs_inode *nfsi = NFS_I(dir); + + __entry->dev = dir->i_sb->s_dev; + __entry->fileid = nfsi->fileid; + __entry->fhandle = nfs_fhandle_hash(&nfsi->fh); + __entry->version = inode_peek_iversion_raw(dir); + if (cookie != 0) + memcpy(__entry->verifier, verifier, + NFS4_VERIFIER_SIZE); + else + memset(__entry->verifier, 0, + NFS4_VERIFIER_SIZE); + __entry->cookie = cookie; + __entry->index = page_index; + __entry->dtsize = dtsize; + ), + + TP_printk( + "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu " + "cookie=%s:0x%llx cache_index=%lu dtsize=%u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long long)__entry->fileid, __entry->fhandle, + __entry->version, show_nfs4_verifier(__entry->verifier), 
+ (unsigned long long)__entry->cookie, __entry->index, + __entry->dtsize + ) +); + +#define DEFINE_NFS_READDIR_EVENT(name) \ + DEFINE_EVENT(nfs_readdir_event, name, \ + TP_PROTO( \ + const struct file *file, \ + const __be32 *verifier, \ + u64 cookie, \ + pgoff_t page_index, \ + unsigned int dtsize \ + ), \ + TP_ARGS(file, verifier, cookie, page_index, dtsize)) + +DEFINE_NFS_READDIR_EVENT(nfs_readdir_cache_fill); +DEFINE_NFS_READDIR_EVENT(nfs_readdir_uncached); + DECLARE_EVENT_CLASS(nfs_lookup_event, TP_PROTO( const struct inode *dir, -- cgit v1.2.3 From eace45a18ccb34a746c3060601103aa14ca29923 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 19 Feb 2022 19:19:35 -0500 Subject: NFS: Trace effects of readdirplus on the dcache Trace the effects of readdirplus on attribute and dentry revalidation. Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 5 +++++ fs/nfs/nfstrace.h | 3 +++ 2 files changed, 8 insertions(+) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index d591d20f7534..8b25a39b1761 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -754,8 +754,12 @@ again: status = nfs_refresh_inode(d_inode(dentry), entry->fattr); if (!status) nfs_setsecurity(d_inode(dentry), entry->fattr); + trace_nfs_readdir_lookup_revalidate(d_inode(parent), + dentry, 0, status); goto out; } else { + trace_nfs_readdir_lookup_revalidate_failed( + d_inode(parent), dentry, 0); d_invalidate(dentry); dput(dentry); dentry = NULL; @@ -777,6 +781,7 @@ again: dentry = alias; } nfs_set_verifier(dentry, dir_verifier); + trace_nfs_readdir_lookup(d_inode(parent), dentry, 0); out: dput(dentry); } diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index c2d0543ecb2d..7c1102b991d0 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -432,6 +432,9 @@ DEFINE_NFS_LOOKUP_EVENT(nfs_lookup_enter); DEFINE_NFS_LOOKUP_EVENT_DONE(nfs_lookup_exit); DEFINE_NFS_LOOKUP_EVENT(nfs_lookup_revalidate_enter); DEFINE_NFS_LOOKUP_EVENT_DONE(nfs_lookup_revalidate_exit); +DEFINE_NFS_LOOKUP_EVENT(nfs_readdir_lookup); +DEFINE_NFS_LOOKUP_EVENT(nfs_readdir_lookup_revalidate_failed); +DEFINE_NFS_LOOKUP_EVENT_DONE(nfs_readdir_lookup_revalidate); TRACE_EVENT(nfs_atomic_open_enter, TP_PROTO( -- cgit v1.2.3 From 11d03d0a1ed8dbdabab1b5ab21861ad5cad4aef2 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 19 Feb 2022 19:09:21 -0500 Subject: NFS: Trace effects of the readdirplus heuristic Enable tracking of when the readdirplus heuristic causes a page cache invalidation. 
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 11 ++++++++++- fs/nfs/nfstrace.h | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 1 deletion(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 8b25a39b1761..8a246df98db5 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1000,6 +1000,8 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc) if (res == -EBADCOOKIE || res == -ENOTSYNC) { invalidate_inode_pages2(desc->file->f_mapping); desc->page_index = 0; + trace_nfs_readdir_invalidate_cache_range( + inode, 0, MAX_LFS_FILESIZE); return -EAGAIN; } return res; @@ -1014,6 +1016,9 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc) invalidate_inode_pages2_range(desc->file->f_mapping, desc->page_index_max + 1, -1); + trace_nfs_readdir_invalidate_cache_range( + inode, desc->page_index_max + 1, + MAX_LFS_FILESIZE); } } res = nfs_readdir_search_array(desc); @@ -1163,7 +1168,11 @@ static void nfs_readdir_handle_cache_misses(struct inode *inode, if (desc->ctx->pos == 0 || cache_misses <= NFS_READDIR_CACHE_MISS_THRESHOLD) return; - invalidate_mapping_pages(inode->i_mapping, page_index + 1, -1); + if (invalidate_mapping_pages(inode->i_mapping, page_index + 1, -1) == 0) + return; + trace_nfs_readdir_invalidate_cache_range( + inode, (loff_t)(page_index + 1) << PAGE_SHIFT, + MAX_LFS_FILESIZE); } /* The file offset position represents the dirent entry number. A diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 7c1102b991d0..ec2645d20abf 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -273,6 +273,56 @@ DEFINE_NFS_UPDATE_SIZE_EVENT(wcc); DEFINE_NFS_UPDATE_SIZE_EVENT(update); DEFINE_NFS_UPDATE_SIZE_EVENT(grow); +DECLARE_EVENT_CLASS(nfs_inode_range_event, + TP_PROTO( + const struct inode *inode, + loff_t range_start, + loff_t range_end + ), + + TP_ARGS(inode, range_start, range_end), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(u32, fhandle) + __field(u64, fileid) + __field(u64, version) + __field(loff_t, range_start) + __field(loff_t, range_end) + ), + + TP_fast_assign( + const struct nfs_inode *nfsi = NFS_I(inode); + + __entry->dev = inode->i_sb->s_dev; + __entry->fhandle = nfs_fhandle_hash(&nfsi->fh); + __entry->fileid = nfsi->fileid; + __entry->version = inode_peek_iversion_raw(inode); + __entry->range_start = range_start; + __entry->range_end = range_end; + ), + + TP_printk( + "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu " + "range=[%lld, %lld]", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long long)__entry->fileid, + __entry->fhandle, __entry->version, + __entry->range_start, __entry->range_end + ) +); + +#define DEFINE_NFS_INODE_RANGE_EVENT(name) \ + DEFINE_EVENT(nfs_inode_range_event, name, \ + TP_PROTO( \ + const struct inode *inode, \ + loff_t range_start, \ + loff_t range_end \ + ), \ + TP_ARGS(inode, range_start, range_end)) + +DEFINE_NFS_INODE_RANGE_EVENT(nfs_readdir_invalidate_cache_range); + DECLARE_EVENT_CLASS(nfs_readdir_event, TP_PROTO( const struct file *file, -- cgit v1.2.3 From 9332cf14e2db4253bec827da66bd95e6c0f6a2f3 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 26 Feb 2022 18:38:41 -0500 Subject: NFS: Clean up page array initialisation/free Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 8a246df98db5..4983950de2ad 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -199,20 +199,17 @@ static void nfs_grow_dtsize(struct 
nfs_readdir_descriptor *desc) nfs_set_dtsize(desc, desc->dtsize << 1); } -static void nfs_readdir_array_init(struct nfs_cache_array *array) -{ - memset(array, 0, sizeof(struct nfs_cache_array)); -} - static void nfs_readdir_page_init_array(struct page *page, u64 last_cookie, u64 change_attr) { struct nfs_cache_array *array; array = kmap_atomic(page); - nfs_readdir_array_init(array); array->change_attr = change_attr; array->last_cookie = last_cookie; + array->size = 0; + array->page_full = 0; + array->page_is_eof = 0; array->cookies_are_ordered = 1; kunmap_atomic(array); } @@ -220,16 +217,15 @@ static void nfs_readdir_page_init_array(struct page *page, u64 last_cookie, /* * we are freeing strings created by nfs_add_to_readdir_array() */ -static -void nfs_readdir_clear_array(struct page *page) +static void nfs_readdir_clear_array(struct page *page) { struct nfs_cache_array *array; - int i; + unsigned int i; array = kmap_atomic(page); for (i = 0; i < array->size; i++) kfree(array->array[i].name); - nfs_readdir_array_init(array); + array->size = 0; kunmap_atomic(array); } -- cgit v1.2.3 From f648022faa68ef76058aa121d1aa3a967d59cae8 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 23 Feb 2022 11:31:51 -0500 Subject: NFS: Convert readdir page cache to use a cookie based index Instead of using a linear index to address the pages, use the cookie of the first entry, since that is what we use to match the page anyway. This allows us to avoid re-reading the entire cache on a seekdir() type of operation. The latter is very common when re-exporting NFS, and is a major performance drain. The change does affect our duplicate cookie detection, since we can no longer rely on the page index as a linear offset for detecting whether we looped backwards. However since we no longer do a linear search through all the pages on each call to nfs_readdir(), this is less of a concern than it was previously. The other downside is that invalidate_mapping_pages() no longer can use the page index to avoid clearing pages that have been read. A subsequent patch will restore the functionality this provides to the 'ls -l' heuristic. 
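The core of the change is a hash that maps the first cookie of a page worth of entries to a page-cache index. Condensed from the diff below:

        #define NFS_READDIR_COOKIE_MASK (U32_MAX >> 14)        /* keep 18 bits */

        static pgoff_t nfs_readdir_page_cookie_hash(u64 cookie)
        {
                if (cookie == 0)        /* the start of the directory stays at index 0 */
                        return 0;
                return xxhash(&cookie, sizeof(cookie), 0) & NFS_READDIR_COOKIE_MASK;
        }

Since two cookies can hash to the same index, a cached page is only reused after nfs_readdir_page_validate() confirms its recorded last_cookie and change attribute.
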
Signed-off-by: Trond Myklebust --- fs/nfs/Kconfig | 4 ++ fs/nfs/dir.c | 149 +++++++++++++++++++++---------------------------- include/linux/nfs_fs.h | 2 - 3 files changed, 69 insertions(+), 86 deletions(-) diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index 14a72224b657..47a53b3362b6 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -4,6 +4,10 @@ config NFS_FS depends on INET && FILE_LOCKING && MULTIUSER select LOCKD select SUNRPC + select CRYPTO + select CRYPTO_HASH + select XXHASH + select CRYPTO_XXHASH select NFS_ACL_SUPPORT if NFS_V3_ACL help Choose Y here if you want to access files residing on other diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 4983950de2ad..8c2552d89310 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -39,6 +39,7 @@ #include #include #include +#include #include "delegation.h" #include "iostat.h" @@ -159,9 +160,7 @@ struct nfs_readdir_descriptor { pgoff_t page_index_max; u64 dir_cookie; u64 last_cookie; - u64 dup_cookie; loff_t current_index; - loff_t prev_index; __be32 verf[NFS_DIR_VERIFIER_SIZE]; unsigned long dir_verifier; @@ -171,7 +170,6 @@ struct nfs_readdir_descriptor { unsigned int cache_entry_index; unsigned int buffer_fills; unsigned int dtsize; - signed char duped; bool plus; bool eob; bool eof; @@ -331,6 +329,28 @@ out: return ret; } +#define NFS_READDIR_COOKIE_MASK (U32_MAX >> 14) +/* + * Hash algorithm allowing content addressible access to sequences + * of directory cookies. Content is addressed by the value of the + * cookie index of the first readdir entry in a page. + * + * The xxhash algorithm is chosen because it is fast, and is supposed + * to result in a decent flat distribution of hashes. + * + * We then select only the first 18 bits to avoid issues with excessive + * memory use for the page cache XArray. 18 bits should allow the caching + * of 262144 pages of sequences of readdir entries. Since each page holds + * 127 readdir entries for a typical 64-bit system, that works out to a + * cache of ~ 33 million entries per directory. 
+ */ +static pgoff_t nfs_readdir_page_cookie_hash(u64 cookie) +{ + if (cookie == 0) + return 0; + return xxhash(&cookie, sizeof(cookie), 0) & NFS_READDIR_COOKIE_MASK; +} + static bool nfs_readdir_page_validate(struct page *page, u64 last_cookie, u64 change_attr) { @@ -352,15 +372,15 @@ static void nfs_readdir_page_unlock_and_put(struct page *page) } static struct page *nfs_readdir_page_get_locked(struct address_space *mapping, - pgoff_t index, u64 last_cookie) + u64 last_cookie, + u64 change_attr) { + pgoff_t index = nfs_readdir_page_cookie_hash(last_cookie); struct page *page; - u64 change_attr; page = grab_cache_page(mapping, index); if (!page) return NULL; - change_attr = inode_peek_iversion_raw(mapping->host); if (PageUptodate(page)) { if (nfs_readdir_page_validate(page, last_cookie, change_attr)) return page; @@ -371,11 +391,6 @@ static struct page *nfs_readdir_page_get_locked(struct address_space *mapping, return page; } -static loff_t nfs_readdir_page_offset(struct page *page) -{ - return (loff_t)page->index * (loff_t)nfs_readdir_array_maxentries(); -} - static u64 nfs_readdir_page_last_cookie(struct page *page) { struct nfs_cache_array *array; @@ -408,11 +423,11 @@ static void nfs_readdir_page_set_eof(struct page *page) } static struct page *nfs_readdir_page_get_next(struct address_space *mapping, - pgoff_t index, u64 cookie) + u64 cookie, u64 change_attr) { struct page *page; - page = nfs_readdir_page_get_locked(mapping, index, cookie); + page = nfs_readdir_page_get_locked(mapping, cookie, change_attr); if (page) { if (nfs_readdir_page_last_cookie(page) == cookie) return page; @@ -452,6 +467,13 @@ static void nfs_readdir_seek_next_array(struct nfs_cache_array *array, desc->last_cookie = array->array[0].cookie; } +static void nfs_readdir_rewind_search(struct nfs_readdir_descriptor *desc) +{ + desc->current_index = 0; + desc->last_cookie = 0; + desc->page_index = 0; +} + static int nfs_readdir_search_for_pos(struct nfs_cache_array *array, struct nfs_readdir_descriptor *desc) { @@ -492,8 +514,7 @@ static bool nfs_readdir_array_cookie_in_range(struct nfs_cache_array *array, static int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, struct nfs_readdir_descriptor *desc) { - int i; - loff_t new_pos; + unsigned int i; int status = -EAGAIN; if (!nfs_readdir_array_cookie_in_range(array, desc->dir_cookie)) @@ -501,32 +522,10 @@ static int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, for (i = 0; i < array->size; i++) { if (array->array[i].cookie == desc->dir_cookie) { - struct nfs_inode *nfsi = NFS_I(file_inode(desc->file)); - - new_pos = nfs_readdir_page_offset(desc->page) + i; - if (desc->attr_gencount != nfsi->attr_gencount) { - desc->duped = 0; - desc->attr_gencount = nfsi->attr_gencount; - } else if (new_pos < desc->prev_index) { - if (desc->duped > 0 - && desc->dup_cookie == desc->dir_cookie) { - if (printk_ratelimit()) { - pr_notice("NFS: directory %pD2 contains a readdir loop." - "Please contact your server vendor. 
" - "The file: %s has duplicate cookie %llu\n", - desc->file, array->array[i].name, desc->dir_cookie); - } - status = -ELOOP; - goto out; - } - desc->dup_cookie = desc->dir_cookie; - desc->duped = -1; - } if (nfs_readdir_use_cookie(desc->file)) desc->ctx->pos = desc->dir_cookie; else - desc->ctx->pos = new_pos; - desc->prev_index = new_pos; + desc->ctx->pos = desc->current_index + i; desc->cache_entry_index = i; return 0; } @@ -538,7 +537,6 @@ check_eof: desc->eof = true; } else nfs_readdir_seek_next_array(array, desc); -out: return status; } @@ -785,10 +783,9 @@ out: /* Perform conversion from xdr to cache array */ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc, struct nfs_entry *entry, - struct page **xdr_pages, - unsigned int buflen, - struct page **arrays, - size_t narrays) + struct page **xdr_pages, unsigned int buflen, + struct page **arrays, size_t narrays, + u64 change_attr) { struct address_space *mapping = desc->file->f_mapping; struct xdr_stream stream; @@ -828,18 +825,16 @@ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc, break; arrays++; *arrays = page = new; - desc->page_index_max++; } else { - new = nfs_readdir_page_get_next(mapping, - page->index + 1, - entry->prev_cookie); + new = nfs_readdir_page_get_next( + mapping, entry->prev_cookie, change_attr); if (!new) break; if (page != *arrays) nfs_readdir_page_unlock_and_put(page); page = new; - desc->page_index_max = new->index; } + desc->page_index_max++; status = nfs_readdir_add_to_array(entry, page); } while (!status && !entry->eof); @@ -899,6 +894,7 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc, __be32 *verf_arg, __be32 *verf_res, struct page **arrays, size_t narrays) { + u64 change_attr; struct page **pages; struct page *page = *arrays; struct nfs_entry *entry; @@ -923,6 +919,7 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc, if (!pages) goto out; + change_attr = inode_peek_iversion_raw(inode); status = nfs_readdir_xdr_filler(desc, verf_arg, entry->cookie, pages, dtsize, verf_res); if (status < 0) @@ -931,7 +928,7 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc, pglen = status; if (pglen != 0) status = nfs_readdir_page_filler(desc, entry, pages, pglen, - arrays, narrays); + arrays, narrays, change_attr); else nfs_readdir_page_set_eof(page); desc->buffer_fills++; @@ -961,9 +958,11 @@ nfs_readdir_page_unlock_and_put_cached(struct nfs_readdir_descriptor *desc) static struct page * nfs_readdir_page_get_cached(struct nfs_readdir_descriptor *desc) { - return nfs_readdir_page_get_locked(desc->file->f_mapping, - desc->page_index, - desc->last_cookie); + struct address_space *mapping = desc->file->f_mapping; + u64 change_attr = inode_peek_iversion_raw(mapping->host); + + return nfs_readdir_page_get_locked(mapping, desc->last_cookie, + change_attr); } /* @@ -995,7 +994,7 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc) trace_nfs_readdir_cache_fill_done(inode, res); if (res == -EBADCOOKIE || res == -ENOTSYNC) { invalidate_inode_pages2(desc->file->f_mapping); - desc->page_index = 0; + nfs_readdir_rewind_search(desc); trace_nfs_readdir_invalidate_cache_range( inode, 0, MAX_LFS_FILESIZE); return -EAGAIN; @@ -1009,12 +1008,10 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc) memcmp(nfsi->cookieverf, verf, sizeof(nfsi->cookieverf))) { memcpy(nfsi->cookieverf, verf, sizeof(nfsi->cookieverf)); - invalidate_inode_pages2_range(desc->file->f_mapping, - 
desc->page_index_max + 1, + invalidate_inode_pages2_range(desc->file->f_mapping, 1, -1); trace_nfs_readdir_invalidate_cache_range( - inode, desc->page_index_max + 1, - MAX_LFS_FILESIZE); + inode, 1, MAX_LFS_FILESIZE); } } res = nfs_readdir_search_array(desc); @@ -1030,11 +1027,6 @@ static int readdir_search_pagecache(struct nfs_readdir_descriptor *desc) int res; do { - if (desc->page_index == 0) { - desc->current_index = 0; - desc->prev_index = 0; - desc->last_cookie = 0; - } res = find_and_lock_cache_page(desc); } while (res == -EAGAIN); return res; @@ -1072,8 +1064,6 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc, desc->ctx->pos = desc->dir_cookie; else desc->ctx->pos++; - if (desc->duped != 0) - desc->duped = 1; } if (array->page_is_eof) desc->eof = !desc->eob; @@ -1115,7 +1105,6 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc) desc->page_index = 0; desc->cache_entry_index = 0; desc->last_cookie = desc->dir_cookie; - desc->duped = 0; desc->page_index_max = 0; trace_nfs_readdir_uncached(desc->file, desc->verf, desc->last_cookie, @@ -1148,6 +1137,8 @@ out_free: for (i = 0; i < sz && arrays[i]; i++) nfs_readdir_page_array_free(arrays[i]); out: + if (!nfs_readdir_use_cookie(desc->file)) + nfs_readdir_rewind_search(desc); desc->page_index_max = -1; kfree(arrays); dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __func__, status); @@ -1158,17 +1149,14 @@ out: static void nfs_readdir_handle_cache_misses(struct inode *inode, struct nfs_readdir_descriptor *desc, - pgoff_t page_index, unsigned int cache_misses) { if (desc->ctx->pos == 0 || cache_misses <= NFS_READDIR_CACHE_MISS_THRESHOLD) return; - if (invalidate_mapping_pages(inode->i_mapping, page_index + 1, -1) == 0) + if (invalidate_mapping_pages(inode->i_mapping, 0, -1) == 0) return; - trace_nfs_readdir_invalidate_cache_range( - inode, (loff_t)(page_index + 1) << PAGE_SHIFT, - MAX_LFS_FILESIZE); + trace_nfs_readdir_invalidate_cache_range(inode, 0, MAX_LFS_FILESIZE); } /* The file offset position represents the dirent entry number. 
A @@ -1183,7 +1171,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) struct nfs_open_dir_context *dir_ctx = file->private_data; struct nfs_readdir_descriptor *desc; unsigned int cache_hits, cache_misses; - pgoff_t page_index; int res; dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n", @@ -1208,10 +1195,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) spin_lock(&file->f_lock); desc->dir_cookie = dir_ctx->dir_cookie; - desc->dup_cookie = dir_ctx->dup_cookie; - desc->duped = dir_ctx->duped; - page_index = dir_ctx->page_index; - desc->page_index = page_index; + desc->page_index = dir_ctx->page_index; desc->last_cookie = dir_ctx->last_cookie; desc->attr_gencount = dir_ctx->attr_gencount; desc->eof = dir_ctx->eof; @@ -1227,7 +1211,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) } desc->plus = nfs_use_readdirplus(inode, ctx, cache_hits, cache_misses); - nfs_readdir_handle_cache_misses(inode, desc, page_index, cache_misses); + nfs_readdir_handle_cache_misses(inode, desc, cache_misses); do { res = readdir_search_pagecache(desc); @@ -1247,7 +1231,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) } if (res == -ETOOSMALL && desc->plus) { nfs_zap_caches(inode); - desc->page_index = 0; desc->plus = false; desc->eof = false; continue; @@ -1261,9 +1244,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) spin_lock(&file->f_lock); dir_ctx->dir_cookie = desc->dir_cookie; - dir_ctx->dup_cookie = desc->dup_cookie; dir_ctx->last_cookie = desc->last_cookie; - dir_ctx->duped = desc->duped; dir_ctx->attr_gencount = desc->attr_gencount; dir_ctx->page_index = desc->page_index; dir_ctx->eof = desc->eof; @@ -1306,13 +1287,13 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence) if (offset != filp->f_pos) { filp->f_pos = offset; dir_ctx->page_index = 0; - if (!nfs_readdir_use_cookie(filp)) + if (!nfs_readdir_use_cookie(filp)) { dir_ctx->dir_cookie = 0; - else + dir_ctx->last_cookie = 0; + } else { dir_ctx->dir_cookie = offset; - if (offset == 0) - memset(dir_ctx->verf, 0, sizeof(dir_ctx->verf)); - dir_ctx->duped = 0; + dir_ctx->last_cookie = offset; + } dir_ctx->eof = false; } spin_unlock(&filp->f_lock); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 20a4cf0acad2..42aad886d3c0 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -106,11 +106,9 @@ struct nfs_open_dir_context { unsigned long attr_gencount; __be32 verf[NFS_DIR_VERIFIER_SIZE]; __u64 dir_cookie; - __u64 dup_cookie; __u64 last_cookie; pgoff_t page_index; unsigned int dtsize; - signed char duped; bool eof; struct rcu_head rcu_head; }; -- cgit v1.2.3 From b0365ccb0712efacf99936e94e92eb7ae63de4d5 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 23 Feb 2022 13:29:59 -0500 Subject: NFS: Fix up forced readdirplus Avoid clearing the entire readdir page cache if we're just doing forced readdirplus for the 'ls -l' heuristic. 
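Instead of invalidating the whole mapping, a cached page whose contents must be refreshed is now re-initialised in place. A condensed sketch of the lookup path below (see nfs_readdir_page_get_cached()):

        page = nfs_readdir_page_get_locked(mapping, cookie, change_attr);
        if (page && desc->clear_cache && !nfs_readdir_page_needs_filling(page))
                nfs_readdir_page_reinit_array(page, cookie, change_attr);
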
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 56 +++++++++++++++++++++++++++++++++++--------------- fs/nfs/nfstrace.h | 1 + include/linux/nfs_fs.h | 1 + 3 files changed, 41 insertions(+), 17 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 8c2552d89310..f6aac1e8a8b9 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -170,6 +170,7 @@ struct nfs_readdir_descriptor { unsigned int cache_entry_index; unsigned int buffer_fills; unsigned int dtsize; + bool clear_cache; bool plus; bool eob; bool eof; @@ -227,6 +228,13 @@ static void nfs_readdir_clear_array(struct page *page) kunmap_atomic(array); } +static void nfs_readdir_page_reinit_array(struct page *page, u64 last_cookie, + u64 change_attr) +{ + nfs_readdir_clear_array(page); + nfs_readdir_page_init_array(page, last_cookie, change_attr); +} + static struct page * nfs_readdir_page_array_alloc(u64 last_cookie, gfp_t gfp_flags) { @@ -428,12 +436,11 @@ static struct page *nfs_readdir_page_get_next(struct address_space *mapping, struct page *page; page = nfs_readdir_page_get_locked(mapping, cookie, change_attr); - if (page) { - if (nfs_readdir_page_last_cookie(page) == cookie) - return page; - nfs_readdir_page_unlock_and_put(page); - } - return NULL; + if (!page) + return NULL; + if (nfs_readdir_page_last_cookie(page) != cookie) + nfs_readdir_page_reinit_array(page, cookie, change_attr); + return page; } static inline @@ -960,9 +967,15 @@ nfs_readdir_page_get_cached(struct nfs_readdir_descriptor *desc) { struct address_space *mapping = desc->file->f_mapping; u64 change_attr = inode_peek_iversion_raw(mapping->host); + u64 cookie = desc->last_cookie; + struct page *page; - return nfs_readdir_page_get_locked(mapping, desc->last_cookie, - change_attr); + page = nfs_readdir_page_get_locked(mapping, cookie, change_attr); + if (!page) + return NULL; + if (desc->clear_cache && !nfs_readdir_page_needs_filling(page)) + nfs_readdir_page_reinit_array(page, cookie, change_attr); + return page; } /* @@ -1013,6 +1026,7 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc) trace_nfs_readdir_invalidate_cache_range( inode, 1, MAX_LFS_FILESIZE); } + desc->clear_cache = false; } res = nfs_readdir_search_array(desc); if (res == 0) @@ -1147,16 +1161,17 @@ out: #define NFS_READDIR_CACHE_MISS_THRESHOLD (16UL) -static void nfs_readdir_handle_cache_misses(struct inode *inode, +static bool nfs_readdir_handle_cache_misses(struct inode *inode, struct nfs_readdir_descriptor *desc, - unsigned int cache_misses) + unsigned int cache_misses, + bool force_clear) { - if (desc->ctx->pos == 0 || - cache_misses <= NFS_READDIR_CACHE_MISS_THRESHOLD) - return; - if (invalidate_mapping_pages(inode->i_mapping, 0, -1) == 0) - return; - trace_nfs_readdir_invalidate_cache_range(inode, 0, MAX_LFS_FILESIZE); + if (desc->ctx->pos == 0 || !desc->plus) + return false; + if (cache_misses <= NFS_READDIR_CACHE_MISS_THRESHOLD && !force_clear) + return false; + trace_nfs_readdir_force_readdirplus(inode); + return true; } /* The file offset position represents the dirent entry number. 
A @@ -1171,6 +1186,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) struct nfs_open_dir_context *dir_ctx = file->private_data; struct nfs_readdir_descriptor *desc; unsigned int cache_hits, cache_misses; + bool force_clear; int res; dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n", @@ -1203,6 +1219,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) memcpy(desc->verf, dir_ctx->verf, sizeof(desc->verf)); cache_hits = atomic_xchg(&dir_ctx->cache_hits, 0); cache_misses = atomic_xchg(&dir_ctx->cache_misses, 0); + force_clear = dir_ctx->force_clear; spin_unlock(&file->f_lock); if (desc->eof) { @@ -1211,7 +1228,9 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) } desc->plus = nfs_use_readdirplus(inode, ctx, cache_hits, cache_misses); - nfs_readdir_handle_cache_misses(inode, desc, cache_misses); + force_clear = nfs_readdir_handle_cache_misses(inode, desc, cache_misses, + force_clear); + desc->clear_cache = force_clear; do { res = readdir_search_pagecache(desc); @@ -1240,6 +1259,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) nfs_do_filldir(desc, nfsi->cookieverf); nfs_readdir_page_unlock_and_put_cached(desc); + if (desc->page_index == desc->page_index_max) + desc->clear_cache = force_clear; } while (!desc->eob && !desc->eof); spin_lock(&file->f_lock); @@ -1247,6 +1268,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) dir_ctx->last_cookie = desc->last_cookie; dir_ctx->attr_gencount = desc->attr_gencount; dir_ctx->page_index = desc->page_index; + dir_ctx->force_clear = force_clear; dir_ctx->eof = desc->eof; dir_ctx->dtsize = desc->dtsize; memcpy(dir_ctx->verf, desc->verf, sizeof(dir_ctx->verf)); diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index ec2645d20abf..59f4ca803fd0 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -160,6 +160,7 @@ DEFINE_NFS_INODE_EVENT(nfs_fsync_enter); DEFINE_NFS_INODE_EVENT_DONE(nfs_fsync_exit); DEFINE_NFS_INODE_EVENT(nfs_access_enter); DEFINE_NFS_INODE_EVENT_DONE(nfs_set_cache_invalid); +DEFINE_NFS_INODE_EVENT(nfs_readdir_force_readdirplus); DEFINE_NFS_INODE_EVENT_DONE(nfs_readdir_cache_fill_done); DEFINE_NFS_INODE_EVENT_DONE(nfs_readdir_uncached_done); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 42aad886d3c0..3893386ceaed 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -109,6 +109,7 @@ struct nfs_open_dir_context { __u64 last_cookie; pgoff_t page_index; unsigned int dtsize; + bool force_clear; bool eof; struct rcu_head rcu_head; }; -- cgit v1.2.3 From 0adf85b445c7fbc5d2df1f8c1bc54d62c4340237 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 27 Feb 2022 12:46:24 -0500 Subject: NFS: Optimise away the previous cookie field Replace the 'previous cookie' field in struct nfs_entry with the array->last_cookie. 
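A minimal sketch of the append step with the field gone (field names per the diff below, and assuming the pre-existing advance of array->last_cookie to entry->cookie later in the same function):

        cache_entry->cookie = array->last_cookie;       /* replaces entry->prev_cookie */
        array->last_cookie = entry->cookie;             /* unchanged: advances the cursor */
        *cookie = array->last_cookie;                   /* handed back so callers can chain pages */
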
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 26 ++++++++++++++------------ fs/nfs/nfs2xdr.c | 1 - fs/nfs/nfs3xdr.c | 1 - fs/nfs/nfs4xdr.c | 1 - include/linux/nfs_xdr.h | 3 +-- 5 files changed, 15 insertions(+), 17 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index f6aac1e8a8b9..033249a72e92 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -301,19 +301,20 @@ static int nfs_readdir_array_can_expand(struct nfs_cache_array *array) return 0; } -static -int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page) +static int nfs_readdir_page_array_append(struct page *page, + const struct nfs_entry *entry, + u64 *cookie) { struct nfs_cache_array *array; struct nfs_cache_array_entry *cache_entry; const char *name; - int ret; + int ret = -ENOMEM; name = nfs_readdir_copy_name(entry->name, entry->len); - if (!name) - return -ENOMEM; array = kmap_atomic(page); + if (!name) + goto out; ret = nfs_readdir_array_can_expand(array); if (ret) { kfree(name); @@ -321,7 +322,7 @@ int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page) } cache_entry = &array->array[array->size]; - cache_entry->cookie = entry->prev_cookie; + cache_entry->cookie = array->last_cookie; cache_entry->ino = entry->ino; cache_entry->d_type = entry->d_type; cache_entry->name_len = entry->len; @@ -333,6 +334,7 @@ int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page) if (entry->eof != 0) nfs_readdir_array_set_eof(array); out: + *cookie = array->last_cookie; kunmap_atomic(array); return ret; } @@ -798,6 +800,7 @@ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc, struct xdr_stream stream; struct xdr_buf buf; struct page *scratch, *new, *page = *arrays; + u64 cookie; int status; scratch = alloc_page(GFP_KERNEL); @@ -819,22 +822,21 @@ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc, nfs_prime_dcache(file_dentry(desc->file), entry, desc->dir_verifier); - status = nfs_readdir_add_to_array(entry, page); + status = nfs_readdir_page_array_append(page, entry, &cookie); if (status != -ENOSPC) continue; if (page->mapping != mapping) { if (!--narrays) break; - new = nfs_readdir_page_array_alloc(entry->prev_cookie, - GFP_KERNEL); + new = nfs_readdir_page_array_alloc(cookie, GFP_KERNEL); if (!new) break; arrays++; *arrays = page = new; } else { - new = nfs_readdir_page_get_next( - mapping, entry->prev_cookie, change_attr); + new = nfs_readdir_page_get_next(mapping, cookie, + change_attr); if (!new) break; if (page != *arrays) @@ -842,7 +844,7 @@ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc, page = new; } desc->page_index_max++; - status = nfs_readdir_add_to_array(entry, page); + status = nfs_readdir_page_array_append(page, entry, &cookie); } while (!status && !entry->eof); switch (status) { diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 3d5ba43f44bb..05c3b4b2b3dd 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -955,7 +955,6 @@ int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, * The type (size and byte order) of nfscookie isn't defined in * RFC 1094. This implementation assumes that it's an XDR uint32. 
*/ - entry->prev_cookie = entry->cookie; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) return -EAGAIN; diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index d6779ceeb39e..3b0b650c9c5a 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -2024,7 +2024,6 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, zero_nfs_fh3(entry->fh); } - entry->prev_cookie = entry->cookie; entry->cookie = new_cookie; return 0; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index b7780b97dc4d..86a5f6516928 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -7508,7 +7508,6 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, if (entry->fattr->valid & NFS_ATTR_FATTR_TYPE) entry->d_type = nfs_umode_to_dtype(entry->fattr->mode); - entry->prev_cookie = entry->cookie; entry->cookie = new_cookie; return 0; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 728cb0c1f0b6..82f7c2730b9a 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -745,8 +745,7 @@ struct nfs_auth_info { */ struct nfs_entry { __u64 ino; - __u64 cookie, - prev_cookie; + __u64 cookie; const char * name; unsigned int len; int eof; -- cgit v1.2.3 From 612896ec5a4edbf98c4a631503899da04df76480 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 24 Feb 2022 11:48:35 -0500 Subject: NFS: Cache all entries in the readdirplus reply Even if we're not able to cache all the entries in the readdir buffer, let's ensure that we do prime the dcache. Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 40 ++++++++++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 033249a72e92..7e12102b29e7 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -789,6 +789,21 @@ out: dput(dentry); } +static int nfs_readdir_entry_decode(struct nfs_readdir_descriptor *desc, + struct nfs_entry *entry, + struct xdr_stream *stream) +{ + int ret; + + if (entry->fattr->label) + entry->fattr->label->len = NFS4_MAXLABELLEN; + ret = xdr_decode(desc, entry, stream); + if (ret || !desc->plus) + return ret; + nfs_prime_dcache(file_dentry(desc->file), entry, desc->dir_verifier); + return 0; +} + /* Perform conversion from xdr to cache array */ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc, struct nfs_entry *entry, @@ -811,17 +826,10 @@ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc, xdr_set_scratch_page(&stream, scratch); do { - if (entry->fattr->label) - entry->fattr->label->len = NFS4_MAXLABELLEN; - - status = xdr_decode(desc, entry, &stream); + status = nfs_readdir_entry_decode(desc, entry, &stream); if (status != 0) break; - if (desc->plus) - nfs_prime_dcache(file_dentry(desc->file), entry, - desc->dir_verifier); - status = nfs_readdir_page_array_append(page, entry, &cookie); if (status != -ENOSPC) continue; @@ -849,15 +857,19 @@ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc, switch (status) { case -EBADCOOKIE: - if (entry->eof) { - nfs_readdir_page_set_eof(page); - status = 0; - } - break; - case -ENOSPC: + if (!entry->eof) + break; + nfs_readdir_page_set_eof(page); + fallthrough; case -EAGAIN: status = 0; break; + case -ENOSPC: + status = 0; + if (!desc->plus) + break; + while (!nfs_readdir_entry_decode(desc, entry, &stream)) + ; } if (page != *arrays) -- cgit v1.2.3 From cb8fac6d2727f79f211e745b16c9abbf4d8be652 Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Tue, 15 Feb 2022 13:17:04 +0300 Subject: NFS: remove unneeded check in 
decode_devicenotify_args() Overflow check is not needed anymore after we switch to kmalloc_array(). Signed-off-by: Alexey Khoroshilov Fixes: a4f743a6bb20 ("NFSv4.1: Convert open-coded array allocation calls to kmalloc_array()") Signed-off-by: Trond Myklebust --- fs/nfs/callback_xdr.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index f90de8043b0f..8dcb08e1a885 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -271,10 +271,6 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp, n = ntohl(*p++); if (n == 0) goto out; - if (n > ULONG_MAX / sizeof(*args->devs)) { - status = htonl(NFS4ERR_BADXDR); - goto out; - } args->devs = kmalloc_array(n, sizeof(*args->devs), GFP_KERNEL); if (!args->devs) { -- cgit v1.2.3 From b4be2c598b767eb72507e4dc56d75c3fe2231cee Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Tue, 15 Feb 2022 13:26:41 -0500 Subject: NFSv4.1 restrict GETATTR fs_location query to the main transport In the presence of trunking transports, it's helpful to make sure that during the migration event, the GETATTR for fs_location attribute happens on the main transport. Signed-off-by: Olga Kornievskaia Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 8b875355824b..fd8eece12e94 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -8008,6 +8008,18 @@ static int _nfs41_proc_get_locations(struct nfs_server *server, .rpc_resp = &res, .rpc_cred = cred, }; + struct nfs4_call_sync_data data = { + .seq_server = server, + .seq_args = &args.seq_args, + .seq_res = &res.seq_res, + }; + struct rpc_task_setup task_setup_data = { + .rpc_client = clnt, + .rpc_message = &msg, + .callback_ops = server->nfs_client->cl_mvops->call_sync_ops, + .callback_data = &data, + .flags = RPC_TASK_NO_ROUND_ROBIN, + }; int status; nfs_fattr_init(&locations->fattr); @@ -8015,8 +8027,7 @@ static int _nfs41_proc_get_locations(struct nfs_server *server, locations->nlocations = 0; nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); - status = nfs4_call_sync_sequence(clnt, server, &msg, - &args.seq_args, &res.seq_res); + status = nfs4_call_sync_custom(&task_setup_data); if (status == NFS4_OK && res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) status = -NFS4ERR_LEASE_MOVED; -- cgit v1.2.3 From 45f3a70ba68e1fc7fe0edde731b08d85435da30d Mon Sep 17 00:00:00 2001 From: Dave Wysochanski Date: Tue, 1 Mar 2022 14:37:24 -0500 Subject: NFS: Cleanup usage of nfs_inode in fscache interface A number of places in the fscache interface used nfs_inode when inode could be used, simplifying the code.
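The pattern is mechanical: helpers that only touch VFS inode fields take a struct inode * directly. A representative before/after, condensed from the auxdata helper in the diff below:

        /* before */
        auxdata->mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
        /* after */
        auxdata->mtime_sec = inode->i_mtime.tv_sec;
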
Signed-off-by: Dave Wysochanski Signed-off-by: Trond Myklebust --- fs/nfs/fscache.c | 10 ++++------ fs/nfs/fscache.h | 18 +++++++++--------- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c index cfe901650ab0..81bd2770e640 100644 --- a/fs/nfs/fscache.c +++ b/fs/nfs/fscache.c @@ -173,7 +173,7 @@ void nfs_fscache_init_inode(struct inode *inode) if (!(nfss->fscache && S_ISREG(inode->i_mode))) return; - nfs_fscache_update_auxdata(&auxdata, nfsi); + nfs_fscache_update_auxdata(&auxdata, inode); nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache, 0, @@ -181,7 +181,7 @@ void nfs_fscache_init_inode(struct inode *inode) nfsi->fh.size, &auxdata, /* aux_data */ sizeof(auxdata), - i_size_read(&nfsi->vfs_inode)); + i_size_read(inode)); } /* @@ -220,7 +220,6 @@ void nfs_fscache_clear_inode(struct inode *inode) void nfs_fscache_open_file(struct inode *inode, struct file *filp) { struct nfs_fscache_inode_auxdata auxdata; - struct nfs_inode *nfsi = NFS_I(inode); struct fscache_cookie *cookie = nfs_i_fscache(inode); bool open_for_write = inode_is_open_for_write(inode); @@ -230,7 +229,7 @@ void nfs_fscache_open_file(struct inode *inode, struct file *filp) fscache_use_cookie(cookie, open_for_write); if (open_for_write) { dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi); - nfs_fscache_update_auxdata(&auxdata, nfsi); + nfs_fscache_update_auxdata(&auxdata, inode); fscache_invalidate(cookie, &auxdata, i_size_read(inode), FSCACHE_INVAL_DIO_WRITE); } @@ -240,11 +239,10 @@ EXPORT_SYMBOL_GPL(nfs_fscache_open_file); void nfs_fscache_release_file(struct inode *inode, struct file *filp) { struct nfs_fscache_inode_auxdata auxdata; - struct nfs_inode *nfsi = NFS_I(inode); struct fscache_cookie *cookie = nfs_i_fscache(inode); if (fscache_cookie_valid(cookie)) { - nfs_fscache_update_auxdata(&auxdata, nfsi); + nfs_fscache_update_auxdata(&auxdata, inode); fscache_unuse_cookie(cookie, &auxdata, NULL); } } diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h index 25a5c0f82392..4c7afaabbf9f 100644 --- a/fs/nfs/fscache.h +++ b/fs/nfs/fscache.h @@ -86,16 +86,16 @@ static inline void nfs_readpage_to_fscache(struct inode *inode, } static inline void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata, - struct nfs_inode *nfsi) + struct inode *inode) { memset(auxdata, 0, sizeof(*auxdata)); - auxdata->mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec; - auxdata->mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec; - auxdata->ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec; - auxdata->ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec; + auxdata->mtime_sec = inode->i_mtime.tv_sec; + auxdata->mtime_nsec = inode->i_mtime.tv_nsec; + auxdata->ctime_sec = inode->i_ctime.tv_sec; + auxdata->ctime_nsec = inode->i_ctime.tv_nsec; - if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4) - auxdata->change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode); + if (NFS_SERVER(inode)->nfs_client->rpc_ops->version == 4) + auxdata->change_attr = inode_peek_iversion_raw(inode); } /* @@ -107,9 +107,9 @@ static inline void nfs_fscache_invalidate(struct inode *inode, int flags) struct nfs_inode *nfsi = NFS_I(inode); if (nfsi->fscache) { - nfs_fscache_update_auxdata(&auxdata, nfsi); + nfs_fscache_update_auxdata(&auxdata, inode); fscache_invalidate(nfsi->fscache, &auxdata, - i_size_read(&nfsi->vfs_inode), flags); + i_size_read(inode), flags); } } -- cgit v1.2.3 From fc1c5abfca7e1059df46623e64aecf840cdbb9dc Mon Sep 17 00:00:00 2001 From: Dave Wysochanski Date: Tue, 1 Mar 2022 
14:37:25 -0500 Subject: NFS: Rename fscache read and write pages functions Rename NFS fscache functions in a more consistent fashion to better reflect when we read from and write to fscache. Signed-off-by: Dave Wysochanski Signed-off-by: Trond Myklebust --- fs/nfs/fscache.c | 6 +++--- fs/nfs/fscache.h | 27 ++++++++++----------------- fs/nfs/read.c | 4 ++-- 3 files changed, 15 insertions(+), 22 deletions(-) diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c index 81bd2770e640..62fbce28fe85 100644 --- a/fs/nfs/fscache.c +++ b/fs/nfs/fscache.c @@ -317,7 +317,7 @@ static int fscache_fallback_write_page(struct inode *inode, struct page *page, /* * Retrieve a page from fscache */ -int __nfs_readpage_from_fscache(struct inode *inode, struct page *page) +int __nfs_fscache_read_page(struct inode *inode, struct page *page) { int ret; @@ -351,7 +351,7 @@ int __nfs_readpage_from_fscache(struct inode *inode, struct page *page) * Store a newly fetched page in fscache. We can be certain there's no page * stored in the cache as yet otherwise we would've read it from there. */ -void __nfs_readpage_to_fscache(struct inode *inode, struct page *page) +void __nfs_fscache_write_page(struct inode *inode, struct page *page) { int ret; @@ -362,7 +362,7 @@ void __nfs_readpage_to_fscache(struct inode *inode, struct page *page) ret = fscache_fallback_write_page(inode, page, true); dfprintk(FSCACHE, - "NFS: readpage_to_fscache: p:%p(i:%lu f:%lx) ret %d\n", + "NFS: nfs_fscache_write_page: p:%p(i:%lu f:%lx) ret %d\n", page, page->index, page->flags, ret); if (ret != 0) { diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h index 4c7afaabbf9f..4e980cc04779 100644 --- a/fs/nfs/fscache.h +++ b/fs/nfs/fscache.h @@ -45,10 +45,8 @@ extern void nfs_fscache_clear_inode(struct inode *); extern void nfs_fscache_open_file(struct inode *, struct file *); extern void nfs_fscache_release_file(struct inode *, struct file *); -extern int __nfs_readpage_from_fscache(struct inode *, struct page *); -extern void __nfs_read_completion_to_fscache(struct nfs_pgio_header *hdr, - unsigned long bytes); -extern void __nfs_readpage_to_fscache(struct inode *, struct page *); +extern int __nfs_fscache_read_page(struct inode *, struct page *); +extern void __nfs_fscache_write_page(struct inode *, struct page *); static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp) { @@ -66,11 +64,10 @@ static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp) /* * Retrieve a page from an inode data storage object. */ -static inline int nfs_readpage_from_fscache(struct inode *inode, - struct page *page) +static inline int nfs_fscache_read_page(struct inode *inode, struct page *page) { - if (NFS_I(inode)->fscache) - return __nfs_readpage_from_fscache(inode, page); + if (nfs_i_fscache(inode)) + return __nfs_fscache_read_page(inode, page); return -ENOBUFS; } @@ -78,11 +75,11 @@ static inline int nfs_readpage_from_fscache(struct inode *inode, * Store a page newly fetched from the server in an inode data storage object * in the cache. 
*/ -static inline void nfs_readpage_to_fscache(struct inode *inode, +static inline void nfs_fscache_write_page(struct inode *inode, struct page *page) { - if (NFS_I(inode)->fscache) - __nfs_readpage_to_fscache(inode, page); + if (nfs_i_fscache(inode)) + __nfs_fscache_write_page(inode, page); } static inline void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata, @@ -136,15 +133,11 @@ static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp) { return 1; /* True: may release page */ } -static inline int nfs_readpage_from_fscache(struct inode *inode, - struct page *page) +static inline int nfs_fscache_read_page(struct inode *inode, struct page *page) { return -ENOBUFS; } -static inline void nfs_readpage_to_fscache(struct inode *inode, - struct page *page) {} - - +static inline void nfs_fscache_write_page(struct inode *inode, struct page *page) {} static inline void nfs_fscache_invalidate(struct inode *inode, int flags) {} static inline const char *nfs_server_fscache_state(struct nfs_server *server) diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 2472f962a9a2..e4c1a49b0126 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -123,7 +123,7 @@ static void nfs_readpage_release(struct nfs_page *req, int error) struct address_space *mapping = page_file_mapping(page); if (PageUptodate(page)) - nfs_readpage_to_fscache(inode, page); + nfs_fscache_write_page(inode, page); else if (!PageError(page) && !PagePrivate(page)) generic_error_remove_page(mapping, page); unlock_page(page); @@ -305,7 +305,7 @@ readpage_async_filler(struct nfs_readdesc *desc, struct page *page) aligned_len = min_t(unsigned int, ALIGN(len, rsize), PAGE_SIZE); if (!IS_SYNC(page->mapping->host)) { - error = nfs_readpage_from_fscache(page->mapping->host, page); + error = nfs_fscache_read_page(page->mapping->host, page); if (error == 0) goto out_unlock; } -- cgit v1.2.3 From e3f0a7fe698ff0d3ef428f72ba253fd1f377c193 Mon Sep 17 00:00:00 2001 From: Dave Wysochanski Date: Tue, 1 Mar 2022 14:37:26 -0500 Subject: NFS: Replace dfprintks with tracepoints in fscache read and write page functions Most of fscache and other NFS IO paths are now using tracepoints. Remove the dfprintks in the NFS fscache read/write page functions and replace with tracepoints at the begin and end of the functions. 
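The resulting shape of the read path, condensed from the diff below: one tracepoint on entry and one on exit carrying the return value, with the statistics accounting left as is.

        trace_nfs_fscache_read_page(inode, page);
        ret = fscache_fallback_read_page(inode, page);  /* stats handling elided */
        trace_nfs_fscache_read_page_exit(inode, page, ret);
        return ret;
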
Signed-off-by: Dave Wysochanski Signed-off-by: Trond Myklebust --- fs/nfs/fscache.c | 29 +++++++----------- fs/nfs/nfstrace.h | 91 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+), 18 deletions(-) diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c index 62fbce28fe85..841b69aef189 100644 --- a/fs/nfs/fscache.c +++ b/fs/nfs/fscache.c @@ -19,6 +19,7 @@ #include "internal.h" #include "iostat.h" #include "fscache.h" +#include "nfstrace.h" #define NFSDBG_FACILITY NFSDBG_FSCACHE @@ -321,30 +322,27 @@ int __nfs_fscache_read_page(struct inode *inode, struct page *page) { int ret; - dfprintk(FSCACHE, - "NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n", - nfs_i_fscache(inode), page, page->index, page->flags, inode); - + trace_nfs_fscache_read_page(inode, page); if (PageChecked(page)) { - dfprintk(FSCACHE, "NFS: readpage_from_fscache: PageChecked\n"); ClearPageChecked(page); - return 1; + ret = 1; + goto out; } ret = fscache_fallback_read_page(inode, page); if (ret < 0) { nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL); - dfprintk(FSCACHE, - "NFS: readpage_from_fscache failed %d\n", ret); SetPageChecked(page); - return ret; + goto out; } /* Read completed synchronously */ - dfprintk(FSCACHE, "NFS: readpage_from_fscache: read successful\n"); nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK); SetPageUptodate(page); - return 0; + ret = 0; +out: + trace_nfs_fscache_read_page_exit(inode, page, ret); + return ret; } /* @@ -355,20 +353,15 @@ void __nfs_fscache_write_page(struct inode *inode, struct page *page) { int ret; - dfprintk(FSCACHE, - "NFS: readpage_to_fscache(fsc:%p/p:%p(i:%lx f:%lx))\n", - nfs_i_fscache(inode), page, page->index, page->flags); + trace_nfs_fscache_write_page(inode, page); ret = fscache_fallback_write_page(inode, page, true); - dfprintk(FSCACHE, - "NFS: nfs_fscache_write_page: p:%p(i:%lu f:%lx) ret %d\n", - page, page->index, page->flags, ret); - if (ret != 0) { nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL); nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED); } else { nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_WRITTEN_OK); } + trace_nfs_fscache_write_page_exit(inode, page, ret); } diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 59f4ca803fd0..012bd7339862 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -1215,6 +1215,97 @@ TRACE_EVENT(nfs_readpage_short, ) ); +DECLARE_EVENT_CLASS(nfs_fscache_page_event, + TP_PROTO( + const struct inode *inode, + struct page *page + ), + + TP_ARGS(inode, page), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(u32, fhandle) + __field(u64, fileid) + __field(loff_t, offset) + ), + + TP_fast_assign( + const struct nfs_inode *nfsi = NFS_I(inode); + const struct nfs_fh *fh = &nfsi->fh; + + __entry->offset = page_index(page) << PAGE_SHIFT; + __entry->dev = inode->i_sb->s_dev; + __entry->fileid = nfsi->fileid; + __entry->fhandle = nfs_fhandle_hash(fh); + ), + + TP_printk( + "fileid=%02x:%02x:%llu fhandle=0x%08x " + "offset=%lld", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long long)__entry->fileid, + __entry->fhandle, + (long long)__entry->offset + ) +); +DECLARE_EVENT_CLASS(nfs_fscache_page_event_done, + TP_PROTO( + const struct inode *inode, + struct page *page, + int error + ), + + TP_ARGS(inode, page, error), + + TP_STRUCT__entry( + __field(int, error) + __field(dev_t, dev) + __field(u32, fhandle) + __field(u64, fileid) + __field(loff_t, offset) + ), + + TP_fast_assign( + const struct nfs_inode *nfsi = 
NFS_I(inode); + const struct nfs_fh *fh = &nfsi->fh; + + __entry->offset = page_index(page) << PAGE_SHIFT; + __entry->dev = inode->i_sb->s_dev; + __entry->fileid = nfsi->fileid; + __entry->fhandle = nfs_fhandle_hash(fh); + __entry->error = error; + ), + + TP_printk( + "fileid=%02x:%02x:%llu fhandle=0x%08x " + "offset=%lld error=%d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long long)__entry->fileid, + __entry->fhandle, + (long long)__entry->offset, __entry->error + ) +); +#define DEFINE_NFS_FSCACHE_PAGE_EVENT(name) \ + DEFINE_EVENT(nfs_fscache_page_event, name, \ + TP_PROTO( \ + const struct inode *inode, \ + struct page *page \ + ), \ + TP_ARGS(inode, page)) +#define DEFINE_NFS_FSCACHE_PAGE_EVENT_DONE(name) \ + DEFINE_EVENT(nfs_fscache_page_event_done, name, \ + TP_PROTO( \ + const struct inode *inode, \ + struct page *page, \ + int error \ + ), \ + TP_ARGS(inode, page, error)) +DEFINE_NFS_FSCACHE_PAGE_EVENT(nfs_fscache_read_page); +DEFINE_NFS_FSCACHE_PAGE_EVENT_DONE(nfs_fscache_read_page_exit); +DEFINE_NFS_FSCACHE_PAGE_EVENT(nfs_fscache_write_page); +DEFINE_NFS_FSCACHE_PAGE_EVENT_DONE(nfs_fscache_write_page_exit); + TRACE_EVENT(nfs_pgio_error, TP_PROTO( const struct nfs_pgio_header *hdr, -- cgit v1.2.3 From b5fdf66f6eb2560784c6f60131dc567de06267dc Mon Sep 17 00:00:00 2001 From: Dave Wysochanski Date: Tue, 1 Mar 2022 14:37:27 -0500 Subject: NFS: Remove remaining dfprintks related to fscache and remove NFSDBG_FSCACHE The fscache cookie APIs including fscache_acquire_cookie() and fscache_relinquish_cookie() now have very good tracing. Thus, there is no real need for dfprintks in the NFS fscache interface. The NFS fscache interface has removed all dfprintks so remove the NFSDBG_FSCACHE defines. Signed-off-by: Dave Wysochanski Signed-off-by: Trond Myklebust --- fs/nfs/fscache.c | 10 ---------- include/uapi/linux/nfs_fs.h | 2 +- 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c index 841b69aef189..4dee53ceb941 100644 --- a/fs/nfs/fscache.c +++ b/fs/nfs/fscache.c @@ -21,8 +21,6 @@ #include "fscache.h" #include "nfstrace.h" -#define NFSDBG_FACILITY NFSDBG_FSCACHE - #define NFS_MAX_KEY_LEN 1000 static bool nfs_append_int(char *key, int *_len, unsigned long long x) @@ -129,8 +127,6 @@ int nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int u vcookie = fscache_acquire_volume(key, NULL, /* preferred_cache */ NULL, 0 /* coherency_data */); - dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n", - nfss, vcookie); if (IS_ERR(vcookie)) { if (vcookie != ERR_PTR(-EBUSY)) { kfree(key); @@ -153,9 +149,6 @@ void nfs_fscache_release_super_cookie(struct super_block *sb) { struct nfs_server *nfss = NFS_SB(sb); - dfprintk(FSCACHE, "NFS: releasing superblock cookie (0x%p/0x%p)\n", - nfss, nfss->fscache); - fscache_relinquish_volume(nfss->fscache, NULL, false); nfss->fscache = NULL; kfree(nfss->fscache_uniq); @@ -193,8 +186,6 @@ void nfs_fscache_clear_inode(struct inode *inode) struct nfs_inode *nfsi = NFS_I(inode); struct fscache_cookie *cookie = nfs_i_fscache(inode); - dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie); - fscache_relinquish_cookie(cookie, false); nfsi->fscache = NULL; } @@ -229,7 +220,6 @@ void nfs_fscache_open_file(struct inode *inode, struct file *filp) fscache_use_cookie(cookie, open_for_write); if (open_for_write) { - dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi); nfs_fscache_update_auxdata(&auxdata, inode); fscache_invalidate(cookie, &auxdata, i_size_read(inode), 
FSCACHE_INVAL_DIO_WRITE); diff --git a/include/uapi/linux/nfs_fs.h b/include/uapi/linux/nfs_fs.h index 3afe3767c55d..ae0de165c014 100644 --- a/include/uapi/linux/nfs_fs.h +++ b/include/uapi/linux/nfs_fs.h @@ -52,7 +52,7 @@ #define NFSDBG_CALLBACK 0x0100 #define NFSDBG_CLIENT 0x0200 #define NFSDBG_MOUNT 0x0400 -#define NFSDBG_FSCACHE 0x0800 +#define NFSDBG_FSCACHE 0x0800 /* unused */ #define NFSDBG_PNFS 0x1000 #define NFSDBG_PNFS_LD 0x2000 #define NFSDBG_STATE 0x4000 -- cgit v1.2.3 From 944d95f766c6fe97fa358c661281a741758cee7e Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 7 Mar 2022 10:41:44 +1100 Subject: NFS: remove IS_SWAPFILE hack This code is pointless as IS_SWAPFILE is always defined. So remove it. Suggested-by: Mark Hemment Reviewed-by: Christoph Hellwig Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- fs/nfs/file.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 4d681683d13c..93c01aaa0a8d 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -44,11 +44,6 @@ static const struct vm_operations_struct nfs_file_vm_ops; -/* Hack for future NFS swap support */ -#ifndef IS_SWAPFILE -# define IS_SWAPFILE(inode) (0) -#endif - int nfs_check_flags(int flags) { if ((flags & (O_APPEND | O_DIRECT)) == (O_APPEND | O_DIRECT)) -- cgit v1.2.3 From c487216bec83b0c5a8803e5c61433d33ad7b104d Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 7 Mar 2022 10:41:44 +1100 Subject: SUNRPC/call_alloc: async tasks mustn't block waiting for memory When memory is short, new worker threads cannot be created and we depend on the minimum one rpciod thread to be able to handle everything. So it must not block waiting for memory. mempools are particularly a problem as memory can only be released back to the mempool by an async rpc task running. If all available workqueue threads are waiting on the mempool, no thread is available to return anything. rpc_malloc() can block, and this might cause deadlocks. So check RPC_IS_ASYNC(), rather than RPC_IS_SWAPPER() to determine if blocking is acceptable. 
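The flag selection adopted below by both rpc_malloc() and the rdma allocator boils down to:

        gfp_t gfp = GFP_KERNEL;

        if (RPC_IS_ASYNC(task))
                gfp = GFP_NOWAIT | __GFP_NOWARN;        /* never sleep on rpciod */
        if (RPC_IS_SWAPPER(task))
                gfp |= __GFP_MEMALLOC;                  /* swap I/O may dip into reserves */
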
Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- net/sunrpc/sched.c | 4 +++- net/sunrpc/xprtrdma/transport.c | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 52769b883c0a..e5b07562ba45 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -1023,8 +1023,10 @@ int rpc_malloc(struct rpc_task *task) struct rpc_buffer *buf; gfp_t gfp = GFP_KERNEL; + if (RPC_IS_ASYNC(task)) + gfp = GFP_NOWAIT | __GFP_NOWARN; if (RPC_IS_SWAPPER(task)) - gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN; + gfp |= __GFP_MEMALLOC; size += sizeof(struct rpc_buffer); if (size <= RPC_BUFFER_MAXSIZE) diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 42e375dbdadb..5714bf880e95 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -570,8 +570,10 @@ xprt_rdma_allocate(struct rpc_task *task) gfp_t flags; flags = RPCRDMA_DEF_GFP; + if (RPC_IS_ASYNC(task)) + flags = GFP_NOWAIT | __GFP_NOWARN; if (RPC_IS_SWAPPER(task)) - flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN; + flags |= __GFP_MEMALLOC; if (!rpcrdma_check_regbuf(r_xprt, req->rl_sendbuf, rqst->rq_callsize, flags)) -- cgit v1.2.3 From a41b05edfedb939440e83666f23de3ef9af33acf Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 7 Mar 2022 10:41:44 +1100 Subject: SUNRPC/auth: async tasks mustn't block waiting for memory When memory is short, new worker threads cannot be created and we depend on the minimum one rpciod thread to be able to handle everything. So it must not block waiting for memory. mempools are particularly a problem as memory can only be released back to the mempool by an async rpc task running. If all available workqueue threads are waiting on the mempool, no thread is available to return anything. lookup_cred() can block on a mempool or kmalloc - and this can cause deadlocks. So add a new RPCAUTH_LOOKUP flag for async lookups and don't block on memory. If the -ENOMEM gets back to call_refreshresult(), wait a short while and try again. HZ>>4 is chosen as it is used elsewhere for -ENOMEM retries. 
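Each credential lookup maps the new flag onto a non-blocking allocation; condensed from the AUTH_UNIX case in the diff below:

        gfp_t gfp = GFP_KERNEL;

        if (flags & RPCAUTH_LOOKUP_ASYNC)
                gfp = GFP_NOWAIT | __GFP_NOWARN;
        ret = mempool_alloc(unix_pool, gfp);
        if (!ret)
                return ERR_PTR(-ENOMEM);        /* surfaces as -ENOMEM; retried after HZ >> 4 */
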
Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- include/linux/sunrpc/auth.h | 1 + net/sunrpc/auth.c | 6 +++++- net/sunrpc/auth_gss/auth_gss.c | 6 +++++- net/sunrpc/auth_unix.c | 10 ++++++++-- net/sunrpc/clnt.c | 3 +++ 5 files changed, 22 insertions(+), 4 deletions(-) diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 98da816b5fc2..3e6ce288a7fc 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -99,6 +99,7 @@ struct rpc_auth_create_args { /* Flags for rpcauth_lookupcred() */ #define RPCAUTH_LOOKUP_NEW 0x01 /* Accept an uninitialised cred */ +#define RPCAUTH_LOOKUP_ASYNC 0x02 /* Don't block waiting for memory */ /* * Client authentication ops diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index a9f0d17fdb0d..6bfa19f9fa6a 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -615,6 +615,8 @@ rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags) }; struct rpc_cred *ret; + if (RPC_IS_ASYNC(task)) + lookupflags |= RPCAUTH_LOOKUP_ASYNC; ret = auth->au_ops->lookup_cred(auth, &acred, lookupflags); put_cred(acred.cred); return ret; @@ -631,6 +633,8 @@ rpcauth_bind_machine_cred(struct rpc_task *task, int lookupflags) if (!acred.principal) return NULL; + if (RPC_IS_ASYNC(task)) + lookupflags |= RPCAUTH_LOOKUP_ASYNC; return auth->au_ops->lookup_cred(auth, &acred, lookupflags); } @@ -654,7 +658,7 @@ rpcauth_bindcred(struct rpc_task *task, const struct cred *cred, int flags) }; if (flags & RPC_TASK_ASYNC) - lookupflags |= RPCAUTH_LOOKUP_NEW; + lookupflags |= RPCAUTH_LOOKUP_NEW | RPCAUTH_LOOKUP_ASYNC; if (task->tk_op_cred) /* Task must use exactly this rpc_cred */ new = get_rpccred(task->tk_op_cred); diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index affd64a54f02..ac0828108204 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -1341,7 +1341,11 @@ gss_hash_cred(struct auth_cred *acred, unsigned int hashbits) static struct rpc_cred * gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) { - return rpcauth_lookup_credcache(auth, acred, flags, GFP_KERNEL); + gfp_t gfp = GFP_KERNEL; + + if (flags & RPCAUTH_LOOKUP_ASYNC) + gfp = GFP_NOWAIT | __GFP_NOWARN; + return rpcauth_lookup_credcache(auth, acred, flags, gfp); } static struct rpc_cred * diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index 3600d8641644..c629d366030e 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c @@ -43,8 +43,14 @@ unx_destroy(struct rpc_auth *auth) static struct rpc_cred * unx_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) { - struct rpc_cred *ret = mempool_alloc(unix_pool, GFP_KERNEL); - + gfp_t gfp = GFP_KERNEL; + struct rpc_cred *ret; + + if (flags & RPCAUTH_LOOKUP_ASYNC) + gfp = GFP_NOWAIT | __GFP_NOWARN; + ret = mempool_alloc(unix_pool, gfp); + if (!ret) + return ERR_PTR(-ENOMEM); rpcauth_init_cred(ret, acred, auth, &unix_credops); ret->cr_flags = 1UL << RPCAUTH_CRED_UPTODATE; return ret; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 97165a545cb3..9556eb7b065b 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1745,6 +1745,9 @@ call_refreshresult(struct rpc_task *task) task->tk_cred_retry--; trace_rpc_retry_refresh_status(task); return; + case -ENOMEM: + rpc_delay(task, HZ >> 4); + return; } trace_rpc_refresh_status(task); rpc_call_rpcerror(task, status); -- cgit v1.2.3 From a721035477fb5fb8abc738fbe410b07c12af3dc5 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 7 Mar 2022 10:41:44 +1100 
Subject: SUNRPC/xprt: async tasks mustn't block waiting for memory When memory is short, new worker threads cannot be created and we depend on the minimum one rpciod thread to be able to handle everything. So it must not block waiting for memory. xprt_dynamic_alloc_slot can block indefinitely. This can tie up all workqueue threads and NFS can deadlock. So when called from a workqueue, set __GFP_NORETRY. The rdma alloc_slot already does not block. However it sets the error to -EAGAIN suggesting this will trigger a sleep. It does not. As we can see in call_reserveresult(), only -ENOMEM causes a sleep. -EAGAIN causes immediate retry. Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 5 ++++- net/sunrpc/xprtrdma/transport.c | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 9f0025e0742c..2d1f84aea516 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1687,12 +1687,15 @@ out: static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt) { struct rpc_rqst *req = ERR_PTR(-EAGAIN); + gfp_t gfp_mask = GFP_KERNEL; if (xprt->num_reqs >= xprt->max_reqs) goto out; ++xprt->num_reqs; spin_unlock(&xprt->reserve_lock); - req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL); + if (current->flags & PF_WQ_WORKER) + gfp_mask |= __GFP_NORETRY | __GFP_NOWARN; + req = kzalloc(sizeof(*req), gfp_mask); spin_lock(&xprt->reserve_lock); if (req != NULL) goto out; diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 5714bf880e95..923e4b512ee9 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -517,7 +517,7 @@ xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) return; out_sleep: - task->tk_status = -EAGAIN; + task->tk_status = -ENOMEM; xprt_add_backlog(xprt, task); } -- cgit v1.2.3 From a80a8461868905823609be97f91776a26befe839 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 7 Mar 2022 10:41:44 +1100 Subject: SUNRPC: remove scheduling boost for "SWAPPER" tasks. Currently, tasks marked as "swapper" tasks get put to the front of non-priority rpc_queues, and are sorted earlier than non-swapper tasks on the transport's ->xmit_queue. This is pointless as currently *all* tasks for a mount that has swap enabled on *any* file are marked as "swapper" tasks. So the net result is that the non-priority rpc_queues are reverse-ordered (LIFO). This scheduling boost is not necessary to avoid deadlocks, and hurts fairness, so remove it. If there were a need to expedite some requests, the tk_priority mechanism is a more appropriate tool. Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- net/sunrpc/sched.c | 7 ------- net/sunrpc/xprt.c | 11 ----------- 2 files changed, 18 deletions(-) diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index e5b07562ba45..690bd3401820 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -186,11 +186,6 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, /* * Add new request to wait queue. - * - * Swapper tasks always get inserted at the head of the queue. - * This should avoid many nasty memory deadlocks and hopefully - * improve overall performance. - * Everyone else gets appended to the queue to ensure proper FIFO behavior. 
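With the special case gone, the non-priority enqueue below reduces to a strict FIFO append:

        if (RPC_IS_PRIORITY(queue))
                __rpc_add_wait_queue_priority(queue, task, queue_priority);
        else
                list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);        /* plain FIFO */
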
*/ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task, @@ -199,8 +194,6 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, INIT_LIST_HEAD(&task->u.tk_wait.timer_list); if (RPC_IS_PRIORITY(queue)) __rpc_add_wait_queue_priority(queue, task, queue_priority); - else if (RPC_IS_SWAPPER(task)) - list_add(&task->u.tk_wait.list, &queue->tasks[0]); else list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); task->tk_waitqueue = queue; diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 2d1f84aea516..2f165634df54 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1354,17 +1354,6 @@ xprt_request_enqueue_transmit(struct rpc_task *task) INIT_LIST_HEAD(&req->rq_xmit2); goto out; } - } else if (RPC_IS_SWAPPER(task)) { - list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { - if (pos->rq_cong || pos->rq_bytes_sent) - continue; - if (RPC_IS_SWAPPER(pos->rq_task)) - continue; - /* Note: req is added _before_ pos */ - list_add_tail(&req->rq_xmit, &pos->rq_xmit); - INIT_LIST_HEAD(&req->rq_xmit2); - goto out; - } } else if (!req->rq_seqno) { list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { if (pos->rq_task->tk_owner != task->tk_owner) -- cgit v1.2.3 From 89c2be8a951654758dffeaaa6272328d9c8f29be Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 7 Mar 2022 10:41:44 +1100 Subject: NFS: discard NFS_RPC_SWAPFLAGS and RPC_TASK_ROOTCREDS NFS_RPC_SWAPFLAGS is only used for READ requests. It sets RPC_TASK_SWAPPER which gives some memory-allocation priority to requests. This is not needed for swap READ - though it is for writes where it is set via a different mechanism. RPC_TASK_ROOTCREDS causes the 'machine' credential to be used. This is not needed as the root credential is saved when the swap file is opened, and this is used for all IO. So NFS_RPC_SWAPFLAGS isn't needed, and as it is the only user of RPC_TASK_ROOTCREDS, that isn't needed either. Remove both. Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- fs/nfs/read.c | 4 ---- include/linux/nfs_fs.h | 5 ----- include/linux/sunrpc/sched.h | 1 - include/trace/events/sunrpc.h | 1 - net/sunrpc/auth.c | 2 +- 5 files changed, 1 insertion(+), 12 deletions(-) diff --git a/fs/nfs/read.c b/fs/nfs/read.c index e4c1a49b0126..5e7657374bc3 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -194,10 +194,6 @@ static void nfs_initiate_read(struct nfs_pgio_header *hdr, const struct nfs_rpc_ops *rpc_ops, struct rpc_task_setup *task_setup_data, int how) { - struct inode *inode = hdr->inode; - int swap_flags = IS_SWAPFILE(inode) ? 
NFS_RPC_SWAPFLAGS : 0; - - task_setup_data->flags |= swap_flags; rpc_ops->read_setup(hdr, msg); trace_nfs_initiate_read(hdr); } diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 3893386ceaed..9074ed0b65aa 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -45,11 +45,6 @@ */ #define NFS_MAX_TRANSPORTS 16 -/* - * These are the default flags for swap requests - */ -#define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS) - /* * Size of the NFS directory verifier */ diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index db964bb63912..56710f8056d3 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -124,7 +124,6 @@ struct rpc_task_setup { #define RPC_TASK_MOVEABLE 0x0004 /* nfs4.1+ rpc tasks */ #define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */ #define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */ -#define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */ #define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */ #define RPC_TASK_NO_ROUND_ROBIN 0x0100 /* send requests on "main" xprt */ #define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */ diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 29982d60b68a..ac33892da411 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -311,7 +311,6 @@ TRACE_EVENT(rpc_request, { RPC_TASK_MOVEABLE, "MOVEABLE" }, \ { RPC_TASK_NULLCREDS, "NULLCREDS" }, \ { RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \ - { RPC_TASK_ROOTCREDS, "ROOTCREDS" }, \ { RPC_TASK_DYNAMIC, "DYNAMIC" }, \ { RPC_TASK_NO_ROUND_ROBIN, "NO_ROUND_ROBIN" }, \ { RPC_TASK_SOFT, "SOFT" }, \ diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 6bfa19f9fa6a..682fcd24bf43 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -670,7 +670,7 @@ rpcauth_bindcred(struct rpc_task *task, const struct cred *cred, int flags) /* If machine cred couldn't be bound, try a root cred */ if (new) ; - else if (cred == &machine_cred || (flags & RPC_TASK_ROOTCREDS)) + else if (cred == &machine_cred) new = rpcauth_bind_root_cred(task, lookupflags); else if (flags & RPC_TASK_NULLCREDS) new = authnull_ops.lookup_cred(NULL, NULL, 0); -- cgit v1.2.3 From 8db55a032ac7ac1ed7b98d6b1dc980e6378c652f Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 7 Mar 2022 10:41:44 +1100 Subject: SUNRPC: improve 'swap' handling: scheduling and PF_MEMALLOC rpc tasks can be marked as RPC_TASK_SWAPPER. This causes GFP_MEMALLOC to be used for some allocations. This is needed in some cases, but not in all where it is currently provided, and in some where it isn't provided. Currently *all* tasks associated with a rpc_client on which swap is enabled get the flag and hence some GFP_MEMALLOC support. GFP_MEMALLOC is provided for ->buf_alloc() but only swap-writes need it. However xdr_alloc_bvec does not get GFP_MEMALLOC - though it often does need it. xdr_alloc_bvec is called while the XPRT_LOCK is held. If this blocks, then it blocks all other queued tasks. So this allocation needs GFP_MEMALLOC for *all* requests, not just writes, when the xprt is used for any swap writes. Similarly, if the transport is not connected, that will block all requests including swap writes, so memory allocations should get GFP_MEMALLOC if swap writes are possible. So with this patch: 1/ we ONLY set RPC_TASK_SWAPPER for swap writes. 2/ __rpc_execute() sets PF_MEMALLOC while handling any task with RPC_TASK_SWAPPER set, or when handling any task that holds the XPRT_LOCKED lock on an xprt used for swap. 
This removes the need for the RPC_IS_SWAPPER() test in ->buf_alloc handlers. 3/ xprt_prepare_transmit() sets PF_MEMALLOC after locking any task to a swapper xprt. __rpc_execute() will clear it. 3/ PF_MEMALLOC is set for all the connect workers. Reviewed-by: Chuck Lever (for xprtrdma parts) Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 2 ++ net/sunrpc/clnt.c | 2 -- net/sunrpc/sched.c | 20 +++++++++++++++++--- net/sunrpc/xprt.c | 3 +++ net/sunrpc/xprtrdma/transport.c | 6 ++++-- net/sunrpc/xprtsock.c | 8 ++++++++ 6 files changed, 34 insertions(+), 7 deletions(-) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 74d258781205..599a82406d38 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1412,6 +1412,8 @@ static void nfs_initiate_write(struct nfs_pgio_header *hdr, { int priority = flush_task_priority(how); + if (IS_SWAPFILE(hdr->inode)) + task_setup_data->flags |= RPC_TASK_SWAPPER; task_setup_data->priority = priority; rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client); trace_nfs_initiate_write(hdr); diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 9556eb7b065b..4117ea4caa2e 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1085,8 +1085,6 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt) task->tk_flags |= RPC_TASK_TIMEOUT; if (clnt->cl_noretranstimeo) task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT; - if (atomic_read(&clnt->cl_swapper)) - task->tk_flags |= RPC_TASK_SWAPPER; /* Add to the client's list of all tasks */ spin_lock(&clnt->cl_lock); list_add_tail(&task->tk_task, &clnt->cl_tasks); diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 690bd3401820..7c8f87ebdbc0 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -869,6 +869,15 @@ void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata) ops->rpc_release(calldata); } +static bool xprt_needs_memalloc(struct rpc_xprt *xprt, struct rpc_task *tk) +{ + if (!xprt) + return false; + if (!atomic_read(&xprt->swapper)) + return false; + return test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == tk; +} + /* * This is the RPC `scheduler' (or rather, the finite state machine). 
*/ @@ -877,6 +886,7 @@ static void __rpc_execute(struct rpc_task *task) struct rpc_wait_queue *queue; int task_is_async = RPC_IS_ASYNC(task); int status = 0; + unsigned long pflags = current->flags; WARN_ON_ONCE(RPC_IS_QUEUED(task)); if (RPC_IS_QUEUED(task)) @@ -899,6 +909,10 @@ static void __rpc_execute(struct rpc_task *task) } if (!do_action) break; + if (RPC_IS_SWAPPER(task) || + xprt_needs_memalloc(task->tk_xprt, task)) + current->flags |= PF_MEMALLOC; + trace_rpc_task_run_action(task, do_action); do_action(task); @@ -936,7 +950,7 @@ static void __rpc_execute(struct rpc_task *task) rpc_clear_running(task); spin_unlock(&queue->lock); if (task_is_async) - return; + goto out; /* sync task: sleep here */ trace_rpc_task_sync_sleep(task, task->tk_action); @@ -960,6 +974,8 @@ static void __rpc_execute(struct rpc_task *task) /* Release all resources associated with the task */ rpc_release_task(task); +out: + current_restore_flags(pflags, PF_MEMALLOC); } /* @@ -1018,8 +1034,6 @@ int rpc_malloc(struct rpc_task *task) if (RPC_IS_ASYNC(task)) gfp = GFP_NOWAIT | __GFP_NOWARN; - if (RPC_IS_SWAPPER(task)) - gfp |= __GFP_MEMALLOC; size += sizeof(struct rpc_buffer); if (size <= RPC_BUFFER_MAXSIZE) diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 2f165634df54..bbe913121f43 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1492,6 +1492,9 @@ bool xprt_prepare_transmit(struct rpc_task *task) return false; } + if (atomic_read(&xprt->swapper)) + /* This will be clear in __rpc_execute */ + current->flags |= PF_MEMALLOC; return true; } diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 923e4b512ee9..6b7e10e5a141 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -235,8 +235,11 @@ xprt_rdma_connect_worker(struct work_struct *work) struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt, rx_connect_worker.work); struct rpc_xprt *xprt = &r_xprt->rx_xprt; + unsigned int pflags = current->flags; int rc; + if (atomic_read(&xprt->swapper)) + current->flags |= PF_MEMALLOC; rc = rpcrdma_xprt_connect(r_xprt); xprt_clear_connecting(xprt); if (!rc) { @@ -250,6 +253,7 @@ xprt_rdma_connect_worker(struct work_struct *work) rpcrdma_xprt_disconnect(r_xprt); xprt_unlock_connect(xprt, r_xprt); xprt_wake_pending_tasks(xprt, rc); + current_restore_flags(pflags, PF_MEMALLOC); } /** @@ -572,8 +576,6 @@ xprt_rdma_allocate(struct rpc_task *task) flags = RPCRDMA_DEF_GFP; if (RPC_IS_ASYNC(task)) flags = GFP_NOWAIT | __GFP_NOWARN; - if (RPC_IS_SWAPPER(task)) - flags |= __GFP_MEMALLOC; if (!rpcrdma_check_regbuf(r_xprt, req->rl_sendbuf, rqst->rq_callsize, flags)) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 0f39e08ee580..61d3293f1d68 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2052,7 +2052,10 @@ static void xs_udp_setup_socket(struct work_struct *work) struct rpc_xprt *xprt = &transport->xprt; struct socket *sock; int status = -EIO; + unsigned int pflags = current->flags; + if (atomic_read(&xprt->swapper)) + current->flags |= PF_MEMALLOC; sock = xs_create_sock(xprt, transport, xs_addr(xprt)->sa_family, SOCK_DGRAM, IPPROTO_UDP, false); @@ -2072,6 +2075,7 @@ out: xprt_clear_connecting(xprt); xprt_unlock_connect(xprt, transport); xprt_wake_pending_tasks(xprt, status); + current_restore_flags(pflags, PF_MEMALLOC); } /** @@ -2231,7 +2235,10 @@ static void xs_tcp_setup_socket(struct work_struct *work) struct socket *sock = transport->sock; struct rpc_xprt *xprt = &transport->xprt; int status; + unsigned 
int pflags = current->flags; + if (atomic_read(&xprt->swapper)) + current->flags |= PF_MEMALLOC; if (!sock) { sock = xs_create_sock(xprt, transport, xs_addr(xprt)->sa_family, SOCK_STREAM, @@ -2296,6 +2303,7 @@ out: xprt_clear_connecting(xprt); out_unlock: xprt_unlock_connect(xprt, transport); + current_restore_flags(pflags, PF_MEMALLOC); } /** -- cgit v1.2.3 From 4dc73c679114a2f408567e2e44770ed934190db2 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 7 Mar 2022 10:41:44 +1100 Subject: NFSv4: keep state manager thread active if swap is enabled If we are swapping over NFSv4, we may not be able to allocate memory to start the state-manager thread at the time when we need it. So keep it always running when swap is enabled, and just signal it to start. This requires updating and testing the cl_swapper count on the root rpc_clnt after following all ->cl_parent links. Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- fs/nfs/file.c | 15 ++++++++++++--- fs/nfs/nfs4_fs.h | 1 + fs/nfs/nfs4proc.c | 20 ++++++++++++++++++++ fs/nfs/nfs4state.c | 40 +++++++++++++++++++++++++++++++++------- include/linux/nfs_xdr.h | 2 ++ net/sunrpc/clnt.c | 2 ++ 6 files changed, 70 insertions(+), 10 deletions(-) diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 93c01aaa0a8d..d31bc430dce3 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -483,8 +483,9 @@ static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file, { unsigned long blocks; long long isize; - struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host); - struct inode *inode = file->f_mapping->host; + struct inode *inode = file_inode(file); + struct rpc_clnt *clnt = NFS_CLIENT(inode); + struct nfs_client *cl = NFS_SERVER(inode)->nfs_client; spin_lock(&inode->i_lock); blocks = inode->i_blocks; @@ -497,14 +498,22 @@ static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file, *span = sis->pages; + + if (cl->rpc_ops->enable_swap) + cl->rpc_ops->enable_swap(inode); + return rpc_clnt_swap_activate(clnt); } static void nfs_swap_deactivate(struct file *file) { - struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host); + struct inode *inode = file_inode(file); + struct rpc_clnt *clnt = NFS_CLIENT(inode); + struct nfs_client *cl = NFS_SERVER(inode)->nfs_client; rpc_clnt_swap_deactivate(clnt); + if (cl->rpc_ops->disable_swap) + cl->rpc_ops->disable_swap(file_inode(file)); } const struct address_space_operations nfs_file_aops = { diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 84f39b6f1b1e..79df6e83881b 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -42,6 +42,7 @@ enum nfs4_client_state { NFS4CLNT_LEASE_MOVED, NFS4CLNT_DELEGATION_EXPIRED, NFS4CLNT_RUN_MANAGER, + NFS4CLNT_MANAGER_AVAILABLE, NFS4CLNT_RECALL_RUNNING, NFS4CLNT_RECALL_ANY_LAYOUT_READ, NFS4CLNT_RECALL_ANY_LAYOUT_RW, diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index fd8eece12e94..dd7a4c2a3f05 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -10468,6 +10468,24 @@ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) return error + error2 + error3; } +static void nfs4_enable_swap(struct inode *inode) +{ + /* The state manager thread must always be running. + * It will notice the client is a swapper, and stay put. + */ + struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; + + nfs4_schedule_state_manager(clp); +} + +static void nfs4_disable_swap(struct inode *inode) +{ + /* The state manager thread will now exit once it is + * woken. 
+ */ + wake_up_var(&NFS_SERVER(inode)->nfs_client->cl_state); +} + static const struct inode_operations nfs4_dir_inode_operations = { .create = nfs_create, .lookup = nfs_lookup, @@ -10545,6 +10563,8 @@ const struct nfs_rpc_ops nfs_v4_clientops = { .create_server = nfs4_create_server, .clone_server = nfs_clone_server, .discover_trunking = nfs4_discover_trunking, + .enable_swap = nfs4_enable_swap, + .disable_swap = nfs4_disable_swap, }; static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 58054dfdf2b0..4b2ea239a537 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1207,10 +1207,17 @@ void nfs4_schedule_state_manager(struct nfs_client *clp) { struct task_struct *task; char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1]; + struct rpc_clnt *cl = clp->cl_rpcclient; + + while (cl != cl->cl_parent) + cl = cl->cl_parent; set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); - if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) + if (test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) != 0) { + wake_up_var(&clp->cl_state); return; + } + set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state); __module_get(THIS_MODULE); refcount_inc(&clp->cl_count); @@ -1226,6 +1233,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp) printk(KERN_ERR "%s: kthread_run: %ld\n", __func__, PTR_ERR(task)); nfs4_clear_state_manager_bit(clp); + clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); nfs_put_client(clp); module_put(THIS_MODULE); } @@ -2680,12 +2688,8 @@ static void nfs4_state_manager(struct nfs_client *clp) clear_bit(NFS4CLNT_RECALL_RUNNING, &clp->cl_state); } - /* Did we race with an attempt to give us more work? */ - if (!test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state)) - return; - if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) - return; - memflags = memalloc_nofs_save(); + return; + } while (refcount_read(&clp->cl_count) > 1 && !signalled()); goto out_drain; @@ -2706,9 +2710,31 @@ out_drain: static int nfs4_run_state_manager(void *ptr) { struct nfs_client *clp = ptr; + struct rpc_clnt *cl = clp->cl_rpcclient; + + while (cl != cl->cl_parent) + cl = cl->cl_parent; allow_signal(SIGKILL); +again: + set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state); nfs4_state_manager(clp); + if (atomic_read(&cl->cl_swapper)) { + wait_var_event_interruptible(&clp->cl_state, + test_bit(NFS4CLNT_RUN_MANAGER, + &clp->cl_state)); + if (atomic_read(&cl->cl_swapper) && + test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state)) + goto again; + /* Either no longer a swapper, or were signalled */ + } + clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); + + if (refcount_read(&clp->cl_count) > 1 && !signalled() && + test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) && + !test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state)) + goto again; + nfs_put_client(clp); module_put_and_kthread_exit(0); return 0; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 82f7c2730b9a..49ba486aea5f 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1797,6 +1797,8 @@ struct nfs_rpc_ops { struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, rpc_authflavor_t); int (*discover_trunking)(struct nfs_server *, struct nfs_fh *); + void (*enable_swap)(struct inode *inode); + void (*disable_swap)(struct inode *inode); }; /* diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 4117ea4caa2e..0f54a56d19d2 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ 
-3069,6 +3069,8 @@ rpc_clnt_swap_activate_callback(struct rpc_clnt *clnt, int rpc_clnt_swap_activate(struct rpc_clnt *clnt) { + while (clnt != clnt->cl_parent) + clnt = clnt->cl_parent; if (atomic_inc_return(&clnt->cl_swapper) == 1) return rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_swap_activate_callback, NULL); -- cgit v1.2.3 From 64158668ac8b31626a8ce48db4cad08496eb8340 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 7 Mar 2022 10:41:44 +1100 Subject: NFS: swap IO handling is slightly different for O_DIRECT IO 1/ Taking the i_rwsem for swap IO triggers lockdep warnings regarding possible deadlocks with "fs_reclaim". These deadlocks could, I believe, eventuate if a buffered read on the swapfile was attempted. We don't need coherence with the page cache for a swap file, and buffered writes are forbidden anyway. There is no other need for i_rwsem during direct IO. So never take it for swap_rw() 2/ generic_write_checks() explicitly forbids writes to swap, and performs checks that are not needed for swap. So bypass it for swap_rw(). Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- fs/nfs/direct.c | 42 ++++++++++++++++++++++++++++-------------- fs/nfs/file.c | 4 ++-- include/linux/nfs_fs.h | 8 ++++---- 3 files changed, 34 insertions(+), 20 deletions(-) diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index eabfdab543c8..04aaf39a05cb 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -173,8 +173,8 @@ ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE); if (iov_iter_rw(iter) == READ) - return nfs_file_direct_read(iocb, iter); - return nfs_file_direct_write(iocb, iter); + return nfs_file_direct_read(iocb, iter, true); + return nfs_file_direct_write(iocb, iter, true); } static void nfs_direct_release_pages(struct page **pages, unsigned int npages) @@ -425,6 +425,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, * nfs_file_direct_read - file direct read operation for NFS files * @iocb: target I/O control block * @iter: vector of user buffers into which to read data + * @swap: flag indicating this is swap IO, not O_DIRECT IO * * We use this function for direct reads instead of calling * generic_file_aio_read() in order to avoid gfar's check to see if @@ -440,7 +441,8 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, * client must read the updated atime from the server back into its * cache. 
*/ -ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter) +ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, + bool swap) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; @@ -482,12 +484,14 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter) if (iter_is_iovec(iter)) dreq->flags = NFS_ODIRECT_SHOULD_DIRTY; - nfs_start_io_direct(inode); + if (!swap) + nfs_start_io_direct(inode); NFS_I(inode)->read_io += count; requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos); - nfs_end_io_direct(inode); + if (!swap) + nfs_end_io_direct(inode); if (requested > 0) { result = nfs_direct_wait(dreq); @@ -876,6 +880,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, * nfs_file_direct_write - file direct write operation for NFS files * @iocb: target I/O control block * @iter: vector of user buffers from which to write data + * @swap: flag indicating this is swap IO, not O_DIRECT IO * * We use this function for direct writes instead of calling * generic_file_aio_write() in order to avoid taking the inode @@ -892,7 +897,8 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, * Note that O_APPEND is not supported for NFS direct writes, as there * is no atomic O_APPEND write facility in the NFS protocol. */ -ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) +ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, + bool swap) { ssize_t result, requested; size_t count; @@ -906,7 +912,11 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n", file, iov_iter_count(iter), (long long) iocb->ki_pos); - result = generic_write_checks(iocb, iter); + if (swap) + /* bypass generic checks */ + result = iov_iter_count(iter); + else + result = generic_write_checks(iocb, iter); if (result <= 0) return result; count = result; @@ -937,16 +947,20 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) dreq->iocb = iocb; pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode); - nfs_start_io_direct(inode); + if (swap) { + requested = nfs_direct_write_schedule_iovec(dreq, iter, pos); + } else { + nfs_start_io_direct(inode); - requested = nfs_direct_write_schedule_iovec(dreq, iter, pos); + requested = nfs_direct_write_schedule_iovec(dreq, iter, pos); - if (mapping->nrpages) { - invalidate_inode_pages2_range(mapping, - pos >> PAGE_SHIFT, end); - } + if (mapping->nrpages) { + invalidate_inode_pages2_range(mapping, + pos >> PAGE_SHIFT, end); + } - nfs_end_io_direct(inode); + nfs_end_io_direct(inode); + } if (requested > 0) { result = nfs_direct_wait(dreq); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index d31bc430dce3..81c80548a5c6 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -157,7 +157,7 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to) ssize_t result; if (iocb->ki_flags & IOCB_DIRECT) - return nfs_file_direct_read(iocb, to); + return nfs_file_direct_read(iocb, to, false); dprintk("NFS: read(%pD2, %zu@%lu)\n", iocb->ki_filp, @@ -623,7 +623,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) return result; if (iocb->ki_flags & IOCB_DIRECT) - return nfs_file_direct_write(iocb, from); + return nfs_file_direct_write(iocb, from, false); dprintk("NFS: write(%pD2, %zu@%Ld)\n", file, iov_iter_count(from), (long long) iocb->ki_pos); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 
9074ed0b65aa..c47c448befc8 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -508,10 +508,10 @@ static inline const struct cred *nfs_file_cred(struct file *file) * linux/fs/nfs/direct.c */ extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *); -extern ssize_t nfs_file_direct_read(struct kiocb *iocb, - struct iov_iter *iter); -extern ssize_t nfs_file_direct_write(struct kiocb *iocb, - struct iov_iter *iter); +ssize_t nfs_file_direct_read(struct kiocb *iocb, + struct iov_iter *iter, bool swap); +ssize_t nfs_file_direct_write(struct kiocb *iocb, + struct iov_iter *iter, bool swap); /* * linux/fs/nfs/dir.c -- cgit v1.2.3 From c265de257f558a05c1859ee9e3fed04883b9ec0e Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 7 Mar 2022 10:41:44 +1100 Subject: NFS: swap-out must always use STABLE writes. The commit handling code is not safe against memory-pressure deadlocks when writing to swap. In particular, nfs_commitdata_alloc() blocks indefinitely waiting for memory, and this can consume all available workqueue threads. swap-out most likely uses STABLE writes anyway as COND_STABLE indicates that a stable write should be used if the write fits in a single request, and it normally does. However if we ever swap with a small wsize, or gather unusually large numbers of pages for a single write, this might change. For safety, make it explicit in the code that direct writes used for swap must always use FLUSH_STABLE. Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- fs/nfs/direct.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 04aaf39a05cb..11c566d8769f 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -794,7 +794,7 @@ static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = { */ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, struct iov_iter *iter, - loff_t pos) + loff_t pos, int ioflags) { struct nfs_pageio_descriptor desc; struct inode *inode = dreq->inode; @@ -802,7 +802,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, size_t requested_bytes = 0; size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE); - nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false, + nfs_pageio_init_write(&desc, inode, ioflags, false, &nfs_direct_write_completion_ops); desc.pg_dreq = dreq; get_dreq(dreq); @@ -948,11 +948,13 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode); if (swap) { - requested = nfs_direct_write_schedule_iovec(dreq, iter, pos); + requested = nfs_direct_write_schedule_iovec(dreq, iter, pos, + FLUSH_STABLE); } else { nfs_start_io_direct(inode); - requested = nfs_direct_write_schedule_iovec(dreq, iter, pos); + requested = nfs_direct_write_schedule_iovec(dreq, iter, pos, + FLUSH_COND_STABLE); if (mapping->nrpages) { invalidate_inode_pages2_range(mapping, -- cgit v1.2.3 From 693486d5f8951780a9bb31f7fe935171a80010e4 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 7 Mar 2022 10:41:45 +1100 Subject: SUNRPC: change locking for xs_swap_enable/disable It is not in general safe to wait for XPRT_LOCKED to clear. A wakeup is only sent when - connection completes - sock close completes so during normal operations, this can wait indefinitely. The event we need to protect against is ->inet being set to NULL, and that happens under the recv_mutex lock. So drop the handlign of XPRT_LOCKED and use recv_mutex instead. 
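The idea above is that the mutex which already serialises teardown of ->inet can also cover the swap enable/disable paths, so there is no need to wait on a lock bit whose wakeups only fire on connect or close. As a rough userspace sketch of that pattern only (invented names, not the real SUNRPC structures):

#include <pthread.h>
#include <stdio.h>

struct fake_transport {
	pthread_mutex_t recv_mutex;	/* stands in for sock_xprt->recv_mutex */
	int *inet;			/* stands in for the socket pointer */
	int swapper;			/* stands in for xprt->swapper */
};

/* "enable swap": bump the counter and mark the socket, under the same
 * lock a reset path would take before clearing ->inet. */
static void fake_enable_swap(struct fake_transport *t)
{
	pthread_mutex_lock(&t->recv_mutex);
	if (++t->swapper == 1 && t->inet)
		printf("marking socket %d\n", *t->inet);
	pthread_mutex_unlock(&t->recv_mutex);
}

int main(void)
{
	int sock = 42;
	struct fake_transport t = { .inet = &sock };

	pthread_mutex_init(&t.recv_mutex, NULL);
	fake_enable_swap(&t);
	pthread_mutex_destroy(&t.recv_mutex);
	return 0;
}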
Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- net/sunrpc/xprtsock.c | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 61d3293f1d68..7e39f87cde2d 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1936,9 +1936,9 @@ static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task) #if IS_ENABLED(CONFIG_SUNRPC_SWAP) /* - * Note that this should be called with XPRT_LOCKED held (or when we otherwise - * know that we have exclusive access to the socket), to guard against - * races with xs_reset_transport. + * Note that this should be called with XPRT_LOCKED held, or recv_mutex + * held, or when we otherwise know that we have exclusive access to the + * socket, to guard against races with xs_reset_transport. */ static void xs_set_memalloc(struct rpc_xprt *xprt) { @@ -1967,13 +1967,11 @@ xs_enable_swap(struct rpc_xprt *xprt) { struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt); - if (atomic_inc_return(&xprt->swapper) != 1) - return 0; - if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) - return -ERESTARTSYS; - if (xs->inet) + mutex_lock(&xs->recv_mutex); + if (atomic_inc_return(&xprt->swapper) == 1 && + xs->inet) sk_set_memalloc(xs->inet); - xprt_release_xprt(xprt, NULL); + mutex_unlock(&xs->recv_mutex); return 0; } @@ -1989,13 +1987,11 @@ xs_disable_swap(struct rpc_xprt *xprt) { struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt); - if (!atomic_dec_and_test(&xprt->swapper)) - return; - if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) - return; - if (xs->inet) + mutex_lock(&xs->recv_mutex); + if (atomic_dec_and_test(&xprt->swapper) && + xs->inet) sk_clear_memalloc(xs->inet); - xprt_release_xprt(xprt, NULL); + mutex_unlock(&xs->recv_mutex); } #else static void xs_set_memalloc(struct rpc_xprt *xprt) -- cgit v1.2.3 From a43bf604446414103b7535f38e739b65601c4fb2 Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Wed, 16 Mar 2022 18:24:26 -0400 Subject: NFSv4.1 provide mount option to toggle trunking discovery Introduce a new mount option -- trunkdiscovery,notrunkdiscovery -- to toggle whether or not the client will engage in actively discovery of trunking locations. 
v2 make notrunkdiscovery default Signed-off-by: Olga Kornievskaia Fixes: 1976b2b31462 ("NFSv4.1 query for fs_location attr on a new file system") Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 3 ++- fs/nfs/fs_context.c | 8 ++++++++ include/linux/nfs_fs_sb.h | 1 + 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/fs/nfs/client.c b/fs/nfs/client.c index d1f34229e11a..e828504cc396 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -857,7 +857,8 @@ static int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, str } if (clp->rpc_ops->discover_trunking != NULL && - (server->caps & NFS_CAP_FS_LOCATIONS)) { + (server->caps & NFS_CAP_FS_LOCATIONS && + (server->flags & NFS_MOUNT_TRUNK_DISCOVERY))) { error = clp->rpc_ops->discover_trunking(server, mntfh); if (error < 0) return error; diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c index ea17fa1f31ec..e2d59bb5e6bb 100644 --- a/fs/nfs/fs_context.c +++ b/fs/nfs/fs_context.c @@ -80,6 +80,7 @@ enum nfs_param { Opt_source, Opt_tcp, Opt_timeo, + Opt_trunkdiscovery, Opt_udp, Opt_v, Opt_vers, @@ -180,6 +181,7 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = { fsparam_string("source", Opt_source), fsparam_flag ("tcp", Opt_tcp), fsparam_u32 ("timeo", Opt_timeo), + fsparam_flag_no("trunkdiscovery", Opt_trunkdiscovery), fsparam_flag ("udp", Opt_udp), fsparam_flag ("v2", Opt_v), fsparam_flag ("v3", Opt_v), @@ -529,6 +531,12 @@ static int nfs_fs_context_parse_param(struct fs_context *fc, else ctx->flags &= ~NFS_MOUNT_NOCTO; break; + case Opt_trunkdiscovery: + if (result.negated) + ctx->flags &= ~NFS_MOUNT_TRUNK_DISCOVERY; + else + ctx->flags |= NFS_MOUNT_TRUNK_DISCOVERY; + break; case Opt_ac: if (result.negated) ctx->flags |= NFS_MOUNT_NOAC; diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index ca0959e51e81..b0e3fd550122 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -151,6 +151,7 @@ struct nfs_server { #define NFS_MOUNT_SOFTREVAL 0x800000 #define NFS_MOUNT_WRITE_EAGER 0x01000000 #define NFS_MOUNT_WRITE_WAIT 0x02000000 +#define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000 unsigned int fattr_valid; /* Valid attributes */ unsigned int caps; /* server capabilities */ -- cgit v1.2.3 From 648a4548d622c4ae965058db1a6b5b95c062789a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 21 Mar 2022 22:27:13 -0400 Subject: NFS: Don't deadlock when cookie hashes collide In the very rare case where the readdir reply contains multiple cookies that map to the same hash value, we can end up deadlocking waiting for a page lock that we already hold. In this case we should fail the page lock by using grab_cache_page_nowait(). 
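The fix described above relies on a non-blocking lock attempt: if the page for the colliding hash is already locked, possibly by the caller itself, give up rather than sleep. A minimal userspace analogue of that trylock pattern (invented names, not the NFS readdir code):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

/* "get the next cache page": if its lock is already held (possibly by
 * this very caller, as when two cookies hash to the same page), fail
 * fast instead of sleeping on it forever. */
static int grab_page_nowait(void)
{
	if (pthread_mutex_trylock(&page_lock) != 0)
		return -EAGAIN;		/* caller retries or falls back */
	return 0;
}

int main(void)
{
	pthread_mutex_lock(&page_lock);	/* we already hold "the page" */
	if (grab_page_nowait() == -EAGAIN)
		puts("collision detected, no deadlock");
	pthread_mutex_unlock(&page_lock);
	return 0;
}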
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 7e12102b29e7..17986c0019d4 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -381,23 +381,28 @@ static void nfs_readdir_page_unlock_and_put(struct page *page) put_page(page); } +static void nfs_readdir_page_init_and_validate(struct page *page, u64 cookie, + u64 change_attr) +{ + if (PageUptodate(page)) { + if (nfs_readdir_page_validate(page, cookie, change_attr)) + return; + nfs_readdir_clear_array(page); + } + nfs_readdir_page_init_array(page, cookie, change_attr); + SetPageUptodate(page); +} + static struct page *nfs_readdir_page_get_locked(struct address_space *mapping, - u64 last_cookie, - u64 change_attr) + u64 cookie, u64 change_attr) { - pgoff_t index = nfs_readdir_page_cookie_hash(last_cookie); + pgoff_t index = nfs_readdir_page_cookie_hash(cookie); struct page *page; page = grab_cache_page(mapping, index); if (!page) return NULL; - if (PageUptodate(page)) { - if (nfs_readdir_page_validate(page, last_cookie, change_attr)) - return page; - nfs_readdir_clear_array(page); - } - nfs_readdir_page_init_array(page, last_cookie, change_attr); - SetPageUptodate(page); + nfs_readdir_page_init_and_validate(page, cookie, change_attr); return page; } @@ -435,11 +440,13 @@ static void nfs_readdir_page_set_eof(struct page *page) static struct page *nfs_readdir_page_get_next(struct address_space *mapping, u64 cookie, u64 change_attr) { + pgoff_t index = nfs_readdir_page_cookie_hash(cookie); struct page *page; - page = nfs_readdir_page_get_locked(mapping, cookie, change_attr); + page = grab_cache_page_nowait(mapping, index); if (!page) return NULL; + nfs_readdir_page_init_and_validate(page, cookie, change_attr); if (nfs_readdir_page_last_cookie(page) != cookie) nfs_readdir_page_reinit_array(page, cookie, change_attr); return page; -- cgit v1.2.3 From e47a62df29a0714cc2d5129516a3618337c84554 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 22 Mar 2022 09:11:44 -0400 Subject: NFS: Fix revalidation of empty readdir pages If the page is empty, we need to check the array->last_cookie instead of the first entry. Add a helper for the cases where we care. Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 17986c0019d4..bac4cf1a308e 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -252,6 +252,11 @@ static void nfs_readdir_page_array_free(struct page *page) } } +static u64 nfs_readdir_array_index_cookie(struct nfs_cache_array *array) +{ + return array->size == 0 ? 
array->last_cookie : array->array[0].cookie; +} + static void nfs_readdir_array_set_eof(struct nfs_cache_array *array) { array->page_is_eof = 1; @@ -369,7 +374,7 @@ static bool nfs_readdir_page_validate(struct page *page, u64 last_cookie, if (array->change_attr != change_attr) ret = false; - if (array->size > 0 && array->array[0].cookie != last_cookie) + if (nfs_readdir_array_index_cookie(array) != last_cookie) ret = false; kunmap_atomic(array); return ret; @@ -480,7 +485,7 @@ static void nfs_readdir_seek_next_array(struct nfs_cache_array *array, desc->cache_entry_index = 0; desc->page_index++; } else - desc->last_cookie = array->array[0].cookie; + desc->last_cookie = nfs_readdir_array_index_cookie(array); } static void nfs_readdir_rewind_search(struct nfs_readdir_descriptor *desc) -- cgit v1.2.3 From 89f42494f92f448747bd8a7ab1ae8b5d5520577d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 16 Mar 2022 19:10:43 -0400 Subject: SUNRPC: Don't call connect() more than once on a TCP socket Avoid socket state races due to repeated calls to ->connect() using the same socket. If connect() returns 0 due to the connection having completed, but we are in fact in a closing state, then we may leave the XPRT_CONNECTING flag set on the transport. Reported-by: Enrico Scholz Fixes: 3be232f11a3c ("SUNRPC: Prevent immediate close+reconnect") Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xprtsock.h | 1 + net/sunrpc/xprtsock.c | 22 ++++++++++++---------- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 8c2a712cb242..689062afdd61 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -89,5 +89,6 @@ struct sock_xprt { #define XPRT_SOCK_WAKE_WRITE (5) #define XPRT_SOCK_WAKE_PENDING (6) #define XPRT_SOCK_WAKE_DISCONNECT (7) +#define XPRT_SOCK_CONNECT_SENT (8) #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 7e39f87cde2d..8f8a03c3315a 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2235,10 +2235,15 @@ static void xs_tcp_setup_socket(struct work_struct *work) if (atomic_read(&xprt->swapper)) current->flags |= PF_MEMALLOC; - if (!sock) { - sock = xs_create_sock(xprt, transport, - xs_addr(xprt)->sa_family, SOCK_STREAM, - IPPROTO_TCP, true); + + if (xprt_connected(xprt)) + goto out; + if (test_and_clear_bit(XPRT_SOCK_CONNECT_SENT, + &transport->sock_state) || + !sock) { + xs_reset_transport(transport); + sock = xs_create_sock(xprt, transport, xs_addr(xprt)->sa_family, + SOCK_STREAM, IPPROTO_TCP, true); if (IS_ERR(sock)) { xprt_wake_pending_tasks(xprt, PTR_ERR(sock)); goto out; @@ -2262,6 +2267,7 @@ static void xs_tcp_setup_socket(struct work_struct *work) fallthrough; case -EINPROGRESS: /* SYN_SENT! 
*/ + set_bit(XPRT_SOCK_CONNECT_SENT, &transport->sock_state); if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; fallthrough; @@ -2323,13 +2329,9 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport)); - if (transport->sock != NULL && !xprt_connecting(xprt)) { + if (transport->sock != NULL) { dprintk("RPC: xs_connect delayed xprt %p for %lu " - "seconds\n", - xprt, xprt->reestablish_timeout / HZ); - - /* Start by resetting any existing state */ - xs_reset_transport(transport); + "seconds\n", xprt, xprt->reestablish_timeout / HZ); delay = xprt_reconnect_delay(xprt); xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO); -- cgit v1.2.3 From 3b21f757c309c84a23a26d8cab20b743e0719705 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 16 Mar 2022 19:18:25 -0400 Subject: SUNRPC: Only save the TCP source port after the connection is complete Since the RPC client uses a non-blocking connect(), we do not expect to see it return '0' under normal circumstances. Signed-off-by: Trond Myklebust --- net/sunrpc/xprtsock.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 8f8a03c3315a..d2bf3b49dbf4 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -58,6 +58,7 @@ #include "sunrpc.h" static void xs_close(struct rpc_xprt *xprt); +static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock); static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt, struct socket *sock); @@ -1025,6 +1026,8 @@ static int xs_tcp_send_request(struct rpc_rqst *req) if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state)) xs_tcp_set_socket_timeouts(xprt, transport->sock); + xs_set_srcport(transport, transport->sock); + /* Continue transmitting the packet/record. We must be careful * to cope with writespace callbacks arriving _after_ we have * called sendmsg(). */ @@ -2263,8 +2266,6 @@ static void xs_tcp_setup_socket(struct work_struct *work) sock->sk->sk_state); switch (status) { case 0: - xs_set_srcport(transport, sock); - fallthrough; case -EINPROGRESS: /* SYN_SENT! */ set_bit(XPRT_SOCK_CONNECT_SENT, &transport->sock_state); -- cgit v1.2.3 From 7496b59f588dd52886fdbac7633608097543a0a5 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 14 Mar 2022 21:02:10 -0400 Subject: SUNRPC: Fix socket waits for write buffer space The socket layer requires that we use the socket lock to protect changes to the sock->sk_write_pending field and others. 
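The rule behind this change is the usual one for sleeping on a condition: the buffer-space check and the waiter bookkeeping must happen under the same lock the waker takes, otherwise a wakeup can slip in between the check and the sleep. A small userspace sketch of that shape, with invented names and no relation to the xprtsock code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t space_cond = PTHREAD_COND_INITIALIZER;
static bool writeable;
static int write_pending;		/* analogue of sk->sk_write_pending */

static void wait_for_space(void)
{
	pthread_mutex_lock(&sock_lock);
	while (!writeable) {
		write_pending++;	/* only ever touched under sock_lock */
		pthread_cond_wait(&space_cond, &sock_lock);
		write_pending--;
	}
	pthread_mutex_unlock(&sock_lock);
}

static void space_available(void)	/* the waker takes the same lock */
{
	pthread_mutex_lock(&sock_lock);
	writeable = true;
	pthread_cond_broadcast(&space_cond);
	pthread_mutex_unlock(&sock_lock);
}

int main(void)
{
	space_available();
	wait_for_space();
	printf("write_pending back to %d\n", write_pending);
	return 0;
}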
Reported-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprtsock.c | 54 +++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 39 insertions(+), 15 deletions(-) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index d2bf3b49dbf4..68eee352d69a 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -764,12 +764,12 @@ xs_stream_start_connect(struct sock_xprt *transport) /** * xs_nospace - handle transmit was incomplete * @req: pointer to RPC request + * @transport: pointer to struct sock_xprt * */ -static int xs_nospace(struct rpc_rqst *req) +static int xs_nospace(struct rpc_rqst *req, struct sock_xprt *transport) { - struct rpc_xprt *xprt = req->rq_xprt; - struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); + struct rpc_xprt *xprt = &transport->xprt; struct sock *sk = transport->inet; int ret = -EAGAIN; @@ -780,25 +780,49 @@ static int xs_nospace(struct rpc_rqst *req) /* Don't race with disconnect */ if (xprt_connected(xprt)) { + struct socket_wq *wq; + + rcu_read_lock(); + wq = rcu_dereference(sk->sk_wq); + set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags); + rcu_read_unlock(); + /* wait for more buffer space */ + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); sk->sk_write_pending++; xprt_wait_for_buffer_space(xprt); } else ret = -ENOTCONN; spin_unlock(&xprt->transport_lock); + return ret; +} - /* Race breaker in case memory is freed before above code is called */ - if (ret == -EAGAIN) { - struct socket_wq *wq; +static int xs_sock_nospace(struct rpc_rqst *req) +{ + struct sock_xprt *transport = + container_of(req->rq_xprt, struct sock_xprt, xprt); + struct sock *sk = transport->inet; + int ret = -EAGAIN; - rcu_read_lock(); - wq = rcu_dereference(sk->sk_wq); - set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags); - rcu_read_unlock(); + lock_sock(sk); + if (!sock_writeable(sk)) + ret = xs_nospace(req, transport); + release_sock(sk); + return ret; +} - sk->sk_write_space(sk); - } +static int xs_stream_nospace(struct rpc_rqst *req) +{ + struct sock_xprt *transport = + container_of(req->rq_xprt, struct sock_xprt, xprt); + struct sock *sk = transport->inet; + int ret = -EAGAIN; + + lock_sock(sk); + if (!sk_stream_memory_free(sk)) + ret = xs_nospace(req, transport); + release_sock(sk); return ret; } @@ -888,7 +912,7 @@ static int xs_local_send_request(struct rpc_rqst *req) case -ENOBUFS: break; case -EAGAIN: - status = xs_nospace(req); + status = xs_stream_nospace(req); break; default: dprintk("RPC: sendmsg returned unrecognized error %d\n", @@ -964,7 +988,7 @@ process_status: /* Should we call xs_close() here? */ break; case -EAGAIN: - status = xs_nospace(req); + status = xs_sock_nospace(req); break; case -ENETUNREACH: case -ENOBUFS: @@ -1086,7 +1110,7 @@ static int xs_tcp_send_request(struct rpc_rqst *req) /* Should we call xs_close() here? */ break; case -EAGAIN: - status = xs_nospace(req); + status = xs_stream_nospace(req); break; case -ECONNRESET: case -ECONNREFUSED: -- cgit v1.2.3 From 2790a624d43084de590884934969e19c7a82316a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 15 Mar 2022 08:12:40 -0400 Subject: SUNRPC: Replace internal use of SOCKWQ_ASYNC_NOSPACE The socket's SOCKWQ_ASYNC_NOSPACE can be cleared by various actors in the socket layer, so replace it with our own flag in the transport sock_state field. 
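The point of a privately owned flag is that only this transport sets it and only this transport clears it, so a write-space callback can tell reliably whether anyone was actually waiting. A tiny C11 sketch of the set and test-and-clear pattern (the bit value and names are invented for illustration):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint sock_state;
#define NOSPACE_BIT (1u << 9)	/* invented value for the sketch */

static void mark_nospace(void)
{
	atomic_fetch_or(&sock_state, NOSPACE_BIT);
}

/* returns 1 only for the one wakeup that consumes the flag */
static int consume_write_space(void)
{
	unsigned int old = atomic_fetch_and(&sock_state, ~NOSPACE_BIT);
	return (old & NOSPACE_BIT) != 0;
}

int main(void)
{
	mark_nospace();
	printf("first wakeup acted on: %d\n", consume_write_space());
	printf("second wakeup ignored: %d\n", consume_write_space());
	return 0;
}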
Reported-by: Chuck Lever Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xprtsock.h | 1 + net/sunrpc/xprtsock.c | 22 ++++------------------ 2 files changed, 5 insertions(+), 18 deletions(-) diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 689062afdd61..3eb0079669c5 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -90,5 +90,6 @@ struct sock_xprt { #define XPRT_SOCK_WAKE_PENDING (6) #define XPRT_SOCK_WAKE_DISCONNECT (7) #define XPRT_SOCK_CONNECT_SENT (8) +#define XPRT_SOCK_NOSPACE (9) #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 68eee352d69a..2450b31b807a 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -780,14 +780,8 @@ static int xs_nospace(struct rpc_rqst *req, struct sock_xprt *transport) /* Don't race with disconnect */ if (xprt_connected(xprt)) { - struct socket_wq *wq; - - rcu_read_lock(); - wq = rcu_dereference(sk->sk_wq); - set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags); - rcu_read_unlock(); - /* wait for more buffer space */ + set_bit(XPRT_SOCK_NOSPACE, &transport->sock_state); set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); sk->sk_write_pending++; xprt_wait_for_buffer_space(xprt); @@ -1151,6 +1145,7 @@ static void xs_sock_reset_state_flags(struct rpc_xprt *xprt) clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state); clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state); clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state); + clear_bit(XPRT_SOCK_NOSPACE, &transport->sock_state); } static void xs_run_error_worker(struct sock_xprt *transport, unsigned int nr) @@ -1497,7 +1492,6 @@ static void xs_tcp_state_change(struct sock *sk) static void xs_write_space(struct sock *sk) { - struct socket_wq *wq; struct sock_xprt *transport; struct rpc_xprt *xprt; @@ -1508,15 +1502,10 @@ static void xs_write_space(struct sock *sk) if (unlikely(!(xprt = xprt_from_sock(sk)))) return; transport = container_of(xprt, struct sock_xprt, xprt); - rcu_read_lock(); - wq = rcu_dereference(sk->sk_wq); - if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0) - goto out; - + if (!test_and_clear_bit(XPRT_SOCK_NOSPACE, &transport->sock_state)) + return; xs_run_error_worker(transport, XPRT_SOCK_WAKE_WRITE); sk->sk_write_pending--; -out: - rcu_read_unlock(); } /** @@ -1857,7 +1846,6 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt, sk->sk_user_data = xprt; sk->sk_data_ready = xs_data_ready; sk->sk_write_space = xs_udp_write_space; - sock_set_flag(sk, SOCK_FASYNC); sk->sk_error_report = xs_error_report; xprt_clear_connected(xprt); @@ -2051,7 +2039,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) sk->sk_user_data = xprt; sk->sk_data_ready = xs_data_ready; sk->sk_write_space = xs_udp_write_space; - sock_set_flag(sk, SOCK_FASYNC); xprt_set_connected(xprt); @@ -2218,7 +2205,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) sk->sk_data_ready = xs_data_ready; sk->sk_state_change = xs_tcp_state_change; sk->sk_write_space = xs_tcp_write_space; - sock_set_flag(sk, SOCK_FASYNC); sk->sk_error_report = xs_error_report; /* socket options */ -- cgit v1.2.3 From d0afde5fc6fb13531e2434fc4b6a65f131671f68 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 14 Mar 2022 23:05:07 -0400 Subject: SUNRPC: Improve accuracy of socket ENOBUFS determination The current code checks for whether or not the socket is in a writeable state after we get an EAGAIN. 
That is racy, since we've dropped the socket lock, so the amount of free buffer may have changed. Instead, let's check whether the socket is writeable before we try to write to it. If that was the case, we do expect the message to be at least partially sent unless we're in a low memory situation. Signed-off-by: Trond Myklebust --- net/sunrpc/xprtsock.c | 53 +++++++++++++++++---------------------------------- 1 file changed, 18 insertions(+), 35 deletions(-) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 2450b31b807a..8909c768fe71 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -806,13 +806,15 @@ static int xs_sock_nospace(struct rpc_rqst *req) return ret; } -static int xs_stream_nospace(struct rpc_rqst *req) +static int xs_stream_nospace(struct rpc_rqst *req, bool vm_wait) { struct sock_xprt *transport = container_of(req->rq_xprt, struct sock_xprt, xprt); struct sock *sk = transport->inet; int ret = -EAGAIN; + if (vm_wait) + return -ENOBUFS; lock_sock(sk); if (!sk_stream_memory_free(sk)) ret = xs_nospace(req, transport); @@ -870,6 +872,7 @@ static int xs_local_send_request(struct rpc_rqst *req) struct msghdr msg = { .msg_flags = XS_SENDMSG_FLAGS, }; + bool vm_wait; unsigned int sent; int status; @@ -882,15 +885,14 @@ static int xs_local_send_request(struct rpc_rqst *req) xs_pktdump("packet data:", req->rq_svec->iov_base, req->rq_svec->iov_len); + vm_wait = sk_stream_is_writeable(transport->inet) ? true : false; + req->rq_xtime = ktime_get(); status = xprt_sock_sendmsg(transport->sock, &msg, xdr, transport->xmit.offset, rm, &sent); dprintk("RPC: %s(%u) = %d\n", __func__, xdr->len - transport->xmit.offset, status); - if (status == -EAGAIN && sock_writeable(transport->inet)) - status = -ENOBUFS; - if (likely(sent > 0) || status == 0) { transport->xmit.offset += sent; req->rq_bytes_sent = transport->xmit.offset; @@ -900,13 +902,12 @@ static int xs_local_send_request(struct rpc_rqst *req) return 0; } status = -EAGAIN; + vm_wait = false; } switch (status) { - case -ENOBUFS: - break; case -EAGAIN: - status = xs_stream_nospace(req); + status = xs_stream_nospace(req, vm_wait); break; default: dprintk("RPC: sendmsg returned unrecognized error %d\n", @@ -1024,7 +1025,7 @@ static int xs_tcp_send_request(struct rpc_rqst *req) struct msghdr msg = { .msg_flags = XS_SENDMSG_FLAGS, }; - bool vm_wait = false; + bool vm_wait; unsigned int sent; int status; @@ -1051,7 +1052,10 @@ static int xs_tcp_send_request(struct rpc_rqst *req) * called sendmsg(). */ req->rq_xtime = ktime_get(); tcp_sock_set_cork(transport->inet, true); - while (1) { + + vm_wait = sk_stream_is_writeable(transport->inet) ? true : false; + + do { status = xprt_sock_sendmsg(transport->sock, &msg, xdr, transport->xmit.offset, rm, &sent); @@ -1072,31 +1076,10 @@ static int xs_tcp_send_request(struct rpc_rqst *req) WARN_ON_ONCE(sent == 0 && status == 0); - if (status == -EAGAIN ) { - /* - * Return EAGAIN if we're sure we're hitting the - * socket send buffer limits. - */ - if (test_bit(SOCK_NOSPACE, &transport->sock->flags)) - break; - /* - * Did we hit a memory allocation failure? 
- */ - if (sent == 0) { - status = -ENOBUFS; - if (vm_wait) - break; - /* Retry, knowing now that we're below the - * socket send buffer limit - */ - vm_wait = true; - } - continue; - } - if (status < 0) - break; - vm_wait = false; - } + if (sent > 0) + vm_wait = false; + + } while (status == 0); switch (status) { case -ENOTSOCK: @@ -1104,7 +1087,7 @@ static int xs_tcp_send_request(struct rpc_rqst *req) /* Should we call xs_close() here? */ break; case -EAGAIN: - status = xs_stream_nospace(req); + status = xs_stream_nospace(req, vm_wait); break; case -ECONNRESET: case -ECONNREFUSED: -- cgit v1.2.3 From 33e5c765bc1ea5e06ea7603637f14d727e6fcdf3 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 14 Mar 2022 22:02:22 -0400 Subject: NFS: Fix memory allocation in rpc_malloc() When in a low memory situation, we do want rpciod to kick off direct reclaim in the case where that helps, however we don't want it looping forever in mempool_alloc(). So first try allocating from the slab using GFP_KERNEL | __GFP_NORETRY, and then fall back to a GFP_NOWAIT allocation from the mempool. Ditto for rpc_alloc_task() Signed-off-by: Trond Myklebust --- include/linux/sunrpc/sched.h | 1 + net/sunrpc/sched.c | 21 ++++++++++++++------- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 56710f8056d3..1d7a3e51b795 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -262,6 +262,7 @@ void rpc_destroy_mempool(void); extern struct workqueue_struct *rpciod_workqueue; extern struct workqueue_struct *xprtiod_workqueue; void rpc_prepare_task(struct rpc_task *task); +gfp_t rpc_task_gfp_mask(void); static inline int rpc_wait_for_completion_task(struct rpc_task *task) { diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 7c8f87ebdbc0..d59a033820be 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -57,6 +57,13 @@ struct workqueue_struct *rpciod_workqueue __read_mostly; struct workqueue_struct *xprtiod_workqueue __read_mostly; EXPORT_SYMBOL_GPL(xprtiod_workqueue); +gfp_t rpc_task_gfp_mask(void) +{ + if (current->flags & PF_WQ_WORKER) + return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; + return GFP_KERNEL; +} + unsigned long rpc_task_timeout(const struct rpc_task *task) { @@ -1030,15 +1037,15 @@ int rpc_malloc(struct rpc_task *task) struct rpc_rqst *rqst = task->tk_rqstp; size_t size = rqst->rq_callsize + rqst->rq_rcvsize; struct rpc_buffer *buf; - gfp_t gfp = GFP_KERNEL; - - if (RPC_IS_ASYNC(task)) - gfp = GFP_NOWAIT | __GFP_NOWARN; + gfp_t gfp = rpc_task_gfp_mask(); size += sizeof(struct rpc_buffer); - if (size <= RPC_BUFFER_MAXSIZE) - buf = mempool_alloc(rpc_buffer_mempool, gfp); - else + if (size <= RPC_BUFFER_MAXSIZE) { + buf = kmem_cache_alloc(rpc_buffer_slabp, gfp); + /* Reach for the mempool if dynamic allocation fails */ + if (!buf && RPC_IS_ASYNC(task)) + buf = mempool_alloc(rpc_buffer_mempool, GFP_NOWAIT); + } else buf = kmalloc(size, gfp); if (!buf) -- cgit v1.2.3 From 910ad38697d95bd32f45ba70fd6952f6c2956f28 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 21 Mar 2022 17:37:01 -0400 Subject: NFS: Fix memory allocation in rpc_alloc_task() As for rpc_malloc(), we first try allocating from the slab, then fall back to a non-waiting allocation from the mempool. 
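Both of these changes follow the same two-tier shape: an ordinary allocation that is allowed to fail quickly, with a small pre-reserved pool as the non-blocking fallback. A rough userspace analogue of that shape only, not kernel code, with the reserve modelled as a single static buffer:

#include <stdio.h>
#include <stdlib.h>

static unsigned char emergency_pool[4096];	/* stands in for the mempool */
static int emergency_in_use;

static void *buf_alloc(size_t size)
{
	void *p = malloc(size);			/* "slab, may fail fast" step */
	if (p)
		return p;
	if (!emergency_in_use && size <= sizeof(emergency_pool)) {
		emergency_in_use = 1;		/* "mempool, no waiting" step */
		return emergency_pool;
	}
	return NULL;				/* caller retries later */
}

static void buf_free(void *p)
{
	if (p == emergency_pool)
		emergency_in_use = 0;
	else
		free(p);
}

int main(void)
{
	void *p = buf_alloc(512);
	printf("allocated: %s\n", p ? "yes" : "no");
	buf_free(p);
	return 0;
}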
Signed-off-by: Trond Myklebust --- net/sunrpc/sched.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index d59a033820be..b258b87a3ec2 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -1108,10 +1108,14 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta rpc_init_task_statistics(task); } -static struct rpc_task * -rpc_alloc_task(void) +static struct rpc_task *rpc_alloc_task(void) { - return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_KERNEL); + struct rpc_task *task; + + task = kmem_cache_alloc(rpc_task_slabp, rpc_task_gfp_mask()); + if (task) + return task; + return mempool_alloc(rpc_task_mempool, GFP_NOWAIT); } /* -- cgit v1.2.3 From 059ee82b6462028ebace435bc94f5b082be0632a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 21 Mar 2022 17:46:30 -0400 Subject: SUNRPC: Fix unx_lookup_cred() allocation Default to the same mempool allocation strategy as for rpc_malloc(). Signed-off-by: Trond Myklebust --- net/sunrpc/auth_unix.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index c629d366030e..1e091d3fa607 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c @@ -40,17 +40,19 @@ unx_destroy(struct rpc_auth *auth) /* * Lookup AUTH_UNIX creds for current process */ -static struct rpc_cred * -unx_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) +static struct rpc_cred *unx_lookup_cred(struct rpc_auth *auth, + struct auth_cred *acred, int flags) { - gfp_t gfp = GFP_KERNEL; struct rpc_cred *ret; - if (flags & RPCAUTH_LOOKUP_ASYNC) - gfp = GFP_NOWAIT | __GFP_NOWARN; - ret = mempool_alloc(unix_pool, gfp); - if (!ret) - return ERR_PTR(-ENOMEM); + ret = kmalloc(sizeof(*ret), rpc_task_gfp_mask()); + if (!ret) { + if (!(flags & RPCAUTH_LOOKUP_ASYNC)) + return ERR_PTR(-ENOMEM); + ret = mempool_alloc(unix_pool, GFP_NOWAIT); + if (!ret) + return ERR_PTR(-ENOMEM); + } rpcauth_init_cred(ret, acred, auth, &unix_credops); ret->cr_flags = 1UL << RPCAUTH_CRED_UPTODATE; return ret; -- cgit v1.2.3 From b2648015d4521de21ed3c9f48f718e023860b8c1 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 21 Mar 2022 13:20:16 -0400 Subject: SUNRPC: Make the rpciod and xprtiod slab allocation modes consistent Make sure that rpciod and xprtiod are always using the same slab allocation modes. 
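The consistency comes from routing every caller through one helper that decides the allocation mode, rather than hard-coding GFP_KERNEL in some call sites and a work-queue-safe mask in others. As a generic userspace sketch of that "one helper decides" idea (invented names, not the SUNRPC helper itself):

#include <stdbool.h>
#include <stdio.h>

enum alloc_mode { ALLOC_MAY_BLOCK, ALLOC_FAIL_FAST };

static _Thread_local bool on_worker_thread;

/* every allocation site asks this helper instead of picking a mode */
static enum alloc_mode task_alloc_mode(void)
{
	return on_worker_thread ? ALLOC_FAIL_FAST : ALLOC_MAY_BLOCK;
}

int main(void)
{
	on_worker_thread = true;
	printf("worker context -> %s\n",
	       task_alloc_mode() == ALLOC_FAIL_FAST ? "fail fast" : "may block");
	return 0;
}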
Signed-off-by: Trond Myklebust --- net/sunrpc/backchannel_rqst.c | 8 ++++---- net/sunrpc/rpcb_clnt.c | 4 ++-- net/sunrpc/socklib.c | 3 ++- net/sunrpc/xprt.c | 5 +---- net/sunrpc/xprtsock.c | 11 ++++++----- 5 files changed, 15 insertions(+), 16 deletions(-) diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index 22a2c235abf1..5a6b61dcdf2d 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c @@ -75,9 +75,9 @@ static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags) return 0; } -static -struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags) +static struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt) { + gfp_t gfp_flags = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; struct rpc_rqst *req; /* Pre-allocate one backchannel rpc_rqst */ @@ -154,7 +154,7 @@ int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs) INIT_LIST_HEAD(&tmp_list); for (i = 0; i < min_reqs; i++) { /* Pre-allocate one backchannel rpc_rqst */ - req = xprt_alloc_bc_req(xprt, GFP_KERNEL); + req = xprt_alloc_bc_req(xprt); if (req == NULL) { printk(KERN_ERR "Failed to create bc rpc_rqst\n"); goto out_free; @@ -343,7 +343,7 @@ found: break; } else if (req) break; - new = xprt_alloc_bc_req(xprt, GFP_KERNEL); + new = xprt_alloc_bc_req(xprt); } while (new); return req; } diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 0fdeb8666bfd..5a8e6d46809a 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -714,7 +714,7 @@ void rpcb_getport_async(struct rpc_task *task) goto bailout_nofree; } - map = kzalloc(sizeof(struct rpcbind_args), GFP_KERNEL); + map = kzalloc(sizeof(struct rpcbind_args), rpc_task_gfp_mask()); if (!map) { status = -ENOMEM; goto bailout_release_client; @@ -730,7 +730,7 @@ void rpcb_getport_async(struct rpc_task *task) case RPCBVERS_4: case RPCBVERS_3: map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID]; - map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL); + map->r_addr = rpc_sockaddr2uaddr(sap, rpc_task_gfp_mask()); if (!map->r_addr) { status = -ENOMEM; goto bailout_free_args; diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c index d52313af82bc..05b38bf68316 100644 --- a/net/sunrpc/socklib.c +++ b/net/sunrpc/socklib.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -222,7 +223,7 @@ static int xprt_send_pagedata(struct socket *sock, struct msghdr *msg, { int err; - err = xdr_alloc_bvec(xdr, GFP_KERNEL); + err = xdr_alloc_bvec(xdr, rpc_task_gfp_mask()); if (err < 0) return err; diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index bbe913121f43..744c6c1d536f 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1679,15 +1679,12 @@ out: static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt) { struct rpc_rqst *req = ERR_PTR(-EAGAIN); - gfp_t gfp_mask = GFP_KERNEL; if (xprt->num_reqs >= xprt->max_reqs) goto out; ++xprt->num_reqs; spin_unlock(&xprt->reserve_lock); - if (current->flags & PF_WQ_WORKER) - gfp_mask |= __GFP_NORETRY | __GFP_NOWARN; - req = kzalloc(sizeof(*req), gfp_mask); + req = kzalloc(sizeof(*req), rpc_task_gfp_mask()); spin_lock(&xprt->reserve_lock); if (req != NULL) goto out; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 8909c768fe71..b52eaa8a0cda 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -428,9 +428,9 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, offset += want; } - want = xs_alloc_sparse_pages(buf, - min_t(size_t, count - offset, buf->page_len), - GFP_KERNEL); 
+ want = xs_alloc_sparse_pages( + buf, min_t(size_t, count - offset, buf->page_len), + GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); if (seek < want) { ret = xs_read_bvec(sock, msg, flags, buf->bvec, xdr_buf_pagecount(buf), @@ -826,7 +826,8 @@ static void xs_stream_prepare_request(struct rpc_rqst *req) { xdr_free_bvec(&req->rq_rcv_buf); - req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_KERNEL); + req->rq_task->tk_status = xdr_alloc_bvec( + &req->rq_rcv_buf, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); } /* @@ -2487,7 +2488,7 @@ static int bc_malloc(struct rpc_task *task) return -EINVAL; } - page = alloc_page(GFP_KERNEL); + page = alloc_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); if (!page) return -ENOMEM; -- cgit v1.2.3 From 515dcdcd48736576c6f5c197814da6f81c60a21e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 21 Mar 2022 12:34:19 -0400 Subject: NFS: nfsiod should not block forever in mempool_alloc() The concern is that since nfsiod is sometimes required to kick off a commit, it can get locked up waiting forever in mempool_alloc() instead of failing gracefully and leaving the commit until later. Try to allocate from the slab first, with GFP_KERNEL | __GFP_NORETRY, then fall back to a non-blocking attempt to allocate from the memory pool. Signed-off-by: Trond Myklebust --- fs/nfs/internal.h | 7 +++++++ fs/nfs/pnfs_nfs.c | 8 ++++++-- fs/nfs/write.c | 24 +++++++++--------------- include/linux/nfs_fs.h | 2 +- 4 files changed, 23 insertions(+), 18 deletions(-) diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 194840a97e3a..57b0497105c8 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -587,6 +587,13 @@ nfs_write_match_verf(const struct nfs_writeverf *verf, !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier); } +static inline gfp_t nfs_io_gfp_mask(void) +{ + if (current->flags & PF_WQ_WORKER) + return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; + return GFP_KERNEL; +} + /* unlink.c */ extern struct rpc_task * nfs_async_rename(struct inode *old_dir, struct inode *new_dir, diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 316f68f96e57..657c242a18ff 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -419,7 +419,7 @@ static struct nfs_commit_data * pnfs_bucket_fetch_commitdata(struct pnfs_commit_bucket *bucket, struct nfs_commit_info *cinfo) { - struct nfs_commit_data *data = nfs_commitdata_alloc(false); + struct nfs_commit_data *data = nfs_commitdata_alloc(); if (!data) return NULL; @@ -515,7 +515,11 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages, unsigned int nreq = 0; if (!list_empty(mds_pages)) { - data = nfs_commitdata_alloc(true); + data = nfs_commitdata_alloc(); + if (!data) { + nfs_retry_commit(mds_pages, NULL, cinfo, -1); + return -ENOMEM; + } data->ds_commit_index = -1; list_splice_init(mds_pages, &data->pages); list_add_tail(&data->list, &list); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 599a82406d38..ef47e3700e4b 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -70,27 +70,17 @@ static mempool_t *nfs_wdata_mempool; static struct kmem_cache *nfs_cdata_cachep; static mempool_t *nfs_commit_mempool; -struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail) +struct nfs_commit_data *nfs_commitdata_alloc(void) { struct nfs_commit_data *p; - if (never_fail) - p = mempool_alloc(nfs_commit_mempool, GFP_NOIO); - else { - /* It is OK to do some reclaim, not no safe to wait - * for anything to be returned to the pool. 
- * mempool_alloc() cannot handle that particular combination, - * so we need two separate attempts. - */ + p = kmem_cache_zalloc(nfs_cdata_cachep, nfs_io_gfp_mask()); + if (!p) { p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT); - if (!p) - p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO | - __GFP_NOWARN | __GFP_NORETRY); if (!p) return NULL; + memset(p, 0, sizeof(*p)); } - - memset(p, 0, sizeof(*p)); INIT_LIST_HEAD(&p->pages); return p; } @@ -1826,7 +1816,11 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how, if (list_empty(head)) return 0; - data = nfs_commitdata_alloc(true); + data = nfs_commitdata_alloc(); + if (!data) { + nfs_retry_commit(head, NULL, cinfo, -1); + return -ENOMEM; + } /* Set up the argument struct */ nfs_init_commit(data, head, NULL, cinfo); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index c47c448befc8..db305abafc9e 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -580,7 +580,7 @@ extern int nfs_wb_all(struct inode *inode); extern int nfs_wb_page(struct inode *inode, struct page *page); extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); extern int nfs_commit_inode(struct inode *, int); -extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail); +extern struct nfs_commit_data *nfs_commitdata_alloc(void); extern void nfs_commit_free(struct nfs_commit_data *data); bool nfs_commit_end(struct nfs_mds_commit_info *cinfo); -- cgit v1.2.3 From 0bae835b63c53f86cdc524f5962e39409585b22c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 21 Mar 2022 13:48:36 -0400 Subject: NFS: Avoid writeback threads getting stuck in mempool_alloc() In a low memory situation, allow the NFS writeback code to fail without getting stuck in infinite loops in mempool_alloc(). 
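Since the write header allocation below can now return NULL, callers must treat failure as "back off and retry later" instead of assuming mempool_alloc() eventually succeeds. A hedged caller-side fragment (the error propagation shown is illustrative, not lifted from the patch; 'desc' stands for the pageio descriptor of the calling context):

	struct nfs_pgio_header *hdr = nfs_writehdr_alloc();

	if (!hdr) {
		/* Low memory: fail this pass so writeback can retry later,
		 * rather than blocking a worker thread in the allocator. */
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
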
Signed-off-by: Trond Myklebust --- fs/nfs/pagelist.c | 10 +++++----- fs/nfs/write.c | 10 ++++++++-- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index ad7f83dc9a2d..3156db526cc4 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -90,10 +90,10 @@ void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos) } } -static inline struct nfs_page * -nfs_page_alloc(void) +static inline struct nfs_page *nfs_page_alloc(void) { - struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL); + struct nfs_page *p = + kmem_cache_zalloc(nfs_page_cachep, nfs_io_gfp_mask()); if (p) INIT_LIST_HEAD(&p->wb_list); return p; @@ -892,7 +892,7 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc, struct nfs_commit_info cinfo; struct nfs_page_array *pg_array = &hdr->page_array; unsigned int pagecount, pageused; - gfp_t gfp_flags = GFP_KERNEL; + gfp_t gfp_flags = nfs_io_gfp_mask(); pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count); pg_array->npages = pagecount; @@ -979,7 +979,7 @@ nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc, desc->pg_mirrors_dynamic = NULL; if (mirror_count == 1) return desc->pg_mirrors_static; - ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_KERNEL); + ret = kmalloc_array(mirror_count, sizeof(*ret), nfs_io_gfp_mask()); if (ret != NULL) { for (i = 0; i < mirror_count; i++) nfs_pageio_mirror_init(&ret[i], desc->pg_bsize); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index ef47e3700e4b..e864ac836237 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -94,9 +94,15 @@ EXPORT_SYMBOL_GPL(nfs_commit_free); static struct nfs_pgio_header *nfs_writehdr_alloc(void) { - struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_KERNEL); + struct nfs_pgio_header *p; - memset(p, 0, sizeof(*p)); + p = kmem_cache_zalloc(nfs_wdata_cachep, nfs_io_gfp_mask()); + if (!p) { + p = mempool_alloc(nfs_wdata_mempool, GFP_NOWAIT); + if (!p) + return NULL; + memset(p, 0, sizeof(*p)); + } p->rw_mode = FMODE_WRITE; return p; } -- cgit v1.2.3 From 63d8a41b1dbf24dfc2480cb28236e2f6844d89b9 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 21 Mar 2022 15:32:09 -0400 Subject: NFSv4/pnfs: Ensure pNFS allocation modes are consistent with nfsiod Ensure that pNFS allocations that can be called from rpciod/nfsiod callback can fail in low memory mode, so that the threads don't block and loop forever. 
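Each GFP_KERNEL (and one GFP_NOFS) call site below becomes nfs_io_gfp_mask(), the NFS-side counterpart of rpc_task_gfp_mask(). An illustrative fragment of the effect at one converted call site (pnfs_send_layoutreturn()):

	/* nfs_io_gfp_mask() (fs/nfs/internal.h, added earlier in this series):
	 *   - on nfsiod/rpciod (PF_WQ_WORKER set): GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN
	 *   - in ordinary process context:         GFP_KERNEL
	 */
	lrp = kzalloc(sizeof(*lrp), nfs_io_gfp_mask());
	if (unlikely(!lrp))
		status = -ENOMEM;	/* the caller already handles this */
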
Signed-off-by: Trond Myklebust --- fs/nfs/nfs42proc.c | 2 +- fs/nfs/pnfs.c | 39 +++++++++++++++++---------------------- 2 files changed, 18 insertions(+), 23 deletions(-) diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 882bf84484ac..b841e267b764 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -1017,7 +1017,7 @@ int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg, return -EOPNOTSUPP; if (n > NFS42_LAYOUTERROR_MAX) return -EINVAL; - data = nfs42_alloc_layouterror_data(lseg, GFP_KERNEL); + data = nfs42_alloc_layouterror_data(lseg, nfs_io_gfp_mask()); if (!data) return -ENOMEM; for (i = 0; i < n; i++) { diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index f089e11fd001..de318bb5d349 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1233,7 +1233,7 @@ pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, int status = 0; *pcred = NULL; - lrp = kzalloc(sizeof(*lrp), GFP_KERNEL); + lrp = kzalloc(sizeof(*lrp), nfs_io_gfp_mask()); if (unlikely(lrp == NULL)) { status = -ENOMEM; spin_lock(&ino->i_lock); @@ -2206,7 +2206,7 @@ _pnfs_grab_empty_layout(struct inode *ino, struct nfs_open_context *ctx) struct pnfs_layout_hdr *lo; spin_lock(&ino->i_lock); - lo = pnfs_find_alloc_layout(ino, ctx, GFP_KERNEL); + lo = pnfs_find_alloc_layout(ino, ctx, nfs_io_gfp_mask()); if (!lo) goto out_unlock; if (!test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) @@ -2249,8 +2249,8 @@ static void _lgopen_prepare_attached(struct nfs4_opendata *data, lo = _pnfs_grab_empty_layout(ino, ctx); if (!lo) return; - lgp = pnfs_alloc_init_layoutget_args(ino, ctx, ¤t_stateid, - &rng, GFP_KERNEL); + lgp = pnfs_alloc_init_layoutget_args(ino, ctx, ¤t_stateid, &rng, + nfs_io_gfp_mask()); if (!lgp) { pnfs_clear_first_layoutget(lo); nfs_layoutget_end(lo); @@ -2275,8 +2275,8 @@ static void _lgopen_prepare_floating(struct nfs4_opendata *data, }; struct nfs4_layoutget *lgp; - lgp = pnfs_alloc_init_layoutget_args(ino, ctx, ¤t_stateid, - &rng, GFP_KERNEL); + lgp = pnfs_alloc_init_layoutget_args(ino, ctx, ¤t_stateid, &rng, + nfs_io_gfp_mask()); if (!lgp) return; data->lgp = lgp; @@ -2691,13 +2691,11 @@ pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *r else rd_size = nfs_dreq_bytes_left(pgio->pg_dreq); - pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, - nfs_req_openctx(req), - req_offset(req), - rd_size, - IOMODE_READ, - false, - GFP_KERNEL); + pgio->pg_lseg = + pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req), + req_offset(req), rd_size, + IOMODE_READ, false, + nfs_io_gfp_mask()); if (IS_ERR(pgio->pg_lseg)) { pgio->pg_error = PTR_ERR(pgio->pg_lseg); pgio->pg_lseg = NULL; @@ -2718,13 +2716,10 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, pnfs_generic_pg_check_layout(pgio); pnfs_generic_pg_check_range(pgio, req); if (pgio->pg_lseg == NULL) { - pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, - nfs_req_openctx(req), - req_offset(req), - wb_size, - IOMODE_RW, - false, - GFP_KERNEL); + pgio->pg_lseg = + pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req), + req_offset(req), wb_size, IOMODE_RW, + false, nfs_io_gfp_mask()); if (IS_ERR(pgio->pg_lseg)) { pgio->pg_error = PTR_ERR(pgio->pg_lseg); pgio->pg_lseg = NULL; @@ -3183,7 +3178,7 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync) status = -ENOMEM; /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */ - data = kzalloc(sizeof(*data), GFP_NOFS); + data = kzalloc(sizeof(*data), nfs_io_gfp_mask()); if (!data) goto clear_layoutcommitting; @@ -3250,7 +3245,7 @@ struct nfs4_threshold 
*pnfs_mdsthreshold_alloc(void) { struct nfs4_threshold *thp; - thp = kzalloc(sizeof(*thp), GFP_KERNEL); + thp = kzalloc(sizeof(*thp), nfs_io_gfp_mask()); if (!thp) { dprintk("%s mdsthreshold allocation failed\n", __func__); return NULL; -- cgit v1.2.3 From 3e5f151e94c190c31a240d9458677caab4f6c44e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 21 Mar 2022 15:34:22 -0400 Subject: pNFS/flexfiles: Ensure pNFS allocation modes are consistent with nfsiod Ensure that pNFS flexfile allocations in rpciod/nfsiod callbacks can fail in low memory mode, so that the threads don't block and loop forever. Signed-off-by: Trond Myklebust --- fs/nfs/flexfilelayout/flexfilelayout.c | 50 ++++++++++++++-------------------- 1 file changed, 21 insertions(+), 29 deletions(-) diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index e28f2177afb7..604be402ae13 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -663,7 +663,7 @@ nfs4_ff_layout_stat_io_start_read(struct inode *inode, spin_unlock(&mirror->lock); if (report) - pnfs_report_layoutstat(inode, GFP_KERNEL); + pnfs_report_layoutstat(inode, nfs_io_gfp_mask()); } static void @@ -694,7 +694,7 @@ nfs4_ff_layout_stat_io_start_write(struct inode *inode, spin_unlock(&mirror->lock); if (report) - pnfs_report_layoutstat(inode, GFP_KERNEL); + pnfs_report_layoutstat(inode, nfs_io_gfp_mask()); } static void @@ -806,13 +806,10 @@ ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio, bool strict_iomode) { pnfs_put_lseg(pgio->pg_lseg); - pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, - nfs_req_openctx(req), - req_offset(req), - req->wb_bytes, - IOMODE_READ, - strict_iomode, - GFP_KERNEL); + pgio->pg_lseg = + pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req), + req_offset(req), req->wb_bytes, IOMODE_READ, + strict_iomode, nfs_io_gfp_mask()); if (IS_ERR(pgio->pg_lseg)) { pgio->pg_error = PTR_ERR(pgio->pg_lseg); pgio->pg_lseg = NULL; @@ -894,13 +891,10 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio, retry: ff_layout_pg_check_layout(pgio, req); if (!pgio->pg_lseg) { - pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, - nfs_req_openctx(req), - req_offset(req), - req->wb_bytes, - IOMODE_RW, - false, - GFP_KERNEL); + pgio->pg_lseg = + pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req), + req_offset(req), req->wb_bytes, + IOMODE_RW, false, nfs_io_gfp_mask()); if (IS_ERR(pgio->pg_lseg)) { pgio->pg_error = PTR_ERR(pgio->pg_lseg); pgio->pg_lseg = NULL; @@ -953,13 +947,10 @@ ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) { if (!pgio->pg_lseg) { - pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, - nfs_req_openctx(req), - req_offset(req), - req->wb_bytes, - IOMODE_RW, - false, - GFP_KERNEL); + pgio->pg_lseg = + pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req), + req_offset(req), req->wb_bytes, + IOMODE_RW, false, nfs_io_gfp_mask()); if (IS_ERR(pgio->pg_lseg)) { pgio->pg_error = PTR_ERR(pgio->pg_lseg); pgio->pg_lseg = NULL; @@ -1258,7 +1249,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg, mirror = FF_LAYOUT_COMP(lseg, idx); err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout), mirror, offset, length, status, opnum, - GFP_KERNEL); + nfs_io_gfp_mask()); switch (status) { case NFS4ERR_DELAY: @@ -1973,7 +1964,8 @@ ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo, struct inode *inode = lseg->pls_layout->plh_inode; struct pnfs_commit_array *array, 
*new; - new = pnfs_alloc_commit_array(flseg->mirror_array_cnt, GFP_KERNEL); + new = pnfs_alloc_commit_array(flseg->mirror_array_cnt, + nfs_io_gfp_mask()); if (new) { spin_lock(&inode->i_lock); array = pnfs_add_commit_array(fl_cinfo, new, lseg); @@ -2152,10 +2144,10 @@ ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args) struct nfs4_flexfile_layoutreturn_args *ff_args; struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout); - ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL); + ff_args = kmalloc(sizeof(*ff_args), nfs_io_gfp_mask()); if (!ff_args) goto out_nomem; - ff_args->pages[0] = alloc_page(GFP_KERNEL); + ff_args->pages[0] = alloc_page(nfs_io_gfp_mask()); if (!ff_args->pages[0]) goto out_nomem_free; @@ -2193,7 +2185,7 @@ ff_layout_send_layouterror(struct pnfs_layout_segment *lseg) return; errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, sizeof(*errors), - GFP_KERNEL); + nfs_io_gfp_mask()); if (errors != NULL) { const struct nfs4_ff_layout_ds_err *pos; size_t n = 0; @@ -2445,7 +2437,7 @@ ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args) /* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */ args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), - GFP_KERNEL); + nfs_io_gfp_mask()); if (!args->devinfo) return -ENOMEM; -- cgit v1.2.3 From a245832aaa9930f0ea91527cbd70521722b89313 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 21 Mar 2022 17:27:14 -0400 Subject: pNFS/files: Ensure pNFS allocation modes are consistent with nfsiod Ensure that pNFS file commit allocations in rpciod/nfsiod callbacks can fail in low memory mode, so that the threads don't block and loop forever. Signed-off-by: Trond Myklebust --- fs/nfs/filelayout/filelayout.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 9c96e3e5ed35..76deddab0a8f 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -1075,7 +1075,7 @@ filelayout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo, unsigned int size = (fl->stripe_type == STRIPE_SPARSE) ? fl->dsaddr->ds_num : fl->dsaddr->stripe_count; - new = pnfs_alloc_commit_array(size, GFP_NOIO); + new = pnfs_alloc_commit_array(size, nfs_io_gfp_mask()); if (new) { spin_lock(&inode->i_lock); array = pnfs_add_commit_array(fl_cinfo, new, lseg); -- cgit v1.2.3 From 3848e96edf4788f772d83990022fa7023a233d83 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Mar 2022 13:42:17 +1100 Subject: SUNRPC: avoid race between mod_timer() and del_timer_sync() xprt_destory() claims XPRT_LOCKED and then calls del_timer_sync(). Both xprt_unlock_connect() and xprt_release() call ->release_xprt() which drops XPRT_LOCKED and *then* xprt_schedule_autodisconnect() which calls mod_timer(). This may result in mod_timer() being called *after* del_timer_sync(). When this happens, the timer may fire long after the xprt has been freed, and run_timer_softirq() will probably crash. The pairing of ->release_xprt() and xprt_schedule_autodisconnect() is always called under ->transport_lock. So if we take ->transport_lock to call del_timer_sync(), we can be sure that mod_timer() will run first (if it runs at all). 
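A condensed view of the ordering the lock enforces; the scheduling side is simplified from the existing xprt_schedule_autodisconnect(), and the teardown side is the code this patch adds:

	/* Scheduling side: runs under ->transport_lock right after
	 * ->release_xprt() drops XPRT_LOCKED (simplified). */
	static void schedule_autodisconnect(struct rpc_xprt *xprt)
	{
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
	}

	/* Teardown side (xprt_destroy): holding the same lock means the
	 * mod_timer() above can only happen before del_timer_sync(), never
	 * after, so the timer cannot be re-armed on a freed xprt. */
	spin_lock(&xprt->transport_lock);
	del_timer_sync(&xprt->timer);
	spin_unlock(&xprt->transport_lock);
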
Cc: stable@vger.kernel.org Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 744c6c1d536f..880bfe8dc7f6 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -2104,7 +2104,14 @@ static void xprt_destroy(struct rpc_xprt *xprt) */ wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE); + /* + * xprt_schedule_autodisconnect() can run after XPRT_LOCKED + * is cleared. We use ->transport_lock to ensure the mod_timer() + * can only run *before* del_time_sync(), never after. + */ + spin_lock(&xprt->transport_lock); del_timer_sync(&xprt->timer); + spin_unlock(&xprt->transport_lock); /* * Destroy sockets etc from the system workqueue so they can -- cgit v1.2.3 From 3de24f3d7078f90c8488fc67446671503f44d63e Mon Sep 17 00:00:00 2001 From: Jakob Koschel Date: Thu, 24 Mar 2022 08:15:23 +0100 Subject: NFS: replace usage of found with dedicated list iterator variable To move the list iterator variable into the list_for_each_entry_*() macro in the future it should be avoided to use the list iterator variable after the loop body. To *never* use the list iterator variable after the loop it was concluded to use a separate iterator variable instead of a found boolean [1]. This removes the need to use a found variable and simply checking if the variable was set, can determine if the break/goto was hit. Link: https://lore.kernel.org/all/CAHk-=wgRr_D8CB-D9Kg-c=EHreAsk5SqXPwr9Y7k9sA6cWXJ6w@mail.gmail.com/ Signed-off-by: Jakob Koschel Signed-off-by: Trond Myklebust --- fs/nfs/nfs42proc.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index b841e267b764..068c45b3bc1a 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -175,9 +175,8 @@ static int handle_async_copy(struct nfs42_copy_res *res, nfs4_stateid *src_stateid, bool *restart) { - struct nfs4_copy_state *copy, *tmp_copy; + struct nfs4_copy_state *copy, *tmp_copy = NULL, *iter; int status = NFS4_OK; - bool found_pending = false; struct nfs_open_context *dst_ctx = nfs_file_open_context(dst); struct nfs_open_context *src_ctx = nfs_file_open_context(src); @@ -186,17 +185,17 @@ static int handle_async_copy(struct nfs42_copy_res *res, return -ENOMEM; spin_lock(&dst_server->nfs_client->cl_lock); - list_for_each_entry(tmp_copy, + list_for_each_entry(iter, &dst_server->nfs_client->pending_cb_stateids, copies) { - if (memcmp(&res->write_res.stateid, &tmp_copy->stateid, + if (memcmp(&res->write_res.stateid, &iter->stateid, NFS4_STATEID_SIZE)) continue; - found_pending = true; - list_del(&tmp_copy->copies); + tmp_copy = iter; + list_del(&iter->copies); break; } - if (found_pending) { + if (tmp_copy) { spin_unlock(&dst_server->nfs_client->cl_lock); kfree(copy); copy = tmp_copy; -- cgit v1.2.3 From 82ee41b85cef16b4be1f4732650012d9baaedddd Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Thu, 24 Mar 2022 10:22:58 -0400 Subject: SUNRPC don't resend a task on an offlined transport When a task is being retried, due to an NFS error, if the assigned transport has been put offline and the task is relocatable pick a new transport. 
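In short, a retried task keeps its previously assigned transport only while that transport is still usable for it. An illustrative restatement of the new test in rpc_task_set_transport():

	/* Keep task->tk_xprt unless it was taken offline via sysfs AND the
	 * task is allowed to migrate to another transport. */
	if (task->tk_xprt &&
	    !(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) &&
	      (task->tk_flags & RPC_TASK_MOVEABLE)))
		return;		/* reuse the existing transport */
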
Fixes: 6f081693e7b2b ("sunrpc: remove an offlined xprt using sysfs") Signed-off-by: Olga Kornievskaia Signed-off-by: Trond Myklebust --- net/sunrpc/clnt.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 0f54a56d19d2..8bf2af8546d2 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1065,7 +1065,9 @@ rpc_task_get_next_xprt(struct rpc_clnt *clnt) static void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt) { - if (task->tk_xprt) + if (task->tk_xprt && + !(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) && + (task->tk_flags & RPC_TASK_MOVEABLE))) return; if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) task->tk_xprt = rpc_task_get_first_xprt(clnt); -- cgit v1.2.3 From 1d15d121cc2ad4d016a7dc1493132a9696f91fc5 Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Thu, 24 Mar 2022 10:38:42 -0400 Subject: NFSv4.1: don't retry BIND_CONN_TO_SESSION on session error There is no reason to retry the operation if a session error had occurred in such case result structure isn't filled out. Fixes: dff58530c4ca ("NFSv4.1: fix handling of backchannel binding in BIND_CONN_TO_SESSION") Signed-off-by: Olga Kornievskaia Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index dd7a4c2a3f05..e3f5b380cefe 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -8340,6 +8340,7 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) case -NFS4ERR_DEADSESSION: nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); + return; } if (args->dir == NFS4_CDFC4_FORE_OR_BOTH && res->dir != NFS4_CDFS4_BOTH) { -- cgit v1.2.3 From 421ab1be43bd015ffe744f4ea25df4f19d1ce6fe Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 25 Mar 2022 10:37:31 -0400 Subject: SUNRPC: Do not dereference non-socket transports in sysfs Do not cast the struct xprt to a sock_xprt unless we know it is a UDP or TCP transport. Otherwise the call to lock the mutex will scribble over whatever structure is actually there. This has been seen to cause hard system lockups when the underlying transport was RDMA. 
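The fix replaces the unconditional container_of() cast with two optional transport ops, so only socket-based transports ever expose socket state. A fragment condensed from the sysfs hunks below, showing how the ops are consumed:

	unsigned short srcport = 0;

	/* Non-socket transports (e.g. RDMA) leave these ops NULL, so the
	 * sysfs code never touches a struct sock_xprt that is not there. */
	if (xprt->ops->get_srcport)
		srcport = xprt->ops->get_srcport(xprt);
	if (xprt->ops->get_srcaddr)
		ret = xprt->ops->get_srcaddr(xprt, buf, PAGE_SIZE);
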
Fixes: b49ea673e119 ("SUNRPC: lock against ->sock changing during sysfs read") Cc: stable@vger.kernel.org Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xprt.h | 3 +++ include/linux/sunrpc/xprtsock.h | 1 - net/sunrpc/sysfs.c | 55 ++++++++++++++++++++--------------------- net/sunrpc/xprtsock.c | 26 +++++++++++++++++-- 4 files changed, 54 insertions(+), 31 deletions(-) diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 955ea4d7af0b..eef5e87c03b4 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -139,6 +139,9 @@ struct rpc_xprt_ops { void (*rpcbind)(struct rpc_task *task); void (*set_port)(struct rpc_xprt *xprt, unsigned short port); void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task); + int (*get_srcaddr)(struct rpc_xprt *xprt, char *buf, + size_t buflen); + unsigned short (*get_srcport)(struct rpc_xprt *xprt); int (*buf_alloc)(struct rpc_task *task); void (*buf_free)(struct rpc_task *task); void (*prepare_request)(struct rpc_rqst *req); diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 3eb0079669c5..38284f25eddf 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -10,7 +10,6 @@ int init_socket_xprt(void); void cleanup_socket_xprt(void); -unsigned short get_srcport(struct rpc_xprt *); #define RPC_MIN_RESVPORT (1U) #define RPC_MAX_RESVPORT (65535U) diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c index 05c758da6a92..9d8a7d9f3e41 100644 --- a/net/sunrpc/sysfs.c +++ b/net/sunrpc/sysfs.c @@ -97,7 +97,7 @@ static ssize_t rpc_sysfs_xprt_dstaddr_show(struct kobject *kobj, return 0; ret = sprintf(buf, "%s\n", xprt->address_strings[RPC_DISPLAY_ADDR]); xprt_put(xprt); - return ret + 1; + return ret; } static ssize_t rpc_sysfs_xprt_srcaddr_show(struct kobject *kobj, @@ -105,33 +105,31 @@ static ssize_t rpc_sysfs_xprt_srcaddr_show(struct kobject *kobj, char *buf) { struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj); - struct sockaddr_storage saddr; - struct sock_xprt *sock; - ssize_t ret = -1; + size_t buflen = PAGE_SIZE; + ssize_t ret = -ENOTSOCK; if (!xprt || !xprt_connected(xprt)) { - xprt_put(xprt); - return -ENOTCONN; + ret = -ENOTCONN; + } else if (xprt->ops->get_srcaddr) { + ret = xprt->ops->get_srcaddr(xprt, buf, buflen); + if (ret > 0) { + if (ret < buflen - 1) { + buf[ret] = '\n'; + ret++; + buf[ret] = '\0'; + } + } } - - sock = container_of(xprt, struct sock_xprt, xprt); - mutex_lock(&sock->recv_mutex); - if (sock->sock == NULL || - kernel_getsockname(sock->sock, (struct sockaddr *)&saddr) < 0) - goto out; - - ret = sprintf(buf, "%pISc\n", &saddr); -out: - mutex_unlock(&sock->recv_mutex); xprt_put(xprt); - return ret + 1; + return ret; } static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj, - struct kobj_attribute *attr, - char *buf) + struct kobj_attribute *attr, char *buf) { struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj); + unsigned short srcport = 0; + size_t buflen = PAGE_SIZE; ssize_t ret; if (!xprt || !xprt_connected(xprt)) { @@ -139,7 +137,11 @@ static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj, return -ENOTCONN; } - ret = sprintf(buf, "last_used=%lu\ncur_cong=%lu\ncong_win=%lu\n" + if (xprt->ops->get_srcport) + srcport = xprt->ops->get_srcport(xprt); + + ret = snprintf(buf, buflen, + "last_used=%lu\ncur_cong=%lu\ncong_win=%lu\n" "max_num_slots=%u\nmin_num_slots=%u\nnum_reqs=%u\n" "binding_q_len=%u\nsending_q_len=%u\npending_q_len=%u\n" "backlog_q_len=%u\nmain_xprt=%d\nsrc_port=%u\n" @@ -147,14 +149,11 
@@ static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj, xprt->last_used, xprt->cong, xprt->cwnd, xprt->max_reqs, xprt->min_reqs, xprt->num_reqs, xprt->binding.qlen, xprt->sending.qlen, xprt->pending.qlen, - xprt->backlog.qlen, xprt->main, - (xprt->xprt_class->ident == XPRT_TRANSPORT_TCP) ? - get_srcport(xprt) : 0, + xprt->backlog.qlen, xprt->main, srcport, atomic_long_read(&xprt->queuelen), - (xprt->xprt_class->ident == XPRT_TRANSPORT_TCP) ? - xprt->address_strings[RPC_DISPLAY_PORT] : "0"); + xprt->address_strings[RPC_DISPLAY_PORT]); xprt_put(xprt); - return ret + 1; + return ret; } static ssize_t rpc_sysfs_xprt_state_show(struct kobject *kobj, @@ -201,7 +200,7 @@ static ssize_t rpc_sysfs_xprt_state_show(struct kobject *kobj, } xprt_put(xprt); - return ret + 1; + return ret; } static ssize_t rpc_sysfs_xprt_switch_info_show(struct kobject *kobj, @@ -220,7 +219,7 @@ static ssize_t rpc_sysfs_xprt_switch_info_show(struct kobject *kobj, xprt_switch->xps_nunique_destaddr_xprts, atomic_long_read(&xprt_switch->xps_queuelen)); xprt_switch_put(xprt_switch); - return ret + 1; + return ret; } static ssize_t rpc_sysfs_xprt_dstaddr_store(struct kobject *kobj, diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index b52eaa8a0cda..78af7518f263 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1638,7 +1638,7 @@ static int xs_get_srcport(struct sock_xprt *transport) return port; } -unsigned short get_srcport(struct rpc_xprt *xprt) +static unsigned short xs_sock_srcport(struct rpc_xprt *xprt) { struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt); unsigned short ret = 0; @@ -1648,7 +1648,25 @@ unsigned short get_srcport(struct rpc_xprt *xprt) mutex_unlock(&sock->recv_mutex); return ret; } -EXPORT_SYMBOL(get_srcport); + +static int xs_sock_srcaddr(struct rpc_xprt *xprt, char *buf, size_t buflen) +{ + struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt); + union { + struct sockaddr sa; + struct sockaddr_storage st; + } saddr; + int ret = -ENOTCONN; + + mutex_lock(&sock->recv_mutex); + if (sock->sock) { + ret = kernel_getsockname(sock->sock, &saddr.sa); + if (ret >= 0) + ret = snprintf(buf, buflen, "%pISc", &saddr.sa); + } + mutex_unlock(&sock->recv_mutex); + return ret; +} static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port) { @@ -2622,6 +2640,8 @@ static const struct rpc_xprt_ops xs_udp_ops = { .rpcbind = rpcb_getport_async, .set_port = xs_set_port, .connect = xs_connect, + .get_srcaddr = xs_sock_srcaddr, + .get_srcport = xs_sock_srcport, .buf_alloc = rpc_malloc, .buf_free = rpc_free, .send_request = xs_udp_send_request, @@ -2644,6 +2664,8 @@ static const struct rpc_xprt_ops xs_tcp_ops = { .rpcbind = rpcb_getport_async, .set_port = xs_set_port, .connect = xs_connect, + .get_srcaddr = xs_sock_srcaddr, + .get_srcport = xs_sock_srcport, .buf_alloc = rpc_malloc, .buf_free = rpc_free, .prepare_request = xs_stream_prepare_request, -- cgit v1.2.3 From ebbe788731cb52a0ef69cf962813b302ef5b399e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 24 Mar 2022 17:19:59 -0400 Subject: SUNRPC: Don't return error values in sysfs read of closed files Instead of returning an error value, which ends up being the return value for the read() system call, it is more elegant to simply return the error as a string value. 
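The net effect is that reading a sysfs attribute of a transport that is gone or not connected yields an empty line rather than making read(2) fail. A minimal sketch of the resulting show() shape (the attribute-specific formatting is illustrative):

	static ssize_t example_attr_show(struct kobject *kobj,
					 struct kobj_attribute *attr, char *buf)
	{
		struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj);
		ssize_t ret;

		if (!xprt || !xprt_connected(xprt))
			ret = sprintf(buf, "\n");	/* "no data", not an errno */
		else
			ret = sprintf(buf, "%s\n",
				      xprt->address_strings[RPC_DISPLAY_ADDR]);
		xprt_put(xprt);		/* xprt_put() accepts NULL */
		return ret;
	}
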
Signed-off-by: Trond Myklebust --- net/sunrpc/sysfs.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c index 9d8a7d9f3e41..a3a2f8aeb80e 100644 --- a/net/sunrpc/sysfs.c +++ b/net/sunrpc/sysfs.c @@ -93,10 +93,13 @@ static ssize_t rpc_sysfs_xprt_dstaddr_show(struct kobject *kobj, struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj); ssize_t ret; - if (!xprt) - return 0; + if (!xprt) { + ret = sprintf(buf, "\n"); + goto out; + } ret = sprintf(buf, "%s\n", xprt->address_strings[RPC_DISPLAY_ADDR]); xprt_put(xprt); +out: return ret; } @@ -106,10 +109,10 @@ static ssize_t rpc_sysfs_xprt_srcaddr_show(struct kobject *kobj, { struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj); size_t buflen = PAGE_SIZE; - ssize_t ret = -ENOTSOCK; + ssize_t ret; if (!xprt || !xprt_connected(xprt)) { - ret = -ENOTCONN; + ret = sprintf(buf, "\n"); } else if (xprt->ops->get_srcaddr) { ret = xprt->ops->get_srcaddr(xprt, buf, buflen); if (ret > 0) { @@ -118,8 +121,10 @@ static ssize_t rpc_sysfs_xprt_srcaddr_show(struct kobject *kobj, ret++; buf[ret] = '\0'; } - } - } + } else + ret = sprintf(buf, "\n"); + } else + ret = sprintf(buf, "\n"); xprt_put(xprt); return ret; } @@ -133,8 +138,8 @@ static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj, ssize_t ret; if (!xprt || !xprt_connected(xprt)) { - xprt_put(xprt); - return -ENOTCONN; + ret = sprintf(buf, "\n"); + goto out; } if (xprt->ops->get_srcport) @@ -152,6 +157,7 @@ static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj, xprt->backlog.qlen, xprt->main, srcport, atomic_long_read(&xprt->queuelen), xprt->address_strings[RPC_DISPLAY_PORT]); +out: xprt_put(xprt); return ret; } @@ -165,10 +171,7 @@ static ssize_t rpc_sysfs_xprt_state_show(struct kobject *kobj, int locked, connected, connecting, close_wait, bound, binding, closing, congested, cwnd_wait, write_space, offline, remove; - if (!xprt) - return 0; - - if (!xprt->state) { + if (!(xprt && xprt->state)) { ret = sprintf(buf, "state=CLOSED\n"); } else { locked = test_bit(XPRT_LOCKED, &xprt->state); -- cgit v1.2.3 From d02d81efc7564b4d5446a02e0214a164cf00b1f3 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 25 Mar 2022 21:51:03 -0400 Subject: NFS: Don't loop forever in nfs_do_recoalesce() If __nfs_pageio_add_request() fails to add the request, it will return with either desc->pg_error < 0, or mirror->pg_recoalesce will be set, so we are guaranteed either to exit the function altogether, or to loop. However if there is nothing left in mirror->pg_list to coalesce, we must exit, so make sure that we clear mirror->pg_recoalesce every time we loop. 
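For context, the loop being fixed has roughly this shape (heavily simplified from fs/nfs/pagelist.c). Without resetting pg_recoalesce at the top of each pass, a pass that consumes everything in pg_list still leaves the flag set and the do/while never terminates:

	do {
		list_splice_init(&mirror->pg_list, &head);
		mirror->pg_recoalesce = 0;	/* the one-line fix below */

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			/* On success the request is consumed; on failure either
			 * desc->pg_error < 0 or pg_recoalesce is set again. */
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0)
				return desc->pg_error;
			break;
		}
	} while (mirror->pg_recoalesce);
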
Reported-by: Olga Kornievskaia Fixes: 70536bf4eb07 ("NFS: Clean up reset of the mirror accounting variables") Signed-off-by: Trond Myklebust --- fs/nfs/pagelist.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 3156db526cc4..9157dd19b8b4 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -1218,6 +1218,7 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc) do { list_splice_init(&mirror->pg_list, &head); + mirror->pg_recoalesce = 0; while (!list_empty(&head)) { struct nfs_page *req; -- cgit v1.2.3 From 7c9d845f0612e5bcd23456a2ec43be8ac43458f1 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 28 Mar 2022 08:36:34 -0400 Subject: NFSv4/pNFS: Fix another issue with a list iterator pointing to the head In nfs4_callback_devicenotify(), if we don't find a matching entry for the deviceid, we're left with a pointer to 'struct nfs_server' that actually points to the list of super blocks associated with our struct nfs_client. Furthermore, even if we have a valid pointer, nothing pins the super block, and so the struct nfs_server could end up getting freed while we're using it. Since all we want is a pointer to the struct pnfs_layoutdriver_type, let's skip all the iteration over super blocks, and just use APIs to find the layout driver directly. Reported-by: Xiaomeng Tong Fixes: 1be5683b03a7 ("pnfs: CB_NOTIFY_DEVICEID") Signed-off-by: Trond Myklebust --- fs/nfs/callback_proc.c | 27 +++++++++------------------ fs/nfs/pnfs.c | 11 +++++++++++ fs/nfs/pnfs.h | 2 ++ 3 files changed, 22 insertions(+), 18 deletions(-) diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 39d1ec870d90..c8520284dda7 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -358,12 +358,11 @@ __be32 nfs4_callback_devicenotify(void *argp, void *resp, struct cb_process_state *cps) { struct cb_devicenotifyargs *args = argp; + const struct pnfs_layoutdriver_type *ld = NULL; uint32_t i; __be32 res = 0; - struct nfs_client *clp = cps->clp; - struct nfs_server *server = NULL; - if (!clp) { + if (!cps->clp) { res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION); goto out; } @@ -371,23 +370,15 @@ __be32 nfs4_callback_devicenotify(void *argp, void *resp, for (i = 0; i < args->ndevs; i++) { struct cb_devicenotifyitem *dev = &args->devs[i]; - if (!server || - server->pnfs_curr_ld->id != dev->cbd_layout_type) { - rcu_read_lock(); - list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) - if (server->pnfs_curr_ld && - server->pnfs_curr_ld->id == dev->cbd_layout_type) { - rcu_read_unlock(); - goto found; - } - rcu_read_unlock(); - continue; + if (!ld || ld->id != dev->cbd_layout_type) { + pnfs_put_layoutdriver(ld); + ld = pnfs_find_layoutdriver(dev->cbd_layout_type); + if (!ld) + continue; } - - found: - nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id); + nfs4_delete_deviceid(ld, cps->clp, &dev->cbd_dev_id); } - + pnfs_put_layoutdriver(ld); out: kfree(args->devs); return res; diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index de318bb5d349..856c962273c7 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -92,6 +92,17 @@ find_pnfs_driver(u32 id) return local; } +const struct pnfs_layoutdriver_type *pnfs_find_layoutdriver(u32 id) +{ + return find_pnfs_driver(id); +} + +void pnfs_put_layoutdriver(const struct pnfs_layoutdriver_type *ld) +{ + if (ld) + module_put(ld->owner); +} + void unset_pnfs_layoutdriver(struct nfs_server *nfss) { diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index f4d7548d67b2..07f11489e4e9 100644 --- a/fs/nfs/pnfs.h 
+++ b/fs/nfs/pnfs.h @@ -234,6 +234,8 @@ struct pnfs_devicelist { extern int pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *); extern void pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *); +extern const struct pnfs_layoutdriver_type *pnfs_find_layoutdriver(u32 id); +extern void pnfs_put_layoutdriver(const struct pnfs_layoutdriver_type *ld); /* nfs4proc.c */ extern size_t max_response_pages(struct nfs_server *server); -- cgit v1.2.3
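Taken together, the callback path above now resolves the layout driver directly by id instead of walking the client's super blocks. A brief usage sketch of the two new helpers (error handling trimmed):

	const struct pnfs_layoutdriver_type *ld;

	ld = pnfs_find_layoutdriver(dev->cbd_layout_type);	/* takes a module reference */
	if (ld) {
		nfs4_delete_deviceid(ld, cps->clp, &dev->cbd_dev_id);
		pnfs_put_layoutdriver(ld);			/* drops the module reference */
	}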