author     Trond Myklebust <trond.myklebust@hammerspace.com>  2021-08-27 14:00:56 -0400
committer  Anna Schumaker <Anna.Schumaker@Netapp.com>  2021-08-30 13:21:38 -0400
commit     8cfb9015280d49f9d92d5b0f88cedf5f0856c0fd (patch)
tree       3c29d644da945946c0e48e8ccde199dde67864f8 /fs
parent     2a7a451a9084877a0b9d335c77d57e4cda1e5882 (diff)
download   linux-8cfb9015280d49f9d92d5b0f88cedf5f0856c0fd.tar.bz2
NFS: Always provide aligned buffers to the RPC read layers
Instead of messing around with XDR padding in the RDMA layer, we should
just give the RPC layer an aligned buffer. Try to avoid creating extra
RPC calls by aligning to the smaller value of ALIGN(len, rsize) and
PAGE_SIZE.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
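To make the rounding rule concrete, here is a minimal userspace sketch of the computation described above. It assumes the usual kernel semantics of ALIGN() (round up to a power-of-two boundary) and min_t() (typed minimum), and a 4096-byte PAGE_SIZE; the macros below are re-implemented purely for illustration and are not the kernel definitions.

/*
 * Illustration only: a userspace re-implementation of the rule
 * "aligned_len = min(ALIGN(len, rsize), PAGE_SIZE)" used by the patch.
 * ALIGN() assumes rsize is a power of two, as the kernel macro does.
 */
#include <stdio.h>

#define PAGE_SIZE   4096u                              /* assumed page size */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))     /* round x up to a */
#define MIN(a, b)   ((a) < (b) ? (a) : (b))

static unsigned int aligned_read_len(unsigned int len, unsigned int rsize)
{
	/* Round the request up to the server's rsize, but never past the page. */
	return MIN(ALIGN(len, rsize), PAGE_SIZE);
}

int main(void)
{
	/* A short 1000-byte tail with rsize = 32768 is padded out to the
	 * full page, so the RPC layer sees an aligned buffer. */
	printf("%u\n", aligned_read_len(1000, 32768)); /* prints 4096 */
	/* With rsize = 1024, 1000 rounds up to 1024, still within the page. */
	printf("%u\n", aligned_read_len(1000, 1024));  /* prints 1024 */
	return 0;
}

Clamping to PAGE_SIZE keeps the request within the page, while rounding up to rsize avoids generating an extra short RPC, which is the trade-off the commit message describes.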
Diffstat (limited to 'fs')
-rw-r--r--  fs/nfs/read.c  8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 9f39e0a1a38b..08d6cc57cbc3 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -293,15 +293,19 @@ static int
 readpage_async_filler(void *data, struct page *page)
 {
 	struct nfs_readdesc *desc = data;
+	struct inode *inode = page_file_mapping(page)->host;
+	unsigned int rsize = NFS_SERVER(inode)->rsize;
 	struct nfs_page *new;
-	unsigned int len;
+	unsigned int len, aligned_len;
 	int error;
 
 	len = nfs_page_length(page);
 	if (len == 0)
 		return nfs_return_empty_page(page);
 
-	new = nfs_create_request(desc->ctx, page, 0, len);
+	aligned_len = min_t(unsigned int, ALIGN(len, rsize), PAGE_SIZE);
+
+	new = nfs_create_request(desc->ctx, page, 0, aligned_len);
 	if (IS_ERR(new))
 		goto out_error;