author     Chuck Lever <chuck.lever@oracle.com>      2017-06-23 17:18:41 -0400
committer  J. Bruce Fields <bfields@redhat.com>      2017-07-12 15:54:57 -0400
commit     71641d99ce037ea226f94d5e08f2a8f71eba08f4 (patch)
tree       0e10fdc4fb7f6b2988837dd74a871dd0253389c3 /net/sunrpc
parent     cafc739892f34b9090413179ca259409fc43bfae (diff)
download   linux-71641d99ce037ea226f94d5e08f2a8f71eba08f4.tar.bz2
svcrdma: Properly compute .len and .buflen for received RPC Calls
When an RPC-over-RDMA request is received, the Receive buffer
contains a transport header possibly followed by an RPC message.
Even though rq_arg.head[0] (as passed to NFSD) does not contain the
transport header, rq_arg.len currently includes the size of the
transport header.
That violates the intent of the xdr_buf API contract: .buflen should
account for the entire receive buffer, including the transport
header, while .len should be exactly the length of the RPC message
in the buffer.
The rq_arg fields are summed together at the end of
svc_rdma_recvfrom to obtain the correct return value. rq_arg.len
really ought to contain the correct number of bytes already, but it
currently doesn't due to the above misbehavior.
Let's instead ensure that .buflen includes the length of the
transport header, and that .len is always equal to head.iov_len +
.page_len + tail.iov_len.
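A minimal sketch of that invariant, reusing the illustrative struct
above (the helper names are hypothetical, not part of this patch): once
svc_rdma_xdr_decode_req() strips hdr_len bytes of transport header from
head[0], .len must shrink by the same amount, while .buflen continues
to cover the whole receive buffer.

	/* Hypothetical helpers, for illustration only. */
	static void strip_transport_header(struct xdr_buf_sketch *arg,
					   unsigned int hdr_len)
	{
		/* Advance head[0] past the transport header... */
		arg->head[0].iov_base = (char *)arg->head[0].iov_base + hdr_len;
		arg->head[0].iov_len -= hdr_len;
		/* ...and shrink .len to match; .buflen is left alone. */
		arg->len -= hdr_len;
	}

	static int len_invariant_holds(const struct xdr_buf_sketch *arg)
	{
		return arg->len == arg->head[0].iov_len +
				   arg->page_len +
				   arg->tail[0].iov_len;
	}

With that invariant in place, svc_rdma_recvfrom() can simply return
rq_arg.len instead of re-summing head, page_len, and tail, as the
hunks below show.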
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 14
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_rw.c       |  2
2 files changed, 5 insertions, 11 deletions
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 5fbcb73dd68d..ad4bd62eebf1 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -143,7 +143,6 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
 		put_page(rqstp->rq_pages[sge_no]);
 		rqstp->rq_pages[sge_no] = page;
 		bc -= min_t(u32, bc, ctxt->sge[sge_no].length);
-		rqstp->rq_arg.buflen += ctxt->sge[sge_no].length;
 		sge_no++;
 	}
 	rqstp->rq_respages = &rqstp->rq_pages[sge_no];
@@ -338,6 +337,7 @@ static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
 	rq_arg->head[0].iov_base = p;
 	hdr_len = (unsigned long)p - (unsigned long)rdma_argp;
 	rq_arg->head[0].iov_len -= hdr_len;
+	rq_arg->len -= hdr_len;
 	dprintk("svcrdma: received %s request for XID 0x%08x, hdr_len=%u\n",
 		proc, be32_to_cpup(rdma_argp), hdr_len);
 	return hdr_len;
@@ -564,18 +564,12 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 		goto out_readchunk;
 
 complete:
-	ret = rqstp->rq_arg.head[0].iov_len
-		+ rqstp->rq_arg.page_len
-		+ rqstp->rq_arg.tail[0].iov_len;
 	svc_rdma_put_context(ctxt, 0);
-	dprintk("svcrdma: ret=%d, rq_arg.len=%u, "
-		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
-		ret, rqstp->rq_arg.len,
-		rqstp->rq_arg.head[0].iov_base,
-		rqstp->rq_arg.head[0].iov_len);
+	dprintk("svcrdma: recvfrom: xprt=%p, rqstp=%p, rq_arg.len=%u\n",
+		rdma_xprt, rqstp, rqstp->rq_arg.len);
 	rqstp->rq_prot = IPPROTO_MAX;
 	svc_xprt_copy_addrs(rqstp, xprt);
-	return ret;
+	return rqstp->rq_arg.len;
 
 out_readchunk:
 	ret = svc_rdma_recv_read_chunk(rdma_xprt, rqstp, ctxt, p);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 77d38a4b8d99..9d7a151b4885 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -848,7 +848,7 @@ static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
 	head->arg.len += info->ri_chunklen;
 	head->arg.buflen += info->ri_chunklen;
 
-	if (head->arg.len <= head->sge[0].length) {
+	if (head->arg.buflen <= head->sge[0].length) {
 		/* Transport header and RPC message fit entirely
 		 * in page where head iovec resides. */