author    Chuck Lever <chuck.lever@oracle.com>  2021-01-13 13:57:18 -0500
committer Chuck Lever <chuck.lever@oracle.com>  2021-03-22 13:22:13 -0400
commit    2a1e4f21d84184f7ff5768ee3d3d0c30b1135867
tree      c87db194a3c60d96538d11b25971243213c955d6
parent    e844d307d46cfa7e09cdb671941bfd5f1be86773
download  linux-2a1e4f21d84184f7ff5768ee3d3d0c30b1135867.tar.bz2
svcrdma: Normalize Send page handling
Currently svc_rdma_sendto() migrates xdr_buf pages into a separate page list and NULLs out a bunch of entries in rq_pages while the pages are under I/O. The Send completion handler then frees those pages later.

Instead, let's wait for the Send completion, then handle page releasing in the nfsd thread. I'd like to avoid the cost of 250+ put_page() calls in the Send completion handler, which is single-threaded.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
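For context on the hunk below: the other half of this handshake is the Send completion handler, which this backchannel hunk does not show. Here is a minimal sketch of its shape, assuming (as the wait below requires) that the handler only signals sc_done and leaves page release to the waiting nfsd thread; the tracing and flushed-completion handling in the real svc_rdma_wc_send() in svc_rdma_sendto.c are omitted.

/* Minimal sketch, not the actual handler: the completion side of
 * the wait introduced below. Assumes the handler's only job is to
 * signal sc_done.
 */
static void sketch_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	/* Recover the send context embedding this CQE. */
	struct svc_rdma_send_ctxt *ctxt =
		container_of(wc->wr_cqe, struct svc_rdma_send_ctxt, sc_cqe);

	/* Wake the nfsd thread sleeping in
	 * wait_for_completion_killable(); the 250+ put_page() calls
	 * now happen there instead of in this single-threaded
	 * completion context.
	 */
	complete(&ctxt->sc_done);
}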
Diffstat (limited to 'net/sunrpc/xprtrdma/svc_rdma_backchannel.c')
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_backchannel.c  8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index 9150df35fb6f..16897fcb659c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -93,7 +93,13 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
 	 */
 	get_page(virt_to_page(rqst->rq_buffer));
 	sctxt->sc_send_wr.opcode = IB_WR_SEND;
-	return svc_rdma_send(rdma, sctxt);
+	ret = svc_rdma_send(rdma, sctxt);
+	if (ret < 0)
+		return ret;
+
+	ret = wait_for_completion_killable(&sctxt->sc_done);
+	svc_rdma_send_ctxt_put(rdma, sctxt);
+	return ret;
 }
 
 /* Server-side transport endpoint wants a whole page for its send
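A note on the wait primitive used above: wait_for_completion_killable() returns 0 once complete(&sctxt->sc_done) fires, and -ERESTARTSYS if the sleeping nfsd thread receives a fatal signal; ordinary signals do not interrupt the wait. One detail the hunk leaves implicit is that sc_done must be armed before each Send. A hypothetical sketch of that requirement follows (the function name is invented for illustration; where the re-arming actually happens is outside this hunk).

/* Hypothetical helper, for illustration only: a recycled send
 * context must have its completion re-armed before the next
 * svc_rdma_send()/wait pair, since a struct completion stays
 * "done" after complete() until it is reset.
 */
static void sketch_prepare_ctxt(struct svc_rdma_send_ctxt *ctxt)
{
	/* reinit_completion() resets the done count of an
	 * already-initialized struct completion for reuse.
	 */
	reinit_completion(&ctxt->sc_done);
}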