path: root/drivers/infiniband/ulp
author    Jack Wang <jinpu.wang@cloud.ionos.com>	2021-06-14 11:03:33 +0200
committer Jason Gunthorpe <jgg@nvidia.com>	2021-06-18 13:47:12 -0300
commit    5e91eabf66c854f16ca2e954e5c68939bc81601e (patch)
tree      3b036df87c63ff499125e8191e99bf8e732322c3 /drivers/infiniband/ulp
parent    915e4af59f0b6a9e72dd9ef0fda853b87ae556c5 (diff)
download  linux-5e91eabf66c854f16ca2e954e5c68939bc81601e.tar.bz2
RDMA/rtrs-srv: Set minimal max_send_wr and max_recv_wr
Currently, when rtrs creates a QP it uses coarse (generally oversized) numbers, which makes the hardware allocate more resources than needed and only wastes memory with no benefit.

For max_send_wr, we do not really need the full max_qp_wr size when creating the QP; reduce it to cq_size. For max_recv_wr, cq_size is enough. With this patch, when sess_queue_depth=128, per-session (2 paths) memory consumption is reduced from 188 MB to 65 MB.

When always_invalidate is enabled, we need to post more send WRs, so treat that case specially.

Fixes: 9cb837480424e ("RDMA/rtrs: server: main functionality")
Link: https://lore.kernel.org/r/20210614090337.29557-2-jinpu.wang@ionos.com
Signed-off-by: Jack Wang <jinpu.wang@cloud.ionos.com>
Reviewed-by: Md Haris Iqbal <haris.iqbal@cloud.ionos.com>
Signed-off-by: Gioh Kim <gi-oh.kim@ionos.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
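For illustration only, the sizing logic this patch introduces can be sketched as a standalone C function. This is a minimal sketch, not kernel code: min_u32() stands in for the kernel's min_t(), the SERVICE_CON_QUEUE_DEPTH value is assumed, and the queue_depth/max_qp_wr inputs mirror srv->queue_depth and ib_dev->attrs.max_qp_wr.

#include <stdbool.h>
#include <stdio.h>

/* Assumed value for illustration; the real constant lives in rtrs headers. */
#define SERVICE_CON_QUEUE_DEPTH 512

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Mirrors the post-patch sizing in create_con(): the service connection
 * gets fixed small queues; IO connections are sized from queue_depth,
 * with the send queue capped by max_qp_wr / 3 (the mlx5 headroom noted
 * in the code comment).
 */
static void size_wrs(bool service_con, bool always_invalidate,
		     unsigned int queue_depth, unsigned int max_qp_wr,
		     unsigned int *max_send_wr, unsigned int *max_recv_wr,
		     unsigned int *cq_size)
{
	if (service_con) {
		/* all sends (each requiring invalidate) + drain + heartbeat */
		*max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
		*max_recv_wr = SERVICE_CON_QUEUE_DEPTH + 2;
	} else {
		unsigned int wr_limit = max_qp_wr / 3;

		/* always_invalidate needs linv+rinv+mr+imm per request */
		*max_send_wr = min_u32(wr_limit,
				       queue_depth *
				       (always_invalidate ? 1 + 4 : 1 + 2) + 1);
		*max_recv_wr = queue_depth + 1;
	}
	/* CQ must be able to hold completions from both queues */
	*cq_size = *max_send_wr + *max_recv_wr;
}

int main(void)
{
	unsigned int send, recv, cq;

	size_wrs(false, true, 128, 32768, &send, &recv, &cq);
	printf("send=%u recv=%u cq=%u\n", send, recv, cq); /* 641 129 770 */
	return 0;
}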
Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r-- drivers/infiniband/ulp/rtrs/rtrs-srv.c | 38 +++++++++++++++++++++++-------------
 1 file changed, 25 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 5639b29b8b02..04ec3080e9b5 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -1634,7 +1634,7 @@ static int create_con(struct rtrs_srv_sess *sess,
 	struct rtrs_sess *s = &sess->s;
 	struct rtrs_srv_con *con;
-	u32 cq_size, wr_queue_size;
+	u32 cq_size, max_send_wr, max_recv_wr, wr_limit;
 	int err, cq_vector;
 
 	con = kzalloc(sizeof(*con), GFP_KERNEL);
@@ -1655,30 +1655,42 @@ static int create_con(struct rtrs_srv_sess *sess,
 		 * All receive and all send (each requiring invalidate)
 		 * + 2 for drain and heartbeat
 		 */
-		wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
-		cq_size = wr_queue_size;
+		max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
+		max_recv_wr = SERVICE_CON_QUEUE_DEPTH + 2;
+		cq_size = max_send_wr + max_recv_wr;
 	} else {
 		/*
-		 * If we have all receive requests posted and
-		 * all write requests posted and each read request
-		 * requires an invalidate request + drain
-		 * and qp gets into error state.
-		 */
-		cq_size = srv->queue_depth * 3 + 1;
-		/*
 		 * In theory we might have queue_depth * 32
 		 * outstanding requests if an unsafe global key is used
 		 * and we have queue_depth read requests each consisting
 		 * of 32 different addresses. div 3 for mlx5.
 		 */
-		wr_queue_size = sess->s.dev->ib_dev->attrs.max_qp_wr / 3;
+		wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr / 3;
+		/* when always_invalidate is enabled, we need linv+rinv+mr+imm */
+		if (always_invalidate)
+			max_send_wr =
+				min_t(int, wr_limit,
+				      srv->queue_depth * (1 + 4) + 1);
+		else
+			max_send_wr =
+				min_t(int, wr_limit,
+				      srv->queue_depth * (1 + 2) + 1);
+
+		max_recv_wr = srv->queue_depth + 1;
+		/*
+		 * If we have all receive requests posted and
+		 * all write requests posted and each read request
+		 * requires an invalidate request + drain
+		 * and qp gets into error state.
+		 */
+		cq_size = max_send_wr + max_recv_wr;
 	}
-	atomic_set(&con->sq_wr_avail, wr_queue_size);
+	atomic_set(&con->sq_wr_avail, max_send_wr);
 	cq_vector = rtrs_srv_get_next_cq_vector(sess);
 
 	/* TODO: SOFTIRQ can be faster, but be careful with softirq context */
 	err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_size,
-				wr_queue_size, wr_queue_size,
+				max_send_wr, max_recv_wr,
 				IB_POLL_WORKQUEUE);
 	if (err) {
 		rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
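For a concrete sense of the new sizing, here is illustrative arithmetic for the sess_queue_depth=128 case quoted in the commit message, assuming wr_limit does not clamp and a device advertising max_qp_wr = 32768 (an assumed figure, not from the patch):

  new, always_invalidate on:  max_send_wr = 128 * (1 + 4) + 1 = 641, max_recv_wr = 128 + 1 = 129, cq_size = 770
  new, always_invalidate off: max_send_wr = 128 * (1 + 2) + 1 = 385, max_recv_wr = 129, cq_size = 514
  old: wr_queue_size = 32768 / 3 = 10922 for both the send and recv queues

The drop from ~10922 WRs per queue to a few hundred is what produces the 188 MB to 65 MB per-session reduction reported above.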