author | Dust Li <dust.li@linux.alibaba.com> | 2022-03-04 17:17:19 +0800 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2022-03-06 10:57:12 +0000 |
commit | 925a24213b5cc80fcef8858e6dc1f97ea2b17afb (patch) | |
tree | c971de4c85ffef91afceeb6f8f6af0c54d5517fd | /net/smc |
parent | d59e3cbaef707f0d3dc1e3b6735cb25060ca74c2 (diff) | |
download | linux-925a24213b5cc80fcef8858e6dc1f97ea2b17afb.tar.bz2 | |
Revert "net/smc: don't req_notify until all CQEs drained"
This reverts commit a505cce6f7cfaf2aa2385aab7286063c96444526.
Leon says:
  We already discussed that. SMC should be changed to use
  RDMA CQ pool API
  drivers/infiniband/core/cq.c.
  ib_poll_handler() has much better implementation (tracing,
  IRQ rescheduling, proper error handling) than this SMC variant.
Since we will switch to ib_poll_handler() in the future,
revert this patch.
Link: https://lore.kernel.org/netdev/20220301105332.GA9417@linux.alibaba.com/
Suggested-by: Leon Romanovsky <leon@kernel.org>
Suggested-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: Dust Li <dust.li@linux.alibaba.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
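For context, the CQ pool API Leon points to is the completion-queue helper layer in drivers/infiniband/core/cq.c: the consumer allocates the CQ with ib_alloc_cq(), attaches an ib_cqe completion callback to each work request, and lets the core's ib_poll_handler() take care of polling, re-arming and IRQ rescheduling. A minimal sketch of that style follows; the demo_* names are illustrative only and are not taken from this patch or from any actual SMC conversion.

```c
#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Hypothetical per-send context; a real conversion would embed the
 * ib_cqe in SMC's existing pending-send bookkeeping instead.
 */
struct demo_tx_ctx {
	struct ib_cqe cqe;		/* wr.wr_cqe points here */
};

/* Invoked by the IB core's ib_poll_handler(); no hand-rolled
 * ib_poll_cq()/ib_req_notify_cq() loop is required.
 */
static void demo_tx_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct demo_tx_ctx *ctx = container_of(wc->wr_cqe,
						struct demo_tx_ctx, cqe);

	/* handle wc->status for this single completion, then reuse ctx */
	(void)ctx;
}

static int demo_setup(struct ib_device *ibdev, struct demo_tx_ctx *ctx,
		      struct ib_send_wr *wr)
{
	struct ib_cq *cq;

	/* The core owns polling and re-arming; IB_POLL_SOFTIRQ is roughly
	 * equivalent to the tasklet context SMC uses today.
	 */
	cq = ib_alloc_cq(ibdev, NULL, 128 /* nr_cqe */, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	ctx->cqe.done = demo_tx_done;
	wr->wr_cqe = &ctx->cqe;		/* replaces the wr_id cookie */
	return 0;
}
```

ib_cq_pool_get()/ib_cq_pool_put() provide the shared-pool variant of the same interface.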
Diffstat (limited to 'net/smc')
-rw-r--r-- | net/smc/smc_wr.c | 49 |
1 file changed, 21 insertions, 28 deletions
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 34d616406d51..24be1d03fef9 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -137,28 +137,25 @@ static void smc_wr_tx_tasklet_fn(struct tasklet_struct *t)
 {
 	struct smc_ib_device *dev = from_tasklet(dev, t, send_tasklet);
 	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
-	int i, rc;
+	int i = 0, rc;
+	int polled = 0;
 
 again:
+	polled++;
 	do {
 		memset(&wc, 0, sizeof(wc));
 		rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
+		if (polled == 1) {
+			ib_req_notify_cq(dev->roce_cq_send,
+					 IB_CQ_NEXT_COMP |
+					 IB_CQ_REPORT_MISSED_EVENTS);
+		}
+		if (!rc)
+			break;
 		for (i = 0; i < rc; i++)
 			smc_wr_tx_process_cqe(&wc[i]);
-		if (rc < SMC_WR_MAX_POLL_CQE)
-			/* If < SMC_WR_MAX_POLL_CQE, the CQ should have been
-			 * drained, no need to poll again. --Guangguan Wang
-			 */
-			break;
 	} while (rc > 0);
-
-	/* IB_CQ_REPORT_MISSED_EVENTS make sure if ib_req_notify_cq() returns
-	 * 0, it is safe to wait for the next event.
-	 * Else we must poll the CQ again to make sure we won't miss any event
-	 */
-	if (ib_req_notify_cq(dev->roce_cq_send,
-			     IB_CQ_NEXT_COMP |
-			     IB_CQ_REPORT_MISSED_EVENTS))
+	if (polled == 1)
 		goto again;
 }
 
@@ -481,28 +478,24 @@ static void smc_wr_rx_tasklet_fn(struct tasklet_struct *t)
 {
 	struct smc_ib_device *dev = from_tasklet(dev, t, recv_tasklet);
 	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
+	int polled = 0;
 	int rc;
 
 again:
+	polled++;
 	do {
 		memset(&wc, 0, sizeof(wc));
 		rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc);
-		if (rc > 0)
-			smc_wr_rx_process_cqes(&wc[0], rc);
-		if (rc < SMC_WR_MAX_POLL_CQE)
-			/* If < SMC_WR_MAX_POLL_CQE, the CQ should have been
-			 * drained, no need to poll again. --Guangguan Wang
-			 */
+		if (polled == 1) {
+			ib_req_notify_cq(dev->roce_cq_recv,
+					 IB_CQ_SOLICITED_MASK
+					 | IB_CQ_REPORT_MISSED_EVENTS);
+		}
+		if (!rc)
 			break;
+		smc_wr_rx_process_cqes(&wc[0], rc);
 	} while (rc > 0);
-
-	/* IB_CQ_REPORT_MISSED_EVENTS make sure if ib_req_notify_cq() returns
-	 * 0, it is safe to wait for the next event.
-	 * Else we must poll the CQ again to make sure we won't miss any event
-	 */
-	if (ib_req_notify_cq(dev->roce_cq_recv,
-			     IB_CQ_SOLICITED_MASK |
-			     IB_CQ_REPORT_MISSED_EVENTS))
+	if (polled == 1)
 		goto again;
 }
 
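The hunks above drop the drain-then-re-arm variant (and its explanatory comments) and restore the polled-counter scheme, in which the CQ is re-armed on the first pass and the loop is simply retried once. For readers unfamiliar with the idiom the removed comments refer to, a generic drain-then-re-arm loop looks roughly like the sketch below, with hypothetical demo_* names rather than the SMC functions; ib_poll_handler() in drivers/infiniband/core/cq.c implements the same idea with budgets, tracing and error handling.

```c
#include <rdma/ib_verbs.h>

#define DEMO_BUDGET 16			/* illustrative batch size */

static void demo_handle_one(struct ib_wc *wc)
{
	/* consumer-specific completion handling, e.g. check wc->status */
}

static void demo_drain_and_rearm(struct ib_cq *cq)
{
	struct ib_wc wc[DEMO_BUDGET];
	int n, i;

again:
	/* Drain the CQ completely... */
	do {
		n = ib_poll_cq(cq, DEMO_BUDGET, wc);
		for (i = 0; i < n; i++)
			demo_handle_one(&wc[i]);
	} while (n > 0);

	/* ...then re-arm notifications.  With IB_CQ_REPORT_MISSED_EVENTS a
	 * non-zero return means completions were queued after the last poll,
	 * so poll again instead of waiting for an interrupt that may never
	 * arrive.
	 */
	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS))
		goto again;
}
```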