author	Dust Li <dust.li@linux.alibaba.com>	2022-03-01 17:44:01 +0800
committer	David S. Miller <davem@davemloft.net>	2022-03-01 14:25:12 +0000
commit	a505cce6f7cfaf2aa2385aab7286063c96444526 (patch)
tree	7f42d2f2684a796dd4966aacbe8f2f29247dc231 /net/smc
parent	6bf536eb5c8ca011d1ff57b5c5f7c57ceac06a37 (diff)
net/smc: don't req_notify until all CQEs drained
When we are handling softirq workload, enabled hardirqs may interrupt the current softirq routine and then try to raise the softirq again. This only wastes CPU cycles and brings no real gain.

Since IB_CQ_REPORT_MISSED_EVENTS already guarantees that when ib_req_notify_cq() returns 0 it is safe to wait for the next event, there is no need to poll the CQ again in that case.

This patch keeps the completion interrupt (hardirq) disabled while the softirq is running and re-arms the CQ only after the softirq has drained it, much like NAPI.

Co-developed-by: Guangguan Wang <guangguan.wang@linux.alibaba.com>
Signed-off-by: Guangguan Wang <guangguan.wang@linux.alibaba.com>
Signed-off-by: Dust Li <dust.li@linux.alibaba.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
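To make the pattern concrete, the following is a minimal sketch of the drain-then-re-arm loop the patch converges on, reduced to a generic CQ. The names drain_and_rearm(), handle_wc() and POLL_BATCH are hypothetical illustrations; only ib_poll_cq(), ib_req_notify_cq() and the IB_CQ_* flags are the actual kernel verbs interface used in the patch below.

/* Hedged sketch of the drain-then-re-arm pattern (not SMC code). */
#include <rdma/ib_verbs.h>

#define POLL_BATCH 16	/* hypothetical, stands in for SMC_WR_MAX_POLL_CQE */

static void handle_wc(struct ib_wc *wc)	/* placeholder for smc_wr_*_process_cqe*() */
{
}

static void drain_and_rearm(struct ib_cq *cq)
{
	struct ib_wc wc[POLL_BATCH];
	int i, rc;

again:
	do {
		rc = ib_poll_cq(cq, POLL_BATCH, wc);
		for (i = 0; i < rc; i++)
			handle_wc(&wc[i]);
		if (rc < POLL_BATCH)
			break;	/* fewer than requested: the CQ is drained */
	} while (rc > 0);

	/* Re-arm only after draining.  With IB_CQ_REPORT_MISSED_EVENTS,
	 * a return of 0 means it is safe to sleep until the next interrupt;
	 * non-zero means completions may already be queued, so poll again.
	 */
	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS))
		goto again;
}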
Diffstat (limited to 'net/smc')
-rw-r--r--	net/smc/smc_wr.c	49
1 file changed, 28 insertions(+), 21 deletions(-)
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 24be1d03fef9..34d616406d51 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -137,25 +137,28 @@ static void smc_wr_tx_tasklet_fn(struct tasklet_struct *t)
 {
 	struct smc_ib_device *dev = from_tasklet(dev, t, send_tasklet);
 	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
-	int i = 0, rc;
-	int polled = 0;
+	int i, rc;
 
 again:
-	polled++;
 	do {
 		memset(&wc, 0, sizeof(wc));
 		rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
-		if (polled == 1) {
-			ib_req_notify_cq(dev->roce_cq_send,
-					 IB_CQ_NEXT_COMP |
-					 IB_CQ_REPORT_MISSED_EVENTS);
-		}
-		if (!rc)
-			break;
 		for (i = 0; i < rc; i++)
 			smc_wr_tx_process_cqe(&wc[i]);
+		if (rc < SMC_WR_MAX_POLL_CQE)
+			/* If < SMC_WR_MAX_POLL_CQE, the CQ should have been
+			 * drained, no need to poll again. --Guangguan Wang
+			 */
+			break;
 	} while (rc > 0);
-	if (polled == 1)
+
+	/* IB_CQ_REPORT_MISSED_EVENTS make sure if ib_req_notify_cq() returns
+	 * 0, it is safe to wait for the next event.
+	 * Else we must poll the CQ again to make sure we won't miss any event
+	 */
+	if (ib_req_notify_cq(dev->roce_cq_send,
+			     IB_CQ_NEXT_COMP |
+			     IB_CQ_REPORT_MISSED_EVENTS))
 		goto again;
 }
 
@@ -478,24 +481,28 @@ static void smc_wr_rx_tasklet_fn(struct tasklet_struct *t)
 {
 	struct smc_ib_device *dev = from_tasklet(dev, t, recv_tasklet);
 	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
-	int polled = 0;
 	int rc;
 
 again:
-	polled++;
 	do {
 		memset(&wc, 0, sizeof(wc));
 		rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc);
-		if (polled == 1) {
-			ib_req_notify_cq(dev->roce_cq_recv,
-					 IB_CQ_SOLICITED_MASK
-					 | IB_CQ_REPORT_MISSED_EVENTS);
-		}
-		if (!rc)
+		if (rc > 0)
+			smc_wr_rx_process_cqes(&wc[0], rc);
+		if (rc < SMC_WR_MAX_POLL_CQE)
+			/* If < SMC_WR_MAX_POLL_CQE, the CQ should have been
+			 * drained, no need to poll again. --Guangguan Wang
+			 */
 			break;
-		smc_wr_rx_process_cqes(&wc[0], rc);
 	} while (rc > 0);
-	if (polled == 1)
+
+	/* IB_CQ_REPORT_MISSED_EVENTS make sure if ib_req_notify_cq() returns
+	 * 0, it is safe to wait for the next event.
+	 * Else we must poll the CQ again to make sure we won't miss any event
+	 */
+	if (ib_req_notify_cq(dev->roce_cq_recv,
+			     IB_CQ_SOLICITED_MASK |
+			     IB_CQ_REPORT_MISSED_EVENTS))
 		goto again;
 }
 