Diffstat (limited to 'net/rds')
-rw-r--r--   net/rds/ib.h       1
-rw-r--r--   net/rds/ib_cm.c   12
2 files changed, 13 insertions, 0 deletions
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 1fe9f79fead5..540458928f3c 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -185,6 +185,7 @@ struct rds_ib_connection {
 
 	/* Endpoint role in connection */
 	bool			i_active_side;
+	atomic_t		i_cq_quiesce;
 
 	/* Send/Recv vectors */
 	int			i_scq_vector;
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 33c8584ada1f..ce3775abc6e7 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -128,6 +128,8 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
 			 ic->i_flowctl ? ", flow control" : "");
 	}
 
+	atomic_set(&ic->i_cq_quiesce, 0);
+
 	/* Init rings and fill recv. this needs to wait until protocol
 	 * negotiation is complete, since ring layout is different
 	 * from 3.1 to 4.1.
@@ -267,6 +269,10 @@ static void rds_ib_tasklet_fn_send(unsigned long data)
 
 	rds_ib_stats_inc(s_ib_tasklet_call);
 
+	/* if cq has been already reaped, ignore incoming cq event */
+	if (atomic_read(&ic->i_cq_quiesce))
+		return;
+
 	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
 	ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
 	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
@@ -308,6 +314,10 @@ static void rds_ib_tasklet_fn_recv(unsigned long data)
 
 	rds_ib_stats_inc(s_ib_tasklet_call);
 
+	/* if cq has been already reaped, ignore incoming cq event */
+	if (atomic_read(&ic->i_cq_quiesce))
+		return;
+
 	memset(&state, 0, sizeof(state));
 	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
 	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
@@ -804,6 +814,8 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
 		tasklet_kill(&ic->i_send_tasklet);
 		tasklet_kill(&ic->i_recv_tasklet);
 
+		atomic_set(&ic->i_cq_quiesce, 1);
+
 		/* first destroy the ib state that generates callbacks */
 		if (ic->i_cm_id->qp)
 			rdma_destroy_qp(ic->i_cm_id);
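The pattern the patch adds is small: rds_ib_cm_connect_complete() clears i_cq_quiesce when a connection comes up, rds_ib_conn_path_shutdown() sets it after killing the tasklets and before destroying the QP/CQ, and both completion tasklets return early when the flag is set, so a CQ event that fires late is ignored instead of touching reaped resources. The following is a minimal userspace sketch of that ordering, not the kernel code; fake_conn, handle_completion and shutdown_conn are made-up names used only to illustrate the idea, assuming C11 atomics stand in for the kernel's atomic_t.

/* Userspace sketch of the quiesce-flag pattern in this patch. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_conn {
	atomic_int cq_quiesce;	/* plays the role of ic->i_cq_quiesce */
	bool cq_valid;		/* stands in for the real CQ/QP resources */
};

/* Plays the role of rds_ib_tasklet_fn_send()/_recv(). */
static void handle_completion(struct fake_conn *c)
{
	/* if the cq has already been reaped, ignore the stale event */
	if (atomic_load(&c->cq_quiesce))
		return;

	if (c->cq_valid)
		printf("polled completion queue\n");
}

/* Plays the role of rds_ib_conn_path_shutdown(). */
static void shutdown_conn(struct fake_conn *c)
{
	/* handlers are assumed already stopped (tasklet_kill in the patch) */
	atomic_store(&c->cq_quiesce, 1);	/* mark CQ quiesced first */
	c->cq_valid = false;			/* then tear down the resources */
}

int main(void)
{
	struct fake_conn c = { .cq_quiesce = 0, .cq_valid = true };

	handle_completion(&c);	/* normal path: polls the CQ */
	shutdown_conn(&c);
	handle_completion(&c);	/* late event: dropped by the flag */
	return 0;
}

In the sketch, as in the diff, the flag is raised before the resources go away and checked before they are used, so any event delivered after shutdown degrades to a no-op.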