author     Michal Kalderon <Michal.Kalderon@cavium.com>  2017-10-09 12:37:48 +0300
committer  David S. Miller <davem@davemloft.net>         2017-10-09 10:21:26 -0700
commit     6f34a284f36399501fcc034dc4522a2d8d9fa6c9 (patch)
tree       ae9c9c0a70a0ed8b4dd166da54f9720913fcf5d2
parent     89d65113097072de7936a2aea2f819818a7c987a (diff)
qed: Add LL2 slowpath handling
For iWARP unaligned MPA flow, a slowpath event of flushing an MPA connection
that entered an unaligned state is required. The flush ramrod is received on
the ll2 queue, and a pre-registered callback function is called to handle the
flush event.

Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c  | 40
-rw-r--r--  include/linux/qed/qed_ll2_if.h             |  5
2 files changed, 43 insertions, 2 deletions
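
For reference, a minimal consumer-side sketch of the new hook (not part of this patch): the handler below matches the qed_ll2_slowpath_cb signature added in qed_ll2_if.h and is the kind of callback that qed_ll2_handle_slowpath() invokes when a CORE_RAMROD_RX_QUEUE_FLUSH CQE arrives. The handler name and its body are illustrative assumptions.

#include <linux/qed/qed_ll2_if.h>

/* Hypothetical handler matching the qed_ll2_slowpath_cb typedef added by this
 * patch.  The LL2 layer calls it with the Rx queue spinlock released, passing
 * the connection handle and the opaque data carried in the RX_QUEUE_FLUSH
 * slowpath CQE.
 */
static void example_ll2_slowpath_cb(void *cxt, u8 connection_handle,
                                    u32 opaque_data_0, u32 opaque_data_1)
{
        /* An iWARP consumer would use the opaque data here to identify the
         * unaligned MPA connection and complete its flush.
         */
}
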
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 8eb9645c880d..047f556ca62e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -423,6 +423,41 @@ static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
}

static int
+qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
+                        struct qed_ll2_info *p_ll2_conn,
+                        union core_rx_cqe_union *p_cqe,
+                        unsigned long *p_lock_flags)
+{
+        struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+        struct core_rx_slow_path_cqe *sp_cqe;
+
+        sp_cqe = &p_cqe->rx_cqe_sp;
+        if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
+                DP_NOTICE(p_hwfn,
+                          "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
+                          sp_cqe->ramrod_cmd_id);
+                return -EINVAL;
+        }
+
+        if (!p_ll2_conn->cbs.slowpath_cb) {
+                DP_NOTICE(p_hwfn,
+                          "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
+                return -EINVAL;
+        }
+
+        spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
+
+        p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
+                                    p_ll2_conn->my_id,
+                                    le32_to_cpu(sp_cqe->opaque_data.data[0]),
+                                    le32_to_cpu(sp_cqe->opaque_data.data[1]));
+
+        spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
+
+        return 0;
+}
+
+static int
qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
                              struct qed_ll2_info *p_ll2_conn,
                              union core_rx_cqe_union *p_cqe,
@@ -495,8 +530,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)

                switch (cqe->rx_cqe_sp.type) {
                case CORE_RX_CQE_TYPE_SLOW_PATH:
-                        DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
-                        rc = -EINVAL;
+                        rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
+                                                     cqe, &flags);
                        break;
                case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
                case CORE_RX_CQE_TYPE_REGULAR:
@@ -1214,6 +1249,7 @@ qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
        p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
        p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
        p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
+        p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
        p_ll2_info->cbs.cookie = cbs->cookie;

        return 0;
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index 95fdf02a3bbe..e755954d85fd 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -151,11 +151,16 @@ void (*qed_ll2_release_tx_packet_cb)(void *cxt,
                                     dma_addr_t first_frag_addr,
                                     bool b_last_fragment, bool b_last_packet);

+typedef
+void (*qed_ll2_slowpath_cb)(void *cxt, u8 connection_handle,
+                            u32 opaque_data_0, u32 opaque_data_1);
+
struct qed_ll2_cbs {
        qed_ll2_complete_rx_packet_cb rx_comp_cb;
        qed_ll2_release_rx_packet_cb rx_release_cb;
        qed_ll2_complete_tx_packet_cb tx_comp_cb;
        qed_ll2_release_tx_packet_cb tx_release_cb;
+        qed_ll2_slowpath_cb slowpath_cb;
        void *cookie;
};
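
Usage note: the new member is registered the same way as the existing completion callbacks; the consumer fills struct qed_ll2_cbs, and qed_ll2_set_cbs() (see the qed_ll2.c hunk above) copies slowpath_cb into the per-connection info. A hedged sketch with hypothetical names, reusing the handler sketched after the diffstat:

#include <linux/qed/qed_ll2_if.h>

/* Forward declaration of the hypothetical handler sketched earlier. */
static void example_ll2_slowpath_cb(void *cxt, u8 connection_handle,
                                    u32 opaque_data_0, u32 opaque_data_1);

static void example_register_slowpath(struct qed_ll2_cbs *cbs, void *cookie)
{
        /* The rx/tx completion and release callbacks are filled in exactly as
         * before this patch; the only new field is slowpath_cb.
         */
        cbs->slowpath_cb = example_ll2_slowpath_cb;
        cbs->cookie = cookie;
}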