Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_rx.c')
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 169
1 file changed, 147 insertions(+), 22 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index dbb1c6323967..65828af120b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -34,22 +34,39 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
-#include <linux/indirect_call_wrapper.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
#include "en.h"
+#include "en/txrx.h"
#include "en_tc.h"
#include "eswitch.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
+#include "accel/ipsec.h"
+#include "fpga/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
#include "lib/clock.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/health.h"
+#include "en/params.h"
+
+static struct sk_buff *
+mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+ u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+static struct sk_buff *
+mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+ u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+
+const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
+ .handle_rx_cqe = mlx5e_handle_rx_cqe,
+ .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
+};

static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
@@ -370,7 +387,7 @@ static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
mlx5e_put_rx_frag(rq, wi, recycle);
}

-void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
+static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
@@ -537,14 +554,14 @@ err:
return err;
}

-void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
+static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
/* Don't recycle, this function is called on rq/netdev close */
mlx5e_free_rx_mpwqe(rq, wi, false);
}

-bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
+INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
u8 wqe_bulk;
@@ -578,6 +595,33 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
return !!err;
}

+void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
+{
+ u16 sqcc;
+
+ sqcc = sq->cc;
+
+ while (sqcc != sq->pc) {
+ struct mlx5e_icosq_wqe_info *wi;
+ u16 ci;
+
+ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+ wi = &sq->db.wqe_info[ci];
+ sqcc += wi->num_wqebbs;
+#ifdef CONFIG_MLX5_EN_TLS
+ switch (wi->wqe_type) {
+ case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
+ mlx5e_ktls_handle_ctx_completion(wi);
+ break;
+ case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
+ mlx5e_ktls_handle_get_psv_completion(wi, sq);
+ break;
+ }
+#endif
+ }
+ sq->cc = sqcc;
+}
+
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
@@ -633,6 +677,16 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
break;
case MLX5E_ICOSQ_WQE_NOP:
break;
+#ifdef CONFIG_MLX5_EN_TLS
+ case MLX5E_ICOSQ_WQE_UMR_TLS:
+ break;
+ case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
+ mlx5e_ktls_handle_ctx_completion(wi);
+ break;
+ case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
+ mlx5e_ktls_handle_get_psv_completion(wi, sq);
+ break;
+#endif
default:
netdev_WARN_ONCE(cq->channel->netdev,
"Bad WQE type in ICOSQ WQE info: 0x%x\n",
@@ -648,7 +702,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
return i;
}

-bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
+INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
{
struct mlx5e_icosq *sq = &rq->channel->icosq;
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
@@ -936,9 +990,14 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
goto csum_unnecessary;

if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
- if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
+ u8 ipproto = get_ip_proto(skb, network_depth, proto);
+
+ if (unlikely(ipproto == IPPROTO_SCTP))
goto csum_unnecessary;

+ if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
+ goto csum_none;
+
stats->csum_complete++;
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
@@ -982,9 +1041,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
skb->mac_len = ETH_HLEN;

-#ifdef CONFIG_MLX5_EN_TLS
- mlx5e_tls_handle_rx_skb(netdev, skb, &cqe_bcnt);
-#endif
+ mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
+
+ if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
+ mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);

if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
@@ -1063,7 +1123,7 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
xdp->frame_sz = rq->buff.frame0_sz;
}

-struct sk_buff *
+static struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
@@ -1103,7 +1163,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
return skb;
}

-struct sk_buff *
+static struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
@@ -1152,11 +1212,13 @@ static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;

if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
- !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state))
+ !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
+ mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
queue_work(rq->channel->priv->wq, &rq->recover_work);
+ }
}

-void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
@@ -1199,7 +1261,7 @@ wq_cyc_pop:
}

#ifdef CONFIG_MLX5_ESWITCH
-void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct net_device *netdev = rq->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1221,7 +1283,10 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto free_wqe;
}

- skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+ skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
+ mlx5e_skb_from_cqe_linear,
+ mlx5e_skb_from_cqe_nonlinear,
+ rq, cqe, wi, cqe_bcnt);
if (!skb) {
/* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -1251,8 +1316,7 @@ wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}

-void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
- struct mlx5_cqe64 *cqe)
+static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
@@ -1310,9 +1374,14 @@ mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
return;

wq = &rq->mpwqe.wq;
wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
mlx5e_free_rx_mpwqe(rq, wi, true);
mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
+
+const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
+ .handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
+ .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
+};
#endif
-struct sk_buff *
+static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
@@ -1358,7 +1427,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
return skb;
}

-struct sk_buff *
+static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
@@ -1408,7 +1477,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return skb;
}

-void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
@@ -1604,7 +1673,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
stats->bytes += cqe_bcnt;
}

-void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
@@ -1640,11 +1709,15 @@ wq_free_wqe:
mlx5_wq_cyc_pop(wq);
}

+const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
+ .handle_rx_cqe = mlx5i_handle_rx_cqe,
+ .handle_rx_cqe_mpwqe = NULL, /* Not supported */
+};
#endif /* CONFIG_MLX5_CORE_IPOIB */

#ifdef CONFIG_MLX5_EN_IPSEC
-void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
@@ -1681,3 +1754,55 @@ wq_free_wqe:
mlx5_wq_cyc_pop(wq);
}
#endif /* CONFIG_MLX5_EN_IPSEC */
+
+int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
+{
+ struct mlx5_core_dev *mdev = rq->mdev;
+ struct mlx5e_channel *c = rq->channel;
+
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
+ mlx5e_xsk_skb_from_cqe_mpwrq_linear :
+ mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
+ mlx5e_skb_from_cqe_mpwrq_linear :
+ mlx5e_skb_from_cqe_mpwrq_nonlinear;
+ rq->post_wqes = mlx5e_post_rx_mpwqes;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
+
+ rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (MLX5_IPSEC_DEV(mdev)) {
+ netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
+ return -EINVAL;
+ }
+#endif
+ if (!rq->handle_rx_cqe) {
+ netdev_err(c->netdev, "RX handler of MPWQE RQ is not set\n");
+ return -EINVAL;
+ }
+ break;
+ default: /* MLX5_WQ_TYPE_CYCLIC */
+ rq->wqe.skb_from_cqe = xsk ?
+ mlx5e_xsk_skb_from_cqe_linear :
+ mlx5e_rx_is_linear_skb(params, NULL) ?
+ mlx5e_skb_from_cqe_linear :
+ mlx5e_skb_from_cqe_nonlinear;
+ rq->post_wqes = mlx5e_post_rx_wqes;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
+ c->priv->ipsec)
+ rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
+ else
+#endif
+ rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe;
+ if (!rq->handle_rx_cqe) {
+ netdev_err(c->netdev, "RX handler of RQ is not set\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
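
For orientation, here is a minimal standalone C sketch of the const handler-table pattern this patch introduces (mlx5e_rx_handlers_nic, mlx5e_rx_handlers_rep, mlx5i_rx_handlers, selected per work-queue type in mlx5e_rq_set_handlers). All names below (rx_handlers, rq_set_handlers, nic_rx_cqe) are illustrative stand-ins, not the mlx5 API: each profile exports a const table of RX completion handlers, setup code picks the entry for the RQ's WQ type, and setup fails cleanly when a profile leaves a handler unset.

/*
 * Illustrative sketch only; names are made up, not the mlx5 API.
 */
#include <stdio.h>
#include <stddef.h>

struct cqe { int id; };
struct rq;

typedef void (*rx_cqe_fn)(struct rq *rq, struct cqe *cqe);

struct rx_handlers {
	rx_cqe_fn handle_rx_cqe;        /* cyclic WQ */
	rx_cqe_fn handle_rx_cqe_mpwqe;  /* striding WQ; NULL if unsupported */
};

struct rq {
	int striding;                   /* stands in for the WQ-type switch */
	rx_cqe_fn handle_rx_cqe;
};

static void nic_rx_cqe(struct rq *rq, struct cqe *cqe)
{
	(void)rq;
	printf("handled cqe %d\n", cqe->id);
}

static const struct rx_handlers nic_rx_handlers = {
	.handle_rx_cqe       = nic_rx_cqe,
	.handle_rx_cqe_mpwqe = NULL,    /* not supported, like mlx5i_rx_handlers */
};

/* Mirrors the shape of mlx5e_rq_set_handlers(): pick by WQ type, validate. */
static int rq_set_handlers(struct rq *rq, const struct rx_handlers *h)
{
	rq->handle_rx_cqe = rq->striding ? h->handle_rx_cqe_mpwqe
					 : h->handle_rx_cqe;
	return rq->handle_rx_cqe ? 0 : -1;
}

int main(void)
{
	struct rq rq = { .striding = 0 };
	struct cqe cqe = { .id = 7 };

	if (rq_set_handlers(&rq, &nic_rx_handlers) == 0)
		rq.handle_rx_cqe(&rq, &cqe);
	return 0;
}

Keeping the tables const and the individual handlers static (as the patch does) confines the function pointers to one file and lets each profile be wired up through a single, read-only structure.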
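The patch also converts direct calls through rq->wqe.skb_from_cqe into INDIRECT_CALL_2() and marks the post-WQE functions INDIRECT_CALLABLE_SCOPE. Below is a standalone sketch of that pattern; the CALL_2 macro and all function names here are illustrative, not the kernel's. Comparing the pointer against the expected targets lets the common path take a direct, branch-predicted call instead of a retpoline-penalized indirect one, falling back to a true indirect call only when neither candidate matches.

/*
 * Illustrative sketch of the indirect-call-wrapper idea; not kernel code.
 */
#include <stdio.h>

struct rq;
typedef long (*skb_from_cqe_fn)(struct rq *rq, long cqe_bcnt);

static long skb_linear(struct rq *rq, long cqe_bcnt)
{
	(void)rq;
	return cqe_bcnt;        /* stand-in for building a linear skb */
}

static long skb_nonlinear(struct rq *rq, long cqe_bcnt)
{
	(void)rq;
	return -cqe_bcnt;       /* stand-in for the fragmented path */
}

/* Try the two likely targets directly; fall back to an indirect call. */
#define CALL_2(f, f2, f1, ...)			\
	((f) == (f2) ? (f2)(__VA_ARGS__) :	\
	 (f) == (f1) ? (f1)(__VA_ARGS__) :	\
		       (f)(__VA_ARGS__))

int main(void)
{
	skb_from_cqe_fn fn = skb_linear;

	/* fn matches skb_linear at runtime, so the direct branch is taken. */
	printf("%ld\n", CALL_2(fn, skb_linear, skb_nonlinear, NULL, 64));
	return 0;
}

This is why the real handlers must stay visible to the caller's translation unit: INDIRECT_CALLABLE_SCOPE keeps them addressable for the comparison while still allowing them to be static in non-retpoline builds.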