author    | Jakub Kicinski <kuba@kernel.org> | 2022-12-09 20:06:34 -0800
committer | Jakub Kicinski <kuba@kernel.org> | 2022-12-09 20:06:35 -0800
commit    | dd8b3a802b64adf059a49a68f1bdca7846e492fc (patch)
tree      | 2cb39d19e1b9763967e01b83c985f135057de21e /drivers/net/ethernet
parent    | 5fc11a401a8dc491b326d2c916b07d22e7ac8833 (diff)
parent    | abe2343d37c2b4361547d5d31e17340ff9ec7356 (diff)
download  | linux-dd8b3a802b64adf059a49a68f1bdca7846e492fc.tar.bz2
Merge tag 'ipsec-next-2022-12-09' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next
Steffen Klassert says:
====================
ipsec-next 2022-12-09
1) Add xfrm packet offload core API.
From Leon Romanovsky.
2) Add xfrm packet offload support for mlx5.
From Leon Romanovsky and Raed Salem.
3) Fix a typo in an error message.
From Colin Ian King.
* tag 'ipsec-next-2022-12-09' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next: (38 commits)
xfrm: Fix spelling mistake "oflload" -> "offload"
net/mlx5e: Open mlx5 driver to accept IPsec packet offload
net/mlx5e: Handle ESN update events
net/mlx5e: Handle hardware IPsec limits events
net/mlx5e: Update IPsec soft and hard limits
net/mlx5e: Store all XFRM SAs in Xarray
net/mlx5e: Provide intermediate pointer to access IPsec struct
net/mlx5e: Skip IPsec encryption for TX path without matching policy
net/mlx5e: Add statistics for Rx/Tx IPsec offloaded flows
net/mlx5e: Improve IPsec flow steering autogroup
net/mlx5e: Configure IPsec packet offload flow steering
net/mlx5e: Use same coding pattern for Rx and Tx flows
net/mlx5e: Add XFRM policy offload logic
net/mlx5e: Create IPsec policy offload tables
net/mlx5e: Generalize creation of default IPsec miss group and rule
net/mlx5e: Group IPsec miss handles into separate struct
net/mlx5e: Make clear what IPsec rx_err does
net/mlx5e: Flatten the IPsec RX add rule path
net/mlx5e: Refactor FTE setup code to be more clear
net/mlx5e: Move IPsec flow table creation to separate function
...
====================
Link: https://lore.kernel.org/r/20221209093310.4018731-1-steffen.klassert@secunet.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
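
The core of the series is a new offload type: states now carry x->xso.type, either XFRM_DEV_OFFLOAD_CRYPTO (the existing crypto-only mode) or XFRM_DEV_OFFLOAD_PACKET (the new full-packet mode). Drivers that only implement crypto offload must reject packet-mode states in their .xdo_dev_state_add callback, which is exactly the guard the chelsio, ixgbe and ixgbevf hunks below add. A minimal sketch of that pattern (the driver function name here is illustrative, not from the patch):

static int example_ipsec_add_sa(struct xfrm_state *xs)
{
	/* Packet offload states carry policy and lifetime duties that
	 * crypto-only hardware cannot honor, so refuse them and let
	 * the stack fall back to software processing.
	 */
	if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
		netdev_err(xs->xso.real_dev, "Unsupported ipsec offload type\n");
		return -EINVAL;
	}

	/* ... existing crypto-offload SA programming continues here ... */
	return 0;
}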
Diffstat (limited to 'drivers/net/ethernet')
19 files changed, 1537 insertions, 445 deletions
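
One detail worth tracing before the diff: the new mlx5e_ipsec_init_limits() below programs packet lifetimes into hardware that has only a single down-counting counter plus a soft-event comparator, so the driver keeps hard_packet_limit as-is and sets the soft comparator to hard minus soft. A standalone arithmetic sketch of that substitution (plain userspace C, not driver code), using the values from the comment in the diff:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* The XFRM user asks for a soft event after 2 packets and a
	 * hard event after 9 packets (the example used in the diff).
	 */
	uint64_t hard = 9, soft = 2;
	uint64_t comparator = hard - soft;	/* programmed soft limit: 7 */
	uint64_t counter = hard;		/* hardware counts down from 9 */

	for (uint64_t sent = 1; counter; sent++) {
		counter--;			/* one packet processed */
		if (counter == comparator)
			printf("soft event after %llu packets\n",
			       (unsigned long long)sent);
		if (!counter)
			printf("hard event after %llu packets\n",
			       (unsigned long long)sent);
	}
	return 0;
}

The output ("soft event after 2 packets", "hard event after 9 packets") matches what the user configured, even though the value actually written to hardware for the soft limit is 7.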
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c index 585590520076..ca21794281d6 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c @@ -283,6 +283,10 @@ static int ch_ipsec_xfrm_add_state(struct xfrm_state *x) pr_debug("Cannot offload xfrm states with geniv other than seqiv\n"); return -EINVAL; } + if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) { + pr_debug("Unsupported xfrm offload\n"); + return -EINVAL; + } sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL); if (!sa_entry) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 774de63dd93a..53a969e34883 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -585,6 +585,11 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) return -EINVAL; } + if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) { + netdev_err(dev, "Unsupported ipsec offload type\n"); + return -EINVAL; + } + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { struct rx_sa rsa; diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c index 9984ebc62d78..c1cf540d162a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -280,6 +280,11 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs) return -EINVAL; } + if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) { + netdev_err(dev, "Unsupported ipsec offload type\n"); + return -EINVAL; + } + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { struct rx_sa rsa; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 65790ff58a74..2d77fb8a8a01 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -1245,4 +1245,5 @@ int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_t int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi); int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats); #endif +int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey); #endif /* __MLX5_EN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index bf2741eb7f9b..379c6dc9a3be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -84,7 +84,8 @@ enum { MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1, #endif #ifdef CONFIG_MLX5_EN_IPSEC - MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1, + MLX5E_ACCEL_FS_POL_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1, + MLX5E_ACCEL_FS_ESP_FT_LEVEL, MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL, #endif }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c index 9c1c24da9453..78af8a3175bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c @@ -162,7 +162,6 @@ mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev, MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER); aso_ctrl = &aso_wqe->aso_ctrl; - memset(aso_ctrl, 0, sizeof(*aso_ctrl)); aso_ctrl->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE << 6; aso_ctrl->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE | MLX5_ASO_ALWAYS_TRUE << 4; diff 
--git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 1b03ab03fc5a..bb9023957f74 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -45,55 +45,9 @@ static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x) return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle; } -struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec, - unsigned int handle) +static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x) { - struct mlx5e_ipsec_sa_entry *sa_entry; - struct xfrm_state *ret = NULL; - - rcu_read_lock(); - hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle) - if (sa_entry->handle == handle) { - ret = sa_entry->x; - xfrm_state_hold(ret); - break; - } - rcu_read_unlock(); - - return ret; -} - -static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry) -{ - unsigned int handle = sa_entry->ipsec_obj_id; - struct mlx5e_ipsec *ipsec = sa_entry->ipsec; - struct mlx5e_ipsec_sa_entry *_sa_entry; - unsigned long flags; - - rcu_read_lock(); - hash_for_each_possible_rcu(ipsec->sadb_rx, _sa_entry, hlist, handle) - if (_sa_entry->handle == handle) { - rcu_read_unlock(); - return -EEXIST; - } - rcu_read_unlock(); - - spin_lock_irqsave(&ipsec->sadb_rx_lock, flags); - sa_entry->handle = handle; - hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle); - spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags); - - return 0; -} - -static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry) -{ - struct mlx5e_ipsec *ipsec = sa_entry->ipsec; - unsigned long flags; - - spin_lock_irqsave(&ipsec->sadb_rx_lock, flags); - hash_del_rcu(&sa_entry->hlist); - spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags); + return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle; } static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry) @@ -129,9 +83,33 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry) return false; } -static void -mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, - struct mlx5_accel_esp_xfrm_attrs *attrs) +static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + struct xfrm_state *x = sa_entry->x; + + attrs->hard_packet_limit = x->lft.hard_packet_limit; + if (x->lft.soft_packet_limit == XFRM_INF) + return; + + /* Hardware decrements the hard_packet_limit counter through + * the operation, while it fires an event when soft_packet_limit + * is reached. It means that we need to substitute the numbers + * in order to properly count the soft limit. + * + * As an example: + * The XFRM user sets a soft limit of 2 and a hard limit of 9 and + * expects to see the soft event after 2 packets and the hard event + * after 9 packets.
In our case, the hard limit will be set + * to 9 and the soft limit comparator to 7, so the user gets the + * soft event after 2 packets. + */ + attrs->soft_packet_limit = + x->lft.hard_packet_limit - x->lft.soft_packet_limit; +} + +void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5_accel_esp_xfrm_attrs *attrs) { struct xfrm_state *x = sa_entry->x; struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm; @@ -157,33 +135,31 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, memcpy(&aes_gcm->salt, x->aead->alg_key + key_len, sizeof(aes_gcm->salt)); + attrs->authsize = crypto_aead_authsize(aead) / 4; /* in dwords */ + /* iv len */ aes_gcm->icv_len = x->aead->alg_icv_len; /* esn */ if (sa_entry->esn_state.trigger) { - attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED; + attrs->esn_trigger = true; attrs->esn = sa_entry->esn_state.esn; - if (sa_entry->esn_state.overlap) - attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; + attrs->esn_overlap = sa_entry->esn_state.overlap; + attrs->replay_window = x->replay_esn->replay_window; } - /* action */ - attrs->action = (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) ? - MLX5_ACCEL_ESP_ACTION_ENCRYPT : - MLX5_ACCEL_ESP_ACTION_DECRYPT; - /* flags */ - attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ? - MLX5_ACCEL_ESP_FLAGS_TRANSPORT : - MLX5_ACCEL_ESP_FLAGS_TUNNEL; - + attrs->dir = x->xso.dir; /* spi */ attrs->spi = be32_to_cpu(x->id.spi); /* source, destination ips */ memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr)); memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr)); - attrs->is_ipv6 = (x->props.family != AF_INET); + attrs->family = x->props.family; + attrs->type = x->xso.type; + attrs->reqid = x->props.reqid; + + mlx5e_ipsec_init_limits(sa_entry, attrs); } static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) @@ -215,11 +191,6 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n"); return -EINVAL; } - if (x->props.mode != XFRM_MODE_TRANSPORT && - x->props.mode != XFRM_MODE_TUNNEL) { - dev_info(&netdev->dev, "Only transport and tunnel xfrm states may be offloaded\n"); - return -EINVAL; - } if (x->id.proto != IPPROTO_ESP) { netdev_info(netdev, "Only ESP xfrm state may be offloaded\n"); return -EINVAL; @@ -253,6 +224,67 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n"); return -EINVAL; } + switch (x->xso.type) { + case XFRM_DEV_OFFLOAD_CRYPTO: + if (!(mlx5_ipsec_device_caps(priv->mdev) & + MLX5_IPSEC_CAP_CRYPTO)) { + netdev_info(netdev, "Crypto offload is not supported\n"); + return -EINVAL; + } + + if (x->props.mode != XFRM_MODE_TRANSPORT && + x->props.mode != XFRM_MODE_TUNNEL) { + netdev_info(netdev, "Only transport and tunnel xfrm states may be offloaded\n"); + return -EINVAL; + } + break; + case XFRM_DEV_OFFLOAD_PACKET: + if (!(mlx5_ipsec_device_caps(priv->mdev) & + MLX5_IPSEC_CAP_PACKET_OFFLOAD)) { + netdev_info(netdev, "Packet offload is not supported\n"); + return -EINVAL; + } + + if (x->props.mode != XFRM_MODE_TRANSPORT) { + netdev_info(netdev, "Only transport xfrm states may be offloaded in packet mode\n"); + return -EINVAL; + } + + if (x->replay_esn && x->replay_esn->replay_window != 32 && + x->replay_esn->replay_window != 64 && + x->replay_esn->replay_window != 128 && + x->replay_esn->replay_window != 256) { + netdev_info(netdev, + "Unsupported replay window size %u\n", +
x->replay_esn->replay_window); + return -EINVAL; + } + + if (!x->props.reqid) { + netdev_info(netdev, "Cannot offload without reqid\n"); + return -EINVAL; + } + + if (x->lft.hard_byte_limit != XFRM_INF || + x->lft.soft_byte_limit != XFRM_INF) { + netdev_info(netdev, + "Device doesn't support limits in bytes\n"); + return -EINVAL; + } + + if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit && + x->lft.hard_packet_limit != XFRM_INF) { + /* XFRM stack doesn't prevent such configuration :(. */ + netdev_info(netdev, + "Hard packet limit must be greater than soft one\n"); + return -EINVAL; + } + break; + default: + netdev_info(netdev, "Unsupported xfrm offload type %d\n", + x->xso.type); + return -EINVAL; + } return 0; } @@ -270,6 +302,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = NULL; struct net_device *netdev = x->xso.real_dev; + struct mlx5e_ipsec *ipsec; struct mlx5e_priv *priv; int err; @@ -277,6 +310,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) if (!priv->ipsec) return -EOPNOTSUPP; + ipsec = priv->ipsec; err = mlx5e_xfrm_validate_state(x); if (err) return err; @@ -288,7 +322,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) } sa_entry->x = x; - sa_entry->ipsec = priv->ipsec; + sa_entry->ipsec = ipsec; /* check esn */ mlx5e_ipsec_update_esn_state(sa_entry); @@ -299,25 +333,29 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) if (err) goto err_xfrm; - err = mlx5e_accel_ipsec_fs_add_rule(priv, sa_entry); + err = mlx5e_accel_ipsec_fs_add_rule(sa_entry); if (err) goto err_hw_ctx; - if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) { - err = mlx5e_ipsec_sadb_rx_add(sa_entry); - if (err) - goto err_add_rule; - } else { + /* We use *_bh() variant because xfrm_timer_handler(), which runs + * in softirq context, can reach our state delete logic and we need + * xa_erase_bh() there. + */ + err = xa_insert_bh(&ipsec->sadb, sa_entry->ipsec_obj_id, sa_entry, + GFP_KERNEL); + if (err) + goto err_add_rule; + + if (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ? 
mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv; - } INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state); x->xso.offload_handle = (unsigned long)sa_entry; - goto out; + return 0; err_add_rule: - mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry); + mlx5e_accel_ipsec_fs_del_rule(sa_entry); err_hw_ctx: mlx5_ipsec_free_sa_ctx(sa_entry); err_xfrm: @@ -329,18 +367,19 @@ out: static void mlx5e_xfrm_del_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; + struct mlx5e_ipsec_sa_entry *old; - if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) - mlx5e_ipsec_sadb_rx_del(sa_entry); + old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id); + WARN_ON(old != sa_entry); } static void mlx5e_xfrm_free_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); - struct mlx5e_priv *priv = netdev_priv(x->xso.dev); cancel_work_sync(&sa_entry->modify_work.work); - mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry); + mlx5e_accel_ipsec_fs_del_rule(sa_entry); mlx5_ipsec_free_sa_ctx(sa_entry); kfree(sa_entry); } @@ -359,23 +398,33 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv) if (!ipsec) return; - hash_init(ipsec->sadb_rx); - spin_lock_init(&ipsec->sadb_rx_lock); + xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC); ipsec->mdev = priv->mdev; ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0, priv->netdev->name); if (!ipsec->wq) goto err_wq; + if (mlx5_ipsec_device_caps(priv->mdev) & + MLX5_IPSEC_CAP_PACKET_OFFLOAD) { + ret = mlx5e_ipsec_aso_init(ipsec); + if (ret) + goto err_aso; + } + ret = mlx5e_accel_ipsec_fs_init(ipsec); if (ret) goto err_fs_init; + ipsec->fs = priv->fs; priv->ipsec = ipsec; netdev_dbg(priv->netdev, "IPSec attached to netdevice\n"); return; err_fs_init: + if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD) + mlx5e_ipsec_aso_cleanup(ipsec); +err_aso: destroy_workqueue(ipsec->wq); err_wq: kfree(ipsec); @@ -391,6 +440,8 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv) return; mlx5e_accel_ipsec_fs_cleanup(ipsec); + if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD) + mlx5e_ipsec_aso_cleanup(ipsec); destroy_workqueue(ipsec->wq); kfree(ipsec); priv->ipsec = NULL; @@ -426,6 +477,122 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x) queue_work(sa_entry->ipsec->wq, &modify_work->work); } +static void mlx5e_xfrm_update_curlft(struct xfrm_state *x) +{ + struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); + int err; + + lockdep_assert_held(&x->lock); + + if (sa_entry->attrs.soft_packet_limit == XFRM_INF) + /* Limits are not configured, as soft limit + * must be lower than hard limit.
+ */ + return; + + err = mlx5e_ipsec_aso_query(sa_entry, NULL); + if (err) + return; + + mlx5e_ipsec_aso_update_curlft(sa_entry, &x->curlft.packets); +} + +static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x) +{ + struct net_device *netdev = x->xdo.real_dev; + + if (x->type != XFRM_POLICY_TYPE_MAIN) { + netdev_info(netdev, "Cannot offload non-main policy types\n"); + return -EINVAL; + } + + /* Please pay attention that we support only one template */ + if (x->xfrm_nr > 1) { + netdev_info(netdev, "Cannot offload more than one template\n"); + return -EINVAL; + } + + if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN && + x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) { + netdev_info(netdev, "Cannot offload forward policy\n"); + return -EINVAL; + } + + if (!x->xfrm_vec[0].reqid) { + netdev_info(netdev, "Cannot offload policy without reqid\n"); + return -EINVAL; + } + + if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) { + netdev_info(netdev, "Unsupported xfrm offload type\n"); + return -EINVAL; + } + + return 0; +} + +static void +mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry, + struct mlx5_accel_pol_xfrm_attrs *attrs) +{ + struct xfrm_policy *x = pol_entry->x; + struct xfrm_selector *sel; + + sel = &x->selector; + memset(attrs, 0, sizeof(*attrs)); + + memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr)); + memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr)); + attrs->family = sel->family; + attrs->dir = x->xdo.dir; + attrs->action = x->action; + attrs->type = XFRM_DEV_OFFLOAD_PACKET; + attrs->reqid = x->xfrm_vec[0].reqid; +} + +static int mlx5e_xfrm_add_policy(struct xfrm_policy *x) +{ + struct net_device *netdev = x->xdo.real_dev; + struct mlx5e_ipsec_pol_entry *pol_entry; + struct mlx5e_priv *priv; + int err; + + priv = netdev_priv(netdev); + if (!priv->ipsec) + return -EOPNOTSUPP; + + err = mlx5e_xfrm_validate_policy(x); + if (err) + return err; + + pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL); + if (!pol_entry) + return -ENOMEM; + + pol_entry->x = x; + pol_entry->ipsec = priv->ipsec; + + mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs); + err = mlx5e_accel_ipsec_fs_add_pol(pol_entry); + if (err) + goto err_fs; + + x->xdo.offload_handle = (unsigned long)pol_entry; + return 0; + +err_fs: + kfree(pol_entry); + return err; +} + +static void mlx5e_xfrm_free_policy(struct xfrm_policy *x) +{ + struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x); + + mlx5e_accel_ipsec_fs_del_pol(pol_entry); + kfree(pol_entry); +} + static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = { .xdo_dev_state_add = mlx5e_xfrm_add_state, .xdo_dev_state_delete = mlx5e_xfrm_del_state, @@ -434,6 +601,18 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = { .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state, }; +static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = { + .xdo_dev_state_add = mlx5e_xfrm_add_state, + .xdo_dev_state_delete = mlx5e_xfrm_del_state, + .xdo_dev_state_free = mlx5e_xfrm_free_state, + .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok, + .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state, + + .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft, + .xdo_dev_policy_add = mlx5e_xfrm_add_policy, + .xdo_dev_policy_free = mlx5e_xfrm_free_policy, +}; + void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@ -443,7 +622,12 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv) return; mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n"); - netdev->xfrmdev_ops = 
&mlx5e_ipsec_xfrmdev_ops; + + if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD) + netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops; + else + netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops; + netdev->features |= NETIF_F_HW_ESP; netdev->hw_enc_features |= NETIF_F_HW_ESP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 4c47347d0ee2..a92e19c4c499 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -34,27 +34,14 @@ #ifndef __MLX5E_IPSEC_H__ #define __MLX5E_IPSEC_H__ -#ifdef CONFIG_MLX5_EN_IPSEC - #include <linux/mlx5/device.h> #include <net/xfrm.h> #include <linux/idr.h> +#include "lib/aso.h" #define MLX5E_IPSEC_SADB_RX_BITS 10 #define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L -enum mlx5_accel_esp_flags { - MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */ - MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0, - MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1, - MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2, -}; - -enum mlx5_accel_esp_action { - MLX5_ACCEL_ESP_ACTION_DECRYPT, - MLX5_ACCEL_ESP_ACTION_ENCRYPT, -}; - struct aes_gcm_keymat { u64 seq_iv; @@ -66,7 +53,6 @@ struct aes_gcm_keymat { }; struct mlx5_accel_esp_xfrm_attrs { - enum mlx5_accel_esp_action action; u32 esn; u32 spi; u32 flags; @@ -82,16 +68,37 @@ struct mlx5_accel_esp_xfrm_attrs { __be32 a6[4]; } daddr; - u8 is_ipv6; + u8 dir : 2; + u8 esn_overlap : 1; + u8 esn_trigger : 1; + u8 type : 2; + u8 family; + u32 replay_window; + u32 authsize; + u32 reqid; + u64 hard_packet_limit; + u64 soft_packet_limit; }; enum mlx5_ipsec_cap { MLX5_IPSEC_CAP_CRYPTO = 1 << 0, MLX5_IPSEC_CAP_ESN = 1 << 1, + MLX5_IPSEC_CAP_PACKET_OFFLOAD = 1 << 2, }; struct mlx5e_priv; +struct mlx5e_ipsec_hw_stats { + u64 ipsec_rx_pkts; + u64 ipsec_rx_bytes; + u64 ipsec_rx_drop_pkts; + u64 ipsec_rx_drop_bytes; + u64 ipsec_tx_pkts; + u64 ipsec_tx_bytes; + u64 ipsec_tx_drop_pkts; + u64 ipsec_tx_drop_bytes; +}; + struct mlx5e_ipsec_sw_stats { atomic64_t ipsec_rx_drop_sp_alloc; atomic64_t ipsec_rx_drop_sadb_miss; @@ -102,17 +109,38 @@ struct mlx5e_ipsec_sw_stats { atomic64_t ipsec_tx_drop_trailer; }; -struct mlx5e_accel_fs_esp; +struct mlx5e_ipsec_rx; struct mlx5e_ipsec_tx; +struct mlx5e_ipsec_work { + struct work_struct work; + struct mlx5e_ipsec *ipsec; + u32 id; +}; + +struct mlx5e_ipsec_aso { + u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)]; + dma_addr_t dma_addr; + struct mlx5_aso *aso; + /* IPsec ASO caches data on every query call, + * so in nested calls, we can use this boolean to save + * recursive calls to mlx5e_ipsec_aso_query() + */ + u8 use_cache : 1; +}; + struct mlx5e_ipsec { struct mlx5_core_dev *mdev; - DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS); - spinlock_t sadb_rx_lock; /* Protects sadb_rx */ + struct xarray sadb; struct mlx5e_ipsec_sw_stats sw_stats; + struct mlx5e_ipsec_hw_stats hw_stats; struct workqueue_struct *wq; - struct mlx5e_accel_fs_esp *rx_fs; - struct mlx5e_ipsec_tx *tx_fs; + struct mlx5e_flow_steering *fs; + struct mlx5e_ipsec_rx *rx_ipv4; + struct mlx5e_ipsec_rx *rx_ipv6; + struct mlx5e_ipsec_tx *tx; + struct mlx5e_ipsec_aso *aso; + struct notifier_block nb; }; struct mlx5e_ipsec_esn_state { @@ -123,7 +151,8 @@ struct mlx5e_ipsec_esn_state { struct mlx5e_ipsec_rule { struct mlx5_flow_handle *rule; - struct mlx5_modify_hdr *set_modify_hdr; + struct mlx5_modify_hdr *modify_hdr; + struct mlx5_pkt_reformat *pkt_reformat; }; struct mlx5e_ipsec_modify_state_work { @@ 
-132,9 +161,7 @@ struct mlx5e_ipsec_modify_state_work { }; struct mlx5e_ipsec_sa_entry { - struct hlist_node hlist; /* Item in SADB_RX hashtable */ struct mlx5e_ipsec_esn_state esn_state; - unsigned int handle; /* Handle in SADB_RX */ struct xfrm_state *x; struct mlx5e_ipsec *ipsec; struct mlx5_accel_esp_xfrm_attrs attrs; @@ -146,19 +173,43 @@ struct mlx5e_ipsec_sa_entry { struct mlx5e_ipsec_modify_state_work modify_work; }; +struct mlx5_accel_pol_xfrm_attrs { + union { + __be32 a4; + __be32 a6[4]; + } saddr; + + union { + __be32 a4; + __be32 a6[4]; + } daddr; + + u8 family; + u8 action; + u8 type : 2; + u8 dir : 2; + u32 reqid; +}; + +struct mlx5e_ipsec_pol_entry { + struct xfrm_policy *x; + struct mlx5e_ipsec *ipsec; + struct mlx5e_ipsec_rule ipsec_rule; + struct mlx5_accel_pol_xfrm_attrs attrs; +}; + +#ifdef CONFIG_MLX5_EN_IPSEC + void mlx5e_ipsec_init(struct mlx5e_priv *priv); void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv); void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv); -struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev, - unsigned int handle); - void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec); int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec); -int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, - struct mlx5e_ipsec_sa_entry *sa_entry); -void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv, - struct mlx5e_ipsec_sa_entry *sa_entry); +int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry); +void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry); +int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry); +void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry); int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry); void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry); @@ -168,11 +219,30 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev); void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry, const struct mlx5_accel_esp_xfrm_attrs *attrs); +int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec); +void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec); + +int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5_wqe_aso_ctrl_seg *data); +void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry, + u64 *packets); + +void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, + void *ipsec_stats); + +void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5_accel_esp_xfrm_attrs *attrs); static inline struct mlx5_core_dev * mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry) { return sa_entry->ipsec->mdev; } + +static inline struct mlx5_core_dev * +mlx5e_ipsec_pol2dev(struct mlx5e_ipsec_pol_entry *pol_entry) +{ + return pol_entry->ipsec->mdev; +} #else static inline void mlx5e_ipsec_init(struct mlx5e_priv *priv) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index b859e4a4c744..9f19f4b59a70 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -9,53 +9,67 @@ #define NUM_IPSEC_FTE BIT(15) -enum accel_fs_esp_type { - ACCEL_FS_ESP4, - ACCEL_FS_ESP6, - ACCEL_FS_ESP_NUM_TYPES, +struct mlx5e_ipsec_fc { + struct mlx5_fc *cnt; + struct mlx5_fc *drop; }; -struct mlx5e_ipsec_rx_err { - struct mlx5_flow_table *ft; - struct mlx5_flow_handle *rule; - struct 
mlx5_modify_hdr *copy_modify_hdr; +struct mlx5e_ipsec_ft { + struct mutex mutex; /* Protect changes to this struct */ + struct mlx5_flow_table *pol; + struct mlx5_flow_table *sa; + struct mlx5_flow_table *status; + u32 refcnt; }; -struct mlx5e_accel_fs_esp_prot { - struct mlx5_flow_table *ft; - struct mlx5_flow_group *miss_group; - struct mlx5_flow_handle *miss_rule; - struct mlx5_flow_destination default_dest; - struct mlx5e_ipsec_rx_err rx_err; - u32 refcnt; - struct mutex prot_mutex; /* protect ESP4/ESP6 protocol */ +struct mlx5e_ipsec_miss { + struct mlx5_flow_group *group; + struct mlx5_flow_handle *rule; }; -struct mlx5e_accel_fs_esp { - struct mlx5e_accel_fs_esp_prot fs_prot[ACCEL_FS_ESP_NUM_TYPES]; +struct mlx5e_ipsec_rx { + struct mlx5e_ipsec_ft ft; + struct mlx5e_ipsec_miss pol; + struct mlx5e_ipsec_miss sa; + struct mlx5e_ipsec_rule status; + struct mlx5e_ipsec_fc *fc; }; struct mlx5e_ipsec_tx { + struct mlx5e_ipsec_ft ft; + struct mlx5e_ipsec_miss pol; struct mlx5_flow_namespace *ns; - struct mlx5_flow_table *ft; - struct mutex mutex; /* Protect IPsec TX steering */ - u32 refcnt; + struct mlx5e_ipsec_fc *fc; }; /* IPsec RX flow steering */ -static enum mlx5_traffic_types fs_esp2tt(enum accel_fs_esp_type i) +static enum mlx5_traffic_types family2tt(u32 family) { - if (i == ACCEL_FS_ESP4) + if (family == AF_INET) return MLX5_TT_IPV4_IPSEC_ESP; return MLX5_TT_IPV6_IPSEC_ESP; } -static int rx_err_add_rule(struct mlx5e_priv *priv, - struct mlx5e_accel_fs_esp_prot *fs_prot, - struct mlx5e_ipsec_rx_err *rx_err) +static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns, + int level, int prio, + int max_num_groups) +{ + struct mlx5_flow_table_attr ft_attr = {}; + + ft_attr.autogroup.num_reserved_entries = 1; + ft_attr.autogroup.max_num_groups = max_num_groups; + ft_attr.max_fte = NUM_IPSEC_FTE; + ft_attr.level = level; + ft_attr.prio = prio; + + return mlx5_create_auto_grouped_flow_table(ns, &ft_attr); +} + +static int ipsec_status_rule(struct mlx5_core_dev *mdev, + struct mlx5e_ipsec_rx *rx, + struct mlx5_flow_destination *dest) { u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; - struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_flow_act flow_act = {}; struct mlx5_modify_hdr *modify_hdr; struct mlx5_flow_handle *fte; @@ -79,26 +93,26 @@ static int rx_err_add_rule(struct mlx5e_priv *priv, if (IS_ERR(modify_hdr)) { err = PTR_ERR(modify_hdr); - netdev_err(priv->netdev, - "fail to alloc ipsec copy modify_header_id err=%d\n", err); + mlx5_core_err(mdev, + "fail to alloc ipsec copy modify_header_id err=%d\n", err); goto out_spec; } /* create fte */ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; flow_act.modify_hdr = modify_hdr; - fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, - &fs_prot->default_dest, 1); + fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2); if (IS_ERR(fte)) { err = PTR_ERR(fte); - netdev_err(priv->netdev, "fail to add ipsec rx err copy rule err=%d\n", err); + mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err); goto out; } kvfree(spec); - rx_err->rule = fte; - rx_err->copy_modify_hdr = modify_hdr; + rx->status.rule = fte; + rx->status.modify_hdr = modify_hdr; return 0; out: @@ -108,13 +122,12 @@ out_spec: return err; } -static int rx_fs_create(struct mlx5e_priv *priv, - struct mlx5e_accel_fs_esp_prot *fs_prot) +static int ipsec_miss_create(struct mlx5_core_dev *mdev, + struct 
mlx5_flow_table *ft, + struct mlx5e_ipsec_miss *miss, + struct mlx5_flow_destination *dest) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5_flow_table *ft = fs_prot->ft; - struct mlx5_flow_group *miss_group; - struct mlx5_flow_handle *miss_rule; MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_spec *spec; u32 *flow_group_in; @@ -130,450 +143,888 @@ static int rx_fs_create(struct mlx5e_priv *priv, /* Create miss_group */ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1); - miss_group = mlx5_create_flow_group(ft, flow_group_in); - if (IS_ERR(miss_group)) { - err = PTR_ERR(miss_group); - netdev_err(priv->netdev, "fail to create ipsec rx miss_group err=%d\n", err); + miss->group = mlx5_create_flow_group(ft, flow_group_in); + if (IS_ERR(miss->group)) { + err = PTR_ERR(miss->group); + mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n", + err); goto out; } - fs_prot->miss_group = miss_group; /* Create miss rule */ - miss_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1); - if (IS_ERR(miss_rule)) { - mlx5_destroy_flow_group(fs_prot->miss_group); - err = PTR_ERR(miss_rule); - netdev_err(priv->netdev, "fail to create ipsec rx miss_rule err=%d\n", err); + miss->rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1); + if (IS_ERR(miss->rule)) { + mlx5_destroy_flow_group(miss->group); + err = PTR_ERR(miss->rule); + mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n", + err); goto out; } - fs_prot->miss_rule = miss_rule; out: kvfree(flow_group_in); kvfree(spec); return err; } -static void rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type) +static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx) { - struct mlx5e_accel_fs_esp_prot *fs_prot; - struct mlx5e_accel_fs_esp *accel_esp; - - accel_esp = priv->ipsec->rx_fs; - - /* The netdev unreg already happened, so all offloaded rule are already removed */ - fs_prot = &accel_esp->fs_prot[type]; + mlx5_del_flow_rules(rx->pol.rule); + mlx5_destroy_flow_group(rx->pol.group); + mlx5_destroy_flow_table(rx->ft.pol); - mlx5_del_flow_rules(fs_prot->miss_rule); - mlx5_destroy_flow_group(fs_prot->miss_group); - mlx5_destroy_flow_table(fs_prot->ft); + mlx5_del_flow_rules(rx->sa.rule); + mlx5_destroy_flow_group(rx->sa.group); + mlx5_destroy_flow_table(rx->ft.sa); - mlx5_del_flow_rules(fs_prot->rx_err.rule); - mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr); - mlx5_destroy_flow_table(fs_prot->rx_err.ft); + mlx5_del_flow_rules(rx->status.rule); + mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr); + mlx5_destroy_flow_table(rx->ft.status); } -static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type) +static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx, u32 family) { - struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(priv->fs, false); - struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false); - struct mlx5_flow_table_attr ft_attr = {}; - struct mlx5e_accel_fs_esp_prot *fs_prot; - struct mlx5e_accel_fs_esp *accel_esp; + struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false); + struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false); + struct mlx5_flow_destination dest[2]; struct mlx5_flow_table *ft; int err; - accel_esp = priv->ipsec->rx_fs; - fs_prot = &accel_esp->fs_prot[type]; - fs_prot->default_dest = - mlx5_ttc_get_default_dest(ttc, 
fs_esp2tt(type)); - - ft_attr.max_fte = 1; - ft_attr.autogroup.max_num_groups = 1; - ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL; - ft_attr.prio = MLX5E_NIC_PRIO; - ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); + ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL, + MLX5E_NIC_PRIO, 1); if (IS_ERR(ft)) return PTR_ERR(ft); - fs_prot->rx_err.ft = ft; - err = rx_err_add_rule(priv, fs_prot, &fs_prot->rx_err); + rx->ft.status = ft; + + dest[0] = mlx5_ttc_get_default_dest(ttc, family2tt(family)); + dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[1].counter_id = mlx5_fc_id(rx->fc->cnt); + err = ipsec_status_rule(mdev, rx, dest); if (err) goto err_add; /* Create FT */ - ft_attr.max_fte = NUM_IPSEC_FTE; - ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL; - ft_attr.prio = MLX5E_NIC_PRIO; - ft_attr.autogroup.num_reserved_entries = 1; - ft_attr.autogroup.max_num_groups = 1; - ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); + ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_LEVEL, MLX5E_NIC_PRIO, + 2); if (IS_ERR(ft)) { err = PTR_ERR(ft); goto err_fs_ft; } - fs_prot->ft = ft; + rx->ft.sa = ft; - err = rx_fs_create(priv, fs_prot); + err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest); if (err) goto err_fs; + ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_POL_FT_LEVEL, MLX5E_NIC_PRIO, + 2); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + goto err_pol_ft; + } + rx->ft.pol = ft; + memset(dest, 0x00, 2 * sizeof(*dest)); + dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[0].ft = rx->ft.sa; + err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest); + if (err) + goto err_pol_miss; + return 0; +err_pol_miss: + mlx5_destroy_flow_table(rx->ft.pol); +err_pol_ft: + mlx5_del_flow_rules(rx->sa.rule); + mlx5_destroy_flow_group(rx->sa.group); err_fs: - mlx5_destroy_flow_table(fs_prot->ft); + mlx5_destroy_flow_table(rx->ft.sa); err_fs_ft: - mlx5_del_flow_rules(fs_prot->rx_err.rule); - mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr); + mlx5_del_flow_rules(rx->status.rule); + mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr); err_add: - mlx5_destroy_flow_table(fs_prot->rx_err.ft); + mlx5_destroy_flow_table(rx->ft.status); return err; } -static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type) +static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev, + struct mlx5e_ipsec *ipsec, u32 family) { - struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false); - struct mlx5e_accel_fs_esp_prot *fs_prot; + struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false); struct mlx5_flow_destination dest = {}; - struct mlx5e_accel_fs_esp *accel_esp; + struct mlx5e_ipsec_rx *rx; int err = 0; - accel_esp = priv->ipsec->rx_fs; - fs_prot = &accel_esp->fs_prot[type]; - mutex_lock(&fs_prot->prot_mutex); - if (fs_prot->refcnt) + if (family == AF_INET) + rx = ipsec->rx_ipv4; + else + rx = ipsec->rx_ipv6; + + mutex_lock(&rx->ft.mutex); + if (rx->ft.refcnt) goto skip; /* create FT */ - err = rx_create(priv, type); + err = rx_create(mdev, ipsec, rx, family); if (err) goto out; /* connect */ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = fs_prot->ft; - mlx5_ttc_fwd_dest(ttc, fs_esp2tt(type), &dest); + dest.ft = rx->ft.pol; + mlx5_ttc_fwd_dest(ttc, family2tt(family), &dest); skip: - fs_prot->refcnt++; + rx->ft.refcnt++; out: - mutex_unlock(&fs_prot->prot_mutex); - return err; + mutex_unlock(&rx->ft.mutex); + if (err) + return ERR_PTR(err); + return rx; } -static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type) 
+static void rx_ft_put(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, + u32 family) { - struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false); - struct mlx5e_accel_fs_esp_prot *fs_prot; - struct mlx5e_accel_fs_esp *accel_esp; + struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false); + struct mlx5e_ipsec_rx *rx; + + if (family == AF_INET) + rx = ipsec->rx_ipv4; + else + rx = ipsec->rx_ipv6; - accel_esp = priv->ipsec->rx_fs; - fs_prot = &accel_esp->fs_prot[type]; - mutex_lock(&fs_prot->prot_mutex); - fs_prot->refcnt--; - if (fs_prot->refcnt) + mutex_lock(&rx->ft.mutex); + rx->ft.refcnt--; + if (rx->ft.refcnt) goto out; /* disconnect */ - mlx5_ttc_fwd_default_dest(ttc, fs_esp2tt(type)); + mlx5_ttc_fwd_default_dest(ttc, family2tt(family)); /* remove FT */ - rx_destroy(priv, type); + rx_destroy(mdev, rx); out: - mutex_unlock(&fs_prot->prot_mutex); + mutex_unlock(&rx->ft.mutex); } /* IPsec TX flow steering */ -static int tx_create(struct mlx5e_priv *priv) +static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx) { - struct mlx5_flow_table_attr ft_attr = {}; - struct mlx5e_ipsec *ipsec = priv->ipsec; + struct mlx5_flow_destination dest = {}; struct mlx5_flow_table *ft; int err; - ft_attr.max_fte = NUM_IPSEC_FTE; - ft_attr.autogroup.max_num_groups = 1; - ft = mlx5_create_auto_grouped_flow_table(ipsec->tx_fs->ns, &ft_attr); + ft = ipsec_ft_create(tx->ns, 1, 0, 4); + if (IS_ERR(ft)) + return PTR_ERR(ft); + + tx->ft.sa = ft; + + ft = ipsec_ft_create(tx->ns, 0, 0, 2); if (IS_ERR(ft)) { err = PTR_ERR(ft); - netdev_err(priv->netdev, "fail to create ipsec tx ft err=%d\n", err); - return err; + goto err_pol_ft; } - ipsec->tx_fs->ft = ft; + tx->ft.pol = ft; + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = tx->ft.sa; + err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest); + if (err) + goto err_pol_miss; return 0; + +err_pol_miss: + mlx5_destroy_flow_table(tx->ft.pol); +err_pol_ft: + mlx5_destroy_flow_table(tx->ft.sa); + return err; } -static int tx_ft_get(struct mlx5e_priv *priv) +static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev, + struct mlx5e_ipsec *ipsec) { - struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs; + struct mlx5e_ipsec_tx *tx = ipsec->tx; int err = 0; - mutex_lock(&tx_fs->mutex); - if (tx_fs->refcnt) + mutex_lock(&tx->ft.mutex); + if (tx->ft.refcnt) goto skip; - err = tx_create(priv); + err = tx_create(mdev, tx); if (err) goto out; skip: - tx_fs->refcnt++; + tx->ft.refcnt++; out: - mutex_unlock(&tx_fs->mutex); - return err; + mutex_unlock(&tx->ft.mutex); + if (err) + return ERR_PTR(err); + return tx; } -static void tx_ft_put(struct mlx5e_priv *priv) +static void tx_ft_put(struct mlx5e_ipsec *ipsec) { - struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs; + struct mlx5e_ipsec_tx *tx = ipsec->tx; - mutex_lock(&tx_fs->mutex); - tx_fs->refcnt--; - if (tx_fs->refcnt) + mutex_lock(&tx->ft.mutex); + tx->ft.refcnt--; + if (tx->ft.refcnt) goto out; - mlx5_destroy_flow_table(tx_fs->ft); + mlx5_del_flow_rules(tx->pol.rule); + mlx5_destroy_flow_group(tx->pol.group); + mlx5_destroy_flow_table(tx->ft.pol); + mlx5_destroy_flow_table(tx->ft.sa); out: - mutex_unlock(&tx_fs->mutex); + mutex_unlock(&tx->ft.mutex); } -static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs, - u32 ipsec_obj_id, - struct mlx5_flow_spec *spec, - struct mlx5_flow_act *flow_act) +static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr, + __be32 *daddr) { - u8 ip_version = attrs->is_ipv6 ? 
6 : 4; - - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS; + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; - /* ip_version */ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version); - MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ip_version); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4); + + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); +} - /* Non fragmented */ - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag); - MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0); +static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr, + __be32 *daddr) +{ + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6); + + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16); + memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16); + memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16); +} + +static void setup_fte_esp(struct mlx5_flow_spec *spec) +{ /* ESP header */ + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol); MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP); +} +static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi) +{ /* SPI number */ + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi); +} + +static void setup_fte_no_frags(struct mlx5_flow_spec *spec) +{ + /* Non fragmented */ + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0); +} + +static void setup_fte_reg_a(struct mlx5_flow_spec *spec) +{ + /* Add IPsec indicator in metadata_reg_a */ + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; + + MLX5_SET(fte_match_param, spec->match_criteria, + misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC); MLX5_SET(fte_match_param, spec->match_value, - misc_parameters.outer_esp_spi, attrs->spi); - - if (ip_version == 4) { - memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, - outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), - &attrs->saddr.a4, 4); - memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, - outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), - &attrs->daddr.a4, 4); - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 
- outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4); - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, - outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); - } else { - memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, - outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), - &attrs->saddr.a6, 16); - memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, - outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - &attrs->daddr.a6, 16); - memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, - outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), - 0xff, 16); - memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, - outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - 0xff, 16); - } + misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC); +} - flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC; - flow_act->crypto.obj_id = ipsec_obj_id; - flow_act->flags |= FLOW_ACT_NO_APPEND; +static void setup_fte_reg_c0(struct mlx5_flow_spec *spec, u32 reqid) +{ + /* Pass policy check before choosing this SA */ + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; + + MLX5_SET(fte_match_param, spec->match_criteria, + misc_parameters_2.metadata_reg_c_0, reqid); + MLX5_SET(fte_match_param, spec->match_value, + misc_parameters_2.metadata_reg_c_0, reqid); } -static int rx_add_rule(struct mlx5e_priv *priv, - struct mlx5e_ipsec_sa_entry *sa_entry) +static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir, + struct mlx5_flow_act *flow_act) { u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; - struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule; + enum mlx5_flow_namespace_type ns_type; + struct mlx5_modify_hdr *modify_hdr; + + MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); + switch (dir) { + case XFRM_DEV_OFFLOAD_IN: + MLX5_SET(set_action_in, action, field, + MLX5_ACTION_IN_FIELD_METADATA_REG_B); + ns_type = MLX5_FLOW_NAMESPACE_KERNEL; + break; + case XFRM_DEV_OFFLOAD_OUT: + MLX5_SET(set_action_in, action, field, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_0); + ns_type = MLX5_FLOW_NAMESPACE_EGRESS; + break; + default: + return -EINVAL; + } + + MLX5_SET(set_action_in, action, data, val); + MLX5_SET(set_action_in, action, offset, 0); + MLX5_SET(set_action_in, action, length, 32); + + modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action); + if (IS_ERR(modify_hdr)) { + mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n", + PTR_ERR(modify_hdr)); + return PTR_ERR(modify_hdr); + } + + flow_act->modify_hdr = modify_hdr; + flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + return 0; +} + +static int setup_pkt_reformat(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm_attrs *attrs, + struct mlx5_flow_act *flow_act) +{ + enum mlx5_flow_namespace_type ns_type = MLX5_FLOW_NAMESPACE_EGRESS; + struct mlx5_pkt_reformat_params reformat_params = {}; + struct mlx5_pkt_reformat *pkt_reformat; + u8 reformatbf[16] = {}; + __be32 spi; + + if (attrs->dir == XFRM_DEV_OFFLOAD_IN) { + reformat_params.type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT; + ns_type = MLX5_FLOW_NAMESPACE_KERNEL; + goto cmd; + } + + if (attrs->family == AF_INET) + reformat_params.type = + MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4; + else + reformat_params.type = + MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6; + + /* convert to network format */ + spi = htonl(attrs->spi); + memcpy(reformatbf, &spi, 4); + + reformat_params.param_0 = attrs->authsize; + reformat_params.size = sizeof(reformatbf); + reformat_params.data = &reformatbf; 
+ +cmd: + pkt_reformat = + mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type); + if (IS_ERR(pkt_reformat)) + return PTR_ERR(pkt_reformat); + + flow_act->pkt_reformat = pkt_reformat; + flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + return 0; +} + +static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) +{ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs; - u32 ipsec_obj_id = sa_entry->ipsec_obj_id; - struct mlx5_modify_hdr *modify_hdr = NULL; - struct mlx5e_accel_fs_esp_prot *fs_prot; + struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry); + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; struct mlx5_flow_destination dest = {}; - struct mlx5e_accel_fs_esp *accel_esp; struct mlx5_flow_act flow_act = {}; struct mlx5_flow_handle *rule; - enum accel_fs_esp_type type; struct mlx5_flow_spec *spec; - int err = 0; + struct mlx5e_ipsec_rx *rx; + int err; - accel_esp = priv->ipsec->rx_fs; - type = attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4; - fs_prot = &accel_esp->fs_prot[type]; - - err = rx_ft_get(priv, type); - if (err) - return err; + rx = rx_ft_get(mdev, ipsec, attrs->family); + if (IS_ERR(rx)) + return PTR_ERR(rx); spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) { err = -ENOMEM; - goto out_err; + goto err_alloc; } - setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act); + if (attrs->family == AF_INET) + setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4); + else + setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6); - /* Set bit[31] ipsec marker */ - /* Set bit[23-0] ipsec_obj_id */ - MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); - MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B); - MLX5_SET(set_action_in, action, data, (ipsec_obj_id | BIT(31))); - MLX5_SET(set_action_in, action, offset, 0); - MLX5_SET(set_action_in, action, length, 32); + setup_fte_spi(spec, attrs->spi); + setup_fte_esp(spec); + setup_fte_no_frags(spec); - modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL, - 1, action); - if (IS_ERR(modify_hdr)) { - err = PTR_ERR(modify_hdr); - netdev_err(priv->netdev, - "fail to alloc ipsec set modify_header_id err=%d\n", err); - modify_hdr = NULL; - goto out_err; + err = setup_modify_header(mdev, sa_entry->ipsec_obj_id | BIT(31), + XFRM_DEV_OFFLOAD_IN, &flow_act); + if (err) + goto err_mod_header; + + switch (attrs->type) { + case XFRM_DEV_OFFLOAD_PACKET: + err = setup_pkt_reformat(mdev, attrs, &flow_act); + if (err) + goto err_pkt_reformat; + break; + default: + break; } - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT | - MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC; + flow_act.crypto.obj_id = sa_entry->ipsec_obj_id; + flow_act.flags |= FLOW_ACT_NO_APPEND; + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - flow_act.modify_hdr = modify_hdr; - dest.ft = fs_prot->rx_err.ft; - rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1); + dest.ft = rx->ft.status; + rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); - netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n", - attrs->action, err); - goto out_err; + mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err); + goto err_add_flow; } + kvfree(spec); - ipsec_rule->rule = rule; - 
ipsec_rule->set_modify_hdr = modify_hdr; - goto out; - -out_err: - if (modify_hdr) - mlx5_modify_header_dealloc(priv->mdev, modify_hdr); - rx_ft_put(priv, type); + sa_entry->ipsec_rule.rule = rule; + sa_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr; + sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat; + return 0; -out: +err_add_flow: + if (flow_act.pkt_reformat) + mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat); +err_pkt_reformat: + mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr); +err_mod_header: kvfree(spec); +err_alloc: + rx_ft_put(mdev, ipsec, attrs->family); return err; } -static int tx_add_rule(struct mlx5e_priv *priv, - struct mlx5e_ipsec_sa_entry *sa_entry) +static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) { + struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs; + struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry); + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; + struct mlx5_flow_destination dest = {}; struct mlx5_flow_act flow_act = {}; struct mlx5_flow_handle *rule; struct mlx5_flow_spec *spec; + struct mlx5e_ipsec_tx *tx; int err = 0; - err = tx_ft_get(priv); - if (err) - return err; + tx = tx_ft_get(mdev, ipsec); + if (IS_ERR(tx)) + return PTR_ERR(tx); spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) { err = -ENOMEM; - goto out; + goto err_alloc; } - setup_fte_common(&sa_entry->attrs, sa_entry->ipsec_obj_id, spec, - &flow_act); + if (attrs->family == AF_INET) + setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4); + else + setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6); + + setup_fte_no_frags(spec); + + switch (attrs->type) { + case XFRM_DEV_OFFLOAD_CRYPTO: + setup_fte_spi(spec, attrs->spi); + setup_fte_esp(spec); + setup_fte_reg_a(spec); + break; + case XFRM_DEV_OFFLOAD_PACKET: + setup_fte_reg_c0(spec, attrs->reqid); + err = setup_pkt_reformat(mdev, attrs, &flow_act); + if (err) + goto err_pkt_reformat; + break; + default: + break; + } - /* Add IPsec indicator in metadata_reg_a */ - spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; - MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a, - MLX5_ETH_WQE_FT_META_IPSEC); - MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a, - MLX5_ETH_WQE_FT_META_IPSEC); - - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | - MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT; - rule = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0); + flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC; + flow_act.crypto.obj_id = sa_entry->ipsec_obj_id; + flow_act.flags |= FLOW_ACT_NO_APPEND; + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW | + MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter_id = mlx5_fc_id(tx->fc->cnt); + rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); - netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n", - sa_entry->attrs.action, err); - goto out; + mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err); + goto err_add_flow; } + kvfree(spec); sa_entry->ipsec_rule.rule = rule; + sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat; + return 0; -out: +err_add_flow: + if (flow_act.pkt_reformat) + mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat); +err_pkt_reformat: kvfree(spec); +err_alloc: + tx_ft_put(ipsec); + return err; +} + +static int tx_add_policy(struct 
mlx5e_ipsec_pol_entry *pol_entry) +{ + struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs; + struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry); + struct mlx5_flow_destination dest[2] = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + struct mlx5e_ipsec_tx *tx; + int err, dstn = 0; + + tx = tx_ft_get(mdev, pol_entry->ipsec); + if (IS_ERR(tx)) + return PTR_ERR(tx); + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) { + err = -ENOMEM; + goto err_alloc; + } + + if (attrs->family == AF_INET) + setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4); + else + setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6); + + setup_fte_no_frags(spec); + + err = setup_modify_header(mdev, attrs->reqid, XFRM_DEV_OFFLOAD_OUT, + &flow_act); if (err) - tx_ft_put(priv); + goto err_mod_header; + + switch (attrs->action) { + case XFRM_POLICY_ALLOW: + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + break; + case XFRM_POLICY_BLOCK: + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop); + dstn++; + break; + default: + WARN_ON(true); + err = -EINVAL; + goto err_action; + } + + flow_act.flags |= FLOW_ACT_NO_APPEND; + dest[dstn].ft = tx->ft.sa; + dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dstn++; + rule = mlx5_add_flow_rules(tx->ft.pol, spec, &flow_act, dest, dstn); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err); + goto err_action; + } + + kvfree(spec); + pol_entry->ipsec_rule.rule = rule; + pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr; + return 0; + +err_action: + mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr); +err_mod_header: + kvfree(spec); +err_alloc: + tx_ft_put(pol_entry->ipsec); return err; } -int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, - struct mlx5e_ipsec_sa_entry *sa_entry) +static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry) { - if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT) - return tx_add_rule(priv, sa_entry); + struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs; + struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry); + struct mlx5_flow_destination dest[2]; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + struct mlx5e_ipsec_rx *rx; + int err, dstn = 0; + + rx = rx_ft_get(mdev, pol_entry->ipsec, attrs->family); + if (IS_ERR(rx)) + return PTR_ERR(rx); + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) { + err = -ENOMEM; + goto err_alloc; + } - return rx_add_rule(priv, sa_entry); + if (attrs->family == AF_INET) + setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4); + else + setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6); + + setup_fte_no_frags(spec); + + switch (attrs->action) { + case XFRM_POLICY_ALLOW: + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + break; + case XFRM_POLICY_BLOCK: + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop); + dstn++; + break; + default: + WARN_ON(true); + err = -EINVAL; + goto err_action; + } + + flow_act.flags |= FLOW_ACT_NO_APPEND; + dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[dstn].ft = rx->ft.sa; + dstn++; + rule = 
mlx5_add_flow_rules(rx->ft.pol, spec, &flow_act, dest, dstn); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Fail to add RX IPsec policy rule err=%d\n", err); + goto err_action; + } + + kvfree(spec); + pol_entry->ipsec_rule.rule = rule; + return 0; + +err_action: + kvfree(spec); +err_alloc: + rx_ft_put(mdev, pol_entry->ipsec, attrs->family); + return err; } -void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv, - struct mlx5e_ipsec_sa_entry *sa_entry) +static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec) +{ + struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4; + struct mlx5_core_dev *mdev = ipsec->mdev; + struct mlx5e_ipsec_tx *tx = ipsec->tx; + + mlx5_fc_destroy(mdev, tx->fc->drop); + mlx5_fc_destroy(mdev, tx->fc->cnt); + kfree(tx->fc); + mlx5_fc_destroy(mdev, rx_ipv4->fc->drop); + mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt); + kfree(rx_ipv4->fc); +} + +static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec) +{ + struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4; + struct mlx5e_ipsec_rx *rx_ipv6 = ipsec->rx_ipv6; + struct mlx5_core_dev *mdev = ipsec->mdev; + struct mlx5e_ipsec_tx *tx = ipsec->tx; + struct mlx5e_ipsec_fc *fc; + struct mlx5_fc *counter; + int err; + + fc = kzalloc(sizeof(*rx_ipv4->fc), GFP_KERNEL); + if (!fc) + return -ENOMEM; + + /* Both IPv4 and IPv6 point to same flow counters struct. */ + rx_ipv4->fc = fc; + rx_ipv6->fc = fc; + counter = mlx5_fc_create(mdev, false); + if (IS_ERR(counter)) { + err = PTR_ERR(counter); + goto err_rx_cnt; + } + + fc->cnt = counter; + counter = mlx5_fc_create(mdev, false); + if (IS_ERR(counter)) { + err = PTR_ERR(counter); + goto err_rx_drop; + } + + fc->drop = counter; + fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL); + if (!fc) { + err = -ENOMEM; + goto err_tx_fc; + } + + tx->fc = fc; + counter = mlx5_fc_create(mdev, false); + if (IS_ERR(counter)) { + err = PTR_ERR(counter); + goto err_tx_cnt; + } + + fc->cnt = counter; + counter = mlx5_fc_create(mdev, false); + if (IS_ERR(counter)) { + err = PTR_ERR(counter); + goto err_tx_drop; + } + + fc->drop = counter; + return 0; + +err_tx_drop: + mlx5_fc_destroy(mdev, tx->fc->cnt); +err_tx_cnt: + kfree(tx->fc); +err_tx_fc: + mlx5_fc_destroy(mdev, rx_ipv4->fc->drop); +err_rx_drop: + mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt); +err_rx_cnt: + kfree(rx_ipv4->fc); + return err; +} + +void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_ipsec *ipsec = priv->ipsec; + struct mlx5e_ipsec_hw_stats *stats; + struct mlx5e_ipsec_fc *fc; + + stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats; + + stats->ipsec_rx_pkts = 0; + stats->ipsec_rx_bytes = 0; + stats->ipsec_rx_drop_pkts = 0; + stats->ipsec_rx_drop_bytes = 0; + stats->ipsec_tx_pkts = 0; + stats->ipsec_tx_bytes = 0; + stats->ipsec_tx_drop_pkts = 0; + stats->ipsec_tx_drop_bytes = 0; + + fc = ipsec->rx_ipv4->fc; + mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes); + mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts, + &stats->ipsec_rx_drop_bytes); + + fc = ipsec->tx->fc; + mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes); + mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts, + &stats->ipsec_tx_drop_bytes); +} + +int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) +{ + if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) + return tx_add_rule(sa_entry); + + return rx_add_rule(sa_entry); +} + +void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry) 
{ struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule; struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry); mlx5_del_flow_rules(ipsec_rule->rule); - if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT) { - tx_ft_put(priv); + if (ipsec_rule->pkt_reformat) + mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat); + + if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) { + tx_ft_put(sa_entry->ipsec); return; } - mlx5_modify_header_dealloc(mdev, ipsec_rule->set_modify_hdr); - rx_ft_put(priv, - sa_entry->attrs.is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4); + mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr); + rx_ft_put(mdev, sa_entry->ipsec, sa_entry->attrs.family); } -void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec) +int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry) { - struct mlx5e_accel_fs_esp_prot *fs_prot; - struct mlx5e_accel_fs_esp *accel_esp; - enum accel_fs_esp_type i; + if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) + return tx_add_policy(pol_entry); - if (!ipsec->rx_fs) - return; + return rx_add_policy(pol_entry); +} + +void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry) +{ + struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule; + struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry); - mutex_destroy(&ipsec->tx_fs->mutex); - WARN_ON(ipsec->tx_fs->refcnt); - kfree(ipsec->tx_fs); + mlx5_del_flow_rules(ipsec_rule->rule); - accel_esp = ipsec->rx_fs; - for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) { - fs_prot = &accel_esp->fs_prot[i]; - mutex_destroy(&fs_prot->prot_mutex); - WARN_ON(fs_prot->refcnt); + if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) { + rx_ft_put(mdev, pol_entry->ipsec, pol_entry->attrs.family); + return; } - kfree(ipsec->rx_fs); + + mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr); + tx_ft_put(pol_entry->ipsec); +} + +void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec) +{ + if (!ipsec->tx) + return; + + ipsec_fs_destroy_counters(ipsec); + mutex_destroy(&ipsec->tx->ft.mutex); + WARN_ON(ipsec->tx->ft.refcnt); + kfree(ipsec->tx); + + mutex_destroy(&ipsec->rx_ipv4->ft.mutex); + WARN_ON(ipsec->rx_ipv4->ft.refcnt); + kfree(ipsec->rx_ipv4); + + mutex_destroy(&ipsec->rx_ipv6->ft.mutex); + WARN_ON(ipsec->rx_ipv6->ft.refcnt); + kfree(ipsec->rx_ipv6); } int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec) { - struct mlx5e_accel_fs_esp_prot *fs_prot; - struct mlx5e_accel_fs_esp *accel_esp; struct mlx5_flow_namespace *ns; - enum accel_fs_esp_type i; int err = -ENOMEM; ns = mlx5_get_flow_namespace(ipsec->mdev, @@ -581,26 +1032,34 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec) if (!ns) return -EOPNOTSUPP; - ipsec->tx_fs = kzalloc(sizeof(*ipsec->tx_fs), GFP_KERNEL); - if (!ipsec->tx_fs) + ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL); + if (!ipsec->tx) return -ENOMEM; - ipsec->rx_fs = kzalloc(sizeof(*ipsec->rx_fs), GFP_KERNEL); - if (!ipsec->rx_fs) - goto err_rx; + ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL); + if (!ipsec->rx_ipv4) + goto err_rx_ipv4; - mutex_init(&ipsec->tx_fs->mutex); - ipsec->tx_fs->ns = ns; + ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL); + if (!ipsec->rx_ipv6) + goto err_rx_ipv6; - accel_esp = ipsec->rx_fs; - for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) { - fs_prot = &accel_esp->fs_prot[i]; - mutex_init(&fs_prot->prot_mutex); - } + err = ipsec_fs_init_counters(ipsec); + if (err) + goto err_counters; + + mutex_init(&ipsec->tx->ft.mutex); + mutex_init(&ipsec->rx_ipv4->ft.mutex); + 
mutex_init(&ipsec->rx_ipv6->ft.mutex); + ipsec->tx->ns = ns; return 0; -err_rx: - kfree(ipsec->tx_fs); +err_counters: + kfree(ipsec->rx_ipv6); +err_rx_ipv6: + kfree(ipsec->rx_ipv4); +err_rx_ipv4: + kfree(ipsec->tx); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index 792724ce7336..8e3614218fc4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -2,9 +2,14 @@ /* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */ #include "mlx5_core.h" +#include "en.h" #include "ipsec.h" #include "lib/mlx5.h" +enum { + MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET, +}; + u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) { u32 caps = 0; @@ -31,6 +36,12 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp)) caps |= MLX5_IPSEC_CAP_CRYPTO; + if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) && + MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_esp_trasport) && + MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_del_esp_trasport) && + MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap)) + caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD; + if (!caps) return 0; @@ -46,6 +57,52 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) } EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps); +static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn, + struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + void *aso_ctx; + + aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso); + if (attrs->esn_trigger) { + MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1); + + if (attrs->dir == XFRM_DEV_OFFLOAD_IN) { + MLX5_SET(ipsec_aso, aso_ctx, window_sz, + attrs->replay_window / 64); + MLX5_SET(ipsec_aso, aso_ctx, mode, + MLX5_IPSEC_ASO_REPLAY_PROTECTION); + } + } + + /* ASO context */ + MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn); + MLX5_SET(ipsec_obj, obj, full_offload, 1); + MLX5_SET(ipsec_aso, aso_ctx, valid, 1); + /* MLX5_IPSEC_ASO_REG_C_4_5 is a type C register that is used + * in flow steering to perform matching against. Please be + * aware that this register was chosen arbitrarily and can't + * be used in other places as long as IPsec packet offload is + * active.
+ */ + MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5); + if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) + MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN); + + if (attrs->hard_packet_limit != XFRM_INF) { + MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt, + lower_32_bits(attrs->hard_packet_limit)); + MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1); + MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1); + } + + if (attrs->soft_packet_limit != XFRM_INF) { + MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft, + lower_32_bits(attrs->soft_packet_limit)); + + MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1); + } +} + static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry) { struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs; @@ -54,6 +111,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry) u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {}; void *obj, *salt_p, *salt_iv_p; + struct mlx5e_hw_objs *res; int err; obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object); @@ -66,11 +124,10 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry) salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv); memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv)); /* esn */ - if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) { + if (attrs->esn_trigger) { MLX5_SET(ipsec_obj, obj, esn_en, 1); MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn); - if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) - MLX5_SET(ipsec_obj, obj, esn_overlap, 1); + MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap); } MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id); @@ -81,6 +138,10 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry) MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC); + res = &mdev->mlx5e_res.hw_objs; + if (attrs->type == XFRM_DEV_OFFLOAD_PACKET) + mlx5e_ipsec_packet_setup(obj, res->pdn, attrs); + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); if (!err) sa_entry->ipsec_obj_id = @@ -152,7 +213,7 @@ static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry, void *obj; int err; - if (!(attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED)) + if (!attrs->esn_trigger) return 0; general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types); @@ -183,8 +244,7 @@ static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry, MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP | MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB); MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn); - if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) - MLX5_SET(ipsec_obj, obj, esn_overlap, 1); + MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap); /* general object fields set */ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT); @@ -203,3 +263,234 @@ void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry, memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs)); } + +static void +mlx5e_ipsec_aso_update_esn(struct mlx5e_ipsec_sa_entry *sa_entry, + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + struct mlx5_wqe_aso_ctrl_seg data = {}; + + data.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6; + data.condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE | MLX5_ASO_ALWAYS_TRUE + << 4; + data.data_offset_condition_operand = MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET; + data.bitwise_data = cpu_to_be64(BIT_ULL(54)); + data.data_mask = data.bitwise_data; + + mlx5e_ipsec_aso_query(sa_entry, 
&data); +} + +static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry, + u32 mode_param) +{ + struct mlx5_accel_esp_xfrm_attrs attrs = {}; + + if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) { + sa_entry->esn_state.esn++; + sa_entry->esn_state.overlap = 0; + } else { + sa_entry->esn_state.overlap = 1; + } + + mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs); + mlx5_accel_esp_modify_xfrm(sa_entry, &attrs); + mlx5e_ipsec_aso_update_esn(sa_entry, &attrs); +} + +static void mlx5e_ipsec_handle_event(struct work_struct *_work) +{ + struct mlx5e_ipsec_work *work = + container_of(_work, struct mlx5e_ipsec_work, work); + struct mlx5_accel_esp_xfrm_attrs *attrs; + struct mlx5e_ipsec_sa_entry *sa_entry; + struct mlx5e_ipsec_aso *aso; + struct mlx5e_ipsec *ipsec; + int ret; + + sa_entry = xa_load(&work->ipsec->sadb, work->id); + if (!sa_entry) + goto out; + + ipsec = sa_entry->ipsec; + aso = ipsec->aso; + attrs = &sa_entry->attrs; + + spin_lock(&sa_entry->x->lock); + ret = mlx5e_ipsec_aso_query(sa_entry, NULL); + if (ret) + goto unlock; + + aso->use_cache = true; + if (attrs->esn_trigger && + !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) { + u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter); + + mlx5e_ipsec_update_esn_state(sa_entry, mode_param); + } + + if (attrs->soft_packet_limit != XFRM_INF) + if (!MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm) || + !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) || + !MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable)) + xfrm_state_check_expire(sa_entry->x); + aso->use_cache = false; + +unlock: + spin_unlock(&sa_entry->x->lock); +out: + kfree(work); +} + +static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event, + void *data) +{ + struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb); + struct mlx5_eqe_obj_change *object; + struct mlx5e_ipsec_work *work; + struct mlx5_eqe *eqe = data; + u16 type; + + if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE) + return NOTIFY_DONE; + + object = &eqe->data.obj_change; + type = be16_to_cpu(object->obj_type); + + if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC) + return NOTIFY_DONE; + + work = kmalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return NOTIFY_DONE; + + INIT_WORK(&work->work, mlx5e_ipsec_handle_event); + work->ipsec = ipsec; + work->id = be32_to_cpu(object->obj_id); + + queue_work(ipsec->wq, &work->work); + return NOTIFY_OK; +} + +int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec) +{ + struct mlx5_core_dev *mdev = ipsec->mdev; + struct mlx5e_ipsec_aso *aso; + struct mlx5e_hw_objs *res; + struct device *pdev; + int err; + + aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL); + if (!aso) + return -ENOMEM; + + res = &mdev->mlx5e_res.hw_objs; + + pdev = mlx5_core_dma_dev(mdev); + aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx), + DMA_BIDIRECTIONAL); + err = dma_mapping_error(pdev, aso->dma_addr); + if (err) + goto err_dma; + + aso->aso = mlx5_aso_create(mdev, res->pdn); + if (IS_ERR(aso->aso)) { + err = PTR_ERR(aso->aso); + goto err_aso_create; + } + + ipsec->nb.notifier_call = mlx5e_ipsec_event; + mlx5_notifier_register(mdev, &ipsec->nb); + + ipsec->aso = aso; + return 0; + +err_aso_create: + dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx), + DMA_BIDIRECTIONAL); +err_dma: + kfree(aso); + return err; +} + +void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec) +{ + struct mlx5_core_dev *mdev = ipsec->mdev; + struct mlx5e_ipsec_aso *aso; + struct device *pdev; + + aso = ipsec->aso; + pdev = mlx5_core_dma_dev(mdev); + + 
mlx5_notifier_unregister(mdev, &ipsec->nb); + mlx5_aso_destroy(aso->aso); + dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx), + DMA_BIDIRECTIONAL); + kfree(aso); +} + +static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl, + struct mlx5_wqe_aso_ctrl_seg *data) +{ + if (!data) + return; + + ctrl->data_mask_mode = data->data_mask_mode; + ctrl->condition_1_0_operand = data->condition_1_0_operand; + ctrl->condition_1_0_offset = data->condition_1_0_offset; + ctrl->data_offset_condition_operand = data->data_offset_condition_operand; + ctrl->condition_0_data = data->condition_0_data; + ctrl->condition_0_mask = data->condition_0_mask; + ctrl->condition_1_data = data->condition_1_data; + ctrl->condition_1_mask = data->condition_1_mask; + ctrl->bitwise_data = data->bitwise_data; + ctrl->data_mask = data->data_mask; +} + +int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5_wqe_aso_ctrl_seg *data) +{ + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; + struct mlx5e_ipsec_aso *aso = ipsec->aso; + struct mlx5_core_dev *mdev = ipsec->mdev; + struct mlx5_wqe_aso_ctrl_seg *ctrl; + struct mlx5e_hw_objs *res; + struct mlx5_aso_wqe *wqe; + u8 ds_cnt; + + lockdep_assert_held(&sa_entry->x->lock); + if (aso->use_cache) + return 0; + + res = &mdev->mlx5e_res.hw_objs; + + memset(aso->ctx, 0, sizeof(aso->ctx)); + wqe = mlx5_aso_get_wqe(aso->aso); + ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS); + mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id, + MLX5_ACCESS_ASO_OPC_MOD_IPSEC); + + ctrl = &wqe->aso_ctrl; + ctrl->va_l = + cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN); + ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr)); + ctrl->l_key = cpu_to_be32(res->mkey); + mlx5e_ipsec_aso_copy(ctrl, data); + + mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl); + return mlx5_aso_poll_cq(aso->aso, false); +} + +void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry, + u64 *packets) +{ + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; + struct mlx5e_ipsec_aso *aso = ipsec->aso; + u64 hard_cnt; + + hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt); + /* HW decreases the limit till it reaches zero to fire an event. + * We need to fix the calculations, so the returned count is the total + * number of passed packets and not how many are left.
+ */ + *packets = sa_entry->attrs.hard_packet_limit - hard_cnt; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index 6859f1c1a831..eab5bc718771 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -312,27 +312,31 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, struct mlx5_cqe64 *cqe) { u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata); - struct mlx5e_priv *priv; + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_ipsec *ipsec = priv->ipsec; + struct mlx5e_ipsec_sa_entry *sa_entry; struct xfrm_offload *xo; - struct xfrm_state *xs; struct sec_path *sp; u32 sa_handle; sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data); - priv = netdev_priv(netdev); sp = secpath_set(skb); if (unlikely(!sp)) { - atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc); + atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sp_alloc); return; } - xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle); - if (unlikely(!xs)) { - atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss); + rcu_read_lock(); + sa_entry = xa_load(&ipsec->sadb, sa_handle); + if (unlikely(!sa_entry)) { + rcu_read_unlock(); + atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss); return; } + xfrm_state_hold(sa_entry->x); + rcu_read_unlock(); - sp->xvec[sp->len++] = xs; + sp->xvec[sp->len++] = sa_entry->x; sp->olen++; xo = xfrm_offload(skb); @@ -349,6 +353,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, xo->status = CRYPTO_INVALID_PACKET_SYNTAX; break; default: - atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome); + atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c index 9de84821dafb..e0e36a09721c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c @@ -37,6 +37,17 @@ #include "en.h" #include "ipsec.h" +static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_pkts) }, + { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_pkts) }, + { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_pkts) }, + { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_pkts) }, + { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_bytes) }, +}; + static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) }, { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) }, @@ -50,8 +61,48 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = { #define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \ atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset)) +#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc) #define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc) +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw) +{ + if (!priv->ipsec) + return 0; + + return NUM_IPSEC_HW_COUNTERS; +} + +static inline 
MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw) {} + +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw) +{ + unsigned int i; + + if (!priv->ipsec) + return idx; + + for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + mlx5e_ipsec_hw_stats_desc[i].format); + + return idx; +} + +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw) +{ + int i; + + if (!priv->ipsec) + return idx; + + mlx5e_accel_ipsec_fs_read_stats(priv, &priv->ipsec->hw_stats); + for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->hw_stats, + mlx5e_ipsec_hw_stats_desc, i); + + return idx; +} + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw) { return priv->ipsec ? NUM_IPSEC_SW_COUNTERS : 0; @@ -81,4 +132,5 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw) return idx; } +MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0); MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 70c4ea3841d7..6687b8136e44 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -2480,6 +2480,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = { &MLX5E_STATS_GRP(per_prio), &MLX5E_STATS_GRP(pme), #ifdef CONFIG_MLX5_EN_IPSEC + &MLX5E_STATS_GRP(ipsec_hw), &MLX5E_STATS_GRP(ipsec_sw), #endif &MLX5E_STATS_GRP(tls), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 37df58ba958c..375752d6546d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -506,6 +506,7 @@ extern MLX5E_DECLARE_STATS_GRP(per_prio); extern MLX5E_DECLARE_STATS_GRP(pme); extern MLX5E_DECLARE_STATS_GRP(channels); extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest); +extern MLX5E_DECLARE_STATS_GRP(ipsec_hw); extern MLX5E_DECLARE_STATS_GRP(ipsec_sw); extern MLX5E_DECLARE_STATS_GRP(ptp); extern MLX5E_DECLARE_STATS_GRP(macsec_hw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index a0242dc15741..8f7580fec193 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -19,6 +19,7 @@ #include "diag/fw_tracer.h" #include "mlx5_irq.h" #include "devlink.h" +#include "en_accel/ipsec.h" enum { MLX5_EQE_OWNER_INIT_VAL = 0x1, @@ -578,6 +579,10 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4]) if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE); + if (mlx5_ipsec_device_caps(dev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD) + async_event_mask |= + (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE); + mask[0] = async_event_mask; if (MLX5_CAP_GEN(dev, event_cap)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 4dcd26b86662..5a85d8c1e797 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -111,8 +111,8 @@ #define ETHTOOL_PRIO_NUM_LEVELS 1 #define ETHTOOL_NUM_PRIOS 11 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) -/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */ -#define KERNEL_NIC_PRIO_NUM_LEVELS 7 +/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy */ +#define KERNEL_NIC_PRIO_NUM_LEVELS 8 #define 
KERNEL_NIC_NUM_PRIOS 1 /* One more level for tc */ #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1) @@ -133,7 +133,7 @@ #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1) #define KERNEL_TX_IPSEC_NUM_PRIOS 1 -#define KERNEL_TX_IPSEC_NUM_LEVELS 1 +#define KERNEL_TX_IPSEC_NUM_LEVELS 2 #define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS) #define KERNEL_TX_MACSEC_NUM_PRIOS 1 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c index 0f9e4f01c85a..5a80fb7dbbca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c @@ -353,12 +353,15 @@ void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt, cseg->general_id = cpu_to_be32(obj_id); } -void *mlx5_aso_get_wqe(struct mlx5_aso *aso) +struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso) { + struct mlx5_aso_wqe *wqe; u16 pi; pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc); - return mlx5_wq_cyc_get_wqe(&aso->wq, pi); + wqe = mlx5_wq_cyc_get_wqe(&aso->wq, pi); + memset(wqe, 0, sizeof(*wqe)); + return wqe; } void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h index 2d40dcf9d42e..afb078bbb8ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h @@ -15,6 +15,7 @@ #define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24 #define MLX5_MACSEC_ASO_DS_CNT (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_DS)) +#define ASO_CTRL_READ_EN BIT(0) struct mlx5_wqe_aso_ctrl_seg { __be32 va_h; __be32 va_l; /* include read_enable */ @@ -71,13 +72,14 @@ enum { }; enum { + MLX5_ACCESS_ASO_OPC_MOD_IPSEC = 0x0, MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER = 0x2, MLX5_ACCESS_ASO_OPC_MOD_MACSEC = 0x5, }; struct mlx5_aso; -void *mlx5_aso_get_wqe(struct mlx5_aso *aso); +struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso); void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt, struct mlx5_aso_wqe *aso_wqe, u32 obj_id, u32 opc_mode); diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c index 3728870d8e9c..4632268695cb 100644 --- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c +++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c @@ -302,6 +302,11 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x) return -EINVAL; } + if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) { + nn_err(nn, "Unsupported xfrm offload type\n"); + return -EINVAL; + } + cfg->spi = ntohl(x->id.spi); /* Hash/Authentication */