author		Ido Schimmel <idosch@nvidia.com>	2021-05-19 15:08:23 +0300
committer	David S. Miller <davem@davemloft.net>	2021-05-19 12:47:47 -0700
commit		01848e05f8bbff2d799073b307fe2eb42bee764b (patch)
tree		dbbdc73fd584656bc5b4c26428883ee37ac67d62 /drivers/net/ethernet
parent		b7b8f435ea3b33ba7067f992c5b85a62f24d19ed (diff)
download	linux-01848e05f8bbff2d799073b307fe2eb42bee764b.tar.bz2
mlxsw: spectrum_router: Add support for inner layer 3 multipath hash policy
When this policy is set, the kernel uses the inner layer 3 fields for multipath hash computation and falls back to the outer fields if no encapsulation was encountered. This behavior is most likely influenced by the flow dissector, which is used for the packet dissection.

The Spectrum ASIC, however, cannot fall back to the outer fields if inner fields are not available. This should not result in a discrepancy from the software data path, because if several flows have matching inner fields, they will tend to have matching outer fields as well.

Therefore, implement this policy by enabling both the outer and inner layer 3 fields for the multipath hash computation.

Signed-off-by: Ido Schimmel <idosch@nvidia.com>
Reviewed-by: Petr Machata <petrm@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
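For context: in the kernel this is multipath hash policy 2 ("Layer 3 or inner Layer 3 if present"), selected through the net.ipv4.fib_multipath_hash_policy and net.ipv6.fib_multipath_hash_policy sysctls. The stand-alone C sketch below only illustrates the reasoning above and is not driver code; struct flow, hash_sw() and hash_hw() are made-up names, and a toy XOR stands in for the real hash function.

/* Illustrative user-space model, not mlxsw code. hash_sw() mimics the
 * software data path (inner fields with fallback to outer), hash_hw()
 * mimics the Spectrum approach of enabling outer and inner fields
 * together. A toy XOR replaces the real hash function.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct flow {
	uint32_t outer_sip, outer_dip;	/* outer IPv4 addresses */
	bool has_inner;			/* encapsulated payload present? */
	uint32_t inner_sip, inner_dip;	/* valid only when has_inner */
};

/* Software data path: hash the inner layer 3 fields, fall back to the
 * outer fields when no encapsulation was encountered.
 */
static uint32_t hash_sw(const struct flow *f)
{
	if (f->has_inner)
		return f->inner_sip ^ f->inner_dip;
	return f->outer_sip ^ f->outer_dip;
}

/* Spectrum-style: no fallback is possible, so outer and inner fields are
 * both enabled; absent inner fields simply contribute nothing.
 */
static uint32_t hash_hw(const struct flow *f)
{
	uint32_t h = f->outer_sip ^ f->outer_dip;

	if (f->has_inner)
		h ^= f->inner_sip ^ f->inner_dip;
	return h;
}

int main(void)
{
	struct flow plain = {
		.outer_sip = 0x0a000001, .outer_dip = 0x0a000002,
	};
	struct flow tunnel = {
		.outer_sip = 0xc0a80001, .outer_dip = 0xc0a80002,
		.has_inner = true,
		.inner_sip = 0x0a000001, .inner_dip = 0x0a000002,
	};

	printf("plain:  sw %#" PRIx32 " hw %#" PRIx32 "\n",
	       hash_sw(&plain), hash_hw(&plain));
	printf("tunnel: sw %#" PRIx32 " hw %#" PRIx32 "\n",
	       hash_sw(&tunnel), hash_hw(&tunnel));
	return 0;
}

Two flows that share inner fields but use different outer (tunnel) addresses would hash identically in hash_sw() but differently in hash_hw(); as argued above, such flows tend to share outer fields as well, so no discrepancy from the software data path is expected in practice.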
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--	drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 41
1 file changed, 41 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 605515137636..bacac94398dd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -9602,6 +9602,8 @@ static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
 struct mlxsw_sp_mp_hash_config {
 	DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
 	DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
+	DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
+	DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
 };
 
 #define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
@@ -9613,6 +9615,27 @@ struct mlxsw_sp_mp_hash_config {
 #define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
 	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)
 
+static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
+{
+	unsigned long *inner_headers = config->inner_headers;
+	unsigned long *inner_fields = config->inner_fields;
+
+	/* IPv4 inner */
+	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
+	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
+	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
+	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
+	/* IPv6 inner */
+	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
+	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
+	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
+	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
+	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
+	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
+	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
+	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
+}
+
 static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
 {
 	unsigned long *headers = config->headers;
@@ -9642,6 +9665,12 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
 		break;
+	case 2:
+		/* Outer */
+		mlxsw_sp_mp4_hash_outer_addr(config);
+		/* Inner */
+		mlxsw_sp_mp_hash_inner_l3(config);
+		break;
 	}
 }
 
@@ -9677,6 +9706,14 @@ static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
 		break;
+	case 2:
+		/* Outer */
+		mlxsw_sp_mp6_hash_outer_addr(config);
+		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
+		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
+		/* Inner */
+		mlxsw_sp_mp_hash_inner_l3(config);
+		break;
 	}
 }
 
@@ -9696,6 +9733,10 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
 		mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
 	for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
 		mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
+	for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
+		mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
+	for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
+		mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);
 
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
 }