-rw-r--r--  MAINTAINERS | 1
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 4
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 17
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 51
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 20
-rw-r--r--  drivers/net/geneve.c | 14
-rw-r--r--  drivers/net/virtio_net.c | 5
-rw-r--r--  drivers/net/vxlan.c | 12
-rw-r--r--  drivers/ptp/ptp_chardev.c | 4
-rw-r--r--  include/linux/mlx5/driver.h | 8
-rw-r--r--  include/net/dst.h | 10
-rw-r--r--  include/net/ip6_fib.h | 4
-rw-r--r--  include/net/sctp/sm.h | 2
-rw-r--r--  include/net/sctp/structs.h | 2
-rw-r--r--  include/uapi/linux/sctp.h | 1
-rw-r--r--  kernel/bpf/xskmap.c | 10
-rw-r--r--  net/bpfilter/bpfilter_kern.c | 6
-rw-r--r--  net/core/ethtool.c | 11
-rw-r--r--  net/ipv4/ipmr_base.c | 2
-rw-r--r--  net/ipv6/ip6_tunnel.c | 10
-rw-r--r--  net/ipv6/mcast.c | 16
-rw-r--r--  net/ipv6/route.c | 12
-rw-r--r--  net/ipv6/udp.c | 6
-rw-r--r--  net/ipv6/xfrm6_policy.c | 4
-rw-r--r--  net/llc/llc_conn.c | 1
-rw-r--r--  net/rxrpc/call_accept.c | 2
-rw-r--r--  net/rxrpc/local_object.c | 2
-rw-r--r--  net/rxrpc/output.c | 3
-rw-r--r--  net/rxrpc/peer_event.c | 1
-rw-r--r--  net/sched/cls_api.c | 13
-rw-r--r--  net/sched/sch_api.c | 11
-rw-r--r--  net/sctp/associola.c | 3
-rw-r--r--  net/sctp/input.c | 1
-rw-r--r--  net/sctp/output.c | 6
-rw-r--r--  net/sctp/socket.c | 17
-rw-r--r--  net/socket.c | 11
-rw-r--r--  net/tipc/group.c | 1
-rw-r--r--  net/tipc/link.c | 1
-rw-r--r--  net/tipc/name_distr.c | 4
-rw-r--r--  net/xdp/xsk.c | 2
-rw-r--r--  net/xfrm/xfrm_interface.c | 3
-rw-r--r--  net/xfrm/xfrm_policy.c | 8
-rw-r--r--  tools/testing/selftests/net/reuseport_bpf.c | 13
57 files changed, 253 insertions(+), 187 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 556f902b3766..7f371d372bdd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10122,7 +10122,6 @@ L: netdev@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git
S: Maintained
-F: net/core/flow.c
F: net/xfrm/
F: net/key/
F: net/ipv4/xfrm*
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 4241ae928d4a..34af5f1569c8 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -321,9 +321,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
phydev->advertising = phydev->supported;
/* The internal PHY has its link interrupts routed to the
- * Ethernet MAC ISRs
+ * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
+ * that prevents the signaling of link UP interrupts when
+ * the link operates at 10Mbps, so fallback to polling for
+ * those versions of GENET.
*/
- if (priv->internal_phy)
+ if (priv->internal_phy && !GENET_IS_V5(priv))
dev->phydev->irq = PHY_IGNORE_INTERRUPT;
return 0;
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 4778b663653e..bf80855dd0dd 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -452,6 +452,10 @@ struct bufdesc_ex {
* initialisation.
*/
#define FEC_QUIRK_MIB_CLEAR (1 << 15)
+/* Only i.MX25/i.MX27/i.MX28 controller supports FRBR,FRSR registers,
+ * those FIFO receive registers are resolved in other platforms.
+ */
+#define FEC_QUIRK_HAS_FRREG (1 << 16)
struct bufdesc_prop {
int qid;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index bf9b9fd6d2a0..7b98bb75ba8a 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -91,14 +91,16 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = 0,
}, {
.name = "imx25-fec",
- .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR,
+ .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
+ FEC_QUIRK_HAS_FRREG,
}, {
.name = "imx27-fec",
- .driver_data = FEC_QUIRK_MIB_CLEAR,
+ .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
}, {
.name = "imx28-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
- FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
+ FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_FRREG,
}, {
.name = "imx6q-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
@@ -2164,7 +2166,13 @@ static void fec_enet_get_regs(struct net_device *ndev,
memset(buf, 0, regs->len);
for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
- off = fec_enet_register_offset[i] / 4;
+ off = fec_enet_register_offset[i];
+
+ if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
+ !(fep->quirks & FEC_QUIRK_HAS_FRREG))
+ continue;
+
+ off >>= 2;
buf[off] = readl(&theregs[off]);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 15d8ae28c040..00172dee5339 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -432,10 +432,9 @@ static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
struct mlx5_wq_cyc *wq,
- u16 pi, u16 frag_pi)
+ u16 pi, u16 nnops)
{
struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
- u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
edge_wi = wi + nnops;
@@ -454,15 +453,14 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe;
u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
- u16 pi, frag_pi;
+ u16 pi, contig_wqebbs_room;
int err;
int i;
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
-
- if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) {
- mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) {
+ mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room);
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index ae73ea992845..6dacaeba2fbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -290,10 +290,9 @@ dma_unmap_wqe_err:
static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
struct mlx5_wq_cyc *wq,
- u16 pi, u16 frag_pi)
+ u16 pi, u16 nnops)
{
struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
- u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
edge_wi = wi + nnops;
@@ -348,8 +347,8 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
+ u16 headlen, ihs, contig_wqebbs_room;
u16 ds_cnt, ds_cnt_inl = 0;
- u16 headlen, ihs, frag_pi;
u8 num_wqebbs, opcode;
u32 num_bytes;
int num_dma;
@@ -386,9 +385,9 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
- if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
- mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
}
@@ -636,7 +635,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
- u16 headlen, ihs, pi, frag_pi;
+ u16 headlen, ihs, pi, contig_wqebbs_room;
u16 ds_cnt, ds_cnt_inl = 0;
u8 num_wqebbs, opcode;
u32 num_bytes;
@@ -672,13 +671,14 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
- if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
}
- mlx5i_sq_fetch_wqe(sq, &wqe, &pi);
+ mlx5i_sq_fetch_wqe(sq, &wqe, pi);
/* fill wqe */
wi = &sq->db.wqe_info[pi];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 48864f4988a4..c1e1a16a9b07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -273,7 +273,7 @@ static void eq_pf_process(struct mlx5_eq *eq)
case MLX5_PFAULT_SUBTYPE_WQE:
/* WQE based event */
pfault->type =
- be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
+ (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
pfault->token =
be32_to_cpu(pf_eqe->wqe.token);
pfault->wqe.wq_num =
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 5645a4facad2..b8ee9101c506 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -245,7 +245,7 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
return ERR_PTR(res);
}
- /* Context will be freed by wait func after completion */
+ /* Context should be freed by the caller after completion. */
return context;
}
@@ -418,10 +418,8 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
cmd.flags = htonl(flags);
context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
- if (IS_ERR(context)) {
- err = PTR_ERR(context);
- goto out;
- }
+ if (IS_ERR(context))
+ return PTR_ERR(context);
err = mlx5_fpga_ipsec_cmd_wait(context);
if (err)
@@ -435,6 +433,7 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
}
out:
+ kfree(context);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 08eac92fc26c..0982c579ec74 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -109,12 +109,11 @@ struct mlx5i_tx_wqe {
static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
struct mlx5i_tx_wqe **wqe,
- u16 *pi)
+ u16 pi)
{
struct mlx5_wq_cyc *wq = &sq->wq;
- *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- *wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
+ *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
memset(*wqe, 0, sizeof(**wqe));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 68e7f8df2a6d..ddca327e8950 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -39,11 +39,6 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
return (u32)wq->fbc.sz_m1 + 1;
}
-u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
-{
- return wq->fbc.frag_sz_m1 + 1;
-}
-
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
{
return wq->fbc.sz_m1 + 1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 3a1a170bb2d7..b1293d153a58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -80,7 +80,6 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_cyc *wq,
struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
-u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
@@ -140,11 +139,6 @@ static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
return ctr & wq->fbc.sz_m1;
}
-static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr)
-{
- return ctr & wq->fbc.frag_sz_m1;
-}
-
static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq)
{
return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr);
@@ -160,6 +154,11 @@ static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
}
+static inline u16 mlx5_wq_cyc_get_contig_wqebbs(struct mlx5_wq_cyc *wq, u16 ix)
+{
+ return mlx5_frag_buf_get_idx_last_contig_stride(&wq->fbc, ix) - ix + 1;
+}
+
static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
{
int equal = (cc1 == cc2);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 81533d7f395c..937d0ace699a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1055,6 +1055,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
err_driver_init:
mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
+ mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
if (!reload)
devlink_unregister(devlink);
@@ -1088,6 +1089,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
mlxsw_thermal_fini(mlxsw_core->thermal);
+ mlxsw_hwmon_fini(mlxsw_core->hwmon);
if (!reload)
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 655ddd204ab2..c35be477856f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -359,6 +359,10 @@ static inline int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
return 0;
}
+static inline void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+}
+
#endif
struct mlxsw_thermal;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index f6cf2896d337..e04e8162aa14 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -303,8 +303,7 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
struct device *hwmon_dev;
int err;
- mlxsw_hwmon = devm_kzalloc(mlxsw_bus_info->dev, sizeof(*mlxsw_hwmon),
- GFP_KERNEL);
+ mlxsw_hwmon = kzalloc(sizeof(*mlxsw_hwmon), GFP_KERNEL);
if (!mlxsw_hwmon)
return -ENOMEM;
mlxsw_hwmon->core = mlxsw_core;
@@ -321,10 +320,9 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group;
mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs;
- hwmon_dev = devm_hwmon_device_register_with_groups(mlxsw_bus_info->dev,
- "mlxsw",
- mlxsw_hwmon,
- mlxsw_hwmon->groups);
+ hwmon_dev = hwmon_device_register_with_groups(mlxsw_bus_info->dev,
+ "mlxsw", mlxsw_hwmon,
+ mlxsw_hwmon->groups);
if (IS_ERR(hwmon_dev)) {
err = PTR_ERR(hwmon_dev);
goto err_hwmon_register;
@@ -337,5 +335,12 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
err_hwmon_register:
err_fans_init:
err_temp_init:
+ kfree(mlxsw_hwmon);
return err;
}
+
+void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+ hwmon_device_unregister(mlxsw_hwmon->hwmon_dev);
+ kfree(mlxsw_hwmon);
+}
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 1a4f2bb48ead..ed4e298cd823 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -133,9 +133,9 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
{
unsigned int val, timeout = 10;
- /* Wait for the issued mac table command to be completed, or timeout.
- * When the command read from ANA_TABLES_MACACCESS is
- * MACACCESS_CMD_IDLE, the issued command completed successfully.
+ /* Wait for the issued vlan table command to be completed, or timeout.
+ * When the command read from ANA_TABLES_VLANACCESS is
+ * VLANACCESS_CMD_IDLE, the issued command completed successfully.
*/
do {
val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 46ba0cf257c6..7a1e9cd9cc62 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -429,12 +429,14 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
switch (off) {
case offsetof(struct iphdr, daddr):
- set_ip_addr->ipv4_dst_mask = mask;
- set_ip_addr->ipv4_dst = exact;
+ set_ip_addr->ipv4_dst_mask |= mask;
+ set_ip_addr->ipv4_dst &= ~mask;
+ set_ip_addr->ipv4_dst |= exact & mask;
break;
case offsetof(struct iphdr, saddr):
- set_ip_addr->ipv4_src_mask = mask;
- set_ip_addr->ipv4_src = exact;
+ set_ip_addr->ipv4_src_mask |= mask;
+ set_ip_addr->ipv4_src &= ~mask;
+ set_ip_addr->ipv4_src |= exact & mask;
break;
default:
return -EOPNOTSUPP;
@@ -448,11 +450,12 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
}
static void
-nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask,
+nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
struct nfp_fl_set_ipv6_addr *ip6)
{
- ip6->ipv6[idx % 4].mask = mask;
- ip6->ipv6[idx % 4].exact = exact;
+ ip6->ipv6[word].mask |= mask;
+ ip6->ipv6[word].exact &= ~mask;
+ ip6->ipv6[word].exact |= exact & mask;
ip6->reserved = cpu_to_be16(0);
ip6->head.jump_id = opcode_tag;
@@ -465,6 +468,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
struct nfp_fl_set_ipv6_addr *ip_src)
{
__be32 exact, mask;
+ u8 word;
/* We are expecting tcf_pedit to return a big endian value */
mask = (__force __be32)~tcf_pedit_mask(action, idx);
@@ -473,17 +477,20 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
if (exact & ~mask)
return -EOPNOTSUPP;
- if (off < offsetof(struct ipv6hdr, saddr))
+ if (off < offsetof(struct ipv6hdr, saddr)) {
return -EOPNOTSUPP;
- else if (off < offsetof(struct ipv6hdr, daddr))
- nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx,
+ } else if (off < offsetof(struct ipv6hdr, daddr)) {
+ word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
+ nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
exact, mask, ip_src);
- else if (off < offsetof(struct ipv6hdr, daddr) +
- sizeof(struct in6_addr))
- nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx,
+ } else if (off < offsetof(struct ipv6hdr, daddr) +
+ sizeof(struct in6_addr)) {
+ word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
+ nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
exact, mask, ip_dst);
- else
+ } else {
return -EOPNOTSUPP;
+ }
return 0;
}
@@ -541,7 +548,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
struct nfp_fl_set_eth set_eth;
enum pedit_header_type htype;
int idx, nkeys, err;
- size_t act_size;
+ size_t act_size = 0;
u32 offset, cmd;
u8 ip_proto = 0;
@@ -599,7 +606,9 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
act_size = sizeof(set_eth);
memcpy(nfp_action, &set_eth, act_size);
*a_len += act_size;
- } else if (set_ip_addr.head.len_lw) {
+ }
+ if (set_ip_addr.head.len_lw) {
+ nfp_action += act_size;
act_size = sizeof(set_ip_addr);
memcpy(nfp_action, &set_ip_addr, act_size);
*a_len += act_size;
@@ -607,10 +616,12 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
nfp_fl_csum_l4_to_flag(ip_proto);
- } else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
+ }
+ if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
/* TC compiles set src and dst IPv6 address as a single action,
* the hardware requires this to be 2 separate actions.
*/
+ nfp_action += act_size;
act_size = sizeof(set_ip6_src);
memcpy(nfp_action, &set_ip6_src, act_size);
*a_len += act_size;
@@ -623,6 +634,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_dst.head.len_lw) {
+ nfp_action += act_size;
act_size = sizeof(set_ip6_dst);
memcpy(nfp_action, &set_ip6_dst, act_size);
*a_len += act_size;
@@ -630,13 +642,16 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_src.head.len_lw) {
+ nfp_action += act_size;
act_size = sizeof(set_ip6_src);
memcpy(nfp_action, &set_ip6_src, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
- } else if (set_tport.head.len_lw) {
+ }
+ if (set_tport.head.len_lw) {
+ nfp_action += act_size;
act_size = sizeof(set_tport);
memcpy(nfp_action, &set_tport, act_size);
*a_len += act_size;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index af3a28ec04eb..0f0aba793352 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -228,7 +228,7 @@ static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
(GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
- QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Ireelevant)",
+ QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));
out:
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index b48f76182049..10b075bc5959 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -380,8 +380,6 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
- ql_write_nvram_reg(qdev, spir,
- ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}
/*
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3a5e6160bf0d..2c350099b83c 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6549,17 +6549,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
struct net_device *dev = tp->dev;
u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
- int work_done= 0;
+ int work_done;
u16 status;
status = rtl_get_events(tp);
rtl_ack_events(tp, status & ~tp->event_slow);
- if (status & RTL_EVENT_NAPI_RX)
- work_done = rtl_rx(dev, tp, (u32) budget);
+ work_done = rtl_rx(dev, tp, (u32) budget);
- if (status & RTL_EVENT_NAPI_TX)
- rtl_tx(dev, tp);
+ rtl_tx(dev, tp);
if (status & tp->event_slow) {
enable_mask &= ~tp->event_slow;
@@ -7093,20 +7091,12 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
{
unsigned int flags;
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
RTL_W8(tp, Cfg9346, Cfg9346_Lock);
flags = PCI_IRQ_LEGACY;
- break;
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_40:
- /* This version was reported to have issues with resume
- * from suspend when using MSI-X
- */
- flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
- break;
- default:
+ } else {
flags = PCI_IRQ_ALL_TYPES;
}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 6acb6b5718b9..493cd382b8aa 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -830,12 +830,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(rt))
return PTR_ERR(rt);
- if (skb_dst(skb)) {
- int mtu = dst_mtu(&rt->dst) - GENEVE_IPV4_HLEN -
- info->options_len;
-
- skb_dst_update_pmtu(skb, mtu);
- }
+ skb_tunnel_check_pmtu(skb, &rt->dst,
+ GENEVE_IPV4_HLEN + info->options_len);
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
@@ -876,11 +872,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(dst))
return PTR_ERR(dst);
- if (skb_dst(skb)) {
- int mtu = dst_mtu(dst) - GENEVE_IPV6_HLEN - info->options_len;
-
- skb_dst_update_pmtu(skb, mtu);
- }
+ skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len);
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index dab504ec5e50..ddfa3f24204c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2218,8 +2218,9 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
/* Make sure no work handler is accessing the device */
flush_work(&vi->config_work);
+ netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
- netif_tx_disable(vi->dev);
+ netif_tx_unlock_bh(vi->dev);
cancel_delayed_work_sync(&vi->refill);
if (netif_running(vi->dev)) {
@@ -2255,7 +2256,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
}
}
+ netif_tx_lock_bh(vi->dev);
netif_device_attach(vi->dev);
+ netif_tx_unlock_bh(vi->dev);
return err;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 2b8da2b7e721..27bd586b94b0 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2194,11 +2194,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
ndst = &rt->dst;
- if (skb_dst(skb)) {
- int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
-
- skb_dst_update_pmtu(skb, mtu);
- }
+ skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
@@ -2235,11 +2231,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto out_unlock;
}
- if (skb_dst(skb)) {
- int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
-
- skb_dst_update_pmtu(skb, mtu);
- }
+ skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 01b0e2bb3319..2012551d93e0 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -24,6 +24,8 @@
#include <linux/slab.h>
#include <linux/timekeeping.h>
+#include <linux/nospec.h>
+
#include "ptp_private.h"
static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
@@ -248,6 +250,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
err = -EINVAL;
break;
}
+ pin_index = array_index_nospec(pin_index, ops->n_pins);
if (mutex_lock_interruptible(&ptp->pincfg_mux))
return -ERESTARTSYS;
pd = ops->pin_config[pin_index];
@@ -266,6 +269,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
err = -EINVAL;
break;
}
+ pin_index = array_index_nospec(pin_index, ops->n_pins);
if (mutex_lock_interruptible(&ptp->pincfg_mux))
return -ERESTARTSYS;
err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 66d94b4557cf..88a041b73abf 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1032,6 +1032,14 @@ static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
((fbc->frag_sz_m1 & ix) << fbc->log_stride);
}
+static inline u32
+mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
+{
+ u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;
+
+ return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
+}
+
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
diff --git a/include/net/dst.h b/include/net/dst.h
index 7f735e76ca73..6cf0870414c7 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -527,4 +527,14 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
dst->ops->update_pmtu(dst, NULL, skb, mtu);
}
+static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
+ struct dst_entry *encap_dst,
+ int headroom)
+{
+ u32 encap_mtu = dst_mtu(encap_dst);
+
+ if (skb->len > encap_mtu - headroom)
+ skb_dst_update_pmtu(skb, encap_mtu - headroom);
+}
+
#endif /* _NET_DST_H */
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 3d4930528db0..2d31e22babd8 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -159,6 +159,10 @@ struct fib6_info {
struct rt6_info * __percpu *rt6i_pcpu;
struct rt6_exception_bucket __rcu *rt6i_exception_bucket;
+#ifdef CONFIG_IPV6_ROUTER_PREF
+ unsigned long last_probe;
+#endif
+
u32 fib6_metric;
u8 fib6_protocol;
u8 fib6_type;
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 5ef1bad81ef5..9e3d32746430 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -347,7 +347,7 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
__u16 size;
size = ntohs(chunk->chunk_hdr->length);
- size -= sctp_datahdr_len(&chunk->asoc->stream);
+ size -= sctp_datachk_len(&chunk->asoc->stream);
return size;
}
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 28a7c8e44636..a11f93790476 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -876,6 +876,8 @@ struct sctp_transport {
unsigned long sackdelay;
__u32 sackfreq;
+ atomic_t mtu_info;
+
/* When was the last time that we heard from this transport? We use
* this to pick new active and retran paths.
*/
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index b479db5c71d9..34dd3d497f2c 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -301,6 +301,7 @@ enum sctp_sinfo_flags {
SCTP_SACK_IMMEDIATELY = (1 << 3), /* SACK should be sent without delay. */
/* 2 bits here have been used by SCTP_PR_SCTP_MASK */
SCTP_SENDALL = (1 << 6),
+ SCTP_PR_SCTP_ALL = (1 << 7),
SCTP_NOTIFICATION = MSG_NOTIFICATION, /* Next message is not user msg but notification. */
SCTP_EOF = MSG_FIN, /* Initiate graceful shutdown process. */
};
diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c
index 9f8463afda9c..47147c9e184d 100644
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -192,11 +192,8 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
sock_hold(sock->sk);
old_xs = xchg(&m->xsk_map[i], xs);
- if (old_xs) {
- /* Make sure we've flushed everything. */
- synchronize_net();
+ if (old_xs)
sock_put((struct sock *)old_xs);
- }
sockfd_put(sock);
return 0;
@@ -212,11 +209,8 @@ static int xsk_map_delete_elem(struct bpf_map *map, void *key)
return -EINVAL;
old_xs = xchg(&m->xsk_map[k], NULL);
- if (old_xs) {
- /* Make sure we've flushed everything. */
- synchronize_net();
+ if (old_xs)
sock_put((struct sock *)old_xs);
- }
return 0;
}
diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c
index b64e1649993b..94e88f510c5b 100644
--- a/net/bpfilter/bpfilter_kern.c
+++ b/net/bpfilter/bpfilter_kern.c
@@ -23,9 +23,11 @@ static void shutdown_umh(struct umh_info *info)
if (!info->pid)
return;
- tsk = pid_task(find_vpid(info->pid), PIDTYPE_PID);
- if (tsk)
+ tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID);
+ if (tsk) {
force_sig(SIGKILL, tsk);
+ put_task_struct(tsk);
+ }
fput(info->pipe_to_umh);
fput(info->pipe_from_umh);
info->pid = 0;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 0762aaf8e964..aeabc4831fca 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1015,6 +1015,9 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
return -EINVAL;
}
+ if (info.cmd != cmd)
+ return -EINVAL;
+
if (info.cmd == ETHTOOL_GRXCLSRLALL) {
if (info.rule_cnt > 0) {
if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
@@ -2469,13 +2472,17 @@ roll_back:
return ret;
}
-static int ethtool_set_per_queue(struct net_device *dev, void __user *useraddr)
+static int ethtool_set_per_queue(struct net_device *dev,
+ void __user *useraddr, u32 sub_cmd)
{
struct ethtool_per_queue_op per_queue_opt;
if (copy_from_user(&per_queue_opt, useraddr, sizeof(per_queue_opt)))
return -EFAULT;
+ if (per_queue_opt.sub_command != sub_cmd)
+ return -EINVAL;
+
switch (per_queue_opt.sub_command) {
case ETHTOOL_GCOALESCE:
return ethtool_get_per_queue_coalesce(dev, useraddr, &per_queue_opt);
@@ -2846,7 +2853,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
rc = ethtool_get_phy_stats(dev, useraddr);
break;
case ETHTOOL_PERQUEUE:
- rc = ethtool_set_per_queue(dev, useraddr);
+ rc = ethtool_set_per_queue(dev, useraddr, sub_cmd);
break;
case ETHTOOL_GLINKSETTINGS:
rc = ethtool_get_link_ksettings(dev, useraddr);
diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c
index 1ad9aa62a97b..eab8cd5ec2f5 100644
--- a/net/ipv4/ipmr_base.c
+++ b/net/ipv4/ipmr_base.c
@@ -296,8 +296,6 @@ int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
next_entry:
e++;
}
- e = 0;
- s_e = 0;
spin_lock_bh(lock);
list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index a0b6932c3afd..a9d06d4dd057 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1184,11 +1184,6 @@ route_lookup:
}
skb_dst_set(skb, dst);
- if (encap_limit >= 0) {
- init_tel_txopt(&opt, encap_limit);
- ipv6_push_frag_opts(skb, &opt.ops, &proto);
- }
-
if (hop_limit == 0) {
if (skb->protocol == htons(ETH_P_IP))
hop_limit = ip_hdr(skb)->ttl;
@@ -1210,6 +1205,11 @@ route_lookup:
if (err)
return err;
+ if (encap_limit >= 0) {
+ init_tel_txopt(&opt, encap_limit);
+ ipv6_push_frag_opts(skb, &opt.ops, &proto);
+ }
+
skb_push(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
ipv6h = ipv6_hdr(skb);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 4ae54aaca373..dbab62e3f0d7 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2436,17 +2436,17 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
{
int err;
- /* callers have the socket lock and rtnl lock
- * so no other readers or writers of iml or its sflist
- */
+ write_lock_bh(&iml->sflock);
if (!iml->sflist) {
/* any-source empty exclude case */
- return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
+ err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
+ } else {
+ err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
+ iml->sflist->sl_count, iml->sflist->sl_addr, 0);
+ sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
+ iml->sflist = NULL;
}
- err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
- iml->sflist->sl_count, iml->sflist->sl_addr, 0);
- sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
- iml->sflist = NULL;
+ write_unlock_bh(&iml->sflock);
return err;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a366c05a239d..abcb5ae77319 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -520,10 +520,11 @@ static void rt6_probe_deferred(struct work_struct *w)
static void rt6_probe(struct fib6_info *rt)
{
- struct __rt6_probe_work *work;
+ struct __rt6_probe_work *work = NULL;
const struct in6_addr *nh_gw;
struct neighbour *neigh;
struct net_device *dev;
+ struct inet6_dev *idev;
/*
* Okay, this does not seem to be appropriate
@@ -539,15 +540,12 @@ static void rt6_probe(struct fib6_info *rt)
nh_gw = &rt->fib6_nh.nh_gw;
dev = rt->fib6_nh.nh_dev;
rcu_read_lock_bh();
+ idev = __in6_dev_get(dev);
neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
if (neigh) {
- struct inet6_dev *idev;
-
if (neigh->nud_state & NUD_VALID)
goto out;
- idev = __in6_dev_get(dev);
- work = NULL;
write_lock(&neigh->lock);
if (!(neigh->nud_state & NUD_VALID) &&
time_after(jiffies,
@@ -557,11 +555,13 @@ static void rt6_probe(struct fib6_info *rt)
__neigh_set_probe_once(neigh);
}
write_unlock(&neigh->lock);
- } else {
+ } else if (time_after(jiffies, rt->last_probe +
+ idev->cnf.rtr_probe_interval)) {
work = kmalloc(sizeof(*work), GFP_ATOMIC);
}
if (work) {
+ rt->last_probe = jiffies;
INIT_WORK(&work->work, rt6_probe_deferred);
work->target = *nh_gw;
dev_hold(dev);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 28c4aa5078fc..b36694b6716e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -766,11 +766,9 @@ static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
ret = udpv6_queue_rcv_skb(sk, skb);
- /* a return value > 0 means to resubmit the input, but
- * it wants the return to be -protocol, or 0
- */
+ /* a return value > 0 means to resubmit the input */
if (ret > 0)
- return -ret;
+ return ret;
return 0;
}
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index ef3defaf43b9..d35bcf92969c 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -146,8 +146,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
- while (nh + offset + 1 < skb->data ||
- pskb_may_pull(skb, nh + offset + 1 - skb->data)) {
+ while (nh + offset + sizeof(*exthdr) < skb->data ||
+ pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
nh = skb_network_header(skb);
exthdr = (struct ipv6_opt_hdr *)(nh + offset);
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index c0ac522b48a1..4ff89cb7c86f 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -734,6 +734,7 @@ void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
llc_sk(sk)->sap = sap;
spin_lock_bh(&sap->sk_lock);
+ sock_set_flag(sk, SOCK_RCU_FREE);
sap->sk_count++;
sk_nulls_add_node_rcu(sk, laddr_hb);
hlist_add_head(&llc->dev_hash_node, dev_hb);
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 652e314de38e..8079aacaecac 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -337,7 +337,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_connection *conn;
- struct rxrpc_peer *peer;
+ struct rxrpc_peer *peer = NULL;
struct rxrpc_call *call;
_enter("");
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index cad0691c2bb4..0906e51d3cfb 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -139,7 +139,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
udp_sk(usk)->gro_complete = NULL;
udp_encap_enable();
-#if IS_ENABLED(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_AF_RXRPC_IPV6)
if (local->srx.transport.family == AF_INET6)
udpv6_encap_enable();
#endif
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index e8fb8922bca8..a141ee3ab812 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -572,7 +572,8 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
whdr.flags ^= RXRPC_CLIENT_INITIATED;
whdr.flags &= RXRPC_CLIENT_INITIATED;
- ret = kernel_sendmsg(local->socket, &msg, iov, 2, size);
+ ret = kernel_sendmsg(local->socket, &msg,
+ iov, ioc, size);
if (ret < 0)
trace_rxrpc_tx_fail(local->debug_id, 0, ret,
rxrpc_tx_point_reject);
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 05b51bdbdd41..bd2fa3b7caa7 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -195,6 +195,7 @@ void rxrpc_error_report(struct sock *sk)
rxrpc_store_error(peer, serr);
rcu_read_unlock();
rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ rxrpc_put_peer(peer);
_leave("");
}
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 0a75cb2e5e7b..70f144ac5e1d 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -31,6 +31,8 @@
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
+extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
+
/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);
@@ -1211,7 +1213,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
replay:
tp_created = 0;
- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
if (err < 0)
return err;
@@ -1360,7 +1362,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
if (err < 0)
return err;
@@ -1475,7 +1477,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
void *fh = NULL;
int err;
- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
if (err < 0)
return err;
@@ -1838,7 +1840,7 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
return -EPERM;
replay:
- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
if (err < 0)
return err;
@@ -1949,7 +1951,8 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
if (nlmsg_len(cb->nlh) < sizeof(*tcm))
return skb->len;
- err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
+ err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+ NULL);
if (err)
return err;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 85e73f48e48f..3dc0acf54245 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1307,10 +1307,6 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
return 0;
}
-/*
- * Delete/get qdisc.
- */
-
const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
[TCA_KIND] = { .type = NLA_STRING },
[TCA_OPTIONS] = { .type = NLA_NESTED },
@@ -1323,6 +1319,10 @@ const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
[TCA_EGRESS_BLOCK] = { .type = NLA_U32 },
};
+/*
+ * Delete/get qdisc.
+ */
+
static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
struct netlink_ext_ack *extack)
{
@@ -2059,7 +2059,8 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
if (tcm->tcm_parent) {
q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
- if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
+ if (q && q != root &&
+ tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
return -1;
return 0;
}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 297d9cf960b9..a827a1f562bf 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1450,7 +1450,8 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
/* Get the lowest pmtu of all the transports. */
list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
if (t->pmtu_pending && t->dst) {
- sctp_transport_update_pmtu(t, sctp_dst_mtu(t->dst));
+ sctp_transport_update_pmtu(t,
+ atomic_read(&t->mtu_info));
t->pmtu_pending = 0;
}
if (!pmtu || (t->pathmtu < pmtu))
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 9bbc5f92c941..5c36a99882ed 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -395,6 +395,7 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
return;
if (sock_owned_by_user(sk)) {
+ atomic_set(&t->mtu_info, pmtu);
asoc->pmtu_pending = 1;
t->pmtu_pending = 1;
return;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 7f849b01ec8e..67939ad99c01 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -120,6 +120,12 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
sctp_assoc_sync_pmtu(asoc);
}
+ if (asoc->pmtu_pending) {
+ if (asoc->param_flags & SPP_PMTUD_ENABLE)
+ sctp_assoc_sync_pmtu(asoc);
+ asoc->pmtu_pending = 0;
+ }
+
/* If there a is a prepend chunk stick it on the list before
* any other chunks get appended.
*/
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f73e9d38d5ba..c1c1bda334a4 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -271,11 +271,10 @@ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
spin_lock_bh(&sctp_assocs_id_lock);
asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
+ if (asoc && (asoc->base.sk != sk || asoc->base.dead))
+ asoc = NULL;
spin_unlock_bh(&sctp_assocs_id_lock);
- if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
- return NULL;
-
return asoc;
}
@@ -1946,8 +1945,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
if (sp->strm_interleave) {
timeo = sock_sndtimeo(sk, 0);
err = sctp_wait_for_connect(asoc, &timeo);
- if (err)
+ if (err) {
+ err = -ESRCH;
goto err;
+ }
} else {
wait_connect = true;
}
@@ -7100,14 +7101,14 @@ static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len,
}
policy = params.sprstat_policy;
- if (policy & ~SCTP_PR_SCTP_MASK)
+ if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)))
goto out;
asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
if (!asoc)
goto out;
- if (policy == SCTP_PR_SCTP_NONE) {
+ if (policy & SCTP_PR_SCTP_ALL) {
params.sprstat_abandoned_unsent = 0;
params.sprstat_abandoned_sent = 0;
for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
@@ -7159,7 +7160,7 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
}
policy = params.sprstat_policy;
- if (policy & ~SCTP_PR_SCTP_MASK)
+ if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)))
goto out;
asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
@@ -7175,7 +7176,7 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
goto out;
}
- if (policy == SCTP_PR_SCTP_NONE) {
+ if (policy == SCTP_PR_SCTP_ALL) {
params.sprstat_abandoned_unsent = 0;
params.sprstat_abandoned_sent = 0;
for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
diff --git a/net/socket.c b/net/socket.c
index 01f3f8f32d6f..390a8ecef4bf 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2875,9 +2875,14 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
copy_in_user(&rxnfc->fs.ring_cookie,
&compat_rxnfc->fs.ring_cookie,
(void __user *)(&rxnfc->fs.location + 1) -
- (void __user *)&rxnfc->fs.ring_cookie) ||
- copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
- sizeof(rxnfc->rule_cnt)))
+ (void __user *)&rxnfc->fs.ring_cookie))
+ return -EFAULT;
+ if (ethcmd == ETHTOOL_GRXCLSRLALL) {
+ if (put_user(rule_cnt, &rxnfc->rule_cnt))
+ return -EFAULT;
+ } else if (copy_in_user(&rxnfc->rule_cnt,
+ &compat_rxnfc->rule_cnt,
+ sizeof(rxnfc->rule_cnt)))
return -EFAULT;
}
diff --git a/net/tipc/group.c b/net/tipc/group.c
index e82f13cb2dc5..06fee142f09f 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -666,6 +666,7 @@ static void tipc_group_create_event(struct tipc_group *grp,
struct sk_buff *skb;
struct tipc_msg *hdr;
+ memset(&evt, 0, sizeof(evt));
evt.event = event;
evt.found_lower = m->instance;
evt.found_upper = m->instance;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index f6552e4f4b43..201c3b5bc96b 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1041,6 +1041,7 @@ static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
if (r->last_retransm != buf_seqno(skb)) {
r->last_retransm = buf_seqno(skb);
r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
+ r->stale_cnt = 0;
} else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
link_retransmit_failure(l, skb);
if (link_is_bc_sndlink(l))
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 51b4b96f89db..3cfeb9df64b0 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -115,7 +115,7 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
struct sk_buff *buf;
struct distr_item *item;
- list_del(&publ->binding_node);
+ list_del_rcu(&publ->binding_node);
if (publ->scope == TIPC_NODE_SCOPE)
return NULL;
@@ -147,7 +147,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
ITEM_SIZE) * ITEM_SIZE;
u32 msg_rem = msg_dsz;
- list_for_each_entry(publ, pls, binding_node) {
+ list_for_each_entry_rcu(publ, pls, binding_node) {
/* Prepare next buffer: */
if (!skb) {
skb = named_prepare_buf(net, PUBLICATION, msg_rem,
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 4e937cd7c17d..661504042d30 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -744,6 +744,8 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
sk->sk_destruct = xsk_destruct;
sk_refcnt_debug_inc(sk);
+ sock_set_flag(sk, SOCK_RCU_FREE);
+
xs = xdp_sk(sk);
mutex_init(&xs->mutex);
spin_lock_init(&xs->tx_completion_lock);
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 31acc6f33d98..6f05e831a73e 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -116,6 +116,9 @@ static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
static void xfrmi_dev_free(struct net_device *dev)
{
+ struct xfrm_if *xi = netdev_priv(dev);
+
+ gro_cells_destroy(&xi->gro_cells);
free_percpu(dev->tstats);
}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index f094d4b3520d..119a427d9b2b 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -632,9 +632,9 @@ static void xfrm_hash_rebuild(struct work_struct *work)
break;
}
if (newpos)
- hlist_add_behind(&policy->bydst, newpos);
+ hlist_add_behind_rcu(&policy->bydst, newpos);
else
- hlist_add_head(&policy->bydst, chain);
+ hlist_add_head_rcu(&policy->bydst, chain);
}
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
@@ -774,9 +774,9 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
break;
}
if (newpos)
- hlist_add_behind(&policy->bydst, newpos);
+ hlist_add_behind_rcu(&policy->bydst, newpos);
else
- hlist_add_head(&policy->bydst, chain);
+ hlist_add_head_rcu(&policy->bydst, chain);
__xfrm_policy_link(policy, dir);
/* After previous checking, family can either be AF_INET or AF_INET6 */
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
index cad14cd0ea92..b5277106df1f 100644
--- a/tools/testing/selftests/net/reuseport_bpf.c
+++ b/tools/testing/selftests/net/reuseport_bpf.c
@@ -437,14 +437,19 @@ void enable_fastopen(void)
}
}
-static struct rlimit rlim_old, rlim_new;
+static struct rlimit rlim_old;
static __attribute__((constructor)) void main_ctor(void)
{
getrlimit(RLIMIT_MEMLOCK, &rlim_old);
- rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
- rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
- setrlimit(RLIMIT_MEMLOCK, &rlim_new);
+
+ if (rlim_old.rlim_cur != RLIM_INFINITY) {
+ struct rlimit rlim_new;
+
+ rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
+ rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
+ setrlimit(RLIMIT_MEMLOCK, &rlim_new);
+ }
}
static __attribute__((destructor)) void main_dtor(void)