author     Jakub Kicinski <kuba@kernel.org>    2021-06-29 15:45:27 -0700
committer  Jakub Kicinski <kuba@kernel.org>    2021-06-29 15:45:27 -0700
commit     b6df00789e2831fff7a2c65aa7164b2a4dcbe599 (patch)
tree       a94cbeeca3f0ae2fffed008cb287c02dbee4dceb /include/net
parent     3f8ad50a9e43b6a59070e6c9c5eec79626f81095 (diff)
parent     a118ff661889ecee3ca90f8125bad8fb5bbc07d5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Trivial conflict in net/netfilter/nf_tables_api.c.

Duplicate fix in tools/testing/selftests/net/devlink_port_split.py -
take the net-next version.

skmsg, and L4 bpf - keep the bpf code but remove the flags and err
params.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'include/net')
-rw-r--r--  include/net/flow_offload.h    12
-rw-r--r--  include/net/ip.h              12
-rw-r--r--  include/net/ip6_route.h       16
-rw-r--r--  include/net/macsec.h           2
-rw-r--r--  include/net/sch_generic.h     12
-rw-r--r--  include/net/sctp/structs.h     2
-rw-r--r--  include/net/xfrm.h             2
-rw-r--r--  include/net/xsk_buff_pool.h    9
8 files changed, 49 insertions(+), 18 deletions(-)
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index dc5c1e69cd9f..69c9eabf8325 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -319,12 +319,14 @@ flow_action_mixed_hw_stats_check(const struct flow_action *action,
 	if (flow_offload_has_one_action(action))
 		return true;
 
-	flow_action_for_each(i, action_entry, action) {
-		if (i && action_entry->hw_stats != last_hw_stats) {
-			NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
-			return false;
+	if (action) {
+		flow_action_for_each(i, action_entry, action) {
+			if (i && action_entry->hw_stats != last_hw_stats) {
+				NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
+				return false;
+			}
+			last_hw_stats = action_entry->hw_stats;
 		}
-		last_hw_stats = action_entry->hw_stats;
 	}
 	return true;
 }
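
[Annotation] The hunk above fixes a potential NULL dereference: the action list is now only walked when one is actually present. A minimal userspace sketch of the same "all entries must agree" check, with a plain array standing in for struct flow_action (all names below are illustrative, not kernel API):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Stand-in for struct flow_action_entry; only hw_stats matters here. */
	struct entry { unsigned char hw_stats; };

	static bool mixed_hw_stats_check(const struct entry *entries, size_t n)
	{
		unsigned char last = 0;
		size_t i;

		if (n == 1)		/* a single action can never be mixed */
			return true;

		if (entries) {		/* the NULL guard the fix adds */
			for (i = 0; i < n; i++) {
				if (i && entries[i].hw_stats != last) {
					fprintf(stderr, "mixed HW stats types\n");
					return false;
				}
				last = entries[i].hw_stats;
			}
		}
		return true;
	}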
diff --git a/include/net/ip.h b/include/net/ip.h
index e20874059f82..d9683bef8684 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -31,6 +31,7 @@
 #include <net/flow.h>
 #include <net/flow_dissector.h>
 #include <net/netns/hash.h>
+#include <net/lwtunnel.h>
 
 #define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
 #define IPV4_MIN_MTU		68		/* RFC 791 */
@@ -445,22 +446,25 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
 	/* 'forwarding = true' case should always honour route mtu */
 	mtu = dst_metric_raw(dst, RTAX_MTU);
-	if (mtu)
-		return mtu;
+	if (!mtu)
+		mtu = min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
 
-	return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
+	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
 }
 
 static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
 					  const struct sk_buff *skb)
 {
+	unsigned int mtu;
+
 	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
 		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
 
 		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
 	}
 
-	return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
+	mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
+	return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
 }
 
 struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
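
[Annotation] Both helpers now finish by reserving lightweight-tunnel encapsulation space from whichever MTU they settled on, instead of returning the route or device MTU untouched. A compilable sketch of just that arithmetic, assuming a fixed headroom value in place of lwtunnel_headroom() (names are illustrative):

	#include <stdio.h>

	#define IP_MAX_MTU 0xFFFFU

	/* Assumed encapsulation overhead a lightweight tunnel prepends. */
	static const unsigned int tunnel_headroom = 8;

	/* Mirrors the new ip_dst_mtu_maybe_forward() flow: prefer the
	 * route MTU, fall back to the clamped device MTU, then reserve
	 * the tunnel headroom from the result. */
	static unsigned int effective_mtu(unsigned int route_mtu,
					  unsigned int dev_mtu)
	{
		unsigned int mtu = route_mtu;

		if (!mtu)
			mtu = dev_mtu < IP_MAX_MTU ? dev_mtu : IP_MAX_MTU;

		return mtu - tunnel_headroom;
	}

	int main(void)
	{
		printf("%u\n", effective_mtu(0, 1500));		/* 1492 */
		printf("%u\n", effective_mtu(1400, 1500));	/* 1392 */
		return 0;
	}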
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index f51a118bfce8..f14149df5a65 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -265,11 +265,18 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 
 static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
 {
+	int mtu;
+
 	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
 				inet6_sk(skb->sk) : NULL;
 
-	return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
-	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+	if (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) {
+		mtu = READ_ONCE(skb_dst(skb)->dev->mtu);
+		mtu -= lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
+	} else
+		mtu = dst_mtu(skb_dst(skb));
+
+	return mtu;
 }
 
 static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
@@ -317,7 +324,7 @@ static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 	if (dst_metric_locked(dst, RTAX_MTU)) {
 		mtu = dst_metric_raw(dst, RTAX_MTU);
 		if (mtu)
-			return mtu;
+			goto out;
 	}
 
 	mtu = IPV6_MIN_MTU;
@@ -327,7 +334,8 @@ static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 	mtu = idev->cnf.mtu6;
 	rcu_read_unlock();
 
-	return mtu;
+out:
+	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
 }
 
 u32 ip6_mtu_from_fib6(const struct fib6_result *res,
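
[Annotation] The IPv6 side gets the same correction: when the socket probes PMTU (IPV6_PMTUDISC_PROBE) the device MTU is taken and tunnel headroom subtracted, otherwise the cached dst_mtu() value is used as-is. A small sketch of that branch logic under those assumptions (illustrative names, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	static int skb_dst_mtu6(bool pmtu_probe, int dev_mtu, int path_mtu,
				int tunnel_headroom)
	{
		int mtu;

		if (pmtu_probe) {
			/* probing starts from the raw device MTU, so the
			 * tunnel headroom must be reserved explicitly */
			mtu = dev_mtu;
			mtu -= tunnel_headroom;
		} else {
			mtu = path_mtu;
		}

		return mtu;
	}

	int main(void)
	{
		printf("%d\n", skb_dst_mtu6(true, 1500, 1400, 8));	/* 1492 */
		printf("%d\n", skb_dst_mtu6(false, 1500, 1400, 8));	/* 1400 */
		return 0;
	}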
diff --git a/include/net/macsec.h b/include/net/macsec.h
index 52874cdfe226..d6fa6b97f6ef 100644
--- a/include/net/macsec.h
+++ b/include/net/macsec.h
@@ -241,7 +241,7 @@ struct macsec_context {
 	struct macsec_rx_sc *rx_sc;
 	struct {
 		unsigned char assoc_num;
-		u8 key[MACSEC_KEYID_LEN];
+		u8 key[MACSEC_MAX_KEY_LEN];
 		union {
 			struct macsec_rx_sa *rx_sa;
 			struct macsec_tx_sa *tx_sa;
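
[Annotation] The one-line macsec change matters because this field holds the key material itself, not its identifier: MACSEC_KEYID_LEN is 16 bytes while a GCM-AES-256 key is 32, so copying a full-length key into the old field could overflow it. A sketch of the safer sizing pattern (the struct layout is illustrative; the two constants match the kernel's definitions):

	#include <stdbool.h>
	#include <string.h>

	#define MACSEC_KEYID_LEN	16	/* key *identifier* length */
	#define MACSEC_MAX_KEY_LEN	128	/* largest key the uAPI accepts */

	struct sa_config {
		/* sized for the largest key, not the key id */
		unsigned char key[MACSEC_MAX_KEY_LEN];
		unsigned int key_len;
	};

	/* Defensive copy: reject anything the buffer cannot hold. */
	static bool sa_set_key(struct sa_config *sa, const unsigned char *key,
			       unsigned int len)
	{
		if (len > sizeof(sa->key))
			return false;
		memcpy(sa->key, key, len);
		sa->key_len = len;
		return true;
	}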
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c99ffe9cc88f..9ed33e6840bd 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -173,6 +173,12 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 		if (spin_trylock(&qdisc->seqlock))
 			return true;
 
+		/* Paired with smp_mb__after_atomic() to make sure
+		 * STATE_MISSED checking is synchronized with clearing
+		 * in pfifo_fast_dequeue().
+		 */
+		smp_mb__before_atomic();
+
 		/* If the MISSED flag is set, it means other thread has
 		 * set the MISSED flag before second spin_trylock(), so
 		 * we can return false here to avoid multi cpus doing
@@ -190,6 +196,12 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 		 */
 		set_bit(__QDISC_STATE_MISSED, &qdisc->state);
 
+		/* spin_trylock() only has load-acquire semantic, so use
+		 * smp_mb__after_atomic() to ensure STATE_MISSED is set
+		 * before doing the second spin_trylock().
+		 */
+		smp_mb__after_atomic();
+
 		/* Retry again in case other CPU may not see the new flag
 		 * after it releases the lock at the end of qdisc_run_end().
 		 */
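
[Annotation] The two inserted barriers implement a common lockless handshake: set a flag, issue a full barrier, retry the lock. A rough userspace analogue, using C11 fences and a pthread trylock in place of set_bit(), smp_mb__{before,after}_atomic() and spin_trylock(); this mirrors the shape of qdisc_run_begin(), not its kernel implementation:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	struct qdisc_like {
		pthread_mutex_t lock;	/* stands in for qdisc->seqlock */
		atomic_int missed;	/* stands in for __QDISC_STATE_MISSED */
	};

	static bool run_begin(struct qdisc_like *q)
	{
		if (pthread_mutex_trylock(&q->lock) == 0)
			return true;

		/* Full fence before testing the flag, pairing with the
		 * fence after the dequeue side clears it. */
		atomic_thread_fence(memory_order_seq_cst);

		if (atomic_load(&q->missed))
			return false;

		atomic_store(&q->missed, 1);

		/* A trylock is only an acquire; a full fence orders the
		 * store to missed before the retry below. */
		atomic_thread_fence(memory_order_seq_cst);

		return pthread_mutex_trylock(&q->lock) == 0;
	}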
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index c4a4c1754be8..32fc4a309df5 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -463,7 +463,7 @@ struct sctp_af {
 					 int saddr);
 	void		(*from_sk)	(union sctp_addr *,
 					 struct sock *sk);
-	void		(*from_addr_param) (union sctp_addr *,
+	bool		(*from_addr_param) (union sctp_addr *,
 					    union sctp_addr_param *,
 					    __be16 port, int iif);
 	int		(*to_addr_param) (const union sctp_addr *,
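
[Annotation] Changing from_addr_param's return type from void to bool lets callers reject malformed address parameters instead of converting garbage. A sketch of the bool-returning parser shape, with made-up types and a made-up length rule (the kernel's actual validation differs):

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	struct addr  { uint8_t bytes[16]; };
	struct param { uint16_t type, len; uint8_t data[16]; };

	/* The output is only valid when this returns true, so a
	 * truncated parameter can no longer silently yield a bogus
	 * address. */
	static bool from_addr_param(struct addr *out, const struct param *p)
	{
		if (p->len < sizeof(struct param))
			return false;	/* truncated: reject */

		memcpy(out->bytes, p->data, sizeof(out->bytes));
		return true;
	}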
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index b7e65aec4b2d..cbff7c2a9724 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1538,6 +1538,7 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
 int xfrm_init_replay(struct xfrm_state *x);
+u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu);
 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
 int xfrm_init_state(struct xfrm_state *x);
@@ -1562,7 +1563,6 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
 		    int encap_type);
 int xfrm4_transport_finish(struct sk_buff *skb, int async);
 int xfrm4_rcv(struct sk_buff *skb);
-int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
 
 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
 {
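
[Annotation] Exposing __xfrm_state_mtu() alongside xfrm_state_mtu() follows the usual kernel split: the double-underscore worker computes the raw value and the plain-named wrapper applies policy on top. A toy illustration of that layering, with a minimum-MTU floor loosely modeled on IPv6's 1280-byte link minimum; the bodies are illustrative, not the xfrm implementation:

	#define IPV6_MIN_MTU 1280	/* RFC 8200 minimum link MTU */

	/* raw computation, no policy */
	static int __state_mtu(int mtu, int overhead)
	{
		return mtu - overhead;
	}

	/* the wrapper adds the floor; a caller that needs the unclamped
	 * value can use the __ variant directly */
	static int state_mtu(int mtu, int overhead, int is_ipv6)
	{
		int res = __state_mtu(mtu, overhead);

		if (is_ipv6 && res < IPV6_MIN_MTU)
			res = IPV6_MIN_MTU;
		return res;
	}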
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index eaa8386dbc63..7a9a23e7a604 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -147,11 +147,16 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
 {
 	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;
 
-	if (pool->dma_pages_cnt && cross_pg) {
+	if (likely(!cross_pg))
+		return false;
+
+	if (pool->dma_pages_cnt) {
 		return !(pool->dma_pages[addr >> PAGE_SHIFT] &
 			 XSK_NEXT_PG_CONTIG_MASK);
 	}
-	return false;
+
+	/* skb path */
+	return addr + len > pool->addrs_cnt;
 }
 
 static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
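
[Annotation] The refactor fast-paths the common no-crossing case with likely(!cross_pg) and, on the skb (copy-mode) path where no DMA page map exists, bounds the descriptor against the pool's address space rather than passing it unconditionally. The page-crossing test itself is simple modular arithmetic, checked below in isolation:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE	4096ULL

	/* A descriptor crosses a page iff its offset within the page
	 * plus its length spills past PAGE_SIZE. */
	static bool crosses_page(uint64_t addr, uint32_t len)
	{
		return (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;
	}

	int main(void)
	{
		printf("%d\n", crosses_page(4000, 96));		/* 0: ends at the boundary */
		printf("%d\n", crosses_page(4000, 97));		/* 1: spills into next page */
		printf("%d\n", crosses_page(8192, 4096));	/* 0: fills one whole page */
		return 0;
	}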