Diffstat (limited to 'include')
-rw-r--r--  include/linux/bitmap.h | 4
-rw-r--r--  include/linux/bpf-cgroup.h | 4
-rw-r--r--  include/linux/bpf.h | 214
-rw-r--r--  include/linux/bpf_types.h | 9
-rw-r--r--  include/linux/bpf_verifier.h | 10
-rw-r--r--  include/linux/btf.h | 52
-rw-r--r--  include/linux/dsa/sja1105.h | 6
-rw-r--r--  include/linux/etherdevice.h | 1
-rw-r--r--  include/linux/ethtool_netlink.h | 17
-rw-r--r--  include/linux/filter.h | 53
-rw-r--r--  include/linux/fsl/enetc_mdio.h | 55
-rw-r--r--  include/linux/fsl/ptp_qoriq.h | 1
-rw-r--r--  include/linux/mii.h | 50
-rw-r--r--  include/linux/mii_timestamper.h | 121
-rw-r--r--  include/linux/mlx4/device.h | 2
-rw-r--r--  include/linux/mlx5/device.h | 23
-rw-r--r--  include/linux/mlx5/driver.h | 6
-rw-r--r--  include/linux/mlx5/fs.h | 20
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 237
-rw-r--r--  include/linux/mmc/sdio_ids.h | 2
-rw-r--r--  include/linux/net.h | 1
-rw-r--r--  include/linux/netdev_features.h | 9
-rw-r--r--  include/linux/netdevice.h | 114
-rw-r--r--  include/linux/phy.h | 123
-rw-r--r--  include/linux/phylink.h | 2
-rw-r--r--  include/linux/platform_data/eth_ixp4xx.h | 19
-rw-r--r--  include/linux/platform_data/wan_ixp4xx_hss.h | 17
-rw-r--r--  include/linux/ptp_clock_kernel.h | 9
-rw-r--r--  include/linux/ptr_ring.h | 1
-rw-r--r--  include/linux/qed/common_hsi.h | 44
-rw-r--r--  include/linux/qed/eth_common.h | 78
-rw-r--r--  include/linux/qed/iscsi_common.h | 64
-rw-r--r--  include/linux/qed/qed_if.h | 14
-rw-r--r--  include/linux/qed/qed_ll2_if.h | 7
-rw-r--r--  include/linux/qed/storage_common.h | 3
-rw-r--r--  include/linux/sfp.h | 95
-rw-r--r--  include/linux/skbuff.h | 26
-rw-r--r--  include/linux/stmmac.h | 14
-rw-r--r--  include/linux/tcp.h | 37
-rw-r--r--  include/linux/usb/usbnet.h | 2
-rw-r--r--  include/net/addrconf.h | 8
-rw-r--r--  include/net/af_unix.h | 5
-rw-r--r--  include/net/af_vsock.h | 2
-rw-r--r--  include/net/bluetooth/bluetooth.h | 8
-rw-r--r--  include/net/bluetooth/hci.h | 165
-rw-r--r--  include/net/bluetooth/hci_core.h | 12
-rw-r--r--  include/net/bluetooth/hci_mon.h | 2
-rw-r--r--  include/net/bluetooth/mgmt.h | 17
-rw-r--r--  include/net/devlink.h | 22
-rw-r--r--  include/net/dsa.h | 19
-rw-r--r--  include/net/dsfield.h | 2
-rw-r--r--  include/net/espintcp.h | 39
-rw-r--r--  include/net/ip6_fib.h | 12
-rw-r--r--  include/net/ip_fib.h | 13
-rw-r--r--  include/net/ipv6.h | 3
-rw-r--r--  include/net/macsec.h | 224
-rw-r--r--  include/net/mptcp.h | 188
-rw-r--r--  include/net/net_namespace.h | 10
-rw-r--r--  include/net/netfilter/nf_flow_table.h | 27
-rw-r--r--  include/net/netfilter/nf_tables.h | 22
-rw-r--r--  include/net/netfilter/nf_tables_core.h | 1
-rw-r--r--  include/net/netlink.h | 8
-rw-r--r--  include/net/netns/ipv4.h | 1
-rw-r--r--  include/net/pie.h | 138
-rw-r--r--  include/net/pkt_cls.h | 91
-rw-r--r--  include/net/sch_generic.h | 3
-rw-r--r--  include/net/sock.h | 29
-rw-r--r--  include/net/tcp.h | 46
-rw-r--r--  include/net/tls.h | 9
-rw-r--r--  include/net/udp.h | 5
-rw-r--r--  include/net/xdp_sock.h | 25
-rw-r--r--  include/net/xfrm.h | 4
-rw-r--r--  include/rdma/ib_umem.h | 4
-rw-r--r--  include/rdma/ib_umem_odp.h | 6
-rw-r--r--  include/rdma/ib_verbs.h | 9
-rw-r--r--  include/soc/mscc/ocelot.h | 2
-rw-r--r--  include/soc/mscc/ocelot_ana.h | 625
-rw-r--r--  include/soc/mscc/ocelot_dev.h | 275
-rw-r--r--  include/soc/mscc/ocelot_qsys.h | 270
-rw-r--r--  include/trace/events/sctp.h | 9
-rw-r--r--  include/trace/events/sock.h | 5
-rw-r--r--  include/trace/events/xdp.h | 130
-rw-r--r--  include/uapi/linux/audit.h | 1
-rw-r--r--  include/uapi/linux/batadv_packet.h | 2
-rw-r--r--  include/uapi/linux/batman_adv.h | 2
-rw-r--r--  include/uapi/linux/bpf.h | 82
-rw-r--r--  include/uapi/linux/btf.h | 9
-rw-r--r--  include/uapi/linux/ethtool.h | 11
-rw-r--r--  include/uapi/linux/ethtool_netlink.h | 237
-rw-r--r--  include/uapi/linux/hdlc/ioctl.h | 9
-rw-r--r--  include/uapi/linux/if.h | 1
-rw-r--r--  include/uapi/linux/if_bonding.h | 10
-rw-r--r--  include/uapi/linux/if_bridge.h | 41
-rw-r--r--  include/uapi/linux/if_link.h | 8
-rw-r--r--  include/uapi/linux/if_macsec.h | 11
-rw-r--r--  include/uapi/linux/in.h | 2
-rw-r--r--  include/uapi/linux/mii.h | 12
-rw-r--r--  include/uapi/linux/net_tstamp.h | 8
-rw-r--r--  include/uapi/linux/netfilter/nf_tables.h | 47
-rw-r--r--  include/uapi/linux/openvswitch.h | 31
-rw-r--r--  include/uapi/linux/pkt_sched.h | 48
-rw-r--r--  include/uapi/linux/rtnetlink.h | 11
-rw-r--r--  include/uapi/linux/snmp.h | 2
-rw-r--r--  include/uapi/linux/tcp.h | 6
-rw-r--r--  include/uapi/linux/tipc_netlink.h | 2
-rw-r--r--  include/uapi/linux/udp.h | 1
-rw-r--r--  include/uapi/linux/vm_sockets.h | 8
-rw-r--r--  include/uapi/linux/wireguard.h | 196
108 files changed, 4425 insertions, 434 deletions
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index cb63feb3cfbe..80ad521116d7 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -53,6 +53,7 @@
* bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
+ * bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest
* bitmap_replace(dst, old, new, mask, nbits) *dst = (*old & ~(*mask)) | (*new & *mask)
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
@@ -133,6 +134,9 @@ extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
+extern void bitmap_cut(unsigned long *dst, const unsigned long *src,
+ unsigned int first, unsigned int cut,
+ unsigned int nbits);
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
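The new bitmap_cut() removes 'cut' bits starting at position 'first' and shifts the higher bits down to close the gap. A minimal sketch of the intended semantics (kernel context; the values are made up for illustration):

    #include <linux/bitmap.h>

    static void bitmap_cut_example(void)
    {
            DECLARE_BITMAP(src, 32);
            DECLARE_BITMAP(dst, 32);

            bitmap_zero(src, 32);
            bitmap_set(src, 0, 16);            /* src = 0x0000ffff */

            /* Remove 4 bits starting at bit 8; bits 12..15 slide down. */
            bitmap_cut(dst, src, 8, 4, 32);    /* dst = 0x00000fff */
    }
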
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 9be71c195d74..a11d5b7dbbf3 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -85,6 +85,7 @@ int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
+ struct bpf_prog *replace_prog,
enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type);
@@ -93,7 +94,8 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
- enum bpf_attach_type type, u32 flags);
+ struct bpf_prog *replace_prog, enum bpf_attach_type type,
+ u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 085a59afba85..8e9ad3943cd9 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -17,6 +17,7 @@
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
+#include <linux/module.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -43,6 +44,15 @@ struct bpf_map_ops {
int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
void (*map_release_uref)(struct bpf_map *map);
void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
+ int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+ int (*map_lookup_and_delete_batch)(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+ int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+ int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
/* funcs callable from userspace and from eBPF programs */
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
@@ -106,6 +116,7 @@ struct bpf_map {
struct btf *btf;
struct bpf_map_memory memory;
char name[BPF_OBJ_NAME_LEN];
+ u32 btf_vmlinux_value_type_id;
bool unpriv_array;
bool frozen; /* write-once; write-protected by freeze_mutex */
/* 22 bytes hole */
@@ -183,7 +194,8 @@ static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
- return map->btf && map->ops->map_seq_show_elem;
+ return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
+ map->ops->map_seq_show_elem;
}
int map_check_no_btf(const struct bpf_map *map,
@@ -349,6 +361,10 @@ struct bpf_verifier_ops {
const struct bpf_insn *src,
struct bpf_insn *dst,
struct bpf_prog *prog, u32 *target_size);
+ int (*btf_struct_access)(struct bpf_verifier_log *log,
+ const struct btf_type *t, int off, int size,
+ enum bpf_access_type atype,
+ u32 *next_btf_id);
};
struct bpf_prog_offload_ops {
@@ -437,7 +453,8 @@ struct btf_func_model {
* fentry = a set of program to run before calling original function
* fexit = a set of program to run after original function
*/
-int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags,
+int arch_prepare_bpf_trampoline(void *image, void *image_end,
+ const struct btf_func_model *m, u32 flags,
struct bpf_prog **fentry_progs, int fentry_cnt,
struct bpf_prog **fexit_progs, int fexit_cnt,
void *orig_call);
@@ -448,7 +465,8 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
enum bpf_tramp_prog_type {
BPF_TRAMP_FENTRY,
BPF_TRAMP_FEXIT,
- BPF_TRAMP_MAX
+ BPF_TRAMP_MAX,
+ BPF_TRAMP_REPLACE, /* more than MAX */
};
struct bpf_trampoline {
@@ -463,6 +481,11 @@ struct bpf_trampoline {
void *addr;
bool ftrace_managed;
} func;
+ /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
+ * program by replacing one of its functions. func.addr is the address
+ * of the function it replaced.
+ */
+ struct bpf_prog *extension_prog;
/* list of BPF programs using this trampoline */
struct hlist_head progs_hlist[BPF_TRAMP_MAX];
/* Number of attached programs. A counter per kind. */
@@ -471,11 +494,75 @@ struct bpf_trampoline {
void *image;
u64 selector;
};
+
+#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
+
+struct bpf_dispatcher_prog {
+ struct bpf_prog *prog;
+ refcount_t users;
+};
+
+struct bpf_dispatcher {
+ /* dispatcher mutex */
+ struct mutex mutex;
+ void *func;
+ struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
+ int num_progs;
+ void *image;
+ u32 image_off;
+};
+
+static __always_inline unsigned int bpf_dispatcher_nopfunc(
+ const void *ctx,
+ const struct bpf_insn *insnsi,
+ unsigned int (*bpf_func)(const void *,
+ const struct bpf_insn *))
+{
+ return bpf_func(ctx, insnsi);
+}
#ifdef CONFIG_BPF_JIT
struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
int bpf_trampoline_link_prog(struct bpf_prog *prog);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
void bpf_trampoline_put(struct bpf_trampoline *tr);
+#define BPF_DISPATCHER_INIT(name) { \
+ .mutex = __MUTEX_INITIALIZER(name.mutex), \
+ .func = &name##func, \
+ .progs = {}, \
+ .num_progs = 0, \
+ .image = NULL, \
+ .image_off = 0 \
+}
+
+#define DEFINE_BPF_DISPATCHER(name) \
+ noinline unsigned int name##func( \
+ const void *ctx, \
+ const struct bpf_insn *insnsi, \
+ unsigned int (*bpf_func)(const void *, \
+ const struct bpf_insn *)) \
+ { \
+ return bpf_func(ctx, insnsi); \
+ } \
+ EXPORT_SYMBOL(name##func); \
+ struct bpf_dispatcher name = BPF_DISPATCHER_INIT(name);
+#define DECLARE_BPF_DISPATCHER(name) \
+ unsigned int name##func( \
+ const void *ctx, \
+ const struct bpf_insn *insnsi, \
+ unsigned int (*bpf_func)(const void *, \
+ const struct bpf_insn *)); \
+ extern struct bpf_dispatcher name;
+#define BPF_DISPATCHER_FUNC(name) name##func
+#define BPF_DISPATCHER_PTR(name) (&name)
+void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
+ struct bpf_prog *to);
+struct bpf_image {
+ struct latch_tree_node tnode;
+ unsigned char data[];
+};
+#define BPF_IMAGE_SIZE (PAGE_SIZE - sizeof(struct bpf_image))
+bool is_bpf_image_address(unsigned long address);
+void *bpf_image_alloc(void);
#else
static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
@@ -490,9 +577,21 @@ static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
return -ENOTSUPP;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
+#define DEFINE_BPF_DISPATCHER(name)
+#define DECLARE_BPF_DISPATCHER(name)
+#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nopfunc
+#define BPF_DISPATCHER_PTR(name) NULL
+static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
+ struct bpf_prog *from,
+ struct bpf_prog *to) {}
+static inline bool is_bpf_image_address(unsigned long address)
+{
+ return false;
+}
#endif
struct bpf_func_info_aux {
+ u16 linkage;
bool unreliable;
};
@@ -603,6 +702,73 @@ struct bpf_array_aux {
struct work_struct work;
};
+struct bpf_struct_ops_value;
+struct btf_type;
+struct btf_member;
+
+#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
+struct bpf_struct_ops {
+ const struct bpf_verifier_ops *verifier_ops;
+ int (*init)(struct btf *btf);
+ int (*check_member)(const struct btf_type *t,
+ const struct btf_member *member);
+ int (*init_member)(const struct btf_type *t,
+ const struct btf_member *member,
+ void *kdata, const void *udata);
+ int (*reg)(void *kdata);
+ void (*unreg)(void *kdata);
+ const struct btf_type *type;
+ const struct btf_type *value_type;
+ const char *name;
+ struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
+ u32 type_id;
+ u32 value_id;
+};
+
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
+#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
+const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
+void bpf_struct_ops_init(struct btf *btf);
+bool bpf_struct_ops_get(const void *kdata);
+void bpf_struct_ops_put(const void *kdata);
+int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
+ void *value);
+static inline bool bpf_try_module_get(const void *data, struct module *owner)
+{
+ if (owner == BPF_MODULE_OWNER)
+ return bpf_struct_ops_get(data);
+ else
+ return try_module_get(owner);
+}
+static inline void bpf_module_put(const void *data, struct module *owner)
+{
+ if (owner == BPF_MODULE_OWNER)
+ bpf_struct_ops_put(data);
+ else
+ module_put(owner);
+}
+#else
+static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
+{
+ return NULL;
+}
+static inline void bpf_struct_ops_init(struct btf *btf) { }
+static inline bool bpf_try_module_get(const void *data, struct module *owner)
+{
+ return try_module_get(owner);
+}
+static inline void bpf_module_put(const void *data, struct module *owner)
+{
+ module_put(owner);
+}
+static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
+ void *key,
+ void *value)
+{
+ return -EINVAL;
+}
+#endif
+
struct bpf_array {
struct bpf_map map;
u32 elem_size;
@@ -841,6 +1007,15 @@ void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
+int generic_map_lookup_batch(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+int generic_map_update_batch(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+int generic_map_delete_batch(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
extern int sysctl_unprivileged_bpf_disabled;
@@ -897,14 +1072,16 @@ struct sk_buff;
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
-void __dev_map_flush(struct bpf_map *map);
+void __dev_flush(void);
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+ struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
struct bpf_prog *xdp_prog);
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
-void __cpu_map_flush(struct bpf_map *map);
+void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
struct net_device *dev_rx);
@@ -941,7 +1118,15 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
const char *func_name,
struct btf_func_model *m);
-int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog);
+struct bpf_reg_state;
+int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
+ struct bpf_reg_state *regs);
+int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
+ struct bpf_reg_state *reg);
+int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
+ struct btf *btf, const struct btf_type *t);
+
+struct bpf_prog *bpf_prog_by_id(u32 id);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
@@ -1004,7 +1189,7 @@ static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map
return NULL;
}
-static inline void __dev_map_flush(struct bpf_map *map)
+static inline void __dev_flush(void)
{
}
@@ -1012,6 +1197,13 @@ struct xdp_buff;
struct bpf_dtab_netdev;
static inline
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+ struct net_device *dev_rx)
+{
+ return 0;
+}
+
+static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
struct net_device *dev_rx)
{
@@ -1033,7 +1225,7 @@ struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
return NULL;
}
-static inline void __cpu_map_flush(struct bpf_map *map)
+static inline void __cpu_map_flush(void)
{
}
@@ -1074,6 +1266,11 @@ static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
static inline void bpf_map_put(struct bpf_map *map)
{
}
+
+static inline struct bpf_prog *bpf_prog_by_id(u32 id)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
#endif /* CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
@@ -1227,6 +1424,7 @@ extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
+extern const struct bpf_func_proto bpf_jiffies64_proto;
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
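The struct_ops additions let a BPF program implement a kernel ops structure. The bpf_try_module_get()/bpf_module_put() helpers above dispatch on owner == BPF_MODULE_OWNER, so one call site can pin either a real kernel module or the backing struct_ops map. A hedged sketch; 'foo_ops' is hypothetical, and how owner gets set to BPF_MODULE_OWNER is left to the struct_ops machinery:

    struct foo_ops {
            struct module *owner;
            int (*run)(void);
    };

    static bool foo_hold_ops(struct foo_ops *ops)
    {
            /* try_module_get() for modules, struct_ops refcount otherwise */
            return bpf_try_module_get(ops, ops->owner);
    }

    static void foo_release_ops(struct foo_ops *ops)
    {
            bpf_module_put(ops, ops->owner);
    }
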
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 93740b3614d7..c81d4ece79a4 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -65,6 +65,12 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2,
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport,
struct sk_reuseport_md, struct sk_reuseport_kern)
#endif
+#if defined(CONFIG_BPF_JIT)
+BPF_PROG_TYPE(BPF_PROG_TYPE_STRUCT_OPS, bpf_struct_ops,
+ void *, void *)
+BPF_PROG_TYPE(BPF_PROG_TYPE_EXT, bpf_extension,
+ void *, void *)
+#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
@@ -105,3 +111,6 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
+#if defined(CONFIG_BPF_JIT)
+BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops)
+#endif
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 26e40de9ef55..5406e6e96585 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -304,11 +304,13 @@ struct bpf_insn_aux_data {
u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
int sanitize_stack_off; /* stack slot to be cleared */
- bool seen; /* this insn was processed by the verifier */
+ u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
bool zext_dst; /* this insn zero extends dst reg */
u8 alu_state; /* used in combination with alu_limit */
- bool prune_point;
+
+ /* below fields are initialized once */
unsigned int orig_idx; /* original instruction index */
+ bool prune_point;
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -379,6 +381,7 @@ struct bpf_verifier_env {
int *insn_stack;
int cur_stack;
} cfg;
+ u32 pass_cnt; /* number of times do_check() was called */
u32 subprog_cnt;
/* number of instructions analyzed by the verifier */
u32 prev_insn_processed, insn_processed;
@@ -428,4 +431,7 @@ bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
+int check_ctx_reg(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg, int regno);
+
#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 79d4abc2556a..5c1ea99b480f 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -7,6 +7,8 @@
#include <linux/types.h>
#include <uapi/linux/btf.h>
+#define BTF_TYPE_EMIT(type) ((void)(type *)0)
+
struct btf;
struct btf_member;
struct btf_type;
@@ -53,6 +55,22 @@ bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
u32 expected_offset, u32 expected_size);
int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
bool btf_type_is_void(const struct btf_type *t);
+s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
+const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
+ u32 id, u32 *res_id);
+const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
+ u32 id, u32 *res_id);
+const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
+ u32 id, u32 *res_id);
+const struct btf_type *
+btf_resolve_size(const struct btf *btf, const struct btf_type *type,
+ u32 *type_size, const struct btf_type **elem_type,
+ u32 *total_nelems);
+
+#define for_each_member(i, struct_type, member) \
+ for (i = 0, member = btf_type_member(struct_type); \
+ i < btf_type_vlen(struct_type); \
+ i++, member++)
static inline bool btf_type_is_ptr(const struct btf_type *t)
{
@@ -84,6 +102,40 @@ static inline bool btf_type_is_func_proto(const struct btf_type *t)
return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
}
+static inline u16 btf_type_vlen(const struct btf_type *t)
+{
+ return BTF_INFO_VLEN(t->info);
+}
+
+static inline u16 btf_func_linkage(const struct btf_type *t)
+{
+ return BTF_INFO_VLEN(t->info);
+}
+
+static inline bool btf_type_kflag(const struct btf_type *t)
+{
+ return BTF_INFO_KFLAG(t->info);
+}
+
+static inline u32 btf_member_bit_offset(const struct btf_type *struct_type,
+ const struct btf_member *member)
+{
+ return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
+ : member->offset;
+}
+
+static inline u32 btf_member_bitfield_size(const struct btf_type *struct_type,
+ const struct btf_member *member)
+{
+ return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
+ : 0;
+}
+
+static inline const struct btf_member *btf_type_member(const struct btf_type *t)
+{
+ return (const struct btf_member *)(t + 1);
+}
+
#ifdef CONFIG_BPF_SYSCALL
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
const char *btf_name_by_offset(const struct btf *btf, u32 offset);
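The new for_each_member() iterator and the btf_member_bit_offset()/btf_member_bitfield_size() accessors make walking a BTF struct type straightforward. A minimal sketch, assuming 't' is a BTF_KIND_STRUCT type:

    static void dump_struct_members(const struct btf_type *t)
    {
            const struct btf_member *member;
            u32 i;

            for_each_member(i, t, member) {
                    pr_info("member %u: name_off=%u bit_offset=%u bitfield_size=%u\n",
                            i, member->name_off,
                            btf_member_bit_offset(t, member),
                            btf_member_bitfield_size(t, member));
            }
    }
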
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
index 897e799dbcb9..fa5735c353cd 100644
--- a/include/linux/dsa/sja1105.h
+++ b/include/linux/dsa/sja1105.h
@@ -37,8 +37,6 @@
* the structure defined in struct sja1105_private.
*/
struct sja1105_tagger_data {
- struct sk_buff_head skb_rxtstamp_queue;
- struct work_struct rxtstamp_work;
struct sk_buff *stampable_skb;
/* Protects concurrent access to the meta state machine
* from taggers running on multiple ports on SMP systems
@@ -55,10 +53,12 @@ struct sja1105_skb_cb {
((struct sja1105_skb_cb *)DSA_SKB_CB_PRIV(skb))
struct sja1105_port {
+ struct kthread_worker *xmit_worker;
+ struct kthread_work xmit_work;
+ struct sk_buff_head xmit_queue;
struct sja1105_tagger_data *data;
struct dsa_port *dp;
bool hwts_tx_en;
- int mgmt_slot;
};
#endif /* _NET_DSA_SJA1105_H */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index f6564b572d77..8801f1f986e5 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -43,7 +43,6 @@ __be16 eth_header_parse_protocol(const struct sk_buff *skb);
int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
void eth_commit_mac_addr_change(struct net_device *dev, void *p);
int eth_mac_addr(struct net_device *dev, void *p);
-int eth_change_mtu(struct net_device *dev, int new_mtu);
int eth_validate_addr(struct net_device *dev);
struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h
new file mode 100644
index 000000000000..d01b77887f82
--- /dev/null
+++ b/include/linux/ethtool_netlink.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _LINUX_ETHTOOL_NETLINK_H_
+#define _LINUX_ETHTOOL_NETLINK_H_
+
+#include <uapi/linux/ethtool_netlink.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#define __ETHTOOL_LINK_MODE_MASK_NWORDS \
+ DIV_ROUND_UP(__ETHTOOL_LINK_MODE_MASK_NBITS, 32)
+
+enum ethtool_multicast_groups {
+ ETHNL_MCGRP_MONITOR,
+};
+
+#endif /* _LINUX_ETHTOOL_NETLINK_H_ */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 345f3748e0fb..f349e2c0884c 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -559,23 +559,26 @@ struct sk_filter {
DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
-#define BPF_PROG_RUN(prog, ctx) ({ \
- u32 ret; \
- cant_sleep(); \
- if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
- struct bpf_prog_stats *stats; \
- u64 start = sched_clock(); \
- ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \
- stats = this_cpu_ptr(prog->aux->stats); \
- u64_stats_update_begin(&stats->syncp); \
- stats->cnt++; \
- stats->nsecs += sched_clock() - start; \
- u64_stats_update_end(&stats->syncp); \
- } else { \
- ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \
- } \
+#define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \
+ u32 ret; \
+ cant_sleep(); \
+ if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
+ struct bpf_prog_stats *stats; \
+ u64 start = sched_clock(); \
+ ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
+ stats = this_cpu_ptr(prog->aux->stats); \
+ u64_stats_update_begin(&stats->syncp); \
+ stats->cnt++; \
+ stats->nsecs += sched_clock() - start; \
+ u64_stats_update_end(&stats->syncp); \
+ } else { \
+ ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
+ } \
ret; })
+#define BPF_PROG_RUN(prog, ctx) __BPF_PROG_RUN(prog, ctx, \
+ bpf_dispatcher_nopfunc)
+
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
struct bpf_skb_data_end {
@@ -589,7 +592,6 @@ struct bpf_redirect_info {
u32 tgt_index;
void *tgt_value;
struct bpf_map *map;
- struct bpf_map *map_to_flush;
u32 kern_flags;
};
@@ -699,6 +701,8 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
return res;
}
+DECLARE_BPF_DISPATCHER(bpf_dispatcher_xdp)
+
static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
struct xdp_buff *xdp)
{
@@ -708,9 +712,12 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
* already takes rcu_read_lock() when fetching the program, so
* it's not necessary here anymore.
*/
- return BPF_PROG_RUN(prog, xdp);
+ return __BPF_PROG_RUN(prog, xdp,
+ BPF_DISPATCHER_FUNC(bpf_dispatcher_xdp));
}
+void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
+
static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
return prog->len * sizeof(struct bpf_insn);
@@ -836,6 +843,8 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);
+const struct bpf_func_proto *
+bpf_base_func_proto(enum bpf_func_id func_id);
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
@@ -909,7 +918,7 @@ static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
return 0;
}
-/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
+/* The pair of xdp_do_redirect and xdp_do_flush MUST be called in the
* same cpu context. Further for best results no more than a single map
* for the do_redirect/do_flush pair should be used. This limitation is
* because we only track one map and force a flush when the map changes.
@@ -920,7 +929,13 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
int xdp_do_redirect(struct net_device *dev,
struct xdp_buff *xdp,
struct bpf_prog *prog);
-void xdp_do_flush_map(void);
+void xdp_do_flush(void);
+
+/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
+ * it is no longer only flushing maps. Keep this define for compatibility
+ * until all drivers are updated - do not use xdp_do_flush_map() in new code!
+ */
+#define xdp_do_flush_map xdp_do_flush
void bpf_warn_invalid_xdp_action(u32 act);
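With these changes BPF_PROG_RUN() becomes a wrapper around __BPF_PROG_RUN() using the no-op dispatcher, while bpf_prog_run_xdp() goes through the bpf_dispatcher_xdp trampoline so the per-packet call avoids a retpolined indirect call. Drivers that flushed maps at the end of a NAPI poll should now call xdp_do_flush(); the xdp_do_flush_map define is kept only for compatibility. A sketch of a driver-side per-packet hook under the new scheme ('foo_run_xdp' is hypothetical):

    static u32 foo_run_xdp(struct bpf_prog *prog, struct xdp_buff *xdp)
    {
            u32 act;

            rcu_read_lock();
            act = bpf_prog_run_xdp(prog, xdp);      /* dispatcher-backed run */
            rcu_read_unlock();

            switch (act) {
            case XDP_PASS:
            case XDP_TX:
            case XDP_REDIRECT:
                    return act;
            default:
                    bpf_warn_invalid_xdp_action(act);
                    /* fall through */
            case XDP_ABORTED:
            case XDP_DROP:
                    return XDP_DROP;
            }
    }
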
diff --git a/include/linux/fsl/enetc_mdio.h b/include/linux/fsl/enetc_mdio.h
new file mode 100644
index 000000000000..4875dd38af7e
--- /dev/null
+++ b/include/linux/fsl/enetc_mdio.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+
+#ifndef _FSL_ENETC_MDIO_H_
+#define _FSL_ENETC_MDIO_H_
+
+#include <linux/phy.h>
+
+/* PCS registers */
+#define ENETC_PCS_LINK_TIMER1 0x12
+#define ENETC_PCS_LINK_TIMER1_VAL 0x06a0
+#define ENETC_PCS_LINK_TIMER2 0x13
+#define ENETC_PCS_LINK_TIMER2_VAL 0x0003
+#define ENETC_PCS_IF_MODE 0x14
+#define ENETC_PCS_IF_MODE_SGMII_EN BIT(0)
+#define ENETC_PCS_IF_MODE_USE_SGMII_AN BIT(1)
+#define ENETC_PCS_IF_MODE_SGMII_SPEED(x) (((x) << 2) & GENMASK(3, 2))
+
+/* Not a mistake, the SerDes PLL needs to be set at 3.125 GHz by Reset
+ * Configuration Word (RCW, outside Linux control) for 2.5G SGMII mode. The PCS
+ * still thinks it's at gigabit.
+ */
+enum enetc_pcs_speed {
+ ENETC_PCS_SPEED_10 = 0,
+ ENETC_PCS_SPEED_100 = 1,
+ ENETC_PCS_SPEED_1000 = 2,
+ ENETC_PCS_SPEED_2500 = 2,
+};
+
+struct enetc_hw;
+
+struct enetc_mdio_priv {
+ struct enetc_hw *hw;
+ int mdio_base;
+};
+
+#if IS_REACHABLE(CONFIG_FSL_ENETC_MDIO)
+
+int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
+int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value);
+struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs);
+
+#else
+
+static inline int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{ return -EINVAL; }
+static inline int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
+ u16 value)
+{ return -EINVAL; }
+static inline struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
+{ return ERR_PTR(-EINVAL); }
+
+#endif
+
+#endif
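The exported enetc_mdio_read()/enetc_mdio_write() plus the PCS register defines are enough for another driver (for example a switch sharing the ENETC MDIO block) to bring up an SGMII PCS. A hedged sketch; the PCS MDIO address of 0 and the minimal error handling are illustrative only:

    static int foo_pcs_config_sgmii(struct mii_bus *bus)
    {
            int err;

            err = enetc_mdio_write(bus, 0, ENETC_PCS_LINK_TIMER1,
                                   ENETC_PCS_LINK_TIMER1_VAL);
            if (err)
                    return err;

            err = enetc_mdio_write(bus, 0, ENETC_PCS_LINK_TIMER2,
                                   ENETC_PCS_LINK_TIMER2_VAL);
            if (err)
                    return err;

            /* SGMII mode with in-band autonegotiation */
            return enetc_mdio_write(bus, 0, ENETC_PCS_IF_MODE,
                                    ENETC_PCS_IF_MODE_SGMII_EN |
                                    ENETC_PCS_IF_MODE_USE_SGMII_AN);
    }
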
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
index 992bf9fa1729..b0b743563f43 100644
--- a/include/linux/fsl/ptp_qoriq.h
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -192,6 +192,7 @@ int ptp_qoriq_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts);
int ptp_qoriq_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on);
+int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index, bool update_event);
#ifdef CONFIG_DEBUG_FS
void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq);
void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq);
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 4ce8901a1af6..18c6208f56fc 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -373,6 +373,56 @@ static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
}
/**
+ * mii_lpa_mod_linkmode_lpa_sgmii
+ * @lp_advertising: pointer to destination link mode.
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA bits to
+ * linkmode advertisement settings for SGMII.
+ * Leaves other bits unchanged.
+ */
+static inline void
+mii_lpa_mod_linkmode_lpa_sgmii(unsigned long *lp_advertising, u32 lpa)
+{
+ u32 speed_duplex = lpa & LPA_SGMII_DPX_SPD_MASK;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, lp_advertising,
+ speed_duplex == LPA_SGMII_1000HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, lp_advertising,
+ speed_duplex == LPA_SGMII_1000FULL);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, lp_advertising,
+ speed_duplex == LPA_SGMII_100HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, lp_advertising,
+ speed_duplex == LPA_SGMII_100FULL);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, lp_advertising,
+ speed_duplex == LPA_SGMII_10HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, lp_advertising,
+ speed_duplex == LPA_SGMII_10FULL);
+}
+
+/**
+ * mii_lpa_to_linkmode_lpa_sgmii
+ * @lp_advertising: pointer to destination link mode.
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA bits
+ * to linkmode advertisement settings when in SGMII mode.
+ * Clears the old value of advertising.
+ */
+static inline void mii_lpa_to_linkmode_lpa_sgmii(unsigned long *lp_advertising,
+ u32 lpa)
+{
+ linkmode_zero(lp_advertising);
+
+ mii_lpa_mod_linkmode_lpa_sgmii(lp_advertising, lpa);
+}
+
+/**
* mii_adv_mod_linkmode_adv_t
* @advertising:pointer to destination link mode.
* @adv: value of the MII_ADVERTISE register
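These helpers translate the SGMII-coded MII_LPA word (speed/duplex resolved by in-band autoneg) into ethtool link modes. A sketch of how a PCS driver might use them when filling a phylink link state ('foo_pcs_decode_sgmii_lpa' is hypothetical):

    static void foo_pcs_decode_sgmii_lpa(struct phylink_link_state *state,
                                         u16 lpa)
    {
            /* Overwrites lp_advertising; use the _mod_ variant instead to
             * preserve bits (e.g. Pause) already filled from elsewhere.
             */
            mii_lpa_to_linkmode_lpa_sgmii(state->lp_advertising, lpa);
    }
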
diff --git a/include/linux/mii_timestamper.h b/include/linux/mii_timestamper.h
new file mode 100644
index 000000000000..fa940bbaf8ae
--- /dev/null
+++ b/include/linux/mii_timestamper.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for generic time stamping devices on MII buses.
+ * Copyright (C) 2018 Richard Cochran <richardcochran@gmail.com>
+ */
+#ifndef _LINUX_MII_TIMESTAMPER_H
+#define _LINUX_MII_TIMESTAMPER_H
+
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+
+struct phy_device;
+
+/**
+ * struct mii_timestamper - Callback interface to MII time stamping devices.
+ *
+ * @rxtstamp: Requests a Rx timestamp for 'skb'. If the skb is accepted,
+ * the MII time stamping device promises to deliver it using
+ * netif_rx() as soon as a timestamp becomes available. One of
+ * the PTP_CLASS_ values is passed in 'type'. The function
+ * must return true if the skb is accepted for delivery.
+ *
+ * @txtstamp: Requests a Tx timestamp for 'skb'. The MII time stamping
+ * device promises to deliver it using skb_complete_tx_timestamp()
+ * as soon as a timestamp becomes available. One of the PTP_CLASS_
+ * values is passed in 'type'.
+ *
+ * @hwtstamp: Handles SIOCSHWTSTAMP ioctl for hardware time stamping.
+ *
+ * @link_state: Allows the device to respond to changes in the link
+ * state. The caller invokes this function while holding
+ * the phy_device mutex.
+ *
+ * @ts_info: Handles ethtool queries for hardware time stamping.
+ * @device: Remembers the device to which the instance belongs.
+ *
+ * Drivers for PHY time stamping devices should embed their
+ * mii_timestamper within a private structure, obtaining a reference
+ * to it using container_of().
+ *
+ * Drivers for non-PHY time stamping devices should return a pointer
+ * to a mii_timestamper from the probe_channel() callback of their
+ * mii_timestamping_ctrl interface.
+ */
+struct mii_timestamper {
+ bool (*rxtstamp)(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type);
+
+ void (*txtstamp)(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type);
+
+ int (*hwtstamp)(struct mii_timestamper *mii_ts,
+ struct ifreq *ifreq);
+
+ void (*link_state)(struct mii_timestamper *mii_ts,
+ struct phy_device *phydev);
+
+ int (*ts_info)(struct mii_timestamper *mii_ts,
+ struct ethtool_ts_info *ts_info);
+
+ struct device *device;
+};
+
+/**
+ * struct mii_timestamping_ctrl - MII time stamping controller interface.
+ *
+ * @probe_channel: Callback into the controller driver announcing the
+ * presence of the 'port' channel. The 'device' field
+ * had been passed to register_mii_tstamp_controller().
+ * The driver must return either a pointer to a valid
+ * MII timestamper instance or PTR_ERR.
+ *
+ * @release_channel: Releases an instance obtained via .probe_channel.
+ */
+struct mii_timestamping_ctrl {
+ struct mii_timestamper *(*probe_channel)(struct device *device,
+ unsigned int port);
+ void (*release_channel)(struct device *device,
+ struct mii_timestamper *mii_ts);
+};
+
+#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
+
+int register_mii_tstamp_controller(struct device *device,
+ struct mii_timestamping_ctrl *ctrl);
+
+void unregister_mii_tstamp_controller(struct device *device);
+
+struct mii_timestamper *register_mii_timestamper(struct device_node *node,
+ unsigned int port);
+
+void unregister_mii_timestamper(struct mii_timestamper *mii_ts);
+
+#else
+
+static inline
+int register_mii_tstamp_controller(struct device *device,
+ struct mii_timestamping_ctrl *ctrl)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void unregister_mii_tstamp_controller(struct device *device)
+{
+}
+
+static inline
+struct mii_timestamper *register_mii_timestamper(struct device_node *node,
+ unsigned int port)
+{
+ return NULL;
+}
+
+static inline void unregister_mii_timestamper(struct mii_timestamper *mii_ts)
+{
+}
+
+#endif
+
+#endif
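As the kernel-doc above says, a PHY time stamping driver embeds struct mii_timestamper in its private state and recovers it with container_of() in each callback. A minimal sketch of the Rx path contract ('foo_phy_ts' and its queue are hypothetical):

    struct foo_phy_ts {
            struct mii_timestamper mii_ts;
            struct sk_buff_head rx_queue;   /* skbs awaiting a timestamp */
    };

    static bool foo_rxtstamp(struct mii_timestamper *mii_ts,
                             struct sk_buff *skb, int type)
    {
            struct foo_phy_ts *ts = container_of(mii_ts, struct foo_phy_ts,
                                                 mii_ts);

            if (type != PTP_CLASS_V2_L2)
                    return false;

            /* Accepted: must be handed to netif_rx() once the hardware
             * timestamp shows up, per the interface contract above.
             */
            skb_queue_tail(&ts->rx_queue, skb);
            return true;
    }
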
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 36e412c3d657..20372de0b587 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -47,7 +47,7 @@
#define DEFAULT_UAR_PAGE_SHIFT 12
#define MAX_MSIX_P_PORT 17
-#define MAX_MSIX 64
+#define MAX_MSIX 128
#define MIN_MSIX_P_PORT 5
#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
(dev_cap).num_ports * MIN_MSIX_P_PORT)
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index cc1c230f10ee..0e62c3db45e5 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1105,6 +1105,7 @@ enum mlx5_cap_type {
MLX5_CAP_DEV_MEM,
MLX5_CAP_RESERVED_16,
MLX5_CAP_TLS,
+ MLX5_CAP_VDPA_EMULATION = 0x13,
MLX5_CAP_DEV_EVENT = 0x14,
/* NUM OF CAP Types */
MLX5_CAP_NUM
@@ -1120,6 +1121,9 @@ enum mlx5_pcam_feature_groups {
enum mlx5_mcam_reg_groups {
MLX5_MCAM_REGS_FIRST_128 = 0x0,
+ MLX5_MCAM_REGS_0x9080_0x90FF = 0x1,
+ MLX5_MCAM_REGS_0x9100_0x917F = 0x2,
+ MLX5_MCAM_REGS_NUM = 0x3,
};
enum mlx5_mcam_feature_groups {
@@ -1268,7 +1272,16 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)
#define MLX5_CAP_MCAM_REG(mdev, reg) \
- MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_access_reg_cap_mask.access_regs.reg)
+ MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
+ mng_access_reg_cap_mask.access_regs.reg)
+
+#define MLX5_CAP_MCAM_REG1(mdev, reg) \
+ MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \
+ mng_access_reg_cap_mask.access_regs1.reg)
+
+#define MLX5_CAP_MCAM_REG2(mdev, reg) \
+ MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
+ mng_access_reg_cap_mask.access_regs2.reg)
#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
@@ -1297,6 +1310,14 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_DEV_EVENT(mdev, cap)\
MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)
+#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
+ MLX5_GET(device_virtio_emulation_cap, \
+ (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
+
+#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
+ MLX5_GET64(device_virtio_emulation_cap, \
+ (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
+
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
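The MCAM capability dump is now an array indexed by register group, so access-register support checks must use the macro for the group that covers the register of interest. A hedged sketch for the new MIRC register (0x9162, covered by the 0x9100-0x917f group):

    static bool foo_mirc_supported(struct mlx5_core_dev *mdev)
    {
            /* mirc lives in the access_regs2 mask, hence _REG2 */
            return MLX5_CAP_MCAM_REG2(mdev, mirc);
    }
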
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 27200dea0297..22bd0d5024c8 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -145,6 +145,8 @@ enum {
MLX5_REG_MCC = 0x9062,
MLX5_REG_MCDA = 0x9063,
MLX5_REG_MCAM = 0x907f,
+ MLX5_REG_MIRC = 0x9162,
+ MLX5_REG_RESOURCE_DUMP = 0xC000,
};
enum mlx5_qpts_trust_state {
@@ -684,7 +686,7 @@ struct mlx5_core_dev {
u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
- u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+ u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
u8 embedded_cpu;
@@ -928,8 +930,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
- struct mlx5_frag_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev,
int size, struct mlx5_frag_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 4e5b84e66822..4cae16016b2b 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -48,6 +48,7 @@ enum {
MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
MLX5_FLOW_TABLE_TERMINATION = BIT(2),
+ MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
};
#define LEFTOVERS_RULE_NUM 2
@@ -145,19 +146,17 @@ mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type,
int vport);
-struct mlx5_flow_table *
-mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int num_flow_table_entries,
- int max_num_groups,
- u32 level,
- u32 flags);
-
struct mlx5_flow_table_attr {
int prio;
int max_fte;
u32 level;
u32 flags;
+ struct mlx5_flow_table *next_ft;
+
+ struct {
+ int max_num_groups;
+ int num_reserved_entries;
+ } autogroup;
};
struct mlx5_flow_table *
@@ -165,6 +164,10 @@ mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr *ft_attr);
struct mlx5_flow_table *
+mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
+ struct mlx5_flow_table_attr *ft_attr);
+
+struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
@@ -194,6 +197,7 @@ struct mlx5_fs_vlan {
enum {
FLOW_ACT_NO_APPEND = BIT(0),
+ FLOW_ACT_IGNORE_FLOW_LEVEL = BIT(1),
};
struct mlx5_flow_act {
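mlx5_create_auto_grouped_flow_table() now takes the same mlx5_flow_table_attr as a plain table, with the grouping parameters moved into the .autogroup sub-struct. A sketch of the updated call (priority and sizes are made up for illustration):

    static struct mlx5_flow_table *
    foo_create_ft(struct mlx5_flow_namespace *ns)
    {
            struct mlx5_flow_table_attr ft_attr = {};

            ft_attr.prio = 0;
            ft_attr.max_fte = 1024;
            ft_attr.autogroup.max_num_groups = 4;
            ft_attr.autogroup.num_reserved_entries = 1;

            return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
    }
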
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 5d54fccf87fc..ee0a34d66c7c 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -87,6 +87,7 @@ enum {
enum {
MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13),
};
enum {
@@ -374,8 +375,17 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_esp_spi[0x1];
u8 reserved_at_58[0x2];
u8 bth_dst_qp[0x1];
+ u8 reserved_at_5b[0x5];
- u8 reserved_at_5b[0x25];
+ u8 reserved_at_60[0x18];
+ u8 metadata_reg_c_7[0x1];
+ u8 metadata_reg_c_6[0x1];
+ u8 metadata_reg_c_5[0x1];
+ u8 metadata_reg_c_4[0x1];
+ u8 metadata_reg_c_3[0x1];
+ u8 metadata_reg_c_2[0x1];
+ u8 metadata_reg_c_1[0x1];
+ u8 metadata_reg_c_0[0x1];
};
struct mlx5_ifc_flow_table_prop_layout_bits {
@@ -400,7 +410,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reformat_l3_tunnel_to_l2[0x1];
u8 reformat_l2_to_l3_tunnel[0x1];
u8 reformat_and_modify_action[0x1];
- u8 reserved_at_15[0x2];
+ u8 ignore_flow_level[0x1];
+ u8 reserved_at_16[0x1];
u8 table_miss_action_domain[0x1];
u8 termination_table[0x1];
u8 reserved_at_19[0x7];
@@ -721,7 +732,9 @@ enum {
struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 fdb_to_vport_reg_c_id[0x8];
- u8 reserved_at_8[0xf];
+ u8 reserved_at_8[0xd];
+ u8 fdb_modify_header_fwd_to_table[0x1];
+ u8 reserved_at_16[0x1];
u8 flow_source[0x1];
u8 reserved_at_18[0x2];
u8 multi_fdb_encap[0x1];
@@ -822,7 +835,9 @@ struct mlx5_ifc_qos_cap_bits {
struct mlx5_ifc_debug_cap_bits {
u8 core_dump_general[0x1];
u8 core_dump_qp[0x1];
- u8 reserved_at_2[0x1e];
+ u8 reserved_at_2[0x7];
+ u8 resource_dump[0x1];
+ u8 reserved_at_a[0x16];
u8 reserved_at_20[0x2];
u8 stall_detect[0x1];
@@ -953,6 +968,19 @@ struct mlx5_ifc_device_event_cap_bits {
u8 user_unaffiliated_events[4][0x40];
};
+struct mlx5_ifc_device_virtio_emulation_cap_bits {
+ u8 reserved_at_0[0x20];
+
+ u8 reserved_at_20[0x13];
+ u8 log_doorbell_stride[0x5];
+ u8 reserved_at_38[0x3];
+ u8 log_doorbell_bar_size[0x5];
+
+ u8 doorbell_bar_offset[0x40];
+
+ u8 reserved_at_80[0x780];
+};
+
enum {
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0,
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2,
@@ -1753,6 +1781,132 @@ struct mlx5_ifc_resize_field_select_bits {
u8 resize_field_select[0x20];
};
+struct mlx5_ifc_resource_dump_bits {
+ u8 more_dump[0x1];
+ u8 inline_dump[0x1];
+ u8 reserved_at_2[0xa];
+ u8 seq_num[0x4];
+ u8 segment_type[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 vhca_id[0x10];
+
+ u8 index1[0x20];
+
+ u8 index2[0x20];
+
+ u8 num_of_obj1[0x10];
+ u8 num_of_obj2[0x10];
+
+ u8 reserved_at_a0[0x20];
+
+ u8 device_opaque[0x40];
+
+ u8 mkey[0x20];
+
+ u8 size[0x20];
+
+ u8 address[0x40];
+
+ u8 inline_data[52][0x20];
+};
+
+struct mlx5_ifc_resource_dump_menu_record_bits {
+ u8 reserved_at_0[0x4];
+ u8 num_of_obj2_supports_active[0x1];
+ u8 num_of_obj2_supports_all[0x1];
+ u8 must_have_num_of_obj2[0x1];
+ u8 support_num_of_obj2[0x1];
+ u8 num_of_obj1_supports_active[0x1];
+ u8 num_of_obj1_supports_all[0x1];
+ u8 must_have_num_of_obj1[0x1];
+ u8 support_num_of_obj1[0x1];
+ u8 must_have_index2[0x1];
+ u8 support_index2[0x1];
+ u8 must_have_index1[0x1];
+ u8 support_index1[0x1];
+ u8 segment_type[0x10];
+
+ u8 segment_name[4][0x20];
+
+ u8 index1_name[4][0x20];
+
+ u8 index2_name[4][0x20];
+};
+
+struct mlx5_ifc_resource_dump_segment_header_bits {
+ u8 length_dw[0x10];
+ u8 segment_type[0x10];
+};
+
+struct mlx5_ifc_resource_dump_command_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 segment_called[0x10];
+ u8 vhca_id[0x10];
+
+ u8 index1[0x20];
+
+ u8 index2[0x20];
+
+ u8 num_of_obj1[0x10];
+ u8 num_of_obj2[0x10];
+};
+
+struct mlx5_ifc_resource_dump_error_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 reserved_at_20[0x10];
+ u8 syndrome_id[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ u8 error[8][0x20];
+};
+
+struct mlx5_ifc_resource_dump_info_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 reserved_at_20[0x18];
+ u8 dump_version[0x8];
+
+ u8 hw_version[0x20];
+
+ u8 fw_version[0x20];
+};
+
+struct mlx5_ifc_resource_dump_menu_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 reserved_at_20[0x10];
+ u8 num_of_records[0x10];
+
+ struct mlx5_ifc_resource_dump_menu_record_bits record[0];
+};
+
+struct mlx5_ifc_resource_dump_resource_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 reserved_at_20[0x20];
+
+ u8 index1[0x20];
+
+ u8 index2[0x20];
+
+ u8 payload[0][0x20];
+};
+
+struct mlx5_ifc_resource_dump_terminate_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+};
+
+struct mlx5_ifc_menu_resource_dump_response_bits {
+ struct mlx5_ifc_resource_dump_info_segment_bits info;
+ struct mlx5_ifc_resource_dump_command_segment_bits cmd;
+ struct mlx5_ifc_resource_dump_menu_segment_bits menu;
+ struct mlx5_ifc_resource_dump_terminate_segment_bits terminate;
+};
+
enum {
MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1,
MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2,
@@ -2026,7 +2180,9 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
u8 rx_pause_transition_low[0x20];
- u8 reserved_at_3c0[0x40];
+ u8 rx_discards_high[0x20];
+
+ u8 rx_discards_low[0x20];
u8 device_stall_minor_watermark_cnt_high[0x20];
@@ -2751,6 +2907,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_fpga_cap_bits fpga_cap;
struct mlx5_ifc_tls_cap_bits tls_cap;
struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
+ struct mlx5_ifc_device_virtio_emulation_cap_bits virtio_emulation_cap;
u8 reserved_at_0[0x8000];
};
@@ -3998,7 +4155,8 @@ struct mlx5_ifc_set_fte_in_bits {
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
- u8 reserved_at_c0[0x18];
+ u8 ignore_flow_level[0x1];
+ u8 reserved_at_c1[0x17];
u8 modify_enable_mask[0x8];
u8 reserved_at_e0[0x20];
@@ -5466,15 +5624,32 @@ struct mlx5_ifc_add_action_in_bits {
u8 data[0x20];
};
+struct mlx5_ifc_copy_action_in_bits {
+ u8 action_type[0x4];
+ u8 src_field[0xc];
+ u8 reserved_at_10[0x3];
+ u8 src_offset[0x5];
+ u8 reserved_at_18[0x3];
+ u8 length[0x5];
+
+ u8 reserved_at_20[0x4];
+ u8 dst_field[0xc];
+ u8 reserved_at_30[0x3];
+ u8 dst_offset[0x5];
+ u8 reserved_at_38[0x8];
+};
+
union mlx5_ifc_set_action_in_add_action_in_auto_bits {
struct mlx5_ifc_set_action_in_bits set_action_in;
struct mlx5_ifc_add_action_in_bits add_action_in;
+ struct mlx5_ifc_copy_action_in_bits copy_action_in;
u8 reserved_at_0[0x40];
};
enum {
MLX5_ACTION_TYPE_SET = 0x1,
MLX5_ACTION_TYPE_ADD = 0x2,
+ MLX5_ACTION_TYPE_COPY = 0x3,
};
enum {
@@ -5510,6 +5685,8 @@ enum {
MLX5_ACTION_IN_FIELD_METADATA_REG_C_3 = 0x54,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_4 = 0x55,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_5 = 0x56,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_6 = 0x57,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_7 = 0x58,
MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59,
MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B,
};
@@ -8406,6 +8583,18 @@ struct mlx5_ifc_pplm_reg_bits {
u8 fec_override_admin_50g[0x4];
u8 fec_override_admin_25g[0x4];
u8 fec_override_admin_10g_40g[0x4];
+
+ u8 fec_override_cap_400g_8x[0x10];
+ u8 fec_override_cap_200g_4x[0x10];
+
+ u8 fec_override_cap_100g_2x[0x10];
+ u8 fec_override_cap_50g_1x[0x10];
+
+ u8 fec_override_admin_400g_8x[0x10];
+ u8 fec_override_admin_200g_4x[0x10];
+
+ u8 fec_override_admin_100g_2x[0x10];
+ u8 fec_override_admin_50g_1x[0x10];
};
struct mlx5_ifc_ppcnt_reg_bits {
@@ -8732,7 +8921,9 @@ struct mlx5_ifc_mpegc_reg_bits {
};
struct mlx5_ifc_pcam_enhanced_features_bits {
- u8 reserved_at_0[0x6d];
+ u8 reserved_at_0[0x68];
+ u8 fec_50G_per_lane_in_pplm[0x1];
+ u8 reserved_at_69[0x4];
u8 rx_icrc_encapsulated_counter[0x1];
u8 reserved_at_6e[0x4];
u8 ptys_extended_ethernet[0x1];
@@ -8817,6 +9008,28 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 regs_31_to_0[0x20];
};
+struct mlx5_ifc_mcam_access_reg_bits1 {
+ u8 regs_127_to_96[0x20];
+
+ u8 regs_95_to_64[0x20];
+
+ u8 regs_63_to_32[0x20];
+
+ u8 regs_31_to_0[0x20];
+};
+
+struct mlx5_ifc_mcam_access_reg_bits2 {
+ u8 regs_127_to_99[0x1d];
+ u8 mirc[0x1];
+ u8 regs_97_to_96[0x2];
+
+ u8 regs_95_to_64[0x20];
+
+ u8 regs_63_to_32[0x20];
+
+ u8 regs_31_to_0[0x20];
+};
+
struct mlx5_ifc_mcam_reg_bits {
u8 reserved_at_0[0x8];
u8 feature_group[0x8];
@@ -8827,6 +9040,8 @@ struct mlx5_ifc_mcam_reg_bits {
union {
struct mlx5_ifc_mcam_access_reg_bits access_regs;
+ struct mlx5_ifc_mcam_access_reg_bits1 access_regs1;
+ struct mlx5_ifc_mcam_access_reg_bits2 access_regs2;
u8 reserved_at_0[0x80];
} mng_access_reg_cap_mask;
@@ -9432,6 +9647,13 @@ struct mlx5_ifc_mcda_reg_bits {
u8 data[0][0x20];
};
+struct mlx5_ifc_mirc_reg_bits {
+ u8 reserved_at_0[0x18];
+ u8 status_code[0x8];
+
+ u8 reserved_at_20[0x20];
+};
+
union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@ -9487,6 +9709,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_mcqi_reg_bits mcqi_reg;
struct mlx5_ifc_mcc_reg_bits mcc_reg;
struct mlx5_ifc_mcda_reg_bits mcda_reg;
+ struct mlx5_ifc_mirc_reg_bits mirc_reg;
u8 reserved_at_0[0x60e0];
};
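MLX5_ACTION_TYPE_COPY plus the copy_action_in layout allow a modify-header action to copy between fields, for example between the metadata C registers now exposed in the flow table fields. A hedged sketch of filling one action with MLX5_SET; treating length 0 as "whole 32-bit field" is an assumption borrowed from the set-action convention:

    static void foo_fill_copy_action(void *action)
    {
            MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
            MLX5_SET(copy_action_in, action, src_field,
                     MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
            MLX5_SET(copy_action_in, action, src_offset, 0);
            MLX5_SET(copy_action_in, action, dst_field,
                     MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
            MLX5_SET(copy_action_in, action, dst_offset, 0);
            MLX5_SET(copy_action_in, action, length, 0);
    }
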
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 08b25c02b5a1..2e9a6e4634eb 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -41,8 +41,10 @@
#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356
+#define SDIO_DEVICE_ID_BROADCOM_4359 0x4359
#define SDIO_DEVICE_ID_CYPRESS_4373 0x4373
#define SDIO_DEVICE_ID_CYPRESS_43012 43012
+#define SDIO_DEVICE_ID_CYPRESS_89359 0x4355
#define SDIO_VENDOR_ID_INTEL 0x0089
#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402
diff --git a/include/linux/net.h b/include/linux/net.h
index 9cafb5f353a9..6451425e828f 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -171,6 +171,7 @@ struct proto_ops {
int (*compat_getsockopt)(struct socket *sock, int level,
int optname, char __user *optval, int __user *optlen);
#endif
+ void (*show_fdinfo)(struct seq_file *m, struct socket *sock);
int (*sendmsg) (struct socket *sock, struct msghdr *m,
size_t total_len);
/* Notes for implementing recvmsg:
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 4b19c544c59a..34d050bb1ae6 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -53,8 +53,9 @@ enum {
NETIF_F_GSO_ESP_BIT, /* ... ESP with TSO */
NETIF_F_GSO_UDP_BIT, /* ... UFO, deprecated except tuntap */
NETIF_F_GSO_UDP_L4_BIT, /* ... UDP payload GSO (not UFO) */
+ NETIF_F_GSO_FRAGLIST_BIT, /* ... Fraglist GSO */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
- NETIF_F_GSO_UDP_L4_BIT,
+ NETIF_F_GSO_FRAGLIST_BIT,
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
@@ -80,6 +81,7 @@ enum {
NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */
NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */
+ NETIF_F_GRO_FRAGLIST_BIT, /* Fraglist GRO */
/*
* Add your fresh new feature above and remember to update
@@ -150,6 +152,8 @@ enum {
#define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4)
#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
+#define NETIF_F_GRO_FRAGLIST __NETIF_F(GRO_FRAGLIST)
+#define NETIF_F_GSO_FRAGLIST __NETIF_F(GSO_FRAGLIST)
/* Finds the next feature with the highest number of the range of start till 0.
*/
@@ -226,6 +230,9 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
/* changeable features with no special hardware requirements */
#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
+/* Changeable features with no special hardware requirements that default to off. */
+#define NETIF_F_SOFT_FEATURES_OFF NETIF_F_GRO_FRAGLIST
+
#define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX | \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index cac56fb59af8..a9c6b5c61d27 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -849,6 +849,8 @@ enum tc_setup_type {
TC_SETUP_QDISC_GRED,
TC_SETUP_QDISC_TAPRIO,
TC_SETUP_FT,
+ TC_SETUP_QDISC_ETS,
+ TC_SETUP_QDISC_TBF,
};
/* These structures hold the attributes of bpf state that are being passed
@@ -875,6 +877,7 @@ enum bpf_netdev_command {
struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
+struct xdp_dev_bulk_queue;
struct netdev_bpf {
enum bpf_netdev_command command;
@@ -936,6 +939,11 @@ struct netdev_name_node {
int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
+struct netdev_net_notifier {
+ struct list_head list;
+ struct notifier_block *nb;
+};
+
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
@@ -1014,7 +1022,7 @@ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
* Called when a user wants to change the Maximum Transfer Unit
* of a device.
*
- * void (*ndo_tx_timeout)(struct net_device *dev);
+ * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
* Callback used when the transmitter has not made any progress
* for dev->watchdog ticks.
*
@@ -1281,7 +1289,8 @@ struct net_device_ops {
int new_mtu);
int (*ndo_neigh_setup)(struct net_device *dev,
struct neigh_parms *);
- void (*ndo_tx_timeout) (struct net_device *dev);
+ void (*ndo_tx_timeout) (struct net_device *dev,
+ unsigned int txqueue);
void (*ndo_get_stats64)(struct net_device *dev,
struct rtnl_link_stats64 *storage);
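ndo_tx_timeout() now receives the index of the queue whose watchdog fired, so drivers can report and recover per queue. A minimal sketch of a callback converted to the new prototype ('foo_priv' and its reset work are hypothetical):

    struct foo_priv {
            struct work_struct reset_work;
    };

    static void foo_tx_timeout(struct net_device *dev, unsigned int txqueue)
    {
            struct foo_priv *priv = netdev_priv(dev);

            netdev_err(dev, "TX timeout on queue %u\n", txqueue);
            schedule_work(&priv->reset_work);
    }
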
@@ -1707,6 +1716,7 @@ enum netdev_priv_flags {
* @miniq_ingress: ingress/clsact qdisc specific data for
* ingress processing
* @ingress_queue: XXX: need comments on this one
+ * @nf_hooks_ingress: netfilter hooks executed for ingress packets
* @broadcast: hw bcast address
*
* @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
@@ -1788,6 +1798,10 @@ enum netdev_priv_flags {
*
* @wol_enabled: Wake-on-LAN is enabled
*
+ * @net_notifier_list: List of per-net netdev notifier block
+ * that follow this device when it is moved
+ * to another network namespace.
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -1983,12 +1997,10 @@ struct net_device {
unsigned int num_tx_queues;
unsigned int real_num_tx_queues;
struct Qdisc *qdisc;
-#ifdef CONFIG_NET_SCHED
- DECLARE_HASHTABLE (qdisc_hash, 4);
-#endif
unsigned int tx_queue_len;
spinlock_t tx_global_lock;
- int watchdog_timeo;
+
+ struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
#ifdef CONFIG_XPS
struct xps_dev_maps __rcu *xps_cpus_map;
@@ -1998,11 +2010,15 @@ struct net_device {
struct mini_Qdisc __rcu *miniq_egress;
#endif
+#ifdef CONFIG_NET_SCHED
+ DECLARE_HASHTABLE (qdisc_hash, 4);
+#endif
/* These may be needed for future network-power-down code. */
struct timer_list watchdog_timer;
+ int watchdog_timeo;
- int __percpu *pcpu_refcnt;
struct list_head todo_list;
+ int __percpu *pcpu_refcnt;
struct list_head link_watch_list;
@@ -2078,6 +2094,8 @@ struct net_device {
struct lock_class_key addr_list_lock_key;
bool proto_down;
unsigned wol_enabled:1;
+
+ struct list_head net_notifier_list;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2319,7 +2337,8 @@ struct napi_gro_cb {
/* Number of gro_receive callbacks this packet already went through */
u8 recursion_counter:4;
- /* 1 bit hole */
+ /* GRO is done by frag_list pointer chaining. */
+ u8 is_flist:1;
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
@@ -2521,6 +2540,12 @@ int unregister_netdevice_notifier(struct notifier_block *nb);
int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
int unregister_netdevice_notifier_net(struct net *net,
struct notifier_block *nb);
+int register_netdevice_notifier_dev_net(struct net_device *dev,
+ struct notifier_block *nb,
+ struct netdev_net_notifier *nn);
+int unregister_netdevice_notifier_dev_net(struct net_device *dev,
+ struct notifier_block *nb,
+ struct netdev_net_notifier *nn);
struct netdev_notifier_info {
struct net_device *dev;
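The *_dev_net variants register a per-net notifier tied to one device; the netdev_net_notifier bookkeeping lets the core move the registration along when the device changes network namespace. A minimal sketch with a hypothetical driver-private structure:

	struct example_dev_priv {
		struct notifier_block nb;
		struct netdev_net_notifier nn;
	};

	static int example_register(struct net_device *dev, struct example_dev_priv *p)
	{
		/* the core keeps &p->nn on dev->net_notifier_list */
		return register_netdevice_notifier_dev_net(dev, &p->nb, &p->nn);
	}

	static void example_unregister(struct net_device *dev, struct example_dev_priv *p)
	{
		unregister_netdevice_notifier_dev_net(dev, &p->nb, &p->nn);
	}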
@@ -2687,6 +2712,7 @@ struct net_device *dev_get_by_napi_id(unsigned int napi_id);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
@@ -2823,16 +2849,16 @@ static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
}
static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
- __sum16 check, __wsum pseudo)
+ __wsum pseudo)
{
NAPI_GRO_CB(skb)->csum = ~pseudo;
NAPI_GRO_CB(skb)->csum_valid = 1;
}
-#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
+#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \
do { \
if (__skb_gro_checksum_convert_check(skb)) \
- __skb_gro_checksum_convert(skb, check, \
+ __skb_gro_checksum_convert(skb, \
compute_pseudo(skb, proto)); \
} while (0)
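Callers of skb_gro_checksum_try_convert() drop the now-unused check argument; a typical call site in a GRO receive handler looks roughly like this fragment (the rest of the handler is elided):

	static struct sk_buff *example_gro_receive(struct list_head *head,
						   struct sk_buff *skb)
	{
		/* new form: protocol plus pseudo-header helper only */
		skb_gro_checksum_try_convert(skb, IPPROTO_TCP,
					     inet_gro_compute_pseudo);
		/* remaining GRO matching elided */
		return NULL;
	}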
@@ -3887,22 +3913,48 @@ void netif_device_attach(struct net_device *dev);
*/
enum {
- NETIF_MSG_DRV = 0x0001,
- NETIF_MSG_PROBE = 0x0002,
- NETIF_MSG_LINK = 0x0004,
- NETIF_MSG_TIMER = 0x0008,
- NETIF_MSG_IFDOWN = 0x0010,
- NETIF_MSG_IFUP = 0x0020,
- NETIF_MSG_RX_ERR = 0x0040,
- NETIF_MSG_TX_ERR = 0x0080,
- NETIF_MSG_TX_QUEUED = 0x0100,
- NETIF_MSG_INTR = 0x0200,
- NETIF_MSG_TX_DONE = 0x0400,
- NETIF_MSG_RX_STATUS = 0x0800,
- NETIF_MSG_PKTDATA = 0x1000,
- NETIF_MSG_HW = 0x2000,
- NETIF_MSG_WOL = 0x4000,
+ NETIF_MSG_DRV_BIT,
+ NETIF_MSG_PROBE_BIT,
+ NETIF_MSG_LINK_BIT,
+ NETIF_MSG_TIMER_BIT,
+ NETIF_MSG_IFDOWN_BIT,
+ NETIF_MSG_IFUP_BIT,
+ NETIF_MSG_RX_ERR_BIT,
+ NETIF_MSG_TX_ERR_BIT,
+ NETIF_MSG_TX_QUEUED_BIT,
+ NETIF_MSG_INTR_BIT,
+ NETIF_MSG_TX_DONE_BIT,
+ NETIF_MSG_RX_STATUS_BIT,
+ NETIF_MSG_PKTDATA_BIT,
+ NETIF_MSG_HW_BIT,
+ NETIF_MSG_WOL_BIT,
+
+ /* When you add a new bit above, update netif_msg_class_names array
+ * in net/ethtool/common.c
+ */
+ NETIF_MSG_CLASS_COUNT,
};
+/* Both ethtool_ops interface and internal driver implementation use u32 */
+static_assert(NETIF_MSG_CLASS_COUNT <= 32);
+
+#define __NETIF_MSG_BIT(bit) ((u32)1 << (bit))
+#define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)
+
+#define NETIF_MSG_DRV __NETIF_MSG(DRV)
+#define NETIF_MSG_PROBE __NETIF_MSG(PROBE)
+#define NETIF_MSG_LINK __NETIF_MSG(LINK)
+#define NETIF_MSG_TIMER __NETIF_MSG(TIMER)
+#define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN)
+#define NETIF_MSG_IFUP __NETIF_MSG(IFUP)
+#define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR)
+#define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR)
+#define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED)
+#define NETIF_MSG_INTR __NETIF_MSG(INTR)
+#define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE)
+#define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS)
+#define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA)
+#define NETIF_MSG_HW __NETIF_MSG(HW)
+#define NETIF_MSG_WOL __NETIF_MSG(WOL)
#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
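The NETIF_MSG_* masks keep their numeric values; they are now generated from a bit-number enum so the ethtool netlink code can name each bit. Driver usage is unchanged, e.g. a sketch with a hypothetical private struct:

	struct example_msg_priv {
		u32 msg_enable;
	};

	static void example_init_msg(struct example_msg_priv *priv, int debug)
	{
		priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
							 NETIF_MSG_PROBE |
							 NETIF_MSG_LINK);
		if (netif_msg_probe(priv))
			pr_info("probe messages enabled\n");
	}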
@@ -4393,6 +4445,15 @@ struct netdev_notifier_bonding_info {
void netdev_bonding_info_change(struct net_device *dev,
struct netdev_bonding_info *bonding_info);
+#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
+void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);
+#else
+static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
+ const void *data)
+{
+}
+#endif
+
static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
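ethtool_notify() lets core or driver code raise an ethtool netlink notification, and compiles to a no-op without CONFIG_ETHTOOL_NETLINK. A hedged fragment; the command constant is assumed to come from the new uapi ethtool_netlink.h added elsewhere in this series:

	static void example_link_settings_changed(struct net_device *dev)
	{
		/* assumption: ETHTOOL_MSG_LINKINFO_NTF is defined in
		 * include/uapi/linux/ethtool_netlink.h from this series
		 */
		ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF, NULL);
	}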
@@ -4554,6 +4615,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
}
diff --git a/include/linux/phy.h b/include/linux/phy.h
index dd4a91f1feaa..c570e162e05e 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -17,10 +17,12 @@
#include <linux/linkmode.h>
#include <linux/mdio.h>
#include <linux/mii.h>
+#include <linux/mii_timestamper.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mod_devicetable.h>
+#include <linux/u64_stats_sync.h>
#include <linux/atomic.h>
@@ -99,9 +101,11 @@ typedef enum {
PHY_INTERFACE_MODE_2500BASEX,
PHY_INTERFACE_MODE_RXAUI,
PHY_INTERFACE_MODE_XAUI,
- /* 10GBASE-KR, XFI, SFI - single lane 10G Serdes */
- PHY_INTERFACE_MODE_10GKR,
+ /* 10GBASE-R, XFI, SFI - single lane 10G Serdes */
+ PHY_INTERFACE_MODE_10GBASER,
PHY_INTERFACE_MODE_USXGMII,
+ /* 10GBASE-KR - with Clause 73 AN */
+ PHY_INTERFACE_MODE_10GKR,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
@@ -175,10 +179,12 @@ static inline const char *phy_modes(phy_interface_t interface)
return "rxaui";
case PHY_INTERFACE_MODE_XAUI:
return "xaui";
- case PHY_INTERFACE_MODE_10GKR:
- return "10gbase-kr";
+ case PHY_INTERFACE_MODE_10GBASER:
+ return "10gbase-r";
case PHY_INTERFACE_MODE_USXGMII:
return "usxgmii";
+ case PHY_INTERFACE_MODE_10GKR:
+ return "10gbase-kr";
default:
return "unknown";
}
@@ -207,6 +213,15 @@ struct sfp_bus;
struct sfp_upstream_ops;
struct sk_buff;
+struct mdio_bus_stats {
+ u64_stats_t transfers;
+ u64_stats_t errors;
+ u64_stats_t writes;
+ u64_stats_t reads;
+ /* Must be last, add new statistics above */
+ struct u64_stats_sync syncp;
+};
+
/*
* The Bus class for PHYs. Devices which provide access to
* PHYs should register using this structure
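Each MDIO bus now keeps per-address transfer counters guarded by u64_stats_sync. A sketch of how the MDIO core might account one transfer, using the standard u64_stats helpers (function name hypothetical):

	static void example_mdio_stat_acct(struct mdio_bus_stats *stats, bool read, int ret)
	{
		u64_stats_update_begin(&stats->syncp);
		u64_stats_inc(&stats->transfers);
		if (ret < 0)
			u64_stats_inc(&stats->errors);
		else if (read)
			u64_stats_inc(&stats->reads);
		else
			u64_stats_inc(&stats->writes);
		u64_stats_update_end(&stats->syncp);
	}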
@@ -219,6 +234,7 @@ struct mii_bus {
int (*read)(struct mii_bus *bus, int addr, int regnum);
int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
int (*reset)(struct mii_bus *bus);
+ struct mdio_bus_stats stats[PHY_MAX_ADDR];
/*
* A lock to ensure that only one thing can read/write
@@ -327,6 +343,9 @@ struct phy_c45_device_ids {
u32 device_ids[8];
};
+struct macsec_context;
+struct macsec_ops;
+
/* phy_device: An instance of a PHY
*
* drv: Pointer to the driver for this PHY instance
@@ -349,6 +368,7 @@ struct phy_c45_device_ids {
* attached_dev: The attached enet driver's device instance ptr
* adjust_link: Callback for the enet controller to respond to
* changes in the link state.
+ * macsec_ops: MACsec offloading ops.
*
* speed, duplex, pause, supported, advertising, lp_advertising,
* and autoneg are used like in mii_if_info
@@ -441,12 +461,18 @@ struct phy_device {
struct sfp_bus *sfp_bus;
struct phylink *phylink;
struct net_device *attached_dev;
+ struct mii_timestamper *mii_ts;
u8 mdix;
u8 mdix_ctrl;
void (*phy_link_change)(struct phy_device *, bool up, bool do_carrier);
void (*adjust_link)(struct net_device *dev);
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ /* MACsec management functions */
+ const struct macsec_ops *macsec_ops;
+#endif
};
#define to_phy_device(d) container_of(to_mdio_device(d), \
struct phy_device, mdio)
@@ -546,29 +572,6 @@ struct phy_driver {
*/
int (*match_phy_device)(struct phy_device *phydev);
- /* Handles ethtool queries for hardware time stamping. */
- int (*ts_info)(struct phy_device *phydev, struct ethtool_ts_info *ti);
-
- /* Handles SIOCSHWTSTAMP ioctl for hardware time stamping. */
- int (*hwtstamp)(struct phy_device *phydev, struct ifreq *ifr);
-
- /*
- * Requests a Rx timestamp for 'skb'. If the skb is accepted,
- * the phy driver promises to deliver it using netif_rx() as
- * soon as a timestamp becomes available. One of the
- * PTP_CLASS_ values is passed in 'type'. The function must
- * return true if the skb is accepted for delivery.
- */
- bool (*rxtstamp)(struct phy_device *dev, struct sk_buff *skb, int type);
-
- /*
- * Requests a Tx timestamp for 'skb'. The phy driver promises
- * to deliver it using skb_complete_tx_timestamp() as soon as a
- * timestamp becomes available. One of the PTP_CLASS_ values
- * is passed in 'type'.
- */
- void (*txtstamp)(struct phy_device *dev, struct sk_buff *skb, int type);
-
/* Some devices (e.g. qnap TS-119P II) require PHY register changes to
* enable Wake on LAN, so set_wol is provided to be called in the
* ethernet driver's set_wol function. */
@@ -937,6 +940,66 @@ static inline bool phy_polling_mode(struct phy_device *phydev)
}
/**
+ * phy_has_hwtstamp - Tests whether a PHY supports hardware time stamp configuration.
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_has_hwtstamp(struct phy_device *phydev)
+{
+ return phydev && phydev->mii_ts && phydev->mii_ts->hwtstamp;
+}
+
+/**
+ * phy_has_rxtstamp - Tests whether a PHY supports receive time stamping.
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_has_rxtstamp(struct phy_device *phydev)
+{
+ return phydev && phydev->mii_ts && phydev->mii_ts->rxtstamp;
+}
+
+/**
+ * phy_has_tsinfo - Tests whether a PHY reports time stamping and/or
+ * PTP hardware clock capabilities.
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_has_tsinfo(struct phy_device *phydev)
+{
+ return phydev && phydev->mii_ts && phydev->mii_ts->ts_info;
+}
+
+/**
+ * phy_has_txtstamp - Tests whether a PHY supports transmit time stamping.
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_has_txtstamp(struct phy_device *phydev)
+{
+ return phydev && phydev->mii_ts && phydev->mii_ts->txtstamp;
+}
+
+static inline int phy_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
+{
+ return phydev->mii_ts->hwtstamp(phydev->mii_ts, ifr);
+}
+
+static inline bool phy_rxtstamp(struct phy_device *phydev, struct sk_buff *skb,
+ int type)
+{
+ return phydev->mii_ts->rxtstamp(phydev->mii_ts, skb, type);
+}
+
+static inline int phy_ts_info(struct phy_device *phydev,
+ struct ethtool_ts_info *tsinfo)
+{
+ return phydev->mii_ts->ts_info(phydev->mii_ts, tsinfo);
+}
+
+static inline void phy_txtstamp(struct phy_device *phydev, struct sk_buff *skb,
+ int type)
+{
+ phydev->mii_ts->txtstamp(phydev->mii_ts, skb, type);
+}
+
+/**
* phy_is_internal - Convenience function for testing if a PHY is internal
* @phydev: the phy_device struct
*/
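The PHY time stamping hooks move from phy_driver into the new struct mii_timestamper referenced by phydev->mii_ts; MAC drivers keep using the phy_has_*()/phy_*() wrappers, and the new phy_do_ioctl()/phy_do_ioctl_running() helpers declared further down cover the common ndo_do_ioctl case. A hedged sketch of the hardware time stamp ioctl path:

	static int example_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
	{
		struct phy_device *phydev = dev->phydev;

		if (!phydev)
			return -ENODEV;

		/* hand SIOCSHWTSTAMP to the attached MII time stamper, if any */
		if (cmd == SIOCSHWTSTAMP && phy_has_hwtstamp(phydev))
			return phy_hwtstamp(phydev, ifr);

		return phy_mii_ioctl(phydev, ifr, cmd);
	}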
@@ -1088,17 +1151,21 @@ static inline void phy_unlock_mdio_bus(struct phy_device *phydev)
void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
__printf(2, 3);
+char *phy_attached_info_irq(struct phy_device *phydev)
+ __malloc;
void phy_attached_info(struct phy_device *phydev);
/* Clause 22 PHY */
int genphy_read_abilities(struct phy_device *phydev);
int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
+int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart);
int genphy_config_eee_advert(struct phy_device *phydev);
int __genphy_config_aneg(struct phy_device *phydev, bool changed);
int genphy_aneg_done(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
int genphy_read_lpa(struct phy_device *phydev);
+int genphy_read_status_fixed(struct phy_device *phydev);
int genphy_read_status(struct phy_device *phydev);
int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
@@ -1175,6 +1242,8 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
int phy_ethtool_ksettings_set(struct phy_device *phydev,
const struct ethtool_link_ksettings *cmd);
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
+int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd);
void phy_request_interrupt(struct phy_device *phydev);
void phy_free_interrupt(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index fed5488e3c75..523209e70947 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -63,10 +63,12 @@ enum phylink_op_type {
* struct phylink_config - PHYLINK configuration structure
* @dev: a pointer to a struct device associated with the MAC
* @type: operation type of PHYLINK instance
+ * @pcs_poll: MAC PCS cannot provide link change interrupt
*/
struct phylink_config {
struct device *dev;
enum phylink_op_type type;
+ bool pcs_poll;
};
/**
diff --git a/include/linux/platform_data/eth_ixp4xx.h b/include/linux/platform_data/eth_ixp4xx.h
new file mode 100644
index 000000000000..6f652ea0c6ae
--- /dev/null
+++ b/include/linux/platform_data/eth_ixp4xx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PLATFORM_DATA_ETH_IXP4XX
+#define __PLATFORM_DATA_ETH_IXP4XX
+
+#include <linux/types.h>
+
+#define IXP4XX_ETH_NPEA 0x00
+#define IXP4XX_ETH_NPEB 0x10
+#define IXP4XX_ETH_NPEC 0x20
+
+/* Information about built-in Ethernet MAC interfaces */
+struct eth_plat_info {
+ u8 phy; /* MII PHY ID, 0 - 31 */
+ u8 rxq; /* configurable, currently 0 - 31 only */
+ u8 txreadyq;
+ u8 hwaddr[6];
+};
+
+#endif
diff --git a/include/linux/platform_data/wan_ixp4xx_hss.h b/include/linux/platform_data/wan_ixp4xx_hss.h
new file mode 100644
index 000000000000..d525a0feb9e1
--- /dev/null
+++ b/include/linux/platform_data/wan_ixp4xx_hss.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PLATFORM_DATA_WAN_IXP4XX_HSS_H
+#define __PLATFORM_DATA_WAN_IXP4XX_HSS_H
+
+#include <linux/types.h>
+
+/* Information about built-in HSS (synchronous serial) interfaces */
+struct hss_plat_info {
+ int (*set_clock)(int port, unsigned int clock_type);
+ int (*open)(int port, void *pdev,
+ void (*set_carrier_cb)(void *pdev, int carrier));
+ void (*close)(int port, void *pdev);
+ u8 txreadyq;
+ u32 timer_freq;
+};
+
+#endif
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 93cc4f1d444a..c64a1ef87240 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -243,6 +243,13 @@ int ptp_find_pin(struct ptp_clock *ptp,
int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);
+/**
+ * ptp_cancel_worker_sync() - cancel the PTP clock's auxiliary worker
+ *
+ * @ptp: The clock obtained from ptp_clock_register().
+ */
+void ptp_cancel_worker_sync(struct ptp_clock *ptp);
+
#else
static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
struct device *parent)
@@ -260,6 +267,8 @@ static inline int ptp_find_pin(struct ptp_clock *ptp,
static inline int ptp_schedule_worker(struct ptp_clock *ptp,
unsigned long delay)
{ return -EOPNOTSUPP; }
+static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp)
+{ }
#endif
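ptp_cancel_worker_sync() pairs with ptp_schedule_worker(): a driver that scheduled periodic work on the PTP auxiliary worker can now cancel it synchronously before unregistering the clock. Roughly:

	static void example_ptp_teardown(struct ptp_clock *ptp)
	{
		/* make sure the do_aux_work callback is no longer running */
		ptp_cancel_worker_sync(ptp);
		ptp_clock_unregister(ptp);
	}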
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 0abe9a4fc842..417db0a79a62 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
+#include <linux/mm.h>
#include <asm/errno.h>
#endif
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 03f59a28fefd..2c4737e6694a 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -76,7 +76,6 @@
#define FW_ASSERT_GENERAL_ATTN_IDX 32
-#define MAX_PINNED_CCFC 32
/* Queue Zone sizes in bytes */
#define TSTORM_QZONE_SIZE 8
@@ -105,12 +104,19 @@
#define CORE_SPQE_PAGE_SIZE_BYTES 4096
-#define MAX_NUM_LL2_RX_QUEUES 48
-#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
+/* Number of LL2 RAM based queues */
+#define MAX_NUM_LL2_RX_RAM_QUEUES 32
+
+/* Number of LL2 context based queues */
+#define MAX_NUM_LL2_RX_CTX_QUEUES 208
+#define MAX_NUM_LL2_RX_QUEUES \
+ (MAX_NUM_LL2_RX_RAM_QUEUES + MAX_NUM_LL2_RX_CTX_QUEUES)
+
+#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 37
-#define FW_REVISION_VERSION 7
+#define FW_MINOR_VERSION 42
+#define FW_REVISION_VERSION 2
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -132,10 +138,10 @@
#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
-#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS)
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
-#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS)
+#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
+#define MAX_NUM_FUNCTIONS (MAX_FUNCTION_NUMBER_K2)
#define MAX_NUM_VPORTS_K2 (208)
#define MAX_NUM_VPORTS_BB (160)
@@ -222,6 +228,7 @@
#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
+#define DQ_XCM_ROCE_ACK_EDPM_DORQ_SEQ_CMD DQ_XCM_AGG_VAL_SEL_WORD5
/* UCM agg val selection (HW) */
#define DQ_UCM_AGG_VAL_SEL_WORD0 0
@@ -340,6 +347,10 @@
#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
+/* DQ_DEMS_AGG_VAL_BASE */
+#define DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE \
+ (DQ_PWM_OFFSET_TCM32_BASE + DQ_TCM_AGG_VAL_SEL_REG9 - 4)
+
#define DQ_REGION_SHIFT (12)
/* DPM */
@@ -395,6 +406,7 @@
/* Number of Protocol Indices per Status Block */
#define PIS_PER_SB_E4 12
+#define MAX_PIS_PER_SB PIS_PER_SB
#define CAU_HC_STOPPED_STATE 3
#define CAU_HC_DISABLE_STATE 4
@@ -425,8 +437,6 @@
#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
#define IGU_CMD_INT_ACK_BASE 0x0400
-#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
- MAX_TOT_SB_PER_PATH - 1)
#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
@@ -439,8 +449,6 @@
#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6
#define IGU_CMD_PROD_UPD_BASE 0x0600
-#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
- MAX_TOT_SB_PER_PATH - 1)
#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
/*****************/
@@ -652,8 +660,8 @@
#define PBF_MAX_CMD_LINES 3328
/* Number of BTB blocks. Each block is 256B. */
-#define BTB_MAX_BLOCKS 1440
-
+#define BTB_MAX_BLOCKS_BB 1440
+#define BTB_MAX_BLOCKS_K2 1840
/*****************/
/* PRS CONSTANTS */
/*****************/
@@ -730,6 +738,8 @@ enum protocol_type {
PROTOCOLID_PREROCE,
PROTOCOLID_COMMON,
PROTOCOLID_RESERVED1,
+ PROTOCOLID_RDMA,
+ PROTOCOLID_SCSI,
MAX_PROTOCOL_TYPE
};
@@ -750,6 +760,10 @@ union rdma_eqe_data {
struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
};
+struct tstorm_queue_zone {
+ __le32 reserved[2];
+};
+
/* Ustorm Queue Zone */
struct ustorm_eth_queue_zone {
struct coalescing_timeset int_coalescing_timeset;
@@ -872,8 +886,8 @@ struct db_l2_dpm_data {
#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
-#define DB_L2_DPM_DATA_GFS_SRC_EN_MASK 0x1
-#define DB_L2_DPM_DATA_GFS_SRC_EN_SHIFT 31
+#define DB_L2_DPM_DATA_TGFS_SRC_EN_MASK 0x1
+#define DB_L2_DPM_DATA_TGFS_SRC_EN_SHIFT 31
};
/* Structure for SGE in a DPM doorbell of type DPM_L2_BD */
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index d9416ad5ef59..95f5fd615852 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -38,9 +38,11 @@
/********************/
#define ETH_HSI_VER_MAJOR 3
-#define ETH_HSI_VER_MINOR 10
+#define ETH_HSI_VER_MINOR 11
-#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
+#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
+/* Maximum number of pinned L2 connections (CIDs) */
+#define ETH_PINNED_CONN_MAX_NUM 32
#define ETH_CACHE_LINE_SIZE 64
#define ETH_RX_CQE_GAP 32
@@ -61,6 +63,7 @@
#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
+#define ETH_TX_MIN_BDS_PER_PKT_W_VPORT_FORWARDING 4
#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
#define ETH_TX_MAX_LSO_HDR_BYTES 510
#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
@@ -75,9 +78,8 @@
#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
(ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
-/* Maximum number of buffers, used for RX packet placement */
#define ETH_RX_MAX_BUFF_PER_PKT 5
-#define ETH_RX_BD_THRESHOLD 12
+#define ETH_RX_BD_THRESHOLD 16
/* Num of MAC/VLAN filters */
#define ETH_NUM_MAC_FILTERS 512
@@ -96,24 +98,24 @@
#define ETH_RSS_ENGINE_NUM_BB 127
/* TPA constants */
-#define ETH_TPA_MAX_AGGS_NUM 64
-#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
-#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
-#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
+#define ETH_TPA_MAX_AGGS_NUM 64
+#define ETH_TPA_CQE_START_BW_LEN_LIST_SIZE 2
+#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
+#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
/* Control frame check constants */
-#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
+#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
/* GFS constants */
#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */
/* Destination port mode */
-enum dest_port_mode {
- DEST_PORT_PHY,
- DEST_PORT_LOOPBACK,
- DEST_PORT_PHY_LOOPBACK,
- DEST_PORT_DROP,
- MAX_DEST_PORT_MODE
+enum dst_port_mode {
+ DST_PORT_PHY,
+ DST_PORT_LOOPBACK,
+ DST_PORT_PHY_LOOPBACK,
+ DST_PORT_DROP,
+ MAX_DST_PORT_MODE
};
/* Ethernet address type */
@@ -167,8 +169,8 @@ struct eth_tx_data_2nd_bd {
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
+#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_SHIFT 6
#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
@@ -244,8 +246,9 @@ struct eth_fast_path_rx_reg_cqe {
struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 bd_num;
u8 reserved;
- __le16 flow_id;
- u8 reserved1[11];
+ __le16 reserved2;
+ __le32 flow_id_or_resource_id;
+ u8 reserved1[7];
struct eth_pmd_flow_flags pmd_flags;
};
@@ -296,9 +299,10 @@ struct eth_fast_path_rx_tpa_start_cqe {
struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 tpa_agg_index;
u8 header_len;
- __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
- __le16 flow_id;
- u8 reserved;
+ __le16 bw_ext_bd_len_list[ETH_TPA_CQE_START_BW_LEN_LIST_SIZE];
+ __le16 reserved2;
+ __le32 flow_id_or_resource_id;
+ u8 reserved[3];
struct eth_pmd_flow_flags pmd_flags;
};
@@ -407,6 +411,29 @@ struct eth_tx_3rd_bd {
struct eth_tx_data_3rd_bd data;
};
+/* The parsing information data for the fourth tx bd of a given packet. */
+struct eth_tx_data_4th_bd {
+ u8 dst_vport_id;
+ u8 reserved4;
+ __le16 bitfields;
+#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_MASK 0x1
+#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_SHIFT 0
+#define ETH_TX_DATA_4TH_BD_RESERVED1_MASK 0x7F
+#define ETH_TX_DATA_4TH_BD_RESERVED1_SHIFT 1
+#define ETH_TX_DATA_4TH_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_4TH_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_4TH_BD_RESERVED2_MASK 0x7F
+#define ETH_TX_DATA_4TH_BD_RESERVED2_SHIFT 9
+ __le16 reserved3;
+};
+
+/* The fourth tx bd of a given packet */
+struct eth_tx_4th_bd {
+ struct regpair addr; /* Single continuous buffer */
+ __le16 nbytes; /* Number of bytes in this BD */
+ struct eth_tx_data_4th_bd data; /* Parsing information data */
+};
+
/* Complementary information for the regular tx bd of a given packet */
struct eth_tx_data_bd {
__le16 reserved0;
@@ -431,6 +458,7 @@ union eth_tx_bd_types {
struct eth_tx_1st_bd first_bd;
struct eth_tx_2nd_bd second_bd;
struct eth_tx_3rd_bd third_bd;
+ struct eth_tx_4th_bd fourth_bd;
struct eth_tx_bd reg_bd;
};
@@ -443,6 +471,12 @@ enum eth_tx_tunn_type {
MAX_ETH_TX_TUNN_TYPE
};
+/* Mstorm Queue Zone */
+struct mstorm_eth_queue_zone {
+ struct eth_rx_prod_data rx_producers;
+ __le32 reserved[3];
+};
+
/* Ystorm Queue Zone */
struct xstorm_eth_queue_zone {
struct coalescing_timeset int_coalescing_timeset;
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 66aba505ec56..2f0a771a9176 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -999,7 +999,6 @@ struct iscsi_conn_offload_params {
struct regpair r2tq_pbl_addr;
struct regpair xhq_pbl_addr;
struct regpair uhq_pbl_addr;
- __le32 initial_ack;
__le16 physical_q0;
__le16 physical_q1;
u8 flags;
@@ -1011,10 +1010,10 @@ struct iscsi_conn_offload_params {
#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
- u8 pbl_page_size_log;
- u8 pbe_page_size_log;
u8 default_cq;
+ __le16 reserved0;
__le32 stat_sn;
+ __le32 initial_ack;
};
/* iSCSI connection statistics */
@@ -1029,25 +1028,14 @@ struct iscsi_conn_stats_params {
__le32 reserved;
};
-/* spe message header */
-struct iscsi_slow_path_hdr {
- u8 op_code;
- u8 flags;
-#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF
-#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0
-#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7
-#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
-#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1
-#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7
-};
/* iSCSI connection update params passed by driver to FW in ISCSI update
*ramrod.
*/
struct iscsi_conn_update_ramrod_params {
- struct iscsi_slow_path_hdr hdr;
+ __le16 reserved0;
__le16 conn_id;
- __le32 fw_cid;
+ __le32 reserved1;
u8 flags;
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
@@ -1065,7 +1053,7 @@ struct iscsi_conn_update_ramrod_params {
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT 6
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT 7
- u8 reserved0[3];
+ u8 reserved3[3];
__le32 max_seq_size;
__le32 max_send_pdu_length;
__le32 max_recv_pdu_length;
@@ -1251,22 +1239,22 @@ enum iscsi_ramrod_cmd_id {
/* iSCSI connection termination request */
struct iscsi_spe_conn_mac_update {
- struct iscsi_slow_path_hdr hdr;
+ __le16 reserved0;
__le16 conn_id;
- __le32 fw_cid;
+ __le32 reserved1;
__le16 remote_mac_addr_lo;
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
- u8 reserved0[2];
+ u8 reserved2[2];
};
/* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in
* iSCSI offload ramrod.
*/
struct iscsi_spe_conn_offload {
- struct iscsi_slow_path_hdr hdr;
+ __le16 reserved0;
__le16 conn_id;
- __le32 fw_cid;
+ __le32 reserved1;
struct iscsi_conn_offload_params iscsi;
struct tcp_offload_params tcp;
};
@@ -1275,44 +1263,36 @@ struct iscsi_spe_conn_offload {
* iSCSI offload ramrod.
*/
struct iscsi_spe_conn_offload_option2 {
- struct iscsi_slow_path_hdr hdr;
+ __le16 reserved0;
__le16 conn_id;
- __le32 fw_cid;
+ __le32 reserved1;
struct iscsi_conn_offload_params iscsi;
struct tcp_offload_params_opt2 tcp;
};
/* iSCSI collect connection statistics request */
struct iscsi_spe_conn_statistics {
- struct iscsi_slow_path_hdr hdr;
+ __le16 reserved0;
__le16 conn_id;
- __le32 fw_cid;
+ __le32 reserved1;
u8 reset_stats;
- u8 reserved0[7];
+ u8 reserved2[7];
struct regpair stats_cnts_addr;
};
/* iSCSI connection termination request */
struct iscsi_spe_conn_termination {
- struct iscsi_slow_path_hdr hdr;
+ __le16 reserved0;
__le16 conn_id;
- __le32 fw_cid;
+ __le32 reserved1;
u8 abortive;
- u8 reserved0[7];
+ u8 reserved2[7];
struct regpair queue_cnts_addr;
struct regpair query_params_addr;
};
-/* iSCSI firmware function destroy parameters */
-struct iscsi_spe_func_dstry {
- struct iscsi_slow_path_hdr hdr;
- __le16 reserved0;
- __le32 reserved1;
-};
-
/* iSCSI firmware function init parameters */
struct iscsi_spe_func_init {
- struct iscsi_slow_path_hdr hdr;
__le16 half_way_close_timeout;
u8 num_sq_pages_in_ring;
u8 num_r2tq_pages_in_ring;
@@ -1324,8 +1304,12 @@ struct iscsi_spe_func_init {
#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F
#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1
struct iscsi_debug_modes debug_mode;
- __le16 reserved1;
- __le32 reserved2;
+ u8 params;
+#define ISCSI_SPE_FUNC_INIT_MAX_SYN_RT_MASK 0xF
+#define ISCSI_SPE_FUNC_INIT_MAX_SYN_RT_SHIFT 0
+#define ISCSI_SPE_FUNC_INIT_RESERVED1_MASK 0xF
+#define ISCSI_SPE_FUNC_INIT_RESERVED1_SHIFT 4
+ u8 reserved2[7];
struct scsi_init_func_params func_params;
struct scsi_init_func_queues q_params;
};
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index b5db1ee96d78..8f29e0d8a7b3 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -159,6 +159,7 @@ struct qed_dcbx_get {
enum qed_nvm_images {
QED_NVM_IMAGE_ISCSI_CFG,
QED_NVM_IMAGE_FCOE_CFG,
+ QED_NVM_IMAGE_MDUMP,
QED_NVM_IMAGE_NVM_CFG1,
QED_NVM_IMAGE_DEFAULT_CFG,
QED_NVM_IMAGE_NVM_META,
@@ -463,7 +464,7 @@ enum qed_db_rec_space {
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
-#define DIRECT_REG_WR64(reg_addr, val) writeq((u32)val, \
+#define DIRECT_REG_WR64(reg_addr, val) writeq((u64)val, \
(void __iomem *)(reg_addr))
#define QED_COALESCE_MAX 0x1FF
@@ -1177,6 +1178,17 @@ struct qed_common_ops {
#define GET_FIELD(value, name) \
(((value) >> (name ## _SHIFT)) & name ## _MASK)
+#define GET_MFW_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _OFFSET))
+
+#define SET_MFW_FIELD(name, field, value) \
+ do { \
+ (name) &= ~(field ## _MASK); \
+ (name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK));\
+ } while (0)
+
+#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
+
/* Debug print definitions */
#define DP_ERR(cdev, fmt, ...) \
do { \
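GET_MFW_FIELD()/SET_MFW_FIELD() mirror the existing GET_FIELD() above but key off *_OFFSET rather than *_SHIFT macros, matching the management-firmware register layout. An illustration with made-up field macros (EXAMPLE_FIELD_* are hypothetical, not real MFW fields):

	#define EXAMPLE_FIELD_MASK	0x0000ff00
	#define EXAMPLE_FIELD_OFFSET	8

	static u32 example_mfw_roundtrip(void)
	{
		u32 reg = 0;

		SET_MFW_FIELD(reg, EXAMPLE_FIELD, 0x5a);	/* reg == 0x5a00 */
		return GET_MFW_FIELD(reg, EXAMPLE_FIELD);	/* returns 0x5a */
	}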
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index 5eb022953aca..1313c34d9a68 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -52,6 +52,12 @@ enum qed_ll2_conn_type {
QED_LL2_TYPE_ROCE,
QED_LL2_TYPE_IWARP,
QED_LL2_TYPE_RESERVED3,
+ MAX_QED_LL2_CONN_TYPE
+};
+
+enum qed_ll2_rx_conn_type {
+ QED_LL2_RX_TYPE_LEGACY,
+ QED_LL2_RX_TYPE_CTX,
MAX_QED_LL2_RX_CONN_TYPE
};
@@ -165,6 +171,7 @@ struct qed_ll2_cbs {
};
struct qed_ll2_acquire_data_inputs {
+ enum qed_ll2_rx_conn_type rx_conn_type;
enum qed_ll2_conn_type conn_type;
u16 mtu;
u16 rx_num_desc;
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
index 505c0b48a761..9a973ffbbff5 100644
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -107,8 +107,9 @@ struct scsi_drv_cmdq {
struct scsi_init_func_params {
__le16 num_tasks;
u8 log_page_size;
+ u8 log_page_size_conn;
u8 debug_mode;
- u8 reserved2[12];
+ u8 reserved2[11];
};
/* SCSI RQ/CQ/CMDQ firmware function init parameters */
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index 487fd9412d10..38893e4dd0f0 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -275,6 +275,61 @@ struct sfp_diag {
__be16 cal_v_offset;
} __packed;
+/* SFF8024 defined constants */
+enum {
+ SFF8024_ID_UNK = 0x00,
+ SFF8024_ID_SFF_8472 = 0x02,
+ SFF8024_ID_SFP = 0x03,
+ SFF8024_ID_DWDM_SFP = 0x0b,
+ SFF8024_ID_QSFP_8438 = 0x0c,
+ SFF8024_ID_QSFP_8436_8636 = 0x0d,
+ SFF8024_ID_QSFP28_8636 = 0x11,
+
+ SFF8024_ENCODING_UNSPEC = 0x00,
+ SFF8024_ENCODING_8B10B = 0x01,
+ SFF8024_ENCODING_4B5B = 0x02,
+ SFF8024_ENCODING_NRZ = 0x03,
+ SFF8024_ENCODING_8472_MANCHESTER= 0x04,
+ SFF8024_ENCODING_8472_SONET = 0x05,
+ SFF8024_ENCODING_8472_64B66B = 0x06,
+ SFF8024_ENCODING_8436_MANCHESTER= 0x06,
+ SFF8024_ENCODING_8436_SONET = 0x04,
+ SFF8024_ENCODING_8436_64B66B = 0x05,
+ SFF8024_ENCODING_256B257B = 0x07,
+ SFF8024_ENCODING_PAM4 = 0x08,
+
+ SFF8024_CONNECTOR_UNSPEC = 0x00,
+ /* codes 01-05 are not applicable to SFP, but some modules have a single SC connector */
+ SFF8024_CONNECTOR_SC = 0x01,
+ SFF8024_CONNECTOR_FIBERJACK = 0x06,
+ SFF8024_CONNECTOR_LC = 0x07,
+ SFF8024_CONNECTOR_MT_RJ = 0x08,
+ SFF8024_CONNECTOR_MU = 0x09,
+ SFF8024_CONNECTOR_SG = 0x0a,
+ SFF8024_CONNECTOR_OPTICAL_PIGTAIL= 0x0b,
+ SFF8024_CONNECTOR_MPO_1X12 = 0x0c,
+ SFF8024_CONNECTOR_MPO_2X16 = 0x0d,
+ SFF8024_CONNECTOR_HSSDC_II = 0x20,
+ SFF8024_CONNECTOR_COPPER_PIGTAIL= 0x21,
+ SFF8024_CONNECTOR_RJ45 = 0x22,
+ SFF8024_CONNECTOR_NOSEPARATE = 0x23,
+ SFF8024_CONNECTOR_MXC_2X16 = 0x24,
+
+ SFF8024_ECC_UNSPEC = 0x00,
+ SFF8024_ECC_100G_25GAUI_C2M_AOC = 0x01,
+ SFF8024_ECC_100GBASE_SR4_25GBASE_SR = 0x02,
+ SFF8024_ECC_100GBASE_LR4_25GBASE_LR = 0x03,
+ SFF8024_ECC_100GBASE_ER4_25GBASE_ER = 0x04,
+ SFF8024_ECC_100GBASE_SR10 = 0x05,
+ SFF8024_ECC_100GBASE_CR4 = 0x0b,
+ SFF8024_ECC_25GBASE_CR_S = 0x0c,
+ SFF8024_ECC_25GBASE_CR_N = 0x0d,
+ SFF8024_ECC_10GBASE_T_SFI = 0x16,
+ SFF8024_ECC_10GBASE_T_SR = 0x1c,
+ SFF8024_ECC_5GBASE_T = 0x1d,
+ SFF8024_ECC_2_5GBASE_T = 0x1e,
+};
+
/* SFP EEPROM registers */
enum {
SFP_PHYS_ID = 0x00,
@@ -309,34 +364,7 @@ enum {
SFP_SFF8472_COMPLIANCE = 0x5e,
SFP_CC_EXT = 0x5f,
- SFP_PHYS_ID_SFF = 0x02,
- SFP_PHYS_ID_SFP = 0x03,
SFP_PHYS_EXT_ID_SFP = 0x04,
- SFP_CONNECTOR_UNSPEC = 0x00,
- /* codes 01-05 not supportable on SFP, but some modules have single SC */
- SFP_CONNECTOR_SC = 0x01,
- SFP_CONNECTOR_FIBERJACK = 0x06,
- SFP_CONNECTOR_LC = 0x07,
- SFP_CONNECTOR_MT_RJ = 0x08,
- SFP_CONNECTOR_MU = 0x09,
- SFP_CONNECTOR_SG = 0x0a,
- SFP_CONNECTOR_OPTICAL_PIGTAIL = 0x0b,
- SFP_CONNECTOR_MPO_1X12 = 0x0c,
- SFP_CONNECTOR_MPO_2X16 = 0x0d,
- SFP_CONNECTOR_HSSDC_II = 0x20,
- SFP_CONNECTOR_COPPER_PIGTAIL = 0x21,
- SFP_CONNECTOR_RJ45 = 0x22,
- SFP_CONNECTOR_NOSEPARATE = 0x23,
- SFP_CONNECTOR_MXC_2X16 = 0x24,
- SFP_ENCODING_UNSPEC = 0x00,
- SFP_ENCODING_8B10B = 0x01,
- SFP_ENCODING_4B5B = 0x02,
- SFP_ENCODING_NRZ = 0x03,
- SFP_ENCODING_8472_MANCHESTER = 0x04,
- SFP_ENCODING_8472_SONET = 0x05,
- SFP_ENCODING_8472_64B66B = 0x06,
- SFP_ENCODING_256B257B = 0x07,
- SFP_ENCODING_PAM4 = 0x08,
SFP_OPTIONS_HIGH_POWER_LEVEL = BIT(13),
SFP_OPTIONS_PAGING_A2 = BIT(12),
SFP_OPTIONS_RETIMER = BIT(11),
@@ -479,6 +507,8 @@ struct sfp_bus;
* @module_insert: called after a module has been detected to determine
* whether the module is supported for the upstream device.
* @module_remove: called after the module has been removed.
+ * @module_start: called after the PHY probe step
+ * @module_stop: called before the PHY is removed
* @link_down: called when the link is non-operational for whatever
* reason.
* @link_up: called when the link is operational.
@@ -492,6 +522,8 @@ struct sfp_upstream_ops {
void (*detach)(void *priv, struct sfp_bus *bus);
int (*module_insert)(void *priv, const struct sfp_eeprom_id *id);
void (*module_remove)(void *priv);
+ int (*module_start)(void *priv);
+ void (*module_stop)(void *priv);
void (*link_down)(void *priv);
void (*link_up)(void *priv);
int (*connect_phy)(void *priv, struct phy_device *);
@@ -501,10 +533,10 @@ struct sfp_upstream_ops {
#if IS_ENABLED(CONFIG_SFP)
int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support);
+bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support);
phy_interface_t sfp_select_interface(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id,
unsigned long *link_modes);
int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo);
@@ -525,6 +557,12 @@ static inline int sfp_parse_port(struct sfp_bus *bus,
return PORT_OTHER;
}
+static inline bool sfp_may_have_phy(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id)
+{
+ return false;
+}
+
static inline void sfp_parse_support(struct sfp_bus *bus,
const struct sfp_eeprom_id *id,
unsigned long *support)
@@ -532,7 +570,6 @@ static inline void sfp_parse_support(struct sfp_bus *bus,
}
static inline phy_interface_t sfp_select_interface(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id,
unsigned long *link_modes)
{
return PHY_INTERFACE_MODE_NA;
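sfp_select_interface() no longer takes the EEPROM id (the sfp bus caches it), and the new sfp_may_have_phy() lets the upstream decide whether to wait for a copper module's PHY; both gain no-op stubs when CONFIG_SFP is disabled. A hedged sketch of an upstream module_insert handler:

	static int example_module_insert(struct sfp_bus *bus,
					 const struct sfp_eeprom_id *id,
					 unsigned long *link_modes)
	{
		phy_interface_t iface;

		/* copper modules may attach a PHY later via connect_phy() */
		if (sfp_may_have_phy(bus, id))
			return 0;

		/* note the dropped EEPROM id argument */
		iface = sfp_select_interface(bus, link_modes);
		return iface == PHY_INTERFACE_MODE_NA ? -EINVAL : 0;
	}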
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index e9133bcf0544..3d13a4b717e9 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -592,6 +592,8 @@ enum {
SKB_GSO_UDP = 1 << 16,
SKB_GSO_UDP_L4 = 1 << 17,
+
+ SKB_GSO_FRAGLIST = 1 << 18,
};
#if BITS_PER_LONG > 32
@@ -1478,6 +1480,11 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb)
skb->next = NULL;
}
+/* Iterate through singly-linked GSO fragments of an skb. */
+#define skb_list_walk_safe(first, skb, next_skb) \
+ for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
+ (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
+
static inline void skb_list_del_init(struct sk_buff *skb)
{
__list_del_entry(&skb->list);
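skb_list_walk_safe() saves the next pointer before the body runs, so the current segment may be unlinked or freed inside the loop. Typical use is walking the segment chain returned by skb_gso_segment(); a rough sketch:

	static void example_xmit_segs(struct sk_buff *segs)
	{
		struct sk_buff *skb, *next;

		skb_list_walk_safe(segs, skb, next) {
			skb_mark_not_on_list(skb);
			/* hand each segment on; error handling elided */
			dev_queue_xmit(skb);
		}
	}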
@@ -3459,7 +3466,8 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
-int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
+int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
+ int *err, long *timeo_p,
const struct sk_buff *skb);
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
struct sk_buff_head *queue,
@@ -3468,12 +3476,16 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
struct sk_buff *skb),
int *off, int *err,
struct sk_buff **last);
-struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
+struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
+ struct sk_buff_head *queue,
+ unsigned int flags,
void (*destructor)(struct sock *sk,
struct sk_buff *skb),
int *off, int *err,
struct sk_buff **last);
-struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+struct sk_buff *__skb_recv_datagram(struct sock *sk,
+ struct sk_buff_head *sk_queue,
+ unsigned int flags,
void (*destructor)(struct sock *sk,
struct sk_buff *skb),
int *off, int *err);
@@ -3523,6 +3535,8 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet);
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
+struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
+ unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
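skb_segment_list() is the fraglist counterpart of skb_segment(): it restores the chained skbs of a SKB_GSO_FRAGLIST packet instead of re-slicing the payload. A hedged sketch of how a .gso_segment callback might branch on the GSO type:

	static struct sk_buff *example_gso_segment(struct sk_buff *skb,
						   netdev_features_t features)
	{
		if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
			return skb_segment_list(skb, features,
						skb_mac_header_len(skb));

		return skb_segment(skb, features);
	}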
@@ -4092,6 +4106,9 @@ enum skb_ext_id {
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
TC_SKB_EXT,
#endif
+#if IS_ENABLED(CONFIG_MPTCP)
+ SKB_EXT_MPTCP,
+#endif
SKB_EXT_NUM, /* must be last */
};
@@ -4112,6 +4129,9 @@ struct skb_ext {
char data[0] __aligned(8);
};
+struct skb_ext *__skb_ext_alloc(void);
+void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
+ struct skb_ext *ext);
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_put(struct skb_ext *ext);
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index d4bcd9387136..19190c609282 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -109,6 +109,18 @@ struct stmmac_axi {
bool axi_rb;
};
+#define EST_GCL 1024
+struct stmmac_est {
+ int enable;
+ u32 btr_offset[2];
+ u32 btr[2];
+ u32 ctr[2];
+ u32 ter;
+ u32 gcl_unaligned[EST_GCL];
+ u32 gcl[EST_GCL];
+ u32 gcl_size;
+};
+
struct stmmac_rxq_cfg {
u8 mode_to_use;
u32 chan;
@@ -127,6 +139,7 @@ struct stmmac_txq_cfg {
u32 low_credit;
bool use_prio;
u32 prio;
+ int tbs_en;
};
struct plat_stmmacenet_data {
@@ -139,6 +152,7 @@ struct plat_stmmacenet_data {
struct device_node *phylink_node;
struct device_node *mdio_node;
struct stmmac_dma_cfg *dma_cfg;
+ struct stmmac_est *est;
int clk_csr;
int has_gmac;
int enh_desc;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index ca6f01531e64..1cf73e6f85ca 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -78,6 +78,27 @@ struct tcp_sack_block {
#define TCP_SACK_SEEN (1 << 0) /*1 = peer is SACK capable, */
#define TCP_DSACK_SEEN (1 << 2) /*1 = DSACK was received from peer*/
+#if IS_ENABLED(CONFIG_MPTCP)
+struct mptcp_options_received {
+ u64 sndr_key;
+ u64 rcvr_key;
+ u64 data_ack;
+ u64 data_seq;
+ u32 subflow_seq;
+ u16 data_len;
+ u8 mp_capable : 1,
+ mp_join : 1,
+ dss : 1;
+ u8 use_map:1,
+ dsn64:1,
+ data_fin:1,
+ use_ack:1,
+ ack64:1,
+ mpc_map:1,
+ __unused:2;
+};
+#endif
+
struct tcp_options_received {
/* PAWS/RTTM data */
int ts_recent_stamp;/* Time we stored ts_recent (for aging) */
@@ -95,6 +116,9 @@ struct tcp_options_received {
u8 num_sacks; /* Number of SACK blocks */
u16 user_mss; /* mss requested by user in ioctl */
u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
+#if IS_ENABLED(CONFIG_MPTCP)
+ struct mptcp_options_received mptcp;
+#endif
};
static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
@@ -104,6 +128,11 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
#if IS_ENABLED(CONFIG_SMC)
rx_opt->smc_ok = 0;
#endif
+#if IS_ENABLED(CONFIG_MPTCP)
+ rx_opt->mptcp.mp_capable = 0;
+ rx_opt->mptcp.mp_join = 0;
+ rx_opt->mptcp.dss = 0;
+#endif
}
/* This is the max number of SACKS that we'll generate and process. It's safe
@@ -119,6 +148,9 @@ struct tcp_request_sock {
const struct tcp_request_sock_ops *af_specific;
u64 snt_synack; /* first SYNACK sent time */
bool tfo_listener;
+#if IS_ENABLED(CONFIG_MPTCP)
+ bool is_mptcp;
+#endif
u32 txhash;
u32 rcv_isn;
u32 snt_isn;
@@ -354,6 +386,8 @@ struct tcp_sock {
#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0
#endif
+ u16 timeout_rehash; /* Timeout-triggered rehash attempts */
+
u32 rcv_ooopack; /* Received out-of-order packets, for tcpinfo */
/* Receiver side RTT estimation */
@@ -379,6 +413,9 @@ struct tcp_sock {
u32 mtu_info; /* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG
* while socket was owned by user.
*/
+#if IS_ENABLED(CONFIG_MPTCP)
+ bool is_mptcp;
+#endif
#ifdef CONFIG_TCP_MD5SIG
/* TCP AF-Specific parts; only used by MD5 Signature support so far */
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index d8860f2d0976..b0bff3083278 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -253,7 +253,7 @@ extern int usbnet_open(struct net_device *net);
extern int usbnet_stop(struct net_device *net);
extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb,
struct net_device *net);
-extern void usbnet_tx_timeout(struct net_device *net);
+extern void usbnet_tx_timeout(struct net_device *net, unsigned int txqueue);
extern int usbnet_change_mtu(struct net_device *net, int new_mtu);
extern int usbnet_get_endpoints(struct usbnet *, struct usb_interface *);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 1bab88184d3c..a088349dd94f 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -437,7 +437,7 @@ static inline void addrconf_addr_solict_mult(const struct in6_addr *addr,
static inline bool ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
- __be64 *p = (__be64 *)addr;
+ __be64 *p = (__force __be64 *)addr;
return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) | (p[1] ^ cpu_to_be64(1))) == 0UL;
#else
return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
@@ -449,7 +449,7 @@ static inline bool ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
static inline bool ipv6_addr_is_ll_all_routers(const struct in6_addr *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
- __be64 *p = (__be64 *)addr;
+ __be64 *p = (__force __be64 *)addr;
return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) | (p[1] ^ cpu_to_be64(2))) == 0UL;
#else
return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
@@ -466,7 +466,7 @@ static inline bool ipv6_addr_is_isatap(const struct in6_addr *addr)
static inline bool ipv6_addr_is_solict_mult(const struct in6_addr *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
- __be64 *p = (__be64 *)addr;
+ __be64 *p = (__force __be64 *)addr;
return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) |
((p[1] ^ cpu_to_be64(0x00000001ff000000UL)) &
cpu_to_be64(0xffffffffff000000UL))) == 0UL;
@@ -481,7 +481,7 @@ static inline bool ipv6_addr_is_solict_mult(const struct in6_addr *addr)
static inline bool ipv6_addr_is_all_snoopers(const struct in6_addr *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
- __be64 *p = (__be64 *)addr;
+ __be64 *p = (__force __be64 *)addr;
return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) |
(p[1] ^ cpu_to_be64(0x6a))) == 0UL;
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 3426d6dacc45..17e10fba2152 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -41,6 +41,10 @@ struct unix_skb_parms {
u32 consumed;
} __randomize_layout;
+struct scm_stat {
+ u32 nr_fds;
+};
+
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
@@ -65,6 +69,7 @@ struct unix_sock {
#define UNIX_GC_MAYBE_CYCLE 1
struct socket_wq peer_wq;
wait_queue_entry_t peer_wake;
+ struct scm_stat scm_stat;
};
static inline struct unix_sock *unix_sk(const struct sock *sk)
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 4206dc6d813f..b1c717286993 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -98,6 +98,8 @@ struct vsock_transport_send_notify_data {
#define VSOCK_TRANSPORT_F_G2H 0x00000002
/* Transport provides DGRAM communication */
#define VSOCK_TRANSPORT_F_DGRAM 0x00000004
+/* Transport provides local (loopback) communication */
+#define VSOCK_TRANSPORT_F_LOCAL 0x00000008
struct vsock_transport {
struct module *module;
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index fabee6db0abb..e42bb8e03c09 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -129,6 +129,8 @@ void bt_warn(const char *fmt, ...);
__printf(1, 2)
void bt_err(const char *fmt, ...);
__printf(1, 2)
+void bt_warn_ratelimited(const char *fmt, ...);
+__printf(1, 2)
void bt_err_ratelimited(const char *fmt, ...);
#define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__)
@@ -136,8 +138,6 @@ void bt_err_ratelimited(const char *fmt, ...);
#define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__)
#define BT_DBG(fmt, ...) pr_debug(fmt "\n", ##__VA_ARGS__)
-#define BT_ERR_RATELIMITED(fmt, ...) bt_err_ratelimited(fmt "\n", ##__VA_ARGS__)
-
#define bt_dev_info(hdev, fmt, ...) \
BT_INFO("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
#define bt_dev_warn(hdev, fmt, ...) \
@@ -147,8 +147,10 @@ void bt_err_ratelimited(const char *fmt, ...);
#define bt_dev_dbg(hdev, fmt, ...) \
BT_DBG("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+#define bt_dev_warn_ratelimited(hdev, fmt, ...) \
+ bt_warn_ratelimited("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
#define bt_dev_err_ratelimited(hdev, fmt, ...) \
- BT_ERR_RATELIMITED("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+ bt_err_ratelimited("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
/* Connection and socket states */
enum {
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 5bc1e30dedde..6293bdd7d862 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -27,6 +27,7 @@
#define HCI_MAX_ACL_SIZE 1024
#define HCI_MAX_SCO_SIZE 255
+#define HCI_MAX_ISO_SIZE 251
#define HCI_MAX_EVENT_SIZE 260
#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
@@ -303,6 +304,7 @@ enum {
#define HCI_ACLDATA_PKT 0x02
#define HCI_SCODATA_PKT 0x03
#define HCI_EVENT_PKT 0x04
+#define HCI_ISODATA_PKT 0x05
#define HCI_DIAG_PKT 0xf0
#define HCI_VENDOR_PKT 0xff
@@ -352,6 +354,15 @@ enum {
#define ACL_ACTIVE_BCAST 0x04
#define ACL_PICO_BCAST 0x08
+/* ISO PB flags */
+#define ISO_START 0x00
+#define ISO_CONT 0x01
+#define ISO_SINGLE 0x02
+#define ISO_END 0x03
+
+/* ISO TS flags */
+#define ISO_TS 0x01
+
/* Baseband links */
#define SCO_LINK 0x00
#define ACL_LINK 0x01
@@ -359,6 +370,7 @@ enum {
/* Low Energy links do not have defined link type. Use invented one */
#define LE_LINK 0x80
#define AMP_LINK 0x81
+#define ISO_LINK 0x82
#define INVALID_LINK 0xff
/* LMP features */
@@ -440,6 +452,8 @@ enum {
#define HCI_LE_PHY_2M 0x01
#define HCI_LE_PHY_CODED 0x08
#define HCI_LE_CHAN_SEL_ALG2 0x40
+#define HCI_LE_CIS_MASTER 0x10
+#define HCI_LE_CIS_SLAVE 0x20
/* Connection modes */
#define HCI_CM_ACTIVE 0x0000
@@ -1718,6 +1732,86 @@ struct hci_cp_le_set_adv_set_rand_addr {
bdaddr_t bdaddr;
} __packed;
+#define HCI_OP_LE_READ_BUFFER_SIZE_V2 0x2060
+struct hci_rp_le_read_buffer_size_v2 {
+ __u8 status;
+ __le16 acl_mtu;
+ __u8 acl_max_pkt;
+ __le16 iso_mtu;
+ __u8 iso_max_pkt;
+} __packed;
+
+#define HCI_OP_LE_READ_ISO_TX_SYNC 0x2061
+struct hci_cp_le_read_iso_tx_sync {
+ __le16 handle;
+} __packed;
+
+struct hci_rp_le_read_iso_tx_sync {
+ __u8 status;
+ __le16 handle;
+ __le16 seq;
+ __le32 imestamp;
+ __u8 offset[3];
+} __packed;
+
+#define HCI_OP_LE_SET_CIG_PARAMS 0x2062
+struct hci_cis_params {
+ __u8 cis_id;
+ __le16 m_sdu;
+ __le16 s_sdu;
+ __u8 m_phy;
+ __u8 s_phy;
+ __u8 m_rtn;
+ __u8 s_rtn;
+} __packed;
+
+struct hci_cp_le_set_cig_params {
+ __u8 cig_id;
+ __u8 m_interval[3];
+ __u8 s_interval[3];
+ __u8 sca;
+ __u8 packing;
+ __u8 framing;
+ __le16 m_latency;
+ __le16 s_latency;
+ __u8 num_cis;
+ struct hci_cis_params cis[0];
+} __packed;
+
+struct hci_rp_le_set_cig_params {
+ __u8 status;
+ __u8 cig_id;
+ __u8 num_handles;
+ __le16 handle[0];
+} __packed;
+
+#define HCI_OP_LE_CREATE_CIS 0x2064
+struct hci_cis {
+ __le16 cis_handle;
+ __le16 acl_handle;
+} __packed;
+
+struct hci_cp_le_create_cis {
+ __u8 num_cis;
+ struct hci_cis cis[0];
+} __packed;
+
+#define HCI_OP_LE_REMOVE_CIG 0x2065
+struct hci_cp_le_remove_cig {
+ __u8 cig_id;
+} __packed;
+
+#define HCI_OP_LE_ACCEPT_CIS 0x2066
+struct hci_cp_le_accept_cis {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_REJECT_CIS 0x2067
+struct hci_cp_le_reject_cis {
+ __le16 handle;
+ __u8 reason;
+} __packed;
+
/* ---- HCI Events ---- */
#define HCI_EV_INQUIRY_COMPLETE 0x01
@@ -2186,6 +2280,14 @@ struct hci_ev_le_direct_adv_info {
__s8 rssi;
} __packed;
+#define HCI_EV_LE_PHY_UPDATE_COMPLETE 0x0c
+struct hci_ev_le_phy_update_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 tx_phy;
+ __u8 rx_phy;
+} __packed;
+
#define HCI_EV_LE_EXT_ADV_REPORT 0x0d
struct hci_ev_le_ext_adv_report {
__le16 evt_type;
@@ -2226,6 +2328,34 @@ struct hci_evt_le_ext_adv_set_term {
__u8 num_evts;
} __packed;
+#define HCI_EVT_LE_CIS_ESTABLISHED 0x19
+struct hci_evt_le_cis_established {
+ __u8 status;
+ __le16 handle;
+ __u8 cig_sync_delay[3];
+ __u8 cis_sync_delay[3];
+ __u8 m_latency[3];
+ __u8 s_latency[3];
+ __u8 m_phy;
+ __u8 s_phy;
+ __u8 nse;
+ __u8 m_bn;
+ __u8 s_bn;
+ __u8 m_ft;
+ __u8 s_ft;
+ __le16 m_mtu;
+ __le16 s_mtu;
+ __le16 interval;
+} __packed;
+
+#define HCI_EVT_LE_CIS_REQ 0x1a
+struct hci_evt_le_cis_req {
+ __le16 acl_handle;
+ __le16 cis_handle;
+ __u8 cig_id;
+ __u8 cis_id;
+} __packed;
+
#define HCI_EV_VENDOR 0xff
/* Internal events generated by Bluetooth stack */
@@ -2254,6 +2384,7 @@ struct hci_ev_si_security {
#define HCI_EVENT_HDR_SIZE 2
#define HCI_ACL_HDR_SIZE 4
#define HCI_SCO_HDR_SIZE 3
+#define HCI_ISO_HDR_SIZE 4
struct hci_command_hdr {
__le16 opcode; /* OCF & OGF */
@@ -2275,6 +2406,30 @@ struct hci_sco_hdr {
__u8 dlen;
} __packed;
+struct hci_iso_hdr {
+ __le16 handle;
+ __le16 dlen;
+ __u8 data[0];
+} __packed;
+
+/* ISO data packet status flags */
+#define HCI_ISO_STATUS_VALID 0x00
+#define HCI_ISO_STATUS_INVALID 0x01
+#define HCI_ISO_STATUS_NOP 0x02
+
+#define HCI_ISO_DATA_HDR_SIZE 4
+struct hci_iso_data_hdr {
+ __le16 sn;
+ __le16 slen;
+};
+
+#define HCI_ISO_TS_DATA_HDR_SIZE 8
+struct hci_iso_ts_data_hdr {
+ __le32 ts;
+ __le16 sn;
+ __le16 slen;
+};
+
static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb)
{
return (struct hci_event_hdr *) skb->data;
@@ -2300,4 +2455,14 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
#define hci_handle(h) (h & 0x0fff)
#define hci_flags(h) (h >> 12)
+/* ISO handle and flags pack/unpack */
+#define hci_iso_flags_pb(f) (f & 0x0003)
+#define hci_iso_flags_ts(f) ((f >> 2) & 0x0001)
+#define hci_iso_flags_pack(pb, ts) ((pb & 0x03) | ((ts & 0x01) << 2))
+
+/* ISO data length and flags pack/unpack */
+#define hci_iso_data_len_pack(h, f) ((__u16) ((h) | ((f) << 14)))
+#define hci_iso_data_len(h) ((h) & 0x3fff)
+#define hci_iso_data_flags(h) ((h) >> 14)
+
#endif /* __HCI_H */
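The ISO pack/unpack helpers put a 14-bit data length and 2-bit status flags into one 16-bit word, analogous to the existing ACL handle/flags macros. For example:

	static u16 example_iso_len_word(u16 payload_len)
	{
		return hci_iso_data_len_pack(payload_len, HCI_ISO_STATUS_VALID);
	}

	static bool example_iso_len_ok(u16 dlen_word)
	{
		return hci_iso_data_flags(dlen_word) == HCI_ISO_STATUS_VALID &&
		       hci_iso_data_len(dlen_word) <= HCI_MAX_ISO_SIZE;
	}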
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index b689aceb636b..89ecf0a80aa1 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -118,6 +118,13 @@ struct bt_uuid {
u8 svc_hint;
};
+struct blocked_key {
+ struct list_head list;
+ struct rcu_head rcu;
+ u8 type;
+ u8 val[16];
+};
+
struct smp_csrk {
bdaddr_t bdaddr;
u8 bdaddr_type;
@@ -397,6 +404,7 @@ struct hci_dev {
struct list_head le_conn_params;
struct list_head pend_le_conns;
struct list_head pend_le_reports;
+ struct list_head blocked_keys;
struct hci_dev_stats stat;
@@ -493,6 +501,8 @@ struct hci_conn {
__u16 le_supv_timeout;
__u8 le_adv_data[HCI_MAX_AD_LENGTH];
__u8 le_adv_data_len;
+ __u8 le_tx_phy;
+ __u8 le_rx_phy;
__s8 rssi;
__s8 tx_power;
__s8 max_tx_power;
@@ -1121,6 +1131,8 @@ struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 addr_type, u8 val[16], bdaddr_t *rpa);
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type);
+bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]);
+void hci_blocked_keys_clear(struct hci_dev *hdev);
void hci_smp_irks_clear(struct hci_dev *hdev);
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
index 240786b04a46..2d5fcda1bcd0 100644
--- a/include/net/bluetooth/hci_mon.h
+++ b/include/net/bluetooth/hci_mon.h
@@ -49,6 +49,8 @@ struct hci_mon_hdr {
#define HCI_MON_CTRL_CLOSE 15
#define HCI_MON_CTRL_COMMAND 16
#define HCI_MON_CTRL_EVENT 17
+#define HCI_MON_ISO_TX_PKT 18
+#define HCI_MON_ISO_RX_PKT 19
struct hci_mon_new_index {
__u8 type;
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 9cee7ddc6741..a90666af05bd 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -654,6 +654,23 @@ struct mgmt_cp_set_phy_confguration {
} __packed;
#define MGMT_SET_PHY_CONFIGURATION_SIZE 4
+#define MGMT_OP_SET_BLOCKED_KEYS 0x0046
+
+#define HCI_BLOCKED_KEY_TYPE_LINKKEY 0x00
+#define HCI_BLOCKED_KEY_TYPE_LTK 0x01
+#define HCI_BLOCKED_KEY_TYPE_IRK 0x02
+
+struct mgmt_blocked_key_info {
+ __u8 type;
+ __u8 val[16];
+} __packed;
+
+struct mgmt_cp_set_blocked_keys {
+ __le16 key_count;
+ struct mgmt_blocked_key_info keys[0];
+} __packed;
+#define MGMT_OP_SET_BLOCKED_KEYS_SIZE 2
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 38b4acb93f74..ce5cea428fdc 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -485,6 +485,10 @@ enum devlink_param_generic_id {
#define DEVLINK_INFO_VERSION_GENERIC_FW_UNDI "fw.undi"
/* NCSI support/handler version */
#define DEVLINK_INFO_VERSION_GENERIC_FW_NCSI "fw.ncsi"
+/* FW parameter set id */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_PSID "fw.psid"
+/* RoCE FW version */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_ROCE "fw.roce"
struct devlink_region;
struct devlink_info_req;
@@ -562,7 +566,7 @@ struct devlink_trap {
};
/* All traps must be documented in
- * Documentation/networking/devlink-trap.rst
+ * Documentation/networking/devlink/devlink-trap.rst
*/
enum devlink_trap_generic_id {
DEVLINK_TRAP_GENERIC_ID_SMAC_MC,
@@ -589,6 +593,9 @@ enum devlink_trap_generic_id {
DEVLINK_TRAP_GENERIC_ID_REJECT_ROUTE,
DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS,
DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS,
+ DEVLINK_TRAP_GENERIC_ID_NON_ROUTABLE,
+ DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC,
/* Add new generic trap IDs above */
__DEVLINK_TRAP_GENERIC_ID_MAX,
@@ -596,12 +603,13 @@ enum devlink_trap_generic_id {
};
/* All trap groups must be documented in
- * Documentation/networking/devlink-trap.rst
+ * Documentation/networking/devlink/devlink-trap.rst
*/
enum devlink_trap_group_generic_id {
DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS,
DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS,
DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS,
/* Add new generic trap group IDs above */
__DEVLINK_TRAP_GROUP_GENERIC_ID_MAX,
@@ -657,6 +665,12 @@ enum devlink_trap_group_generic_id {
"ipv4_lpm_miss"
#define DEVLINK_TRAP_GENERIC_NAME_IPV6_LPM_UNICAST_MISS \
"ipv6_lpm_miss"
+#define DEVLINK_TRAP_GENERIC_NAME_NON_ROUTABLE \
+ "non_routable_packet"
+#define DEVLINK_TRAP_GENERIC_NAME_DECAP_ERROR \
+ "decap_error"
+#define DEVLINK_TRAP_GENERIC_NAME_OVERLAY_SMAC_MC \
+ "overlay_smac_is_mc"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \
"l2_drops"
@@ -664,6 +678,8 @@ enum devlink_trap_group_generic_id {
"l3_drops"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_BUFFER_DROPS \
"buffer_drops"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_TUNNEL_DROPS \
+ "tunnel_drops"
#define DEVLINK_TRAP_GENERIC(_type, _init_action, _id, _group, _metadata_cap) \
{ \
@@ -1000,6 +1016,8 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
void
devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
enum devlink_health_reporter_state state);
+void
+devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter);
bool devlink_is_reload_failed(const struct devlink *devlink);
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 6767dc3f66c0..63495e3443ac 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -43,6 +43,7 @@ struct phylink_link_state;
#define DSA_TAG_PROTO_SJA1105_VALUE 13
#define DSA_TAG_PROTO_KSZ8795_VALUE 14
#define DSA_TAG_PROTO_OCELOT_VALUE 15
+#define DSA_TAG_PROTO_AR9331_VALUE 16
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
@@ -61,6 +62,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_SJA1105 = DSA_TAG_PROTO_SJA1105_VALUE,
DSA_TAG_PROTO_KSZ8795 = DSA_TAG_PROTO_KSZ8795_VALUE,
DSA_TAG_PROTO_OCELOT = DSA_TAG_PROTO_OCELOT_VALUE,
+ DSA_TAG_PROTO_AR9331 = DSA_TAG_PROTO_AR9331_VALUE,
};
struct packet_type;
@@ -88,7 +90,6 @@ struct dsa_device_ops {
struct dsa_skb_cb {
struct sk_buff *clone;
- bool deferred_xmit;
};
struct __dsa_skb_cb {
@@ -190,9 +191,6 @@ struct dsa_port {
struct phylink *pl;
struct phylink_config pl_config;
- struct work_struct xmit_work;
- struct sk_buff_head xmit_queue;
-
struct list_head list;
/*
@@ -281,6 +279,11 @@ struct dsa_switch {
*/
bool vlan_filtering;
+ /* MAC PCS does not provide link state change interrupt, and requires
+ * polling. Flag passed on to PHYLINK.
+ */
+ bool pcs_poll;
+
size_t num_ports;
};
@@ -377,7 +380,8 @@ typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
bool is_static, void *data);
struct dsa_switch_ops {
enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
- int port);
+ int port,
+ enum dsa_tag_protocol mprot);
int (*setup)(struct dsa_switch *ds);
void (*teardown)(struct dsa_switch *ds);
@@ -562,11 +566,6 @@ struct dsa_switch_ops {
bool (*port_rxtstamp)(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type);
- /*
- * Deferred frame Tx
- */
- netdev_tx_t (*port_deferred_xmit)(struct dsa_switch *ds, int port,
- struct sk_buff *skb);
/* Devlink parameters */
int (*devlink_param_get)(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
diff --git a/include/net/dsfield.h b/include/net/dsfield.h
index 1a245ee10c95..a59a57ffc546 100644
--- a/include/net/dsfield.h
+++ b/include/net/dsfield.h
@@ -21,7 +21,7 @@ static inline __u8 ipv4_get_dsfield(const struct iphdr *iph)
static inline __u8 ipv6_get_dsfield(const struct ipv6hdr *ipv6h)
{
- return ntohs(*(const __be16 *)ipv6h) >> 4;
+ return ntohs(*(__force const __be16 *)ipv6h) >> 4;
}
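The change here only adds a __force annotation to quiet sparse; the arithmetic is unchanged. The first 16 bits of an IPv6 header are the 4-bit version followed by the 8-bit Traffic Class, so converting them to host order and shifting right by four yields the DS field. A small standalone sketch of the same computation:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* First two header bytes: version 6, Traffic Class 0xb8 (DSCP EF, ECN 0). */
	uint8_t hdr[2] = { 0x6b, 0x80 };
	uint16_t be;
	uint8_t dsfield;

	memcpy(&be, hdr, sizeof(be));
	dsfield = ntohs(be) >> 4;		/* drops the version nibble */
	printf("dsfield = 0x%02x\n", dsfield);	/* prints 0xb8 */
	return 0;
}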
diff --git a/include/net/espintcp.h b/include/net/espintcp.h
new file mode 100644
index 000000000000..dd7026a00066
--- /dev/null
+++ b/include/net/espintcp.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_ESPINTCP_H
+#define _NET_ESPINTCP_H
+
+#include <net/strparser.h>
+#include <linux/skmsg.h>
+
+void __init espintcp_init(void);
+
+int espintcp_push_skb(struct sock *sk, struct sk_buff *skb);
+int espintcp_queue_out(struct sock *sk, struct sk_buff *skb);
+bool tcp_is_ulp_esp(struct sock *sk);
+
+struct espintcp_msg {
+ struct sk_buff *skb;
+ struct sk_msg skmsg;
+ int offset;
+ int len;
+};
+
+struct espintcp_ctx {
+ struct strparser strp;
+ struct sk_buff_head ike_queue;
+ struct sk_buff_head out_queue;
+ struct espintcp_msg partial;
+ void (*saved_data_ready)(struct sock *sk);
+ void (*saved_write_space)(struct sock *sk);
+ struct work_struct work;
+ bool tx_running;
+};
+
+static inline struct espintcp_ctx *espintcp_getctx(const struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ /* RCU is only needed for diag */
+ return (__force void *)icsk->icsk_ulp_data;
+}
+#endif
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index f1535f172935..fd60a8ac02ee 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -192,7 +192,9 @@ struct fib6_info {
dst_nopolicy:1,
dst_host:1,
fib6_destroying:1,
- unused:3;
+ offload:1,
+ trap:1,
+ unused:1;
struct rcu_head rcu;
struct nexthop *nh;
@@ -329,6 +331,13 @@ static inline void fib6_info_release(struct fib6_info *f6i)
call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
}
+static inline void fib6_info_hw_flags_set(struct fib6_info *f6i, bool offload,
+ bool trap)
+{
+ f6i->offload = offload;
+ f6i->trap = trap;
+}
+
enum fib6_walk_state {
#ifdef CONFIG_IPV6_SUBTREES
FWS_S,
@@ -487,6 +496,7 @@ int call_fib6_multipath_entry_notifiers(struct net *net,
struct fib6_info *rt,
unsigned int nsiblings,
struct netlink_ext_ack *extack);
+int call_fib6_entry_notifiers_replace(struct net *net, struct fib6_info *rt);
void fib6_rt_update(struct net *net, struct fib6_info *rt,
struct nl_info *info);
void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index b9cba41c6d4f..6a1ae49809de 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -204,6 +204,18 @@ __be32 fib_result_prefsrc(struct net *net, struct fib_result *res);
#define FIB_RES_DEV(res) (FIB_RES_NHC(res)->nhc_dev)
#define FIB_RES_OIF(res) (FIB_RES_NHC(res)->nhc_oif)
+struct fib_rt_info {
+ struct fib_info *fi;
+ u32 tb_id;
+ __be32 dst;
+ int dst_len;
+ u8 tos;
+ u8 type;
+ u8 offload:1,
+ trap:1,
+ unused:6;
+};
+
struct fib_entry_notifier_info {
struct fib_notifier_info info; /* must be first */
u32 dst;
@@ -464,6 +476,7 @@ int fib_nh_common_init(struct fib_nh_common *nhc, struct nlattr *fc_encap,
void fib_nh_common_release(struct fib_nh_common *nhc);
/* Exported by fib_trie.c */
+void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri);
void fib_trie_init(void);
struct fib_table *fib_trie_table(u32 id, struct fib_table *alias);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 4e95f6df508c..cec1a54401f2 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -1113,6 +1113,9 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int inet6_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk);
+int inet6_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
+int inet6_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags);
/*
* reassembly.c
diff --git a/include/net/macsec.h b/include/net/macsec.h
new file mode 100644
index 000000000000..92e43db8b566
--- /dev/null
+++ b/include/net/macsec.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * MACsec netdev header, used for h/w accelerated implementations.
+ *
+ * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
+ */
+#ifndef _NET_MACSEC_H_
+#define _NET_MACSEC_H_
+
+#include <linux/u64_stats_sync.h>
+#include <uapi/linux/if_link.h>
+#include <uapi/linux/if_macsec.h>
+
+typedef u64 __bitwise sci_t;
+
+#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
+
+/**
+ * struct macsec_key - SA key
+ * @id: user-provided key identifier
+ * @tfm: crypto struct, key storage
+ */
+struct macsec_key {
+ u8 id[MACSEC_KEYID_LEN];
+ struct crypto_aead *tfm;
+};
+
+struct macsec_rx_sc_stats {
+ __u64 InOctetsValidated;
+ __u64 InOctetsDecrypted;
+ __u64 InPktsUnchecked;
+ __u64 InPktsDelayed;
+ __u64 InPktsOK;
+ __u64 InPktsInvalid;
+ __u64 InPktsLate;
+ __u64 InPktsNotValid;
+ __u64 InPktsNotUsingSA;
+ __u64 InPktsUnusedSA;
+};
+
+struct macsec_rx_sa_stats {
+ __u32 InPktsOK;
+ __u32 InPktsInvalid;
+ __u32 InPktsNotValid;
+ __u32 InPktsNotUsingSA;
+ __u32 InPktsUnusedSA;
+};
+
+struct macsec_tx_sa_stats {
+ __u32 OutPktsProtected;
+ __u32 OutPktsEncrypted;
+};
+
+struct macsec_tx_sc_stats {
+ __u64 OutPktsProtected;
+ __u64 OutPktsEncrypted;
+ __u64 OutOctetsProtected;
+ __u64 OutOctetsEncrypted;
+};
+
+/**
+ * struct macsec_rx_sa - receive secure association
+ * @active: true if this SA is currently in use
+ * @next_pn: packet number expected for the next packet
+ * @lock: protects next_pn manipulations
+ * @key: key structure
+ * @stats: per-SA stats
+ */
+struct macsec_rx_sa {
+ struct macsec_key key;
+ spinlock_t lock;
+ u32 next_pn;
+ refcount_t refcnt;
+ bool active;
+ struct macsec_rx_sa_stats __percpu *stats;
+ struct macsec_rx_sc *sc;
+ struct rcu_head rcu;
+};
+
+struct pcpu_rx_sc_stats {
+ struct macsec_rx_sc_stats stats;
+ struct u64_stats_sync syncp;
+};
+
+struct pcpu_tx_sc_stats {
+ struct macsec_tx_sc_stats stats;
+ struct u64_stats_sync syncp;
+};
+
+/**
+ * struct macsec_rx_sc - receive secure channel
+ * @sci: secure channel identifier for this SC
+ * @active: channel is active
+ * @sa: array of secure associations
+ * @stats: per-SC stats
+ */
+struct macsec_rx_sc {
+ struct macsec_rx_sc __rcu *next;
+ sci_t sci;
+ bool active;
+ struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
+ struct pcpu_rx_sc_stats __percpu *stats;
+ refcount_t refcnt;
+ struct rcu_head rcu_head;
+};
+
+/**
+ * struct macsec_tx_sa - transmit secure association
+ * @active: true if this SA is currently in use
+ * @next_pn: packet number to use for the next packet
+ * @lock: protects next_pn manipulations
+ * @key: key structure
+ * @stats: per-SA stats
+ */
+struct macsec_tx_sa {
+ struct macsec_key key;
+ spinlock_t lock;
+ u32 next_pn;
+ refcount_t refcnt;
+ bool active;
+ struct macsec_tx_sa_stats __percpu *stats;
+ struct rcu_head rcu;
+};
+
+/**
+ * struct macsec_tx_sc - transmit secure channel
+ * @active:
+ * @encoding_sa: association number of the SA currently in use
+ * @encrypt: encrypt packets on transmit, or authenticate only
+ * @send_sci: always include the SCI in the SecTAG
+ * @end_station: set the End Station (ES) bit in the SecTAG
+ * @scb: single copy broadcast flag
+ * @sa: array of secure associations
+ * @stats: stats for this TXSC
+ */
+struct macsec_tx_sc {
+ bool active;
+ u8 encoding_sa;
+ bool encrypt;
+ bool send_sci;
+ bool end_station;
+ bool scb;
+ struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
+ struct pcpu_tx_sc_stats __percpu *stats;
+};
+
+/**
+ * struct macsec_secy - MACsec Security Entity
+ * @netdev: netdevice for this SecY
+ * @n_rx_sc: number of receive secure channels configured on this SecY
+ * @sci: secure channel identifier used for tx
+ * @key_len: length of keys used by the cipher suite
+ * @icv_len: length of ICV used by the cipher suite
+ * @validate_frames: validation mode
+ * @operational: MAC_Operational flag
+ * @protect_frames: enable protection for this SecY
+ * @replay_protect: enable packet number checks on receive
+ * @replay_window: size of the replay window
+ * @tx_sc: transmit secure channel
+ * @rx_sc: linked list of receive secure channels
+ */
+struct macsec_secy {
+ struct net_device *netdev;
+ unsigned int n_rx_sc;
+ sci_t sci;
+ u16 key_len;
+ u16 icv_len;
+ enum macsec_validation_type validate_frames;
+ bool operational;
+ bool protect_frames;
+ bool replay_protect;
+ u32 replay_window;
+ struct macsec_tx_sc tx_sc;
+ struct macsec_rx_sc __rcu *rx_sc;
+};
+
+/**
+ * struct macsec_context - MACsec context for hardware offloading
+ */
+struct macsec_context {
+ struct phy_device *phydev;
+ enum macsec_offload offload;
+
+ struct macsec_secy *secy;
+ struct macsec_rx_sc *rx_sc;
+ struct {
+ unsigned char assoc_num;
+ u8 key[MACSEC_KEYID_LEN];
+ union {
+ struct macsec_rx_sa *rx_sa;
+ struct macsec_tx_sa *tx_sa;
+ };
+ } sa;
+
+ u8 prepare:1;
+};
+
+/**
+ * struct macsec_ops - MACsec offloading operations
+ */
+struct macsec_ops {
+ /* Device wide */
+ int (*mdo_dev_open)(struct macsec_context *ctx);
+ int (*mdo_dev_stop)(struct macsec_context *ctx);
+ /* SecY */
+ int (*mdo_add_secy)(struct macsec_context *ctx);
+ int (*mdo_upd_secy)(struct macsec_context *ctx);
+ int (*mdo_del_secy)(struct macsec_context *ctx);
+ /* Security channels */
+ int (*mdo_add_rxsc)(struct macsec_context *ctx);
+ int (*mdo_upd_rxsc)(struct macsec_context *ctx);
+ int (*mdo_del_rxsc)(struct macsec_context *ctx);
+ /* Security associations */
+ int (*mdo_add_rxsa)(struct macsec_context *ctx);
+ int (*mdo_upd_rxsa)(struct macsec_context *ctx);
+ int (*mdo_del_rxsa)(struct macsec_context *ctx);
+ int (*mdo_add_txsa)(struct macsec_context *ctx);
+ int (*mdo_upd_txsa)(struct macsec_context *ctx);
+ int (*mdo_del_txsa)(struct macsec_context *ctx);
+};
+
+void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
+
+#endif /* _NET_MACSEC_H_ */
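A hardware driver opts into MACsec offload by exposing a struct macsec_ops table; each callback gets the macsec_context describing the SecY, channel or SA being acted on. A minimal sketch of such a table, with the struct layout copied from the hunk above and every foo_* name invented for illustration (not any real driver's code):

/* struct macsec_context is only used through a pointer here. */
struct macsec_context;

struct macsec_ops {
	int (*mdo_dev_open)(struct macsec_context *ctx);
	int (*mdo_dev_stop)(struct macsec_context *ctx);
	int (*mdo_add_secy)(struct macsec_context *ctx);
	int (*mdo_upd_secy)(struct macsec_context *ctx);
	int (*mdo_del_secy)(struct macsec_context *ctx);
	int (*mdo_add_rxsc)(struct macsec_context *ctx);
	int (*mdo_upd_rxsc)(struct macsec_context *ctx);
	int (*mdo_del_rxsc)(struct macsec_context *ctx);
	int (*mdo_add_rxsa)(struct macsec_context *ctx);
	int (*mdo_upd_rxsa)(struct macsec_context *ctx);
	int (*mdo_del_rxsa)(struct macsec_context *ctx);
	int (*mdo_add_txsa)(struct macsec_context *ctx);
	int (*mdo_upd_txsa)(struct macsec_context *ctx);
	int (*mdo_del_txsa)(struct macsec_context *ctx);
};

static int foo_macsec_dev_open(struct macsec_context *ctx)
{
	/* Program the hardware; 0 on success, negative errno otherwise
	 * (assumed convention, not spelled out in the header). */
	return 0;
}

static int foo_macsec_add_txsa(struct macsec_context *ctx)
{
	/* ctx->sa.assoc_num and ctx->sa.tx_sa identify the SA to install. */
	return 0;
}

static const struct macsec_ops foo_macsec_ops = {
	.mdo_dev_open = foo_macsec_dev_open,
	.mdo_add_txsa = foo_macsec_add_txsa,
	/* remaining callbacks omitted in this sketch */
};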
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
new file mode 100644
index 000000000000..27627e2d1bc2
--- /dev/null
+++ b/include/net/mptcp.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Multipath TCP
+ *
+ * Copyright (c) 2017 - 2019, Intel Corporation.
+ */
+
+#ifndef __NET_MPTCP_H
+#define __NET_MPTCP_H
+
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+
+/* MPTCP sk_buff extension data */
+struct mptcp_ext {
+ u64 data_ack;
+ u64 data_seq;
+ u32 subflow_seq;
+ u16 data_len;
+ u8 use_map:1,
+ dsn64:1,
+ data_fin:1,
+ use_ack:1,
+ ack64:1,
+ mpc_map:1,
+ __unused:2;
+ /* one byte hole */
+};
+
+struct mptcp_out_options {
+#if IS_ENABLED(CONFIG_MPTCP)
+ u16 suboptions;
+ u64 sndr_key;
+ u64 rcvr_key;
+ struct mptcp_ext ext_copy;
+#endif
+};
+
+#ifdef CONFIG_MPTCP
+
+void mptcp_init(void);
+
+static inline bool sk_is_mptcp(const struct sock *sk)
+{
+ return tcp_sk(sk)->is_mptcp;
+}
+
+static inline bool rsk_is_mptcp(const struct request_sock *req)
+{
+ return tcp_rsk(req)->is_mptcp;
+}
+
+void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr,
+ int opsize, struct tcp_options_received *opt_rx);
+bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
+ unsigned int *size, struct mptcp_out_options *opts);
+void mptcp_rcv_synsent(struct sock *sk);
+bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
+ struct mptcp_out_options *opts);
+bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
+ unsigned int *size, unsigned int remaining,
+ struct mptcp_out_options *opts);
+void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
+ struct tcp_options_received *opt_rx);
+
+void mptcp_write_options(__be32 *ptr, struct mptcp_out_options *opts);
+
+/* move the skb extension ownership, with the assumption that 'to' is
+ * newly allocated
+ */
+static inline void mptcp_skb_ext_move(struct sk_buff *to,
+ struct sk_buff *from)
+{
+ if (!skb_ext_exist(from, SKB_EXT_MPTCP))
+ return;
+
+ if (WARN_ON_ONCE(to->active_extensions))
+ skb_ext_put(to);
+
+ to->active_extensions = from->active_extensions;
+ to->extensions = from->extensions;
+ from->active_extensions = 0;
+}
+
+static inline bool mptcp_ext_matches(const struct mptcp_ext *to_ext,
+ const struct mptcp_ext *from_ext)
+{
+ /* MPTCP always clears the ext when adding it to the skb, so
+ * holes do not bother us here
+ */
+ return !from_ext ||
+ (to_ext && from_ext &&
+ !memcmp(from_ext, to_ext, sizeof(struct mptcp_ext)));
+}
+
+/* check if skbs can be collapsed.
+ * MPTCP collapse is allowed if neither @to nor @from carries an mptcp data
+ * mapping, or if the extension of @to is the same as @from.
+ * Collapsing is not possible if @to lacks an extension, but @from carries one.
+ */
+static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
+ const struct sk_buff *from)
+{
+ return mptcp_ext_matches(skb_ext_find(to, SKB_EXT_MPTCP),
+ skb_ext_find(from, SKB_EXT_MPTCP));
+}
+
+#else
+
+static inline void mptcp_init(void)
+{
+}
+
+static inline bool sk_is_mptcp(const struct sock *sk)
+{
+ return false;
+}
+
+static inline bool rsk_is_mptcp(const struct request_sock *req)
+{
+ return false;
+}
+
+static inline void mptcp_parse_option(const struct sk_buff *skb,
+ const unsigned char *ptr, int opsize,
+ struct tcp_options_received *opt_rx)
+{
+}
+
+static inline bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
+ unsigned int *size,
+ struct mptcp_out_options *opts)
+{
+ return false;
+}
+
+static inline void mptcp_rcv_synsent(struct sock *sk)
+{
+}
+
+static inline bool mptcp_synack_options(const struct request_sock *req,
+ unsigned int *size,
+ struct mptcp_out_options *opts)
+{
+ return false;
+}
+
+static inline bool mptcp_established_options(struct sock *sk,
+ struct sk_buff *skb,
+ unsigned int *size,
+ unsigned int remaining,
+ struct mptcp_out_options *opts)
+{
+ return false;
+}
+
+static inline void mptcp_incoming_options(struct sock *sk,
+ struct sk_buff *skb,
+ struct tcp_options_received *opt_rx)
+{
+}
+
+static inline void mptcp_skb_ext_move(struct sk_buff *to,
+ const struct sk_buff *from)
+{
+}
+
+static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
+ const struct sk_buff *from)
+{
+ return true;
+}
+
+#endif /* CONFIG_MPTCP */
+
+void mptcp_handle_ipv6_mapped(struct sock *sk, bool mapped);
+
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+int mptcpv6_init(void);
+#elif IS_ENABLED(CONFIG_IPV6)
+static inline int mptcpv6_init(void)
+{
+ return 0;
+}
+#endif
+
+#endif /* __NET_MPTCP_H */
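The collapse rule reduces to: allowed when @from carries no MPTCP mapping, or when both skbs carry byte-identical mappings; refused when only @from has one or the mappings differ. A standalone re-statement of mptcp_ext_matches() over a stub extension type, just to spell out that truth table (not kernel code):

#include <assert.h>
#include <stdbool.h>
#include <string.h>

/* Stand-in for struct mptcp_ext; only byte equality matters here. */
struct ext {
	unsigned int data_seq;
	unsigned int data_len;
};

static bool ext_matches(const struct ext *to_ext, const struct ext *from_ext)
{
	return !from_ext ||
	       (to_ext && from_ext &&
		!memcmp(from_ext, to_ext, sizeof(struct ext)));
}

int main(void)
{
	struct ext a = { .data_seq = 1, .data_len = 100 };
	struct ext b = a;
	struct ext c = { .data_seq = 2, .data_len = 100 };

	assert(ext_matches(&a, NULL));		/* @from has no mapping: collapse  */
	assert(ext_matches(&a, &b));		/* identical mappings: collapse    */
	assert(!ext_matches(NULL, &c));		/* only @from has one: keep apart  */
	assert(!ext_matches(&a, &c));		/* different mappings: keep apart  */
	return 0;
}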
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index b8ceaf0cd997..854d39ef1ca3 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -347,9 +347,9 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
#endif
int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
-int peernet2id(struct net *net, struct net *peer);
-bool peernet_has_id(struct net *net, struct net *peer);
-struct net *get_net_ns_by_id(struct net *net, int id);
+int peernet2id(const struct net *net, struct net *peer);
+bool peernet_has_id(const struct net *net, struct net *peer);
+struct net *get_net_ns_by_id(const struct net *net, int id);
struct pernet_operations {
struct list_head list;
@@ -427,7 +427,7 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
}
#endif
-static inline int rt_genid_ipv4(struct net *net)
+static inline int rt_genid_ipv4(const struct net *net)
{
return atomic_read(&net->ipv4.rt_genid);
}
@@ -459,7 +459,7 @@ static inline void rt_genid_bump_all(struct net *net)
rt_genid_bump_ipv6(net);
}
-static inline int fnhe_genid(struct net *net)
+static inline int fnhe_genid(const struct net *net)
{
return atomic_read(&net->fnhe_genid);
}
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 415b8f49d150..e0f709d9d547 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -47,6 +47,11 @@ struct nf_flowtable {
possible_net_t net;
};
+static inline bool nf_flowtable_hw_offload(struct nf_flowtable *flowtable)
+{
+ return flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD;
+}
+
enum flow_offload_tuple_dir {
FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL,
FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY,
@@ -83,13 +88,15 @@ struct flow_offload_tuple_rhash {
struct flow_offload_tuple tuple;
};
-#define FLOW_OFFLOAD_SNAT 0x1
-#define FLOW_OFFLOAD_DNAT 0x2
-#define FLOW_OFFLOAD_DYING 0x4
-#define FLOW_OFFLOAD_TEARDOWN 0x8
-#define FLOW_OFFLOAD_HW 0x10
-#define FLOW_OFFLOAD_HW_DYING 0x20
-#define FLOW_OFFLOAD_HW_DEAD 0x40
+enum nf_flow_flags {
+ NF_FLOW_SNAT,
+ NF_FLOW_DNAT,
+ NF_FLOW_TEARDOWN,
+ NF_FLOW_HW,
+ NF_FLOW_HW_DYING,
+ NF_FLOW_HW_DEAD,
+ NF_FLOW_HW_REFRESH,
+};
enum flow_offload_type {
NF_FLOW_OFFLOAD_UNSPEC = 0,
@@ -99,7 +106,7 @@ enum flow_offload_type {
struct flow_offload {
struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
struct nf_conn *ct;
- u16 flags;
+ unsigned long flags;
u16 type;
u32 timeout;
struct rcu_head rcu_head;
@@ -134,10 +141,6 @@ int nf_flow_table_init(struct nf_flowtable *flow_table);
void nf_flow_table_free(struct nf_flowtable *flow_table);
void flow_offload_teardown(struct flow_offload *flow);
-static inline void flow_offload_dead(struct flow_offload *flow)
-{
- flow->flags |= FLOW_OFFLOAD_DYING;
-}
int nf_flow_snat_port(const struct flow_offload *flow,
struct sk_buff *skb, unsigned int thoff,
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index fe7c50acc681..4170c033d461 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -231,6 +231,7 @@ struct nft_userdata {
* struct nft_set_elem - generic representation of set elements
*
* @key: element key
+ * @key_end: closing element key
* @priv: element private data and extensions
*/
struct nft_set_elem {
@@ -238,6 +239,10 @@ struct nft_set_elem {
u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
struct nft_data val;
} key;
+ union {
+ u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
+ struct nft_data val;
+ } key_end;
void *priv;
};
@@ -259,11 +264,15 @@ struct nft_set_iter {
* @klen: key length
* @dlen: data length
* @size: number of set elements
+ * @field_len: length of each field in concatenation, bytes
+ * @field_count: number of concatenated fields in element
*/
struct nft_set_desc {
unsigned int klen;
unsigned int dlen;
unsigned int size;
+ u8 field_len[NFT_REG32_COUNT];
+ u8 field_count;
};
/**
@@ -404,6 +413,8 @@ void nft_unregister_set(struct nft_set_type *type);
* @dtype: data type (verdict or numeric type defined by userspace)
* @objtype: object type (see NFT_OBJECT_* definitions)
* @size: maximum set size
+ * @field_len: length of each field in concatenation, bytes
+ * @field_count: number of concatenated fields in element
* @use: number of rules references to this set
* @nelems: number of elements
* @ndeact: number of deactivated elements queued for removal
@@ -430,6 +441,8 @@ struct nft_set {
u32 dtype;
u32 objtype;
u32 size;
+ u8 field_len[NFT_REG32_COUNT];
+ u8 field_count;
u32 use;
atomic_t nelems;
u32 ndeact;
@@ -502,6 +515,7 @@ void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
* enum nft_set_extensions - set extension type IDs
*
* @NFT_SET_EXT_KEY: element key
+ * @NFT_SET_EXT_KEY_END: upper bound element key, for ranges
* @NFT_SET_EXT_DATA: mapping data
* @NFT_SET_EXT_FLAGS: element flags
* @NFT_SET_EXT_TIMEOUT: element timeout
@@ -513,6 +527,7 @@ void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
*/
enum nft_set_extensions {
NFT_SET_EXT_KEY,
+ NFT_SET_EXT_KEY_END,
NFT_SET_EXT_DATA,
NFT_SET_EXT_FLAGS,
NFT_SET_EXT_TIMEOUT,
@@ -606,6 +621,11 @@ static inline struct nft_data *nft_set_ext_key(const struct nft_set_ext *ext)
return nft_set_ext(ext, NFT_SET_EXT_KEY);
}
+static inline struct nft_data *nft_set_ext_key_end(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_KEY_END);
+}
+
static inline struct nft_data *nft_set_ext_data(const struct nft_set_ext *ext)
{
return nft_set_ext(ext, NFT_SET_EXT_DATA);
@@ -655,7 +675,7 @@ static inline struct nft_object **nft_set_ext_obj(const struct nft_set_ext *ext)
void *nft_set_elem_init(const struct nft_set *set,
const struct nft_set_ext_tmpl *tmpl,
- const u32 *key, const u32 *data,
+ const u32 *key, const u32 *key_end, const u32 *data,
u64 timeout, u64 expiration, gfp_t gfp);
void nft_set_elem_destroy(const struct nft_set *set, void *elem,
bool destroy_expr);
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 2656155b4069..29e7e1021267 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -74,6 +74,7 @@ extern struct nft_set_type nft_set_hash_type;
extern struct nft_set_type nft_set_hash_fast_type;
extern struct nft_set_type nft_set_rbtree_type;
extern struct nft_set_type nft_set_bitmap_type;
+extern struct nft_set_type nft_set_pipapo_type;
struct nft_expr;
struct nft_regs;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index b140c8f1be22..56c365dc6dc7 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -1735,7 +1735,7 @@ static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
}
/**
- * nla_validate_nested - Validate a stream of nested attributes
+ * __nla_validate_nested - Validate a stream of nested attributes
* @start: container attribute
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
@@ -1758,9 +1758,9 @@ static inline int __nla_validate_nested(const struct nlattr *start, int maxtype,
}
static inline int
-nl80211_validate_nested(const struct nlattr *start, int maxtype,
- const struct nla_policy *policy,
- struct netlink_ext_ack *extack)
+nla_validate_nested(const struct nlattr *start, int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
return __nla_validate_nested(start, maxtype, policy,
NL_VALIDATE_STRICT, extack);
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index c0c0791b1912..08b98414d94e 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -154,6 +154,7 @@ struct netns_ipv4 {
int sysctl_tcp_adv_win_scale;
int sysctl_tcp_frto;
int sysctl_tcp_nometrics_save;
+ int sysctl_tcp_no_ssthresh_metrics_save;
int sysctl_tcp_moderate_rcvbuf;
int sysctl_tcp_tso_win_divisor;
int sysctl_tcp_workaround_signed_windows;
diff --git a/include/net/pie.h b/include/net/pie.h
new file mode 100644
index 000000000000..fd5a37cb7993
--- /dev/null
+++ b/include/net/pie.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __NET_SCHED_PIE_H
+#define __NET_SCHED_PIE_H
+
+#include <linux/ktime.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/inet_ecn.h>
+#include <net/pkt_sched.h>
+
+#define MAX_PROB U64_MAX
+#define DTIME_INVALID U64_MAX
+#define QUEUE_THRESHOLD 16384
+#define DQCOUNT_INVALID -1
+#define PIE_SCALE 8
+
+/**
+ * struct pie_params - contains pie parameters
+ * @target: target delay in pschedtime
+ * @tupdate: interval at which drop probability is calculated
+ * @limit: total number of packets that can be in the queue
+ * @alpha: parameter to control drop probability
+ * @beta: parameter to control drop probability
+ * @ecn: is ECN marking of packets enabled
+ * @bytemode: is drop probability scaled based on pkt size
+ * @dq_rate_estimator: is Little's law used for qdelay calculation
+ */
+struct pie_params {
+ psched_time_t target;
+ u32 tupdate;
+ u32 limit;
+ u32 alpha;
+ u32 beta;
+ u8 ecn;
+ u8 bytemode;
+ u8 dq_rate_estimator;
+};
+
+/**
+ * struct pie_vars - contains pie variables
+ * @qdelay: current queue delay
+ * @qdelay_old: queue delay in previous qdelay calculation
+ * @burst_time: burst time allowance
+ * @dq_tstamp: timestamp at which dq rate was last calculated
+ * @prob: drop probability
+ * @accu_prob: accumulated drop probability
+ * @dq_count: number of bytes dequeued in a measurement cycle
+ * @avg_dq_rate: calculated average dq rate
+ * @qlen_old: queue length during previous qdelay calculation
+ * @accu_prob_overflows: number of times accu_prob overflows
+ */
+struct pie_vars {
+ psched_time_t qdelay;
+ psched_time_t qdelay_old;
+ psched_time_t burst_time;
+ psched_time_t dq_tstamp;
+ u64 prob;
+ u64 accu_prob;
+ u64 dq_count;
+ u32 avg_dq_rate;
+ u32 qlen_old;
+ u8 accu_prob_overflows;
+};
+
+/**
+ * struct pie_stats - contains pie stats
+ * @packets_in: total number of packets enqueued
+ * @dropped: packets dropped due to pie action
+ * @overlimit: packets dropped due to lack of space in queue
+ * @ecn_mark: packets marked with ECN
+ * @maxq: maximum queue size
+ */
+struct pie_stats {
+ u32 packets_in;
+ u32 dropped;
+ u32 overlimit;
+ u32 ecn_mark;
+ u32 maxq;
+};
+
+/**
+ * struct pie_skb_cb - contains private skb vars
+ * @enqueue_time: timestamp when the packet is enqueued
+ * @mem_usage: size of the skb during enqueue
+ */
+struct pie_skb_cb {
+ psched_time_t enqueue_time;
+ u32 mem_usage;
+};
+
+static inline void pie_params_init(struct pie_params *params)
+{
+ params->target = PSCHED_NS2TICKS(15 * NSEC_PER_MSEC); /* 15 ms */
+ params->tupdate = usecs_to_jiffies(15 * USEC_PER_MSEC); /* 15 ms */
+ params->limit = 1000;
+ params->alpha = 2;
+ params->beta = 20;
+ params->ecn = false;
+ params->bytemode = false;
+ params->dq_rate_estimator = false;
+}
+
+static inline void pie_vars_init(struct pie_vars *vars)
+{
+ vars->burst_time = PSCHED_NS2TICKS(150 * NSEC_PER_MSEC); /* 150 ms */
+ vars->dq_tstamp = DTIME_INVALID;
+ vars->accu_prob = 0;
+ vars->dq_count = DQCOUNT_INVALID;
+ vars->avg_dq_rate = 0;
+ vars->accu_prob_overflows = 0;
+}
+
+static inline struct pie_skb_cb *get_pie_cb(const struct sk_buff *skb)
+{
+ qdisc_cb_private_validate(skb, sizeof(struct pie_skb_cb));
+ return (struct pie_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static inline psched_time_t pie_get_enqueue_time(const struct sk_buff *skb)
+{
+ return get_pie_cb(skb)->enqueue_time;
+}
+
+static inline void pie_set_enqueue_time(struct sk_buff *skb)
+{
+ get_pie_cb(skb)->enqueue_time = psched_get_time();
+}
+
+bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
+ struct pie_vars *vars, u32 qlen, u32 packet_size);
+
+void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
+ struct pie_vars *vars, u32 qlen);
+
+void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
+ u32 qlen);
+
+#endif
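MAX_PROB being U64_MAX suggests that pie_vars.prob is a fixed-point drop probability scaled so that MAX_PROB corresponds to 1.0; that reading is an assumption drawn from the definitions above rather than something the header states. Under it, converting between an ordinary fraction and the fixed-point value looks like:

#include <stdint.h>
#include <stdio.h>

#define MAX_PROB UINT64_MAX		/* same scale as in the header above */

static uint64_t prob_to_fixed(double p)	/* p in [0, 1) */
{
	return (uint64_t)(p * (double)MAX_PROB);
}

static double fixed_to_prob(uint64_t prob)
{
	return (double)prob / (double)MAX_PROB;
}

int main(void)
{
	uint64_t prob = prob_to_fixed(0.125);	/* 12.5% drop probability */

	printf("prob = %llu (%.3f of MAX_PROB)\n",
	       (unsigned long long)prob, fixed_to_prob(prob));
	return 0;
}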
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index e553fc80eb23..a972244ab193 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -141,31 +141,38 @@ __cls_set_class(unsigned long *clp, unsigned long cl)
return xchg(clp, cl);
}
-static inline unsigned long
-cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
+static inline void
+__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
- unsigned long old_cl;
+ unsigned long cl;
- sch_tree_lock(q);
- old_cl = __cls_set_class(clp, cl);
- sch_tree_unlock(q);
- return old_cl;
+ cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
+ cl = __cls_set_class(&r->class, cl);
+ if (cl)
+ q->ops->cl_ops->unbind_tcf(q, cl);
}
static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
struct Qdisc *q = tp->chain->block->q;
- unsigned long cl;
/* Check q as it is not set for shared blocks. In that case,
* setting class is not supported.
*/
if (!q)
return;
- cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
- cl = cls_set_class(q, &r->class, cl);
- if (cl)
+ sch_tree_lock(q);
+ __tcf_bind_filter(q, r, base);
+ sch_tree_unlock(q);
+}
+
+static inline void
+__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
+{
+ unsigned long cl;
+
+ if ((cl = __cls_set_class(&r->class, 0)) != 0)
q->ops->cl_ops->unbind_tcf(q, cl);
}
@@ -173,12 +180,10 @@ static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
struct Qdisc *q = tp->chain->block->q;
- unsigned long cl;
if (!q)
return;
- if ((cl = __cls_set_class(&r->class, 0)) != 0)
- q->ops->cl_ops->unbind_tcf(q, cl);
+ __tcf_unbind_filter(q, r);
}
struct tcf_exts {
@@ -791,9 +796,8 @@ enum tc_prio_command {
struct tc_prio_qopt_offload_params {
int bands;
u8 priomap[TC_PRIO_MAX + 1];
- /* In case that a prio qdisc is offloaded and now is changed to a
- * non-offloadedable config, it needs to update the backlog & qlen
- * values to negate the HW backlog & qlen values (and only them).
+ /* At the point of un-offloading the Qdisc, the reported backlog and
+ * qlen need to be reduced by the portion that is in HW.
*/
struct gnet_stats_queue *qstats;
};
@@ -824,4 +828,57 @@ struct tc_root_qopt_offload {
bool ingress;
};
+enum tc_ets_command {
+ TC_ETS_REPLACE,
+ TC_ETS_DESTROY,
+ TC_ETS_STATS,
+ TC_ETS_GRAFT,
+};
+
+struct tc_ets_qopt_offload_replace_params {
+ unsigned int bands;
+ u8 priomap[TC_PRIO_MAX + 1];
+ unsigned int quanta[TCQ_ETS_MAX_BANDS]; /* 0 for strict bands. */
+ unsigned int weights[TCQ_ETS_MAX_BANDS];
+ struct gnet_stats_queue *qstats;
+};
+
+struct tc_ets_qopt_offload_graft_params {
+ u8 band;
+ u32 child_handle;
+};
+
+struct tc_ets_qopt_offload {
+ enum tc_ets_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_ets_qopt_offload_replace_params replace_params;
+ struct tc_qopt_offload_stats stats;
+ struct tc_ets_qopt_offload_graft_params graft_params;
+ };
+};
+
+enum tc_tbf_command {
+ TC_TBF_REPLACE,
+ TC_TBF_DESTROY,
+ TC_TBF_STATS,
+};
+
+struct tc_tbf_qopt_offload_replace_params {
+ struct psched_ratecfg rate;
+ u32 max_size;
+ struct gnet_stats_queue *qstats;
+};
+
+struct tc_tbf_qopt_offload {
+ enum tc_tbf_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_tbf_qopt_offload_replace_params replace_params;
+ struct tc_qopt_offload_stats stats;
+ };
+};
+
#endif
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index fceddf89592a..151208704ed2 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -318,7 +318,8 @@ struct tcf_proto_ops {
void *type_data);
void (*hw_del)(struct tcf_proto *tp,
void *type_data);
- void (*bind_class)(void *, u32, unsigned long);
+ void (*bind_class)(void *, u32, unsigned long,
+ void *, unsigned long);
void * (*tmplt_create)(struct net *net,
struct tcf_chain *chain,
struct nlattr **tca,
diff --git a/include/net/sock.h b/include/net/sock.h
index 8dff68b4c316..02162b0378f7 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -436,31 +436,15 @@ struct sock {
* Because of non atomicity rules, all
* changes are protected by socket lock.
*/
- unsigned int __sk_flags_offset[0];
-#ifdef __BIG_ENDIAN_BITFIELD
-#define SK_FL_PROTO_SHIFT 16
-#define SK_FL_PROTO_MASK 0x00ff0000
-
-#define SK_FL_TYPE_SHIFT 0
-#define SK_FL_TYPE_MASK 0x0000ffff
-#else
-#define SK_FL_PROTO_SHIFT 8
-#define SK_FL_PROTO_MASK 0x0000ff00
-
-#define SK_FL_TYPE_SHIFT 16
-#define SK_FL_TYPE_MASK 0xffff0000
-#endif
-
- unsigned int sk_padding : 1,
+ u8 sk_padding : 1,
sk_kern_sock : 1,
sk_no_check_tx : 1,
sk_no_check_rx : 1,
- sk_userlocks : 4,
- sk_protocol : 8,
- sk_type : 16;
-#define SK_PROTOCOL_MAX U8_MAX
- u16 sk_gso_max_segs;
+ sk_userlocks : 4;
u8 sk_pacing_shift;
+ u16 sk_type;
+ u16 sk_protocol;
+ u16 sk_gso_max_segs;
unsigned long sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
@@ -1480,6 +1464,7 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
sk_mem_uncharge(sk, skb->truesize);
if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
!sk->sk_tx_skb_cache && !skb_cloned(skb)) {
+ skb_ext_reset(skb);
skb_zcopy_clear(skb, true);
sk->sk_tx_skb_cache = skb;
return;
@@ -2612,4 +2597,6 @@ static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
return false;
}
+void sock_def_readable(struct sock *sk);
+
#endif /* _SOCK_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e6f48384dc71..a5ea27df3c2b 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -39,6 +39,7 @@
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>
+#include <net/mptcp.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
@@ -182,6 +183,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOPT_SACK 5 /* SACK Block */
#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
+#define TCPOPT_MPTCP 30 /* Multipath TCP (RFC6824) */
#define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */
#define TCPOPT_EXP 254 /* Experimental */
/* Magic number to be after the option value for sharing TCP
@@ -328,6 +330,9 @@ int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
+int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
+void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
+ int size_goal);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
@@ -977,6 +982,13 @@ static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
return likely(!TCP_SKB_CB(skb)->eor);
}
+static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
+ const struct sk_buff *from)
+{
+ return likely(tcp_skb_can_collapse_to(to) &&
+ mptcp_skb_can_collapse(to, from));
+}
+
/* Events passed to congestion control interface */
enum tcp_ca_event {
CA_EVENT_TX_START, /* first transmit when no packets in flight */
@@ -1007,6 +1019,7 @@ enum tcp_ca_ack_event_flags {
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN 0x2
+#define TCP_CONG_MASK (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
union tcp_cc_info;
@@ -1101,6 +1114,7 @@ u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
+struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
@@ -1532,8 +1546,9 @@ struct tcp_md5sig_key {
struct hlist_node node;
u8 keylen;
u8 family; /* AF_INET or AF_INET6 */
- union tcp_md5_addr addr;
u8 prefixlen;
+ union tcp_md5_addr addr;
+ int l3index; /* set if key added with L3 scope */
u8 key[TCP_MD5SIG_MAXKEYLEN];
struct rcu_head rcu;
};
@@ -1577,34 +1592,33 @@ struct tcp_md5sig_pool {
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
- int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
- gfp_t gfp);
+ int family, u8 prefixlen, int l3index,
+ const u8 *newkey, u8 newkeylen, gfp_t gfp);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
- int family, u8 prefixlen);
+ int family, u8 prefixlen, int l3index);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk);
#ifdef CONFIG_TCP_MD5SIG
#include <linux/jump_label.h>
extern struct static_key_false tcp_md5_needed;
-struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
+struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
const union tcp_md5_addr *addr,
int family);
static inline struct tcp_md5sig_key *
-tcp_md5_do_lookup(const struct sock *sk,
- const union tcp_md5_addr *addr,
- int family)
+tcp_md5_do_lookup(const struct sock *sk, int l3index,
+ const union tcp_md5_addr *addr, int family)
{
if (!static_branch_unlikely(&tcp_md5_needed))
return NULL;
- return __tcp_md5_do_lookup(sk, addr, family);
+ return __tcp_md5_do_lookup(sk, l3index, addr, family);
}
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
#else
-static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
- const union tcp_md5_addr *addr,
- int family)
+static inline struct tcp_md5sig_key *
+tcp_md5_do_lookup(const struct sock *sk, int l3index,
+ const union tcp_md5_addr *addr, int family)
{
return NULL;
}
@@ -2002,6 +2016,11 @@ struct tcp_request_sock_ops {
enum tcp_synack_type synack_type);
};
+extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
+#if IS_ENABLED(CONFIG_IPV6)
+extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
+#endif
+
#ifdef CONFIG_SYN_COOKIES
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
const struct sock *sk, struct sk_buff *skb,
@@ -2154,6 +2173,9 @@ struct tcp_ulp_ops {
/* diagnostic */
int (*get_info)(const struct sock *sk, struct sk_buff *skb);
size_t (*get_info_size)(const struct sock *sk);
+ /* clone ulp */
+ void (*clone)(const struct request_sock *req, struct sock *newsk,
+ const gfp_t priority);
char name[TCP_ULP_NAME_MAX];
struct module *owner;
diff --git a/include/net/tls.h b/include/net/tls.h
index df630f5fc723..bf9eb4823933 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -641,6 +641,7 @@ int tls_sw_fallback_init(struct sock *sk,
#ifdef CONFIG_TLS_DEVICE
void tls_device_init(void);
void tls_device_cleanup(void);
+void tls_device_sk_destruct(struct sock *sk);
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
void tls_device_free_resources_tx(struct sock *sk);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
@@ -649,6 +650,14 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
struct sk_buff *skb, struct strp_msg *rxm);
+
+static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
+{
+ if (!sk_fullsock(sk) ||
+ smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct)
+ return false;
+ return tls_get_ctx(sk)->rx_conf == TLS_HW;
+}
#else
static inline void tls_device_init(void) {}
static inline void tls_device_cleanup(void) {}
diff --git a/include/net/udp.h b/include/net/udp.h
index bad74f780831..4a180f2a13e3 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -167,7 +167,7 @@ typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
__be16 dport);
struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
- struct udphdr *uh, udp_lookup_t lookup);
+ struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
@@ -476,6 +476,9 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
if (!inet_get_convert_csum(sk))
features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ if (skb->pkt_type == PACKET_LOOPBACK)
+ skb->ip_summed = CHECKSUM_PARTIAL;
+
/* the GSO CB lays after the UDP one, no need to save and restore any
* CB fragment
*/
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index e3780e4b74e1..e86ec48ef627 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -72,7 +72,6 @@ struct xdp_umem {
struct xsk_map {
struct bpf_map map;
- struct list_head __percpu *flush_list;
spinlock_t lock; /* Synchronize map updates */
struct xdp_sock *xsk_map[];
};
@@ -119,8 +118,8 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
-u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
-void xsk_umem_discard_addr(struct xdp_umem *umem);
+bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
+void xsk_umem_release_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
@@ -139,9 +138,8 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
- struct xdp_sock *xs);
-void __xsk_map_flush(struct bpf_map *map);
+int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
+void __xsk_map_flush(void);
static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
u32 key)
@@ -199,7 +197,7 @@ static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
return xsk_umem_has_addrs(umem, cnt - rq->length);
}
-static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
+static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
@@ -210,12 +208,12 @@ static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
return addr;
}
-static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
+static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
{
struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
if (!rq->length)
- xsk_umem_discard_addr(umem);
+ xsk_umem_release_addr(umem);
else
rq->length--;
}
@@ -260,7 +258,7 @@ static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
return NULL;
}
-static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
+static inline void xsk_umem_release_addr(struct xdp_umem *umem)
{
}
@@ -334,7 +332,7 @@ static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
return NULL;
}
-static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
+static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
{
}
@@ -369,13 +367,12 @@ static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
return 0;
}
-static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
- struct xdp_sock *xs)
+static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
return -EOPNOTSUPP;
}
-static inline void __xsk_map_flush(struct bpf_map *map)
+static inline void __xsk_map_flush(void)
{
}
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index dda3c025452e..8f71c111e65a 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -193,6 +193,7 @@ struct xfrm_state {
/* Data for encapsulator */
struct xfrm_encap_tmpl *encap;
+ struct sock __rcu *encap_sk;
/* Data for care-of address */
xfrm_address_t *coaddr;
@@ -1547,6 +1548,9 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
int xfrm_init_state(struct xfrm_state *x);
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
+int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
+ int (*finish)(struct net *, struct sock *,
+ struct sk_buff *));
int xfrm_trans_queue(struct sk_buff *skb,
int (*finish)(struct net *, struct sock *,
struct sk_buff *));
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 753f54e17e0a..e3518fd6b95b 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -69,7 +69,7 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem)
#ifdef CONFIG_INFINIBAND_USER_MEM
-struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
+struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_page_count(struct ib_umem *umem);
@@ -83,7 +83,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
#include <linux/err.h>
-static inline struct ib_umem *ib_umem_get(struct ib_udata *udata,
+static inline struct ib_umem *ib_umem_get(struct ib_device *device,
unsigned long addr, size_t size,
int access)
{
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 81429acc8257..64314ff76612 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -114,9 +114,9 @@ static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
struct ib_umem_odp *
-ib_umem_odp_get(struct ib_udata *udata, unsigned long addr, size_t size,
+ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
int access, const struct mmu_interval_notifier_ops *ops);
-struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
+struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
int access);
struct ib_umem_odp *
ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr,
@@ -134,7 +134,7 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline struct ib_umem_odp *
-ib_umem_odp_get(struct ib_udata *udata, unsigned long addr, size_t size,
+ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
int access, const struct mmu_interval_notifier_ops *ops)
{
return ERR_PTR(-EINVAL);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 5608e14e3aad..e2cc62217cc2 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -4153,6 +4153,15 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}
+/* ib_reg_user_mr - register a memory region for virtual addresses from kernel
+ * space. This function should be called when 'current' is the owning MM.
+ */
+struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags);
+
+/* ib_advise_mr - give an advice about an address range in a memory region */
+int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
+ u32 flags, struct ib_sge *sg_list, u32 num_sge);
/**
* ib_dereg_mr_user - Deregisters a memory region and removes it from the
* HCA translation table.
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index 64cbbbe74a36..068f96b1a83e 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -420,6 +420,8 @@ struct ocelot_port {
u8 ptp_cmd;
struct sk_buff_head tx_skbs;
u8 ts_id;
+
+ phy_interface_t phy_mode;
};
struct ocelot {
diff --git a/include/soc/mscc/ocelot_ana.h b/include/soc/mscc/ocelot_ana.h
new file mode 100644
index 000000000000..841c6ec22b64
--- /dev/null
+++ b/include/soc/mscc/ocelot_ana.h
@@ -0,0 +1,625 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_ANA_H_
+#define _MSCC_OCELOT_ANA_H_
+
+#define ANA_ANAGEFIL_B_DOM_EN BIT(22)
+#define ANA_ANAGEFIL_B_DOM_VAL BIT(21)
+#define ANA_ANAGEFIL_AGE_LOCKED BIT(20)
+#define ANA_ANAGEFIL_PID_EN BIT(19)
+#define ANA_ANAGEFIL_PID_VAL(x) (((x) << 14) & GENMASK(18, 14))
+#define ANA_ANAGEFIL_PID_VAL_M GENMASK(18, 14)
+#define ANA_ANAGEFIL_PID_VAL_X(x) (((x) & GENMASK(18, 14)) >> 14)
+#define ANA_ANAGEFIL_VID_EN BIT(13)
+#define ANA_ANAGEFIL_VID_VAL(x) ((x) & GENMASK(12, 0))
+#define ANA_ANAGEFIL_VID_VAL_M GENMASK(12, 0)
+
+#define ANA_STORMLIMIT_CFG_RSZ 0x4
+
+#define ANA_STORMLIMIT_CFG_STORM_RATE(x) (((x) << 3) & GENMASK(6, 3))
+#define ANA_STORMLIMIT_CFG_STORM_RATE_M GENMASK(6, 3)
+#define ANA_STORMLIMIT_CFG_STORM_RATE_X(x) (((x) & GENMASK(6, 3)) >> 3)
+#define ANA_STORMLIMIT_CFG_STORM_UNIT BIT(2)
+#define ANA_STORMLIMIT_CFG_STORM_MODE(x) ((x) & GENMASK(1, 0))
+#define ANA_STORMLIMIT_CFG_STORM_MODE_M GENMASK(1, 0)
+
+#define ANA_AUTOAGE_AGE_FAST BIT(21)
+#define ANA_AUTOAGE_AGE_PERIOD(x) (((x) << 1) & GENMASK(20, 1))
+#define ANA_AUTOAGE_AGE_PERIOD_M GENMASK(20, 1)
+#define ANA_AUTOAGE_AGE_PERIOD_X(x) (((x) & GENMASK(20, 1)) >> 1)
+#define ANA_AUTOAGE_AUTOAGE_LOCKED BIT(0)
+
+#define ANA_MACTOPTIONS_REDUCED_TABLE BIT(1)
+#define ANA_MACTOPTIONS_SHADOW BIT(0)
+
+#define ANA_AGENCTRL_FID_MASK(x) (((x) << 12) & GENMASK(23, 12))
+#define ANA_AGENCTRL_FID_MASK_M GENMASK(23, 12)
+#define ANA_AGENCTRL_FID_MASK_X(x) (((x) & GENMASK(23, 12)) >> 12)
+#define ANA_AGENCTRL_IGNORE_DMAC_FLAGS BIT(11)
+#define ANA_AGENCTRL_IGNORE_SMAC_FLAGS BIT(10)
+#define ANA_AGENCTRL_FLOOD_SPECIAL BIT(9)
+#define ANA_AGENCTRL_FLOOD_IGNORE_VLAN BIT(8)
+#define ANA_AGENCTRL_MIRROR_CPU BIT(7)
+#define ANA_AGENCTRL_LEARN_CPU_COPY BIT(6)
+#define ANA_AGENCTRL_LEARN_FWD_KILL BIT(5)
+#define ANA_AGENCTRL_LEARN_IGNORE_VLAN BIT(4)
+#define ANA_AGENCTRL_CPU_CPU_KILL_ENA BIT(3)
+#define ANA_AGENCTRL_GREEN_COUNT_MODE BIT(2)
+#define ANA_AGENCTRL_YELLOW_COUNT_MODE BIT(1)
+#define ANA_AGENCTRL_RED_COUNT_MODE BIT(0)
+
+#define ANA_FLOODING_RSZ 0x4
+
+#define ANA_FLOODING_FLD_UNICAST(x) (((x) << 12) & GENMASK(17, 12))
+#define ANA_FLOODING_FLD_UNICAST_M GENMASK(17, 12)
+#define ANA_FLOODING_FLD_UNICAST_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define ANA_FLOODING_FLD_BROADCAST(x) (((x) << 6) & GENMASK(11, 6))
+#define ANA_FLOODING_FLD_BROADCAST_M GENMASK(11, 6)
+#define ANA_FLOODING_FLD_BROADCAST_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define ANA_FLOODING_FLD_MULTICAST(x) ((x) & GENMASK(5, 0))
+#define ANA_FLOODING_FLD_MULTICAST_M GENMASK(5, 0)
+
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL(x) (((x) << 18) & GENMASK(23, 18))
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_M GENMASK(23, 18)
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_X(x) (((x) & GENMASK(23, 18)) >> 18)
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA(x) (((x) << 12) & GENMASK(17, 12))
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA_M GENMASK(17, 12)
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL(x) (((x) << 6) & GENMASK(11, 6))
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_M GENMASK(11, 6)
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA(x) ((x) & GENMASK(5, 0))
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA_M GENMASK(5, 0)
+
+#define ANA_SFLOW_CFG_RSZ 0x4
+
+#define ANA_SFLOW_CFG_SF_RATE(x) (((x) << 2) & GENMASK(13, 2))
+#define ANA_SFLOW_CFG_SF_RATE_M GENMASK(13, 2)
+#define ANA_SFLOW_CFG_SF_RATE_X(x) (((x) & GENMASK(13, 2)) >> 2)
+#define ANA_SFLOW_CFG_SF_SAMPLE_RX BIT(1)
+#define ANA_SFLOW_CFG_SF_SAMPLE_TX BIT(0)
+
+#define ANA_PORT_MODE_RSZ 0x4
+
+#define ANA_PORT_MODE_REDTAG_PARSE_CFG BIT(3)
+#define ANA_PORT_MODE_VLAN_PARSE_CFG(x) (((x) << 1) & GENMASK(2, 1))
+#define ANA_PORT_MODE_VLAN_PARSE_CFG_M GENMASK(2, 1)
+#define ANA_PORT_MODE_VLAN_PARSE_CFG_X(x) (((x) & GENMASK(2, 1)) >> 1)
+#define ANA_PORT_MODE_L3_PARSE_CFG BIT(0)
+
+#define ANA_CUT_THRU_CFG_RSZ 0x4
+
+#define ANA_PGID_PGID_RSZ 0x4
+
+#define ANA_PGID_PGID_PGID(x) ((x) & GENMASK(11, 0))
+#define ANA_PGID_PGID_PGID_M GENMASK(11, 0)
+#define ANA_PGID_PGID_CPUQ_DST_PGID(x) (((x) << 27) & GENMASK(29, 27))
+#define ANA_PGID_PGID_CPUQ_DST_PGID_M GENMASK(29, 27)
+#define ANA_PGID_PGID_CPUQ_DST_PGID_X(x) (((x) & GENMASK(29, 27)) >> 27)
+
+#define ANA_TABLES_MACHDATA_VID(x) (((x) << 16) & GENMASK(28, 16))
+#define ANA_TABLES_MACHDATA_VID_M GENMASK(28, 16)
+#define ANA_TABLES_MACHDATA_VID_X(x) (((x) & GENMASK(28, 16)) >> 16)
+#define ANA_TABLES_MACHDATA_MACHDATA(x) ((x) & GENMASK(15, 0))
+#define ANA_TABLES_MACHDATA_MACHDATA_M GENMASK(15, 0)
+
+#define ANA_TABLES_STREAMDATA_SSID_VALID BIT(16)
+#define ANA_TABLES_STREAMDATA_SSID(x) (((x) << 9) & GENMASK(15, 9))
+#define ANA_TABLES_STREAMDATA_SSID_M GENMASK(15, 9)
+#define ANA_TABLES_STREAMDATA_SSID_X(x) (((x) & GENMASK(15, 9)) >> 9)
+#define ANA_TABLES_STREAMDATA_SFID_VALID BIT(8)
+#define ANA_TABLES_STREAMDATA_SFID(x) ((x) & GENMASK(7, 0))
+#define ANA_TABLES_STREAMDATA_SFID_M GENMASK(7, 0)
+
+#define ANA_TABLES_MACACCESS_MAC_CPU_COPY BIT(15)
+#define ANA_TABLES_MACACCESS_SRC_KILL BIT(14)
+#define ANA_TABLES_MACACCESS_IGNORE_VLAN BIT(13)
+#define ANA_TABLES_MACACCESS_AGED_FLAG BIT(12)
+#define ANA_TABLES_MACACCESS_VALID BIT(11)
+#define ANA_TABLES_MACACCESS_ENTRYTYPE(x) (((x) << 9) & GENMASK(10, 9))
+#define ANA_TABLES_MACACCESS_ENTRYTYPE_M GENMASK(10, 9)
+#define ANA_TABLES_MACACCESS_ENTRYTYPE_X(x) (((x) & GENMASK(10, 9)) >> 9)
+#define ANA_TABLES_MACACCESS_DEST_IDX(x) (((x) << 3) & GENMASK(8, 3))
+#define ANA_TABLES_MACACCESS_DEST_IDX_M GENMASK(8, 3)
+#define ANA_TABLES_MACACCESS_DEST_IDX_X(x) (((x) & GENMASK(8, 3)) >> 3)
+#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD(x) ((x) & GENMASK(2, 0))
+#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M GENMASK(2, 0)
+#define MACACCESS_CMD_IDLE 0
+#define MACACCESS_CMD_LEARN 1
+#define MACACCESS_CMD_FORGET 2
+#define MACACCESS_CMD_AGE 3
+#define MACACCESS_CMD_GET_NEXT 4
+#define MACACCESS_CMD_INIT 5
+#define MACACCESS_CMD_READ 6
+#define MACACCESS_CMD_WRITE 7
+
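Each register field in this header follows the same trio: FIELD(x) shifts and masks a value into place, FIELD_M is the mask, and FIELD_X(x) extracts it back out. For illustration only, a userspace sketch composing a MAC-table learn command word from the MACACCESS macros above (GENMASK/BIT restated for 32-bit values; the composition is hypothetical, not the driver's code):

#include <assert.h>
#include <stdint.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) >> (31 - (h))) & ((~0u) << (l)))

#define ANA_TABLES_MACACCESS_VALID		BIT(11)
#define ANA_TABLES_MACACCESS_DEST_IDX(x)	(((x) << 3) & GENMASK(8, 3))
#define ANA_TABLES_MACACCESS_DEST_IDX_X(x)	(((x) & GENMASK(8, 3)) >> 3)
#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD(x)	((x) & GENMASK(2, 0))
#define MACACCESS_CMD_LEARN			1

int main(void)
{
	uint32_t cmd = ANA_TABLES_MACACCESS_VALID |
		       ANA_TABLES_MACACCESS_DEST_IDX(5) |
		       ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);

	/* The _X() helper undoes the corresponding encode macro. */
	assert(ANA_TABLES_MACACCESS_DEST_IDX_X(cmd) == 5);
	return 0;
}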
+#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(x) (((x) << 2) & GENMASK(13, 2))
+#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK_M GENMASK(13, 2)
+#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK_X(x) (((x) & GENMASK(13, 2)) >> 2)
+#define ANA_TABLES_VLANACCESS_VLAN_TBL_CMD(x) ((x) & GENMASK(1, 0))
+#define ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M GENMASK(1, 0)
+#define ANA_TABLES_VLANACCESS_CMD_IDLE 0x0
+#define ANA_TABLES_VLANACCESS_CMD_WRITE 0x2
+#define ANA_TABLES_VLANACCESS_CMD_INIT 0x3
+
+#define ANA_TABLES_VLANTIDX_VLAN_SEC_FWD_ENA BIT(17)
+#define ANA_TABLES_VLANTIDX_VLAN_FLOOD_DIS BIT(16)
+#define ANA_TABLES_VLANTIDX_VLAN_PRIV_VLAN BIT(15)
+#define ANA_TABLES_VLANTIDX_VLAN_LEARN_DISABLED BIT(14)
+#define ANA_TABLES_VLANTIDX_VLAN_MIRROR BIT(13)
+#define ANA_TABLES_VLANTIDX_VLAN_SRC_CHK BIT(12)
+#define ANA_TABLES_VLANTIDX_V_INDEX(x) ((x) & GENMASK(11, 0))
+#define ANA_TABLES_VLANTIDX_V_INDEX_M GENMASK(11, 0)
+
+#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK(x) (((x) << 2) & GENMASK(8, 2))
+#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK_M GENMASK(8, 2)
+#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK_X(x) (((x) & GENMASK(8, 2)) >> 2)
+#define ANA_TABLES_ISDXACCESS_ISDX_TBL_CMD(x) ((x) & GENMASK(1, 0))
+#define ANA_TABLES_ISDXACCESS_ISDX_TBL_CMD_M GENMASK(1, 0)
+
+#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI(x) (((x) << 21) & GENMASK(28, 21))
+#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI_M GENMASK(28, 21)
+#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI_X(x) (((x) & GENMASK(28, 21)) >> 21)
+#define ANA_TABLES_ISDXTIDX_ISDX_MSTI(x) (((x) << 15) & GENMASK(20, 15))
+#define ANA_TABLES_ISDXTIDX_ISDX_MSTI_M GENMASK(20, 15)
+#define ANA_TABLES_ISDXTIDX_ISDX_MSTI_X(x) (((x) & GENMASK(20, 15)) >> 15)
+#define ANA_TABLES_ISDXTIDX_ISDX_ES0_KEY_ENA BIT(14)
+#define ANA_TABLES_ISDXTIDX_ISDX_FORCE_ENA BIT(10)
+#define ANA_TABLES_ISDXTIDX_ISDX_INDEX(x) ((x) & GENMASK(7, 0))
+#define ANA_TABLES_ISDXTIDX_ISDX_INDEX_M GENMASK(7, 0)
+
+#define ANA_TABLES_ENTRYLIM_RSZ 0x4
+
+#define ANA_TABLES_ENTRYLIM_ENTRYLIM(x) (((x) << 14) & GENMASK(17, 14))
+#define ANA_TABLES_ENTRYLIM_ENTRYLIM_M GENMASK(17, 14)
+#define ANA_TABLES_ENTRYLIM_ENTRYLIM_X(x) (((x) & GENMASK(17, 14)) >> 14)
+#define ANA_TABLES_ENTRYLIM_ENTRYSTAT(x) ((x) & GENMASK(13, 0))
+#define ANA_TABLES_ENTRYLIM_ENTRYSTAT_M GENMASK(13, 0)
+
+#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM(x) (((x) << 4) & GENMASK(31, 4))
+#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_M GENMASK(31, 4)
+#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_X(x) (((x) & GENMASK(31, 4)) >> 4)
+#define ANA_TABLES_STREAMACCESS_SEQ_GEN_REC_ENA BIT(3)
+#define ANA_TABLES_STREAMACCESS_GEN_REC_TYPE BIT(2)
+#define ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD(x) ((x) & GENMASK(1, 0))
+#define ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD_M GENMASK(1, 0)
+
+#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS(x) (((x) << 30) & GENMASK(31, 30))
+#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_M GENMASK(31, 30)
+#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_X(x) (((x) & GENMASK(31, 30)) >> 30)
+#define ANA_TABLES_STREAMTIDX_S_INDEX(x) (((x) << 16) & GENMASK(22, 16))
+#define ANA_TABLES_STREAMTIDX_S_INDEX_M GENMASK(22, 16)
+#define ANA_TABLES_STREAMTIDX_S_INDEX_X(x) (((x) & GENMASK(22, 16)) >> 16)
+#define ANA_TABLES_STREAMTIDX_FORCE_SF_BEHAVIOUR BIT(14)
+#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN(x) (((x) << 8) & GENMASK(13, 8))
+#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_M GENMASK(13, 8)
+#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_X(x) (((x) & GENMASK(13, 8)) >> 8)
+#define ANA_TABLES_STREAMTIDX_RESET_ON_ROGUE BIT(7)
+#define ANA_TABLES_STREAMTIDX_REDTAG_POP BIT(6)
+#define ANA_TABLES_STREAMTIDX_STREAM_SPLIT BIT(5)
+#define ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2(x) ((x) & GENMASK(4, 0))
+#define ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2_M GENMASK(4, 0)
+
+#define ANA_TABLES_SEQ_MASK_SPLIT_MASK(x) (((x) << 16) & GENMASK(22, 16))
+#define ANA_TABLES_SEQ_MASK_SPLIT_MASK_M GENMASK(22, 16)
+#define ANA_TABLES_SEQ_MASK_SPLIT_MASK_X(x) (((x) & GENMASK(22, 16)) >> 16)
+#define ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK(x) ((x) & GENMASK(6, 0))
+#define ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK_M GENMASK(6, 0)
+
+#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK(x) (((x) << 1) & GENMASK(7, 1))
+#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK_M GENMASK(7, 1)
+#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK_X(x) (((x) & GENMASK(7, 1)) >> 1)
+#define ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA BIT(0)
+
+#define ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA BIT(22)
+#define ANA_TABLES_SFIDACCESS_IGR_PRIO(x) (((x) << 19) & GENMASK(21, 19))
+#define ANA_TABLES_SFIDACCESS_IGR_PRIO_M GENMASK(21, 19)
+#define ANA_TABLES_SFIDACCESS_IGR_PRIO_X(x) (((x) & GENMASK(21, 19)) >> 19)
+#define ANA_TABLES_SFIDACCESS_FORCE_BLOCK BIT(18)
+#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(x) (((x) << 2) & GENMASK(17, 2))
+#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_M GENMASK(17, 2)
+#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_X(x) (((x) & GENMASK(17, 2)) >> 2)
+#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(x) ((x) & GENMASK(1, 0))
+#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M GENMASK(1, 0)
+
+#define ANA_TABLES_SFIDTIDX_SGID_VALID BIT(26)
+#define ANA_TABLES_SFIDTIDX_SGID(x) (((x) << 18) & GENMASK(25, 18))
+#define ANA_TABLES_SFIDTIDX_SGID_M GENMASK(25, 18)
+#define ANA_TABLES_SFIDTIDX_SGID_X(x) (((x) & GENMASK(25, 18)) >> 18)
+#define ANA_TABLES_SFIDTIDX_POL_ENA BIT(17)
+#define ANA_TABLES_SFIDTIDX_POL_IDX(x) (((x) << 8) & GENMASK(16, 8))
+#define ANA_TABLES_SFIDTIDX_POL_IDX_M GENMASK(16, 8)
+#define ANA_TABLES_SFIDTIDX_POL_IDX_X(x) (((x) & GENMASK(16, 8)) >> 8)
+#define ANA_TABLES_SFIDTIDX_SFID_INDEX(x) ((x) & GENMASK(7, 0))
+#define ANA_TABLES_SFIDTIDX_SFID_INDEX_M GENMASK(7, 0)
+
+#define ANA_MSTI_STATE_RSZ 0x4
+
+#define ANA_OAM_UPM_LM_CNT_RSZ 0x4
+
+#define ANA_SG_ACCESS_CTRL_SGID(x) ((x) & GENMASK(7, 0))
+#define ANA_SG_ACCESS_CTRL_SGID_M GENMASK(7, 0)
+#define ANA_SG_ACCESS_CTRL_CONFIG_CHANGE BIT(28)
+
+#define ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
+#define ANA_SG_CONFIG_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(18, 16))
+#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_M GENMASK(18, 16)
+#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(18, 16)) >> 16)
+#define ANA_SG_CONFIG_REG_3_GATE_ENABLE BIT(20)
+#define ANA_SG_CONFIG_REG_3_INIT_IPS(x) (((x) << 24) & GENMASK(27, 24))
+#define ANA_SG_CONFIG_REG_3_INIT_IPS_M GENMASK(27, 24)
+#define ANA_SG_CONFIG_REG_3_INIT_IPS_X(x) (((x) & GENMASK(27, 24)) >> 24)
+#define ANA_SG_CONFIG_REG_3_INIT_GATE_STATE BIT(28)
+
+#define ANA_SG_GCL_GS_CONFIG_RSZ 0x4
+
+#define ANA_SG_GCL_GS_CONFIG_IPS(x) ((x) & GENMASK(3, 0))
+#define ANA_SG_GCL_GS_CONFIG_IPS_M GENMASK(3, 0)
+#define ANA_SG_GCL_GS_CONFIG_GATE_STATE BIT(4)
+
+#define ANA_SG_GCL_TI_CONFIG_RSZ 0x4
+
+#define ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_M GENMASK(15, 0)
+#define ANA_SG_STATUS_REG_3_GATE_STATE BIT(16)
+#define ANA_SG_STATUS_REG_3_IPS(x) (((x) << 20) & GENMASK(23, 20))
+#define ANA_SG_STATUS_REG_3_IPS_M GENMASK(23, 20)
+#define ANA_SG_STATUS_REG_3_IPS_X(x) (((x) & GENMASK(23, 20)) >> 20)
+#define ANA_SG_STATUS_REG_3_CONFIG_PENDING BIT(24)
+
+#define ANA_PORT_VLAN_CFG_GSZ 0x100
+
+#define ANA_PORT_VLAN_CFG_VLAN_VID_AS_ISDX BIT(21)
+#define ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA BIT(20)
+#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT(x) (((x) << 18) & GENMASK(19, 18))
+#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M GENMASK(19, 18)
+#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT_X(x) (((x) & GENMASK(19, 18)) >> 18)
+#define ANA_PORT_VLAN_CFG_VLAN_INNER_TAG_ENA BIT(17)
+#define ANA_PORT_VLAN_CFG_VLAN_TAG_TYPE BIT(16)
+#define ANA_PORT_VLAN_CFG_VLAN_DEI BIT(15)
+#define ANA_PORT_VLAN_CFG_VLAN_PCP(x) (((x) << 12) & GENMASK(14, 12))
+#define ANA_PORT_VLAN_CFG_VLAN_PCP_M GENMASK(14, 12)
+#define ANA_PORT_VLAN_CFG_VLAN_PCP_X(x) (((x) & GENMASK(14, 12)) >> 12)
+#define ANA_PORT_VLAN_CFG_VLAN_VID(x) ((x) & GENMASK(11, 0))
+#define ANA_PORT_VLAN_CFG_VLAN_VID_M GENMASK(11, 0)
+
+#define ANA_PORT_DROP_CFG_GSZ 0x100
+
+#define ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA BIT(6)
+#define ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA BIT(5)
+#define ANA_PORT_DROP_CFG_DROP_C_TAGGED_ENA BIT(4)
+#define ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3)
+#define ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2)
+#define ANA_PORT_DROP_CFG_DROP_NULL_MAC_ENA BIT(1)
+#define ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA BIT(0)
+
+#define ANA_PORT_QOS_CFG_GSZ 0x100
+
+#define ANA_PORT_QOS_CFG_DP_DEFAULT_VAL BIT(8)
+#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL(x) (((x) << 5) & GENMASK(7, 5))
+#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_M GENMASK(7, 5)
+#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(x) (((x) & GENMASK(7, 5)) >> 5)
+#define ANA_PORT_QOS_CFG_QOS_DSCP_ENA BIT(4)
+#define ANA_PORT_QOS_CFG_QOS_PCP_ENA BIT(3)
+#define ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA BIT(2)
+#define ANA_PORT_QOS_CFG_DSCP_REWR_CFG(x) ((x) & GENMASK(1, 0))
+#define ANA_PORT_QOS_CFG_DSCP_REWR_CFG_M GENMASK(1, 0)
+
+#define ANA_PORT_VCAP_CFG_GSZ 0x100
+
+#define ANA_PORT_VCAP_CFG_S1_ENA BIT(14)
+#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA(x) (((x) << 11) & GENMASK(13, 11))
+#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA_M GENMASK(13, 11)
+#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA_X(x) (((x) & GENMASK(13, 11)) >> 11)
+#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA(x) (((x) << 8) & GENMASK(10, 8))
+#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA_M GENMASK(10, 8)
+#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA_X(x) (((x) & GENMASK(10, 8)) >> 8)
+#define ANA_PORT_VCAP_CFG_PAG_VAL(x) ((x) & GENMASK(7, 0))
+#define ANA_PORT_VCAP_CFG_PAG_VAL_M GENMASK(7, 0)
+
+#define ANA_PORT_VCAP_S1_KEY_CFG_GSZ 0x100
+#define ANA_PORT_VCAP_S1_KEY_CFG_RSZ 0x4
+
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG(x) (((x) << 4) & GENMASK(6, 4))
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG_M GENMASK(6, 4)
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG_X(x) (((x) & GENMASK(6, 4)) >> 4)
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG(x) (((x) << 2) & GENMASK(3, 2))
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG_M GENMASK(3, 2)
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_OTHER_CFG(x) ((x) & GENMASK(1, 0))
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_OTHER_CFG_M GENMASK(1, 0)
+
+#define ANA_PORT_VCAP_S2_CFG_GSZ 0x100
+
+#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA(x) (((x) << 17) & GENMASK(18, 17))
+#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA_M GENMASK(18, 17)
+#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA_X(x) (((x) & GENMASK(18, 17)) >> 17)
+#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA(x) (((x) << 15) & GENMASK(16, 15))
+#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA_M GENMASK(16, 15)
+#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA_X(x) (((x) & GENMASK(16, 15)) >> 15)
+#define ANA_PORT_VCAP_S2_CFG_S2_ENA BIT(14)
+#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(x) (((x) << 12) & GENMASK(13, 12))
+#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_M GENMASK(13, 12)
+#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_X(x) (((x) & GENMASK(13, 12)) >> 12)
+#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(x) (((x) << 10) & GENMASK(11, 10))
+#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_M GENMASK(11, 10)
+#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_X(x) (((x) & GENMASK(11, 10)) >> 10)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(x) (((x) << 8) & GENMASK(9, 8))
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_M GENMASK(9, 8)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_X(x) (((x) & GENMASK(9, 8)) >> 8)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(x) (((x) << 6) & GENMASK(7, 6))
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_M GENMASK(7, 6)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_X(x) (((x) & GENMASK(7, 6)) >> 6)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(x) (((x) << 2) & GENMASK(5, 2))
+#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG_M GENMASK(5, 2)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG_X(x) (((x) & GENMASK(5, 2)) >> 2)
+#define ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(x) ((x) & GENMASK(1, 0))
+#define ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS_M GENMASK(1, 0)
+
+#define ANA_PORT_PCP_DEI_MAP_GSZ 0x100
+#define ANA_PORT_PCP_DEI_MAP_RSZ 0x4
+
+#define ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL BIT(3)
+#define ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(x) ((x) & GENMASK(2, 0))
+#define ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M GENMASK(2, 0)
+
+#define ANA_PORT_CPU_FWD_CFG_GSZ 0x100
+
+#define ANA_PORT_CPU_FWD_CFG_CPU_VRAP_REDIR_ENA BIT(7)
+#define ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA BIT(6)
+#define ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA BIT(5)
+#define ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA BIT(4)
+#define ANA_PORT_CPU_FWD_CFG_CPU_SRC_COPY_ENA BIT(3)
+#define ANA_PORT_CPU_FWD_CFG_CPU_ALLBRIDGE_DROP_ENA BIT(2)
+#define ANA_PORT_CPU_FWD_CFG_CPU_ALLBRIDGE_REDIR_ENA BIT(1)
+#define ANA_PORT_CPU_FWD_CFG_CPU_OAM_ENA BIT(0)
+
+#define ANA_PORT_CPU_FWD_BPDU_CFG_GSZ 0x100
+
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_M GENMASK(31, 16)
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(x) ((x) & GENMASK(15, 0))
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA_M GENMASK(15, 0)
+
+#define ANA_PORT_CPU_FWD_GARP_CFG_GSZ 0x100
+
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA_M GENMASK(31, 16)
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_REDIR_ENA(x) ((x) & GENMASK(15, 0))
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_REDIR_ENA_M GENMASK(15, 0)
+
+#define ANA_PORT_CPU_FWD_CCM_CFG_GSZ 0x100
+
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA_M GENMASK(31, 16)
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_REDIR_ENA(x) ((x) & GENMASK(15, 0))
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_REDIR_ENA_M GENMASK(15, 0)
+
+#define ANA_PORT_PORT_CFG_GSZ 0x100
+
+#define ANA_PORT_PORT_CFG_SRC_MIRROR_ENA BIT(15)
+#define ANA_PORT_PORT_CFG_LIMIT_DROP BIT(14)
+#define ANA_PORT_PORT_CFG_LIMIT_CPU BIT(13)
+#define ANA_PORT_PORT_CFG_LOCKED_PORTMOVE_DROP BIT(12)
+#define ANA_PORT_PORT_CFG_LOCKED_PORTMOVE_CPU BIT(11)
+#define ANA_PORT_PORT_CFG_LEARNDROP BIT(10)
+#define ANA_PORT_PORT_CFG_LEARNCPU BIT(9)
+#define ANA_PORT_PORT_CFG_LEARNAUTO BIT(8)
+#define ANA_PORT_PORT_CFG_LEARN_ENA BIT(7)
+#define ANA_PORT_PORT_CFG_RECV_ENA BIT(6)
+#define ANA_PORT_PORT_CFG_PORTID_VAL(x) (((x) << 2) & GENMASK(5, 2))
+#define ANA_PORT_PORT_CFG_PORTID_VAL_M GENMASK(5, 2)
+#define ANA_PORT_PORT_CFG_PORTID_VAL_X(x) (((x) & GENMASK(5, 2)) >> 2)
+#define ANA_PORT_PORT_CFG_USE_B_DOM_TBL BIT(1)
+#define ANA_PORT_PORT_CFG_LSR_MODE BIT(0)
+
+#define ANA_PORT_POL_CFG_GSZ 0x100
+
+#define ANA_PORT_POL_CFG_POL_CPU_REDIR_8021 BIT(19)
+#define ANA_PORT_POL_CFG_POL_CPU_REDIR_IP BIT(18)
+#define ANA_PORT_POL_CFG_PORT_POL_ENA BIT(17)
+#define ANA_PORT_POL_CFG_QUEUE_POL_ENA(x) (((x) << 9) & GENMASK(16, 9))
+#define ANA_PORT_POL_CFG_QUEUE_POL_ENA_M GENMASK(16, 9)
+#define ANA_PORT_POL_CFG_QUEUE_POL_ENA_X(x) (((x) & GENMASK(16, 9)) >> 9)
+#define ANA_PORT_POL_CFG_POL_ORDER(x) ((x) & GENMASK(8, 0))
+#define ANA_PORT_POL_CFG_POL_ORDER_M GENMASK(8, 0)
+
+#define ANA_PORT_PTP_CFG_GSZ 0x100
+
+#define ANA_PORT_PTP_CFG_PTP_BACKPLANE_MODE BIT(0)
+
+#define ANA_PORT_PTP_DLY1_CFG_GSZ 0x100
+
+#define ANA_PORT_PTP_DLY2_CFG_GSZ 0x100
+
+#define ANA_PORT_SFID_CFG_GSZ 0x100
+#define ANA_PORT_SFID_CFG_RSZ 0x4
+
+#define ANA_PORT_SFID_CFG_SFID_VALID BIT(8)
+#define ANA_PORT_SFID_CFG_SFID(x) ((x) & GENMASK(7, 0))
+#define ANA_PORT_SFID_CFG_SFID_M GENMASK(7, 0)
+
+#define ANA_PFC_PFC_CFG_GSZ 0x40
+
+#define ANA_PFC_PFC_CFG_RX_PFC_ENA(x) (((x) << 2) & GENMASK(9, 2))
+#define ANA_PFC_PFC_CFG_RX_PFC_ENA_M GENMASK(9, 2)
+#define ANA_PFC_PFC_CFG_RX_PFC_ENA_X(x) (((x) & GENMASK(9, 2)) >> 2)
+#define ANA_PFC_PFC_CFG_FC_LINK_SPEED(x) ((x) & GENMASK(1, 0))
+#define ANA_PFC_PFC_CFG_FC_LINK_SPEED_M GENMASK(1, 0)
+
+#define ANA_PFC_PFC_TIMER_GSZ 0x40
+#define ANA_PFC_PFC_TIMER_RSZ 0x4
+
+#define ANA_IPT_OAM_MEP_CFG_GSZ 0x8
+
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P(x) (((x) << 6) & GENMASK(10, 6))
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P_M GENMASK(10, 6)
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P_X(x) (((x) & GENMASK(10, 6)) >> 6)
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX(x) (((x) << 1) & GENMASK(5, 1))
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_M GENMASK(5, 1)
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_X(x) (((x) & GENMASK(5, 1)) >> 1)
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_ENA BIT(0)
+
+#define ANA_IPT_IPT_GSZ 0x8
+
+#define ANA_IPT_IPT_IPT_CFG(x) (((x) << 15) & GENMASK(16, 15))
+#define ANA_IPT_IPT_IPT_CFG_M GENMASK(16, 15)
+#define ANA_IPT_IPT_IPT_CFG_X(x) (((x) & GENMASK(16, 15)) >> 15)
+#define ANA_IPT_IPT_ISDX_P(x) (((x) << 7) & GENMASK(14, 7))
+#define ANA_IPT_IPT_ISDX_P_M GENMASK(14, 7)
+#define ANA_IPT_IPT_ISDX_P_X(x) (((x) & GENMASK(14, 7)) >> 7)
+#define ANA_IPT_IPT_PPT_IDX(x) ((x) & GENMASK(6, 0))
+#define ANA_IPT_IPT_PPT_IDX_M GENMASK(6, 0)
+
+#define ANA_PPT_PPT_RSZ 0x4
+
+#define ANA_FID_MAP_FID_MAP_RSZ 0x4
+
+#define ANA_FID_MAP_FID_MAP_FID_C_VAL(x) (((x) << 6) & GENMASK(11, 6))
+#define ANA_FID_MAP_FID_MAP_FID_C_VAL_M GENMASK(11, 6)
+#define ANA_FID_MAP_FID_MAP_FID_C_VAL_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define ANA_FID_MAP_FID_MAP_FID_B_VAL(x) ((x) & GENMASK(5, 0))
+#define ANA_FID_MAP_FID_MAP_FID_B_VAL_M GENMASK(5, 0)
+
+#define ANA_AGGR_CFG_AC_RND_ENA BIT(7)
+#define ANA_AGGR_CFG_AC_DMAC_ENA BIT(6)
+#define ANA_AGGR_CFG_AC_SMAC_ENA BIT(5)
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA BIT(4)
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA BIT(3)
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA BIT(2)
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA BIT(1)
+#define ANA_AGGR_CFG_AC_ISDX_ENA BIT(0)
+
+#define ANA_CPUQ_CFG_CPUQ_MLD(x) (((x) << 27) & GENMASK(29, 27))
+#define ANA_CPUQ_CFG_CPUQ_MLD_M GENMASK(29, 27)
+#define ANA_CPUQ_CFG_CPUQ_MLD_X(x) (((x) & GENMASK(29, 27)) >> 27)
+#define ANA_CPUQ_CFG_CPUQ_IGMP(x) (((x) << 24) & GENMASK(26, 24))
+#define ANA_CPUQ_CFG_CPUQ_IGMP_M GENMASK(26, 24)
+#define ANA_CPUQ_CFG_CPUQ_IGMP_X(x) (((x) & GENMASK(26, 24)) >> 24)
+#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(x) (((x) << 21) & GENMASK(23, 21))
+#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL_M GENMASK(23, 21)
+#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL_X(x) (((x) & GENMASK(23, 21)) >> 21)
+#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(x) (((x) << 18) & GENMASK(20, 18))
+#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE_M GENMASK(20, 18)
+#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE_X(x) (((x) & GENMASK(20, 18)) >> 18)
+#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(x) (((x) << 15) & GENMASK(17, 15))
+#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE_M GENMASK(17, 15)
+#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE_X(x) (((x) & GENMASK(17, 15)) >> 15)
+#define ANA_CPUQ_CFG_CPUQ_SRC_COPY(x) (((x) << 12) & GENMASK(14, 12))
+#define ANA_CPUQ_CFG_CPUQ_SRC_COPY_M GENMASK(14, 12)
+#define ANA_CPUQ_CFG_CPUQ_SRC_COPY_X(x) (((x) & GENMASK(14, 12)) >> 12)
+#define ANA_CPUQ_CFG_CPUQ_MAC_COPY(x) (((x) << 9) & GENMASK(11, 9))
+#define ANA_CPUQ_CFG_CPUQ_MAC_COPY_M GENMASK(11, 9)
+#define ANA_CPUQ_CFG_CPUQ_MAC_COPY_X(x) (((x) & GENMASK(11, 9)) >> 9)
+#define ANA_CPUQ_CFG_CPUQ_LRN(x) (((x) << 6) & GENMASK(8, 6))
+#define ANA_CPUQ_CFG_CPUQ_LRN_M GENMASK(8, 6)
+#define ANA_CPUQ_CFG_CPUQ_LRN_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define ANA_CPUQ_CFG_CPUQ_MIRROR(x) (((x) << 3) & GENMASK(5, 3))
+#define ANA_CPUQ_CFG_CPUQ_MIRROR_M GENMASK(5, 3)
+#define ANA_CPUQ_CFG_CPUQ_MIRROR_X(x) (((x) & GENMASK(5, 3)) >> 3)
+#define ANA_CPUQ_CFG_CPUQ_SFLOW(x) ((x) & GENMASK(2, 0))
+#define ANA_CPUQ_CFG_CPUQ_SFLOW_M GENMASK(2, 0)
+
+#define ANA_CPUQ_8021_CFG_RSZ 0x4
+
+#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(x) (((x) << 6) & GENMASK(8, 6))
+#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL_M GENMASK(8, 6)
+#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(x) (((x) << 3) & GENMASK(5, 3))
+#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL_M GENMASK(5, 3)
+#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL_X(x) (((x) & GENMASK(5, 3)) >> 3)
+#define ANA_CPUQ_8021_CFG_CPUQ_CCM_VAL(x) ((x) & GENMASK(2, 0))
+#define ANA_CPUQ_8021_CFG_CPUQ_CCM_VAL_M GENMASK(2, 0)
+
+#define ANA_DSCP_CFG_RSZ 0x4
+
+#define ANA_DSCP_CFG_DP_DSCP_VAL BIT(11)
+#define ANA_DSCP_CFG_QOS_DSCP_VAL(x) (((x) << 8) & GENMASK(10, 8))
+#define ANA_DSCP_CFG_QOS_DSCP_VAL_M GENMASK(10, 8)
+#define ANA_DSCP_CFG_QOS_DSCP_VAL_X(x) (((x) & GENMASK(10, 8)) >> 8)
+#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL(x) (((x) << 2) & GENMASK(7, 2))
+#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_M GENMASK(7, 2)
+#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_X(x) (((x) & GENMASK(7, 2)) >> 2)
+#define ANA_DSCP_CFG_DSCP_TRUST_ENA BIT(1)
+#define ANA_DSCP_CFG_DSCP_REWR_ENA BIT(0)
+
+#define ANA_DSCP_REWR_CFG_RSZ 0x4
+
+#define ANA_VCAP_RNG_TYPE_CFG_RSZ 0x4
+
+#define ANA_VCAP_RNG_VAL_CFG_RSZ 0x4
+
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL(x) (((x) << 16) & GENMASK(31, 16))
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL_M GENMASK(31, 16)
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MAX_VAL(x) ((x) & GENMASK(15, 0))
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MAX_VAL_M GENMASK(15, 0)
+
+#define ANA_VRAP_CFG_VRAP_VLAN_AWARE_ENA BIT(12)
+#define ANA_VRAP_CFG_VRAP_VID(x) ((x) & GENMASK(11, 0))
+#define ANA_VRAP_CFG_VRAP_VID_M GENMASK(11, 0)
+
+#define ANA_DISCARD_CFG_DROP_TAGGING_ISDX0 BIT(3)
+#define ANA_DISCARD_CFG_DROP_CTRLPROT_ISDX0 BIT(2)
+#define ANA_DISCARD_CFG_DROP_TAGGING_S2_ENA BIT(1)
+#define ANA_DISCARD_CFG_DROP_CTRLPROT_S2_ENA BIT(0)
+
+#define ANA_FID_CFG_VID_MC_ENA BIT(0)
+
+#define ANA_POL_PIR_CFG_GSZ 0x20
+
+#define ANA_POL_PIR_CFG_PIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
+#define ANA_POL_PIR_CFG_PIR_RATE_M GENMASK(20, 6)
+#define ANA_POL_PIR_CFG_PIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
+#define ANA_POL_PIR_CFG_PIR_BURST(x) ((x) & GENMASK(5, 0))
+#define ANA_POL_PIR_CFG_PIR_BURST_M GENMASK(5, 0)
+
+#define ANA_POL_CIR_CFG_GSZ 0x20
+
+#define ANA_POL_CIR_CFG_CIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
+#define ANA_POL_CIR_CFG_CIR_RATE_M GENMASK(20, 6)
+#define ANA_POL_CIR_CFG_CIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
+#define ANA_POL_CIR_CFG_CIR_BURST(x) ((x) & GENMASK(5, 0))
+#define ANA_POL_CIR_CFG_CIR_BURST_M GENMASK(5, 0)
+
+#define ANA_POL_MODE_CFG_GSZ 0x20
+
+#define ANA_POL_MODE_CFG_IPG_SIZE(x) (((x) << 5) & GENMASK(9, 5))
+#define ANA_POL_MODE_CFG_IPG_SIZE_M GENMASK(9, 5)
+#define ANA_POL_MODE_CFG_IPG_SIZE_X(x) (((x) & GENMASK(9, 5)) >> 5)
+#define ANA_POL_MODE_CFG_FRM_MODE(x) (((x) << 3) & GENMASK(4, 3))
+#define ANA_POL_MODE_CFG_FRM_MODE_M GENMASK(4, 3)
+#define ANA_POL_MODE_CFG_FRM_MODE_X(x) (((x) & GENMASK(4, 3)) >> 3)
+#define ANA_POL_MODE_CFG_DLB_COUPLED BIT(2)
+#define ANA_POL_MODE_CFG_CIR_ENA BIT(1)
+#define ANA_POL_MODE_CFG_OVERSHOOT_ENA BIT(0)
+
+#define ANA_POL_PIR_STATE_GSZ 0x20
+
+#define ANA_POL_CIR_STATE_GSZ 0x20
+
+#define ANA_POL_STATE_GSZ 0x20
+
+#define ANA_POL_FLOWC_RSZ 0x4
+
+#define ANA_POL_FLOWC_POL_FLOWC BIT(0)
+
+#define ANA_POL_HYST_POL_FC_HYST(x) (((x) << 4) & GENMASK(9, 4))
+#define ANA_POL_HYST_POL_FC_HYST_M GENMASK(9, 4)
+#define ANA_POL_HYST_POL_FC_HYST_X(x) (((x) & GENMASK(9, 4)) >> 4)
+#define ANA_POL_HYST_POL_STOP_HYST(x) ((x) & GENMASK(3, 0))
+#define ANA_POL_HYST_POL_STOP_HYST_M GENMASK(3, 0)
+
+#define ANA_POL_MISC_CFG_POL_CLOSE_ALL BIT(1)
+#define ANA_POL_MISC_CFG_POL_LEAK_DIS BIT(0)
+
+#endif
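
The ANA macros above follow the three-name convention used throughout these Ocelot headers: FIELD(x) shifts and masks a value into position, FIELD_M is the in-place mask, and FIELD_X(x) pulls the field back out of a raw register word. A minimal sketch of that pattern, using the port VLAN PCP field purely as an illustration (the helper itself is hypothetical and not part of the header):

#include <linux/types.h>
#include <linux/bits.h>

/* Return an updated ANA_PORT_VLAN_CFG value carrying a new default PCP. */
static inline u32 example_set_default_pcp(u32 vlan_cfg, u8 pcp)
{
	/* _X extracts the current 3-bit PCP field (bits 14:12). */
	u8 old_pcp = ANA_PORT_VLAN_CFG_VLAN_PCP_X(vlan_cfg);

	if (old_pcp == pcp)
		return vlan_cfg;

	vlan_cfg &= ~ANA_PORT_VLAN_CFG_VLAN_PCP_M;	/* clear the field */
	vlan_cfg |= ANA_PORT_VLAN_CFG_VLAN_PCP(pcp);	/* pack the new value */
	return vlan_cfg;
}
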
diff --git a/include/soc/mscc/ocelot_dev.h b/include/soc/mscc/ocelot_dev.h
new file mode 100644
index 000000000000..0a50d53bbd3f
--- /dev/null
+++ b/include/soc/mscc/ocelot_dev.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_DEV_H_
+#define _MSCC_OCELOT_DEV_H_
+
+#define DEV_CLOCK_CFG 0x0
+
+#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7)
+#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6)
+#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5)
+#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4)
+#define DEV_CLOCK_CFG_PORT_RST BIT(3)
+#define DEV_CLOCK_CFG_PHY_RST BIT(2)
+#define DEV_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0))
+#define DEV_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0)
+
+#define DEV_PORT_MISC 0x4
+
+#define DEV_PORT_MISC_FWD_ERROR_ENA BIT(4)
+#define DEV_PORT_MISC_FWD_PAUSE_ENA BIT(3)
+#define DEV_PORT_MISC_FWD_CTRL_ENA BIT(2)
+#define DEV_PORT_MISC_DEV_LOOP_ENA BIT(1)
+#define DEV_PORT_MISC_HDX_FAST_DIS BIT(0)
+
+#define DEV_EVENTS 0x8
+
+#define DEV_EEE_CFG 0xc
+
+#define DEV_EEE_CFG_EEE_ENA BIT(22)
+#define DEV_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15))
+#define DEV_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15)
+#define DEV_EEE_CFG_EEE_TIMER_AGE_X(x) (((x) & GENMASK(21, 15)) >> 15)
+#define DEV_EEE_CFG_EEE_TIMER_WAKEUP(x) (((x) << 8) & GENMASK(14, 8))
+#define DEV_EEE_CFG_EEE_TIMER_WAKEUP_M GENMASK(14, 8)
+#define DEV_EEE_CFG_EEE_TIMER_WAKEUP_X(x) (((x) & GENMASK(14, 8)) >> 8)
+#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF(x) (((x) << 1) & GENMASK(7, 1))
+#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_M GENMASK(7, 1)
+#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1)
+#define DEV_EEE_CFG_PORT_LPI BIT(0)
+
+#define DEV_RX_PATH_DELAY 0x10
+
+#define DEV_TX_PATH_DELAY 0x14
+
+#define DEV_PTP_PREDICT_CFG 0x18
+
+#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG(x) (((x) << 4) & GENMASK(11, 4))
+#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_M GENMASK(11, 4)
+#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_X(x) (((x) & GENMASK(11, 4)) >> 4)
+#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG(x) ((x) & GENMASK(3, 0))
+#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG_M GENMASK(3, 0)
+
+#define DEV_MAC_ENA_CFG 0x1c
+
+#define DEV_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV_MAC_ENA_CFG_TX_ENA BIT(0)
+
+#define DEV_MAC_MODE_CFG 0x20
+
+#define DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8)
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
+#define DEV_MAC_MODE_CFG_FDX_ENA BIT(0)
+
+#define DEV_MAC_MAXLEN_CFG 0x24
+
+#define DEV_MAC_TAGS_CFG 0x28
+
+#define DEV_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16))
+#define DEV_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16)
+#define DEV_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2)
+#define DEV_MAC_TAGS_CFG_PB_ENA BIT(1)
+#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
+
+#define DEV_MAC_ADV_CHK_CFG 0x2c
+
+#define DEV_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0)
+
+#define DEV_MAC_IFG_CFG 0x30
+
+#define DEV_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
+#define DEV_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16)
+#define DEV_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8))
+#define DEV_MAC_IFG_CFG_TX_IFG_M GENMASK(12, 8)
+#define DEV_MAC_IFG_CFG_TX_IFG_X(x) (((x) & GENMASK(12, 8)) >> 8)
+#define DEV_MAC_IFG_CFG_RX_IFG2(x) (((x) << 4) & GENMASK(7, 4))
+#define DEV_MAC_IFG_CFG_RX_IFG2_M GENMASK(7, 4)
+#define DEV_MAC_IFG_CFG_RX_IFG2_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define DEV_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0))
+#define DEV_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0)
+
+#define DEV_MAC_HDX_CFG 0x34
+
+#define DEV_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26)
+#define DEV_MAC_HDX_CFG_OB_ENA BIT(25)
+#define DEV_MAC_HDX_CFG_WEXC_DIS BIT(24)
+#define DEV_MAC_HDX_CFG_SEED(x) (((x) << 16) & GENMASK(23, 16))
+#define DEV_MAC_HDX_CFG_SEED_M GENMASK(23, 16)
+#define DEV_MAC_HDX_CFG_SEED_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12)
+#define DEV_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8)
+#define DEV_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0))
+#define DEV_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0)
+
+#define DEV_MAC_DBG_CFG 0x38
+
+#define DEV_MAC_DBG_CFG_TBI_MODE BIT(4)
+#define DEV_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0)
+
+#define DEV_MAC_FC_MAC_LOW_CFG 0x3c
+
+#define DEV_MAC_FC_MAC_HIGH_CFG 0x40
+
+#define DEV_MAC_STICKY 0x44
+
+#define DEV_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9)
+#define DEV_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8)
+#define DEV_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7)
+#define DEV_MAC_STICKY_RX_CARRIER_EXT_ERR_STICKY BIT(6)
+#define DEV_MAC_STICKY_RX_JUNK_STICKY BIT(5)
+#define DEV_MAC_STICKY_TX_RETRANSMIT_STICKY BIT(4)
+#define DEV_MAC_STICKY_TX_JAM_STICKY BIT(3)
+#define DEV_MAC_STICKY_TX_FIFO_OFLW_STICKY BIT(2)
+#define DEV_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1)
+#define DEV_MAC_STICKY_TX_ABORT_STICKY BIT(0)
+
+#define PCS1G_CFG 0x48
+
+#define PCS1G_CFG_LINK_STATUS_TYPE BIT(4)
+#define PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1)
+#define PCS1G_CFG_PCS_ENA BIT(0)
+
+#define PCS1G_MODE_CFG 0x4c
+
+#define PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4)
+#define PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0)
+
+#define PCS1G_SD_CFG 0x50
+
+#define PCS1G_SD_CFG_SD_SEL BIT(8)
+#define PCS1G_SD_CFG_SD_POL BIT(4)
+#define PCS1G_SD_CFG_SD_ENA BIT(0)
+
+#define PCS1G_ANEG_CFG 0x54
+
+#define PCS1G_ANEG_CFG_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16))
+#define PCS1G_ANEG_CFG_ADV_ABILITY_M GENMASK(31, 16)
+#define PCS1G_ANEG_CFG_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8)
+#define PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1)
+#define PCS1G_ANEG_CFG_ANEG_ENA BIT(0)
+
+#define PCS1G_ANEG_NP_CFG 0x58
+
+#define PCS1G_ANEG_NP_CFG_NP_TX(x) (((x) << 16) & GENMASK(31, 16))
+#define PCS1G_ANEG_NP_CFG_NP_TX_M GENMASK(31, 16)
+#define PCS1G_ANEG_NP_CFG_NP_TX_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define PCS1G_ANEG_NP_CFG_NP_LOADED_ONE_SHOT BIT(0)
+
+#define PCS1G_LB_CFG 0x5c
+
+#define PCS1G_LB_CFG_RA_ENA BIT(4)
+#define PCS1G_LB_CFG_GMII_PHY_LB_ENA BIT(1)
+#define PCS1G_LB_CFG_TBI_HOST_LB_ENA BIT(0)
+
+#define PCS1G_DBG_CFG 0x60
+
+#define PCS1G_DBG_CFG_UDLT BIT(0)
+
+#define PCS1G_CDET_CFG 0x64
+
+#define PCS1G_CDET_CFG_CDET_ENA BIT(0)
+
+#define PCS1G_ANEG_STATUS 0x68
+
+#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16))
+#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_M GENMASK(31, 16)
+#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define PCS1G_ANEG_STATUS_PR BIT(4)
+#define PCS1G_ANEG_STATUS_PAGE_RX_STICKY BIT(3)
+#define PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0)
+
+#define PCS1G_ANEG_NP_STATUS 0x6c
+
+#define PCS1G_LINK_STATUS 0x70
+
+#define PCS1G_LINK_STATUS_DELAY_VAR(x) (((x) << 12) & GENMASK(15, 12))
+#define PCS1G_LINK_STATUS_DELAY_VAR_M GENMASK(15, 12)
+#define PCS1G_LINK_STATUS_DELAY_VAR_X(x) (((x) & GENMASK(15, 12)) >> 12)
+#define PCS1G_LINK_STATUS_SIGNAL_DETECT BIT(8)
+#define PCS1G_LINK_STATUS_LINK_STATUS BIT(4)
+#define PCS1G_LINK_STATUS_SYNC_STATUS BIT(0)
+
+#define PCS1G_LINK_DOWN_CNT 0x74
+
+#define PCS1G_STICKY 0x78
+
+#define PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
+#define PCS1G_STICKY_OUT_OF_SYNC_STICKY BIT(0)
+
+#define PCS1G_DEBUG_STATUS 0x7c
+
+#define PCS1G_LPI_CFG 0x80
+
+#define PCS1G_LPI_CFG_QSGMII_MS_SEL BIT(20)
+#define PCS1G_LPI_CFG_RX_LPI_OUT_DIS BIT(17)
+#define PCS1G_LPI_CFG_LPI_TESTMODE BIT(16)
+#define PCS1G_LPI_CFG_LPI_RX_WTIM(x) (((x) << 4) & GENMASK(5, 4))
+#define PCS1G_LPI_CFG_LPI_RX_WTIM_M GENMASK(5, 4)
+#define PCS1G_LPI_CFG_LPI_RX_WTIM_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define PCS1G_LPI_CFG_TX_ASSERT_LPIDLE BIT(0)
+
+#define PCS1G_LPI_WAKE_ERROR_CNT 0x84
+
+#define PCS1G_LPI_STATUS 0x88
+
+#define PCS1G_LPI_STATUS_RX_LPI_FAIL BIT(16)
+#define PCS1G_LPI_STATUS_RX_LPI_EVENT_STICKY BIT(12)
+#define PCS1G_LPI_STATUS_RX_QUIET BIT(9)
+#define PCS1G_LPI_STATUS_RX_LPI_MODE BIT(8)
+#define PCS1G_LPI_STATUS_TX_LPI_EVENT_STICKY BIT(4)
+#define PCS1G_LPI_STATUS_TX_QUIET BIT(1)
+#define PCS1G_LPI_STATUS_TX_LPI_MODE BIT(0)
+
+#define PCS1G_TSTPAT_MODE_CFG 0x8c
+
+#define PCS1G_TSTPAT_STATUS 0x90
+
+#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT(x) (((x) << 8) & GENMASK(15, 8))
+#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_M GENMASK(15, 8)
+#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define PCS1G_TSTPAT_STATUS_JTP_ERR BIT(4)
+#define PCS1G_TSTPAT_STATUS_JTP_LOCK BIT(0)
+
+#define DEV_PCS_FX100_CFG 0x94
+
+#define DEV_PCS_FX100_CFG_SD_SEL BIT(26)
+#define DEV_PCS_FX100_CFG_SD_POL BIT(25)
+#define DEV_PCS_FX100_CFG_SD_ENA BIT(24)
+#define DEV_PCS_FX100_CFG_LOOPBACK_ENA BIT(20)
+#define DEV_PCS_FX100_CFG_SWAP_MII_ENA BIT(16)
+#define DEV_PCS_FX100_CFG_RXBITSEL(x) (((x) << 12) & GENMASK(15, 12))
+#define DEV_PCS_FX100_CFG_RXBITSEL_M GENMASK(15, 12)
+#define DEV_PCS_FX100_CFG_RXBITSEL_X(x) (((x) & GENMASK(15, 12)) >> 12)
+#define DEV_PCS_FX100_CFG_SIGDET_CFG(x) (((x) << 9) & GENMASK(10, 9))
+#define DEV_PCS_FX100_CFG_SIGDET_CFG_M GENMASK(10, 9)
+#define DEV_PCS_FX100_CFG_SIGDET_CFG_X(x) (((x) & GENMASK(10, 9)) >> 9)
+#define DEV_PCS_FX100_CFG_LINKHYST_TM_ENA BIT(8)
+#define DEV_PCS_FX100_CFG_LINKHYSTTIMER(x) (((x) << 4) & GENMASK(7, 4))
+#define DEV_PCS_FX100_CFG_LINKHYSTTIMER_M GENMASK(7, 4)
+#define DEV_PCS_FX100_CFG_LINKHYSTTIMER_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define DEV_PCS_FX100_CFG_UNIDIR_MODE_ENA BIT(3)
+#define DEV_PCS_FX100_CFG_FEFCHK_ENA BIT(2)
+#define DEV_PCS_FX100_CFG_FEFGEN_ENA BIT(1)
+#define DEV_PCS_FX100_CFG_PCS_ENA BIT(0)
+
+#define DEV_PCS_FX100_STATUS 0x98
+
+#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP(x) (((x) << 8) & GENMASK(11, 8))
+#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_M GENMASK(11, 8)
+#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_X(x) (((x) & GENMASK(11, 8)) >> 8)
+#define DEV_PCS_FX100_STATUS_PCS_ERROR_STICKY BIT(7)
+#define DEV_PCS_FX100_STATUS_FEF_FOUND_STICKY BIT(6)
+#define DEV_PCS_FX100_STATUS_SSD_ERROR_STICKY BIT(5)
+#define DEV_PCS_FX100_STATUS_SYNC_LOST_STICKY BIT(4)
+#define DEV_PCS_FX100_STATUS_FEF_STATUS BIT(2)
+#define DEV_PCS_FX100_STATUS_SIGNAL_DETECT BIT(1)
+#define DEV_PCS_FX100_STATUS_SYNC_STATUS BIT(0)
+
+#endif
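
Unlike the ANA and QSYS headers, the DEV header above also carries the register offsets themselves (DEV_CLOCK_CFG at 0x0, DEV_MAC_ENA_CFG at 0x1c, and so on), so once the per-port base address is known a plain MMIO accessor reaches them directly. A hedged sketch, assuming base is the ioremapped base of one DEV instance (obtaining that base is driver-specific and not shown here):

#include <linux/io.h>
#include <linux/bits.h>

/* Enable the MAC in both directions and program the 2-bit link-speed
 * field of DEV_CLOCK_CFG for one port; illustration only.
 */
static void example_dev_mac_enable(void __iomem *base, u32 speed)
{
	u32 val;

	writel(DEV_MAC_ENA_CFG_RX_ENA | DEV_MAC_ENA_CFG_TX_ENA,
	       base + DEV_MAC_ENA_CFG);

	val = readl(base + DEV_CLOCK_CFG);
	val &= ~DEV_CLOCK_CFG_LINK_SPEED_M;	/* clear bits 1:0 */
	val |= DEV_CLOCK_CFG_LINK_SPEED(speed);	/* pack the new speed code */
	writel(val, base + DEV_CLOCK_CFG);
}
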
diff --git a/include/soc/mscc/ocelot_qsys.h b/include/soc/mscc/ocelot_qsys.h
new file mode 100644
index 000000000000..d8c63aa761be
--- /dev/null
+++ b/include/soc/mscc/ocelot_qsys.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_QSYS_H_
+#define _MSCC_OCELOT_QSYS_H_
+
+#define QSYS_PORT_MODE_RSZ 0x4
+
+#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1)
+#define QSYS_PORT_MODE_DEQUEUE_LATE BIT(0)
+
+#define QSYS_SWITCH_PORT_MODE_RSZ 0x4
+
+#define QSYS_SWITCH_PORT_MODE_PORT_ENA BIT(14)
+#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(x) (((x) << 11) & GENMASK(13, 11))
+#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_M GENMASK(13, 11)
+#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_X(x) (((x) & GENMASK(13, 11)) >> 11)
+#define QSYS_SWITCH_PORT_MODE_YEL_RSRVD BIT(10)
+#define QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE BIT(9)
+#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA(x) (((x) << 1) & GENMASK(8, 1))
+#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_M GENMASK(8, 1)
+#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_X(x) (((x) & GENMASK(8, 1)) >> 1)
+#define QSYS_SWITCH_PORT_MODE_TX_PFC_MODE BIT(0)
+
+#define QSYS_STAT_CNT_CFG_TX_GREEN_CNT_MODE BIT(5)
+#define QSYS_STAT_CNT_CFG_TX_YELLOW_CNT_MODE BIT(4)
+#define QSYS_STAT_CNT_CFG_DROP_GREEN_CNT_MODE BIT(3)
+#define QSYS_STAT_CNT_CFG_DROP_YELLOW_CNT_MODE BIT(2)
+#define QSYS_STAT_CNT_CFG_DROP_COUNT_ONCE BIT(1)
+#define QSYS_STAT_CNT_CFG_DROP_COUNT_EGRESS BIT(0)
+
+#define QSYS_EEE_CFG_RSZ 0x4
+
+#define QSYS_EEE_THRES_EEE_HIGH_BYTES(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_EEE_THRES_EEE_HIGH_BYTES_M GENMASK(15, 8)
+#define QSYS_EEE_THRES_EEE_HIGH_BYTES_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define QSYS_EEE_THRES_EEE_HIGH_FRAMES(x) ((x) & GENMASK(7, 0))
+#define QSYS_EEE_THRES_EEE_HIGH_FRAMES_M GENMASK(7, 0)
+
+#define QSYS_SW_STATUS_RSZ 0x4
+
+#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT(x) (((x) << 8) & GENMASK(12, 8))
+#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_M GENMASK(12, 8)
+#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_X(x) (((x) & GENMASK(12, 8)) >> 8)
+#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK(x) ((x) & GENMASK(7, 0))
+#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M GENMASK(7, 0)
+
+#define QSYS_QMAP_GSZ 0x4
+
+#define QSYS_QMAP_SE_BASE(x) (((x) << 5) & GENMASK(12, 5))
+#define QSYS_QMAP_SE_BASE_M GENMASK(12, 5)
+#define QSYS_QMAP_SE_BASE_X(x) (((x) & GENMASK(12, 5)) >> 5)
+#define QSYS_QMAP_SE_IDX_SEL(x) (((x) << 2) & GENMASK(4, 2))
+#define QSYS_QMAP_SE_IDX_SEL_M GENMASK(4, 2)
+#define QSYS_QMAP_SE_IDX_SEL_X(x) (((x) & GENMASK(4, 2)) >> 2)
+#define QSYS_QMAP_SE_INP_SEL(x) ((x) & GENMASK(1, 0))
+#define QSYS_QMAP_SE_INP_SEL_M GENMASK(1, 0)
+
+#define QSYS_ISDX_SGRP_GSZ 0x4
+
+#define QSYS_TIMED_FRAME_ENTRY_GSZ 0x4
+
+#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT(x) (((x) << 9) & GENMASK(18, 9))
+#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT_M GENMASK(18, 9)
+#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT_X(x) (((x) & GENMASK(18, 9)) >> 9)
+#define QSYS_TFRM_MISC_TIMED_CANCEL_1SHOT BIT(8)
+#define QSYS_TFRM_MISC_TIMED_SLOT_MODE_MC BIT(7)
+#define QSYS_TFRM_MISC_TIMED_ENTRY_FAST_CNT(x) ((x) & GENMASK(6, 0))
+#define QSYS_TFRM_MISC_TIMED_ENTRY_FAST_CNT_M GENMASK(6, 0)
+
+#define QSYS_RED_PROFILE_RSZ 0x4
+
+#define QSYS_RED_PROFILE_WM_RED_LOW(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_RED_PROFILE_WM_RED_LOW_M GENMASK(15, 8)
+#define QSYS_RED_PROFILE_WM_RED_LOW_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define QSYS_RED_PROFILE_WM_RED_HIGH(x) ((x) & GENMASK(7, 0))
+#define QSYS_RED_PROFILE_WM_RED_HIGH_M GENMASK(7, 0)
+
+#define QSYS_RES_CFG_GSZ 0x8
+
+#define QSYS_RES_STAT_GSZ 0x8
+
+#define QSYS_RES_STAT_INUSE(x) (((x) << 12) & GENMASK(23, 12))
+#define QSYS_RES_STAT_INUSE_M GENMASK(23, 12)
+#define QSYS_RES_STAT_INUSE_X(x) (((x) & GENMASK(23, 12)) >> 12)
+#define QSYS_RES_STAT_MAXUSE(x) ((x) & GENMASK(11, 0))
+#define QSYS_RES_STAT_MAXUSE_M GENMASK(11, 0)
+
+#define QSYS_EVENTS_CORE_EV_FDC(x) (((x) << 2) & GENMASK(4, 2))
+#define QSYS_EVENTS_CORE_EV_FDC_M GENMASK(4, 2)
+#define QSYS_EVENTS_CORE_EV_FDC_X(x) (((x) & GENMASK(4, 2)) >> 2)
+#define QSYS_EVENTS_CORE_EV_FRD(x) ((x) & GENMASK(1, 0))
+#define QSYS_EVENTS_CORE_EV_FRD_M GENMASK(1, 0)
+
+#define QSYS_QMAXSDU_CFG_0_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_1_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_2_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_3_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_4_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_5_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_6_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_7_RSZ 0x4
+
+#define QSYS_PREEMPTION_CFG_RSZ 0x4
+
+#define QSYS_PREEMPTION_CFG_P_QUEUES(x) ((x) & GENMASK(7, 0))
+#define QSYS_PREEMPTION_CFG_P_QUEUES_M GENMASK(7, 0)
+#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE(x) (((x) << 8) & GENMASK(9, 8))
+#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_M GENMASK(9, 8)
+#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_X(x) (((x) & GENMASK(9, 8)) >> 8)
+#define QSYS_PREEMPTION_CFG_STRICT_IPG(x) (((x) << 12) & GENMASK(13, 12))
+#define QSYS_PREEMPTION_CFG_STRICT_IPG_M GENMASK(13, 12)
+#define QSYS_PREEMPTION_CFG_STRICT_IPG_X(x) (((x) & GENMASK(13, 12)) >> 12)
+#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE(x) (((x) << 16) & GENMASK(31, 16))
+#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE_M GENMASK(31, 16)
+#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE_X(x) (((x) & GENMASK(31, 16)) >> 16)
+
+#define QSYS_CIR_CFG_GSZ 0x80
+
+#define QSYS_CIR_CFG_CIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
+#define QSYS_CIR_CFG_CIR_RATE_M GENMASK(20, 6)
+#define QSYS_CIR_CFG_CIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
+#define QSYS_CIR_CFG_CIR_BURST(x) ((x) & GENMASK(5, 0))
+#define QSYS_CIR_CFG_CIR_BURST_M GENMASK(5, 0)
+
+#define QSYS_EIR_CFG_GSZ 0x80
+
+#define QSYS_EIR_CFG_EIR_RATE(x) (((x) << 7) & GENMASK(21, 7))
+#define QSYS_EIR_CFG_EIR_RATE_M GENMASK(21, 7)
+#define QSYS_EIR_CFG_EIR_RATE_X(x) (((x) & GENMASK(21, 7)) >> 7)
+#define QSYS_EIR_CFG_EIR_BURST(x) (((x) << 1) & GENMASK(6, 1))
+#define QSYS_EIR_CFG_EIR_BURST_M GENMASK(6, 1)
+#define QSYS_EIR_CFG_EIR_BURST_X(x) (((x) & GENMASK(6, 1)) >> 1)
+#define QSYS_EIR_CFG_EIR_MARK_ENA BIT(0)
+
+#define QSYS_SE_CFG_GSZ 0x80
+
+#define QSYS_SE_CFG_SE_DWRR_CNT(x) (((x) << 6) & GENMASK(9, 6))
+#define QSYS_SE_CFG_SE_DWRR_CNT_M GENMASK(9, 6)
+#define QSYS_SE_CFG_SE_DWRR_CNT_X(x) (((x) & GENMASK(9, 6)) >> 6)
+#define QSYS_SE_CFG_SE_RR_ENA BIT(5)
+#define QSYS_SE_CFG_SE_AVB_ENA BIT(4)
+#define QSYS_SE_CFG_SE_FRM_MODE(x) (((x) << 2) & GENMASK(3, 2))
+#define QSYS_SE_CFG_SE_FRM_MODE_M GENMASK(3, 2)
+#define QSYS_SE_CFG_SE_FRM_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define QSYS_SE_CFG_SE_EXC_ENA BIT(1)
+#define QSYS_SE_CFG_SE_EXC_FWD BIT(0)
+
+#define QSYS_SE_DWRR_CFG_GSZ 0x80
+#define QSYS_SE_DWRR_CFG_RSZ 0x4
+
+#define QSYS_SE_CONNECT_GSZ 0x80
+
+#define QSYS_SE_CONNECT_SE_OUTP_IDX(x) (((x) << 17) & GENMASK(24, 17))
+#define QSYS_SE_CONNECT_SE_OUTP_IDX_M GENMASK(24, 17)
+#define QSYS_SE_CONNECT_SE_OUTP_IDX_X(x) (((x) & GENMASK(24, 17)) >> 17)
+#define QSYS_SE_CONNECT_SE_INP_IDX(x) (((x) << 9) & GENMASK(16, 9))
+#define QSYS_SE_CONNECT_SE_INP_IDX_M GENMASK(16, 9)
+#define QSYS_SE_CONNECT_SE_INP_IDX_X(x) (((x) & GENMASK(16, 9)) >> 9)
+#define QSYS_SE_CONNECT_SE_OUTP_CON(x) (((x) << 5) & GENMASK(8, 5))
+#define QSYS_SE_CONNECT_SE_OUTP_CON_M GENMASK(8, 5)
+#define QSYS_SE_CONNECT_SE_OUTP_CON_X(x) (((x) & GENMASK(8, 5)) >> 5)
+#define QSYS_SE_CONNECT_SE_INP_CNT(x) (((x) << 1) & GENMASK(4, 1))
+#define QSYS_SE_CONNECT_SE_INP_CNT_M GENMASK(4, 1)
+#define QSYS_SE_CONNECT_SE_INP_CNT_X(x) (((x) & GENMASK(4, 1)) >> 1)
+#define QSYS_SE_CONNECT_SE_TERMINAL BIT(0)
+
+#define QSYS_SE_DLB_SENSE_GSZ 0x80
+
+#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO(x) (((x) << 11) & GENMASK(13, 11))
+#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_M GENMASK(13, 11)
+#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_X(x) (((x) & GENMASK(13, 11)) >> 11)
+#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT(x) (((x) << 7) & GENMASK(10, 7))
+#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_M GENMASK(10, 7)
+#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_X(x) (((x) & GENMASK(10, 7)) >> 7)
+#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT(x) (((x) << 3) & GENMASK(6, 3))
+#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_M GENMASK(6, 3)
+#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_X(x) (((x) & GENMASK(6, 3)) >> 3)
+#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_ENA BIT(2)
+#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_ENA BIT(1)
+#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_ENA BIT(0)
+
+#define QSYS_CIR_STATE_GSZ 0x80
+
+#define QSYS_CIR_STATE_CIR_LVL(x) (((x) << 4) & GENMASK(25, 4))
+#define QSYS_CIR_STATE_CIR_LVL_M GENMASK(25, 4)
+#define QSYS_CIR_STATE_CIR_LVL_X(x) (((x) & GENMASK(25, 4)) >> 4)
+#define QSYS_CIR_STATE_SHP_TIME(x) ((x) & GENMASK(3, 0))
+#define QSYS_CIR_STATE_SHP_TIME_M GENMASK(3, 0)
+
+#define QSYS_EIR_STATE_GSZ 0x80
+
+#define QSYS_SE_STATE_GSZ 0x80
+
+#define QSYS_SE_STATE_SE_OUTP_LVL(x) (((x) << 1) & GENMASK(2, 1))
+#define QSYS_SE_STATE_SE_OUTP_LVL_M GENMASK(2, 1)
+#define QSYS_SE_STATE_SE_OUTP_LVL_X(x) (((x) & GENMASK(2, 1)) >> 1)
+#define QSYS_SE_STATE_SE_WAS_YEL BIT(0)
+
+#define QSYS_HSCH_MISC_CFG_SE_CONNECT_VLD BIT(8)
+#define QSYS_HSCH_MISC_CFG_FRM_ADJ(x) (((x) << 3) & GENMASK(7, 3))
+#define QSYS_HSCH_MISC_CFG_FRM_ADJ_M GENMASK(7, 3)
+#define QSYS_HSCH_MISC_CFG_FRM_ADJ_X(x) (((x) & GENMASK(7, 3)) >> 3)
+#define QSYS_HSCH_MISC_CFG_LEAK_DIS BIT(2)
+#define QSYS_HSCH_MISC_CFG_QSHP_EXC_ENA BIT(1)
+#define QSYS_HSCH_MISC_CFG_PFC_BYP_UPD BIT(0)
+
+#define QSYS_TAG_CONFIG_RSZ 0x4
+
+#define QSYS_TAG_CONFIG_ENABLE BIT(0)
+#define QSYS_TAG_CONFIG_LINK_SPEED(x) (((x) << 4) & GENMASK(5, 4))
+#define QSYS_TAG_CONFIG_LINK_SPEED_M GENMASK(5, 4)
+#define QSYS_TAG_CONFIG_LINK_SPEED_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define QSYS_TAG_CONFIG_INIT_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_TAG_CONFIG_INIT_GATE_STATE_M GENMASK(15, 8)
+#define QSYS_TAG_CONFIG_INIT_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES(x) (((x) << 16) & GENMASK(23, 16))
+#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_M GENMASK(23, 16)
+#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_X(x) (((x) & GENMASK(23, 16)) >> 16)
+
+#define QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(x) ((x) & GENMASK(7, 0))
+#define QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M GENMASK(7, 0)
+#define QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q BIT(8)
+#define QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE BIT(16)
+
+#define QSYS_PORT_MAX_SDU_RSZ 0x4
+
+#define QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
+#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(31, 16))
+#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH_M GENMASK(31, 16)
+#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(31, 16)) >> 16)
+
+#define QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM(x) ((x) & GENMASK(5, 0))
+#define QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM_M GENMASK(5, 0)
+#define QSYS_GCL_CFG_REG_1_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_GCL_CFG_REG_1_GATE_STATE_M GENMASK(15, 8)
+#define QSYS_GCL_CFG_REG_1_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
+
+#define QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
+#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(31, 16))
+#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_M GENMASK(31, 16)
+#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(31, 16)) >> 16)
+
+#define QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB_M GENMASK(15, 0)
+#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE(x) (((x) << 16) & GENMASK(23, 16))
+#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_M GENMASK(23, 16)
+#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING BIT(24)
+
+#define QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM(x) ((x) & GENMASK(5, 0))
+#define QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM_M GENMASK(5, 0)
+#define QSYS_GCL_STATUS_REG_1_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_GCL_STATUS_REG_1_GATE_STATE_M GENMASK(15, 8)
+#define QSYS_GCL_STATUS_REG_1_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
+
+#endif
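
Several entries above are not field macros but replication strides: a _GSZ constant is the byte distance between instances of a register group, and _RSZ the distance between replicated registers within a group. In the Ocelot driver these are typically folded into the register offset as group_index * _GSZ + repl_index * _RSZ; a minimal sketch of that arithmetic (the base offset of QSYS_RES_STAT comes from the driver's register map, not from this header):

#include <linux/types.h>

/* Offset of the QSYS_RES_STAT instance for a given resource index,
 * relative to the driver-provided base offset of the register.
 */
static inline u32 example_res_stat_offset(u32 res_stat_base, u32 res_idx)
{
	return res_stat_base + res_idx * QSYS_RES_STAT_GSZ;
}

/* Once read, the 12-bit in-use counter is extracted with the _X macro. */
static inline u32 example_res_inuse(u32 res_stat_val)
{
	return QSYS_RES_STAT_INUSE_X(res_stat_val);
}
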
diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h
index 7475c7be165a..d4aac3436595 100644
--- a/include/trace/events/sctp.h
+++ b/include/trace/events/sctp.h
@@ -75,15 +75,6 @@ TRACE_EVENT(sctp_probe,
__entry->pathmtu = asoc->pathmtu;
__entry->rwnd = asoc->peer.rwnd;
__entry->unack_data = asoc->unack_data;
-
- if (trace_sctp_probe_path_enabled()) {
- struct sctp_transport *sp;
-
- list_for_each_entry(sp, &asoc->peer.transport_addr_list,
- transports) {
- trace_sctp_probe_path(sp, asoc);
- }
- }
),
TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d "
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index 51fe9f6719eb..a966d4b5ab37 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -19,7 +19,8 @@
#define inet_protocol_names \
EM(IPPROTO_TCP) \
EM(IPPROTO_DCCP) \
- EMe(IPPROTO_SCTP)
+ EM(IPPROTO_SCTP) \
+ EMe(IPPROTO_MPTCP)
#define tcp_state_names \
EM(TCP_ESTABLISHED) \
@@ -147,7 +148,7 @@ TRACE_EVENT(inet_sock_set_state,
__field(__u16, sport)
__field(__u16, dport)
__field(__u16, family)
- __field(__u8, protocol)
+ __field(__u16, protocol)
__array(__u8, saddr, 4)
__array(__u8, daddr, 4)
__array(__u8, saddr_v6, 16)
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index a7378bcd9928..b95d65e8c628 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -79,14 +79,26 @@ TRACE_EVENT(xdp_bulk_tx,
__entry->sent, __entry->drops, __entry->err)
);
+#ifndef __DEVMAP_OBJ_TYPE
+#define __DEVMAP_OBJ_TYPE
+struct _bpf_dtab_netdev {
+ struct net_device *dev;
+};
+#endif /* __DEVMAP_OBJ_TYPE */
+
+#define devmap_ifindex(tgt, map) \
+ (((map->map_type == BPF_MAP_TYPE_DEVMAP || \
+ map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)) ? \
+ ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex : 0)
+
DECLARE_EVENT_CLASS(xdp_redirect_template,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
- int to_ifindex, int err,
- const struct bpf_map *map, u32 map_index),
+ const void *tgt, int err,
+ const struct bpf_map *map, u32 index),
- TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
+ TP_ARGS(dev, xdp, tgt, err, map, index),
TP_STRUCT__entry(
__field(int, prog_id)
@@ -103,90 +115,65 @@ DECLARE_EVENT_CLASS(xdp_redirect_template,
__entry->act = XDP_REDIRECT;
__entry->ifindex = dev->ifindex;
__entry->err = err;
- __entry->to_ifindex = to_ifindex;
+ __entry->to_ifindex = map ? devmap_ifindex(tgt, map) :
+ index;
__entry->map_id = map ? map->id : 0;
- __entry->map_index = map_index;
+ __entry->map_index = map ? index : 0;
),
- TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d",
+ TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
+ " map_id=%d map_index=%d",
__entry->prog_id,
__print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
__entry->ifindex, __entry->to_ifindex,
- __entry->err)
+ __entry->err, __entry->map_id, __entry->map_index)
);
DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
- int to_ifindex, int err,
- const struct bpf_map *map, u32 map_index),
- TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
+ const void *tgt, int err,
+ const struct bpf_map *map, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map, index)
);
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
- int to_ifindex, int err,
- const struct bpf_map *map, u32 map_index),
- TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
+ const void *tgt, int err,
+ const struct bpf_map *map, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map, index)
);
#define _trace_xdp_redirect(dev, xdp, to) \
- trace_xdp_redirect(dev, xdp, to, 0, NULL, 0);
+ trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to);
#define _trace_xdp_redirect_err(dev, xdp, to, err) \
- trace_xdp_redirect_err(dev, xdp, to, err, NULL, 0);
+ trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to);
-DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map,
+#define _trace_xdp_redirect_map(dev, xdp, to, map, index) \
+ trace_xdp_redirect(dev, xdp, to, 0, map, index);
+
+#define _trace_xdp_redirect_map_err(dev, xdp, to, map, index, err) \
+ trace_xdp_redirect_err(dev, xdp, to, err, map, index);
+
+/* not used anymore, but kept around so as not to break old programs */
+DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
- int to_ifindex, int err,
- const struct bpf_map *map, u32 map_index),
- TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
- TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
- " map_id=%d map_index=%d",
- __entry->prog_id,
- __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
- __entry->ifindex, __entry->to_ifindex,
- __entry->err,
- __entry->map_id, __entry->map_index)
+ const void *tgt, int err,
+ const struct bpf_map *map, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map, index)
);
-DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map_err,
+DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
- int to_ifindex, int err,
- const struct bpf_map *map, u32 map_index),
- TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
- TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
- " map_id=%d map_index=%d",
- __entry->prog_id,
- __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
- __entry->ifindex, __entry->to_ifindex,
- __entry->err,
- __entry->map_id, __entry->map_index)
+ const void *tgt, int err,
+ const struct bpf_map *map, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map, index)
);
-#ifndef __DEVMAP_OBJ_TYPE
-#define __DEVMAP_OBJ_TYPE
-struct _bpf_dtab_netdev {
- struct net_device *dev;
-};
-#endif /* __DEVMAP_OBJ_TYPE */
-
-#define devmap_ifindex(fwd, map) \
- ((map->map_type == BPF_MAP_TYPE_DEVMAP || \
- map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) ? \
- ((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0)
-
-#define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx) \
- trace_xdp_redirect_map(dev, xdp, devmap_ifindex(fwd, map), \
- 0, map, idx)
-
-#define _trace_xdp_redirect_map_err(dev, xdp, fwd, map, idx, err) \
- trace_xdp_redirect_map_err(dev, xdp, devmap_ifindex(fwd, map), \
- err, map, idx)
-
TRACE_EVENT(xdp_cpumap_kthread,
TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
@@ -259,43 +246,38 @@ TRACE_EVENT(xdp_cpumap_enqueue,
TRACE_EVENT(xdp_devmap_xmit,
- TP_PROTO(const struct bpf_map *map, u32 map_index,
- int sent, int drops,
- const struct net_device *from_dev,
- const struct net_device *to_dev, int err),
+ TP_PROTO(const struct net_device *from_dev,
+ const struct net_device *to_dev,
+ int sent, int drops, int err),
- TP_ARGS(map, map_index, sent, drops, from_dev, to_dev, err),
+ TP_ARGS(from_dev, to_dev, sent, drops, err),
TP_STRUCT__entry(
- __field(int, map_id)
+ __field(int, from_ifindex)
__field(u32, act)
- __field(u32, map_index)
+ __field(int, to_ifindex)
__field(int, drops)
__field(int, sent)
- __field(int, from_ifindex)
- __field(int, to_ifindex)
__field(int, err)
),
TP_fast_assign(
- __entry->map_id = map->id;
+ __entry->from_ifindex = from_dev->ifindex;
__entry->act = XDP_REDIRECT;
- __entry->map_index = map_index;
+ __entry->to_ifindex = to_dev->ifindex;
__entry->drops = drops;
__entry->sent = sent;
- __entry->from_ifindex = from_dev->ifindex;
- __entry->to_ifindex = to_dev->ifindex;
__entry->err = err;
),
TP_printk("ndo_xdp_xmit"
- " map_id=%d map_index=%d action=%s"
+ " from_ifindex=%d to_ifindex=%d action=%s"
" sent=%d drops=%d"
- " from_ifindex=%d to_ifindex=%d err=%d",
- __entry->map_id, __entry->map_index,
+ " err=%d",
+ __entry->from_ifindex, __entry->to_ifindex,
__print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
__entry->sent, __entry->drops,
- __entry->from_ifindex, __entry->to_ifindex, __entry->err)
+ __entry->err)
);
/* Expect users already include <net/xdp.h>, but not xdp_priv.h */
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 3ad935527177..a534d71e689a 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -116,6 +116,7 @@
#define AUDIT_FANOTIFY 1331 /* Fanotify access decision */
#define AUDIT_TIME_INJOFFSET 1332 /* Timekeeping offset injected */
#define AUDIT_TIME_ADJNTPVAL 1333 /* NTP value adjustment */
+#define AUDIT_BPF 1334 /* BPF subsystem */
#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h
index 2a15f01c2243..0ae34c85ef9e 100644
--- a/include/uapi/linux/batadv_packet.h
+++ b/include/uapi/linux/batadv_packet.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) */
-/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2020 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*/
diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h
index 67f4636758af..617c180ff0c8 100644
--- a/include/uapi/linux/batman_adv.h
+++ b/include/uapi/linux/batman_adv.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: MIT */
-/* Copyright (C) 2016-2019 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2016-2020 B.A.T.M.A.N. contributors:
*
* Matthias Schiffer
*/
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index dbbcf0b02970..f1d74a2bd234 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -107,6 +107,10 @@ enum bpf_cmd {
BPF_MAP_LOOKUP_AND_DELETE_ELEM,
BPF_MAP_FREEZE,
BPF_BTF_GET_NEXT_ID,
+ BPF_MAP_LOOKUP_BATCH,
+ BPF_MAP_LOOKUP_AND_DELETE_BATCH,
+ BPF_MAP_UPDATE_BATCH,
+ BPF_MAP_DELETE_BATCH,
};
enum bpf_map_type {
@@ -136,6 +140,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_STACK,
BPF_MAP_TYPE_SK_STORAGE,
BPF_MAP_TYPE_DEVMAP_HASH,
+ BPF_MAP_TYPE_STRUCT_OPS,
};
/* Note that tracing related programs such as
@@ -174,6 +179,8 @@ enum bpf_prog_type {
BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
BPF_PROG_TYPE_CGROUP_SOCKOPT,
BPF_PROG_TYPE_TRACING,
+ BPF_PROG_TYPE_STRUCT_OPS,
+ BPF_PROG_TYPE_EXT,
};
enum bpf_attach_type {
@@ -231,6 +238,11 @@ enum bpf_attach_type {
* When children program makes decision (like picking TCP CA or sock bind)
* parent program has a chance to override it.
*
+ * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of
+ * programs for a cgroup. Though it's possible to replace an old program at
+ * any position by also specifying BPF_F_REPLACE flag and position itself in
+ * replace_bpf_fd attribute. Old program at this position will be released.
+ *
* A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
* A cgroup with NONE doesn't allow any programs in sub-cgroups.
* Ex1:
@@ -249,6 +261,7 @@ enum bpf_attach_type {
*/
#define BPF_F_ALLOW_OVERRIDE (1U << 0)
#define BPF_F_ALLOW_MULTI (1U << 1)
+#define BPF_F_REPLACE (1U << 2)
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
* verifier will perform strict alignment checking as if the kernel
@@ -351,7 +364,12 @@ enum bpf_attach_type {
/* Enable memory-mapping BPF map */
#define BPF_F_MMAPABLE (1U << 10)
-/* flags for BPF_PROG_QUERY */
+/* Flags for BPF_PROG_QUERY. */
+
+/* Query effective (directly attached + inherited from ancestor cgroups)
+ * programs that will be executed for events within a cgroup.
+ * attach_flags with this flag are returned only for directly attached programs.
+ */
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
enum bpf_stack_build_id_status {
@@ -391,6 +409,10 @@ union bpf_attr {
__u32 btf_fd; /* fd pointing to a BTF type data */
__u32 btf_key_type_id; /* BTF type_id of the key */
__u32 btf_value_type_id; /* BTF type_id of the value */
+ __u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel-
+ * struct stored as the
+ * map value
+ */
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -403,6 +425,23 @@ union bpf_attr {
__u64 flags;
};
+ struct { /* struct used by BPF_MAP_*_BATCH commands */
+ __aligned_u64 in_batch; /* start batch,
+ * NULL to start from beginning
+ */
+ __aligned_u64 out_batch; /* output: next start batch */
+ __aligned_u64 keys;
+ __aligned_u64 values;
+ __u32 count; /* input/output:
+ * input: # of key/value
+ * elements
+ * output: # of filled elements
+ */
+ __u32 map_fd;
+ __u64 elem_flags;
+ __u64 flags;
+ } batch;
+
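The new attr.batch block above is what drives the four BPF_MAP_*_BATCH commands. As a minimal userspace sketch of the raw bpf(2) interface (a real application would more likely use the libbpf wrappers), the loop below dumps a hash map with __u32 keys and __u64 values in chunks of 64 elements; map_fd, the chunk size and the key/value types are illustrative assumptions, and -ENOENT is treated as marking the final, possibly partial, batch.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int dump_map_batched(int map_fd)
{
	__u32 keys[64];
	__u64 vals[64];
	__u64 in_token = 0, out_token = 0;
	int first = 1, err;

	for (;;) {
		union bpf_attr attr;
		__u32 i;

		memset(&attr, 0, sizeof(attr));
		attr.batch.map_fd = map_fd;
		attr.batch.keys = (__u64)(unsigned long)keys;
		attr.batch.values = (__u64)(unsigned long)vals;
		attr.batch.count = 64;	/* in: room for 64, out: # filled */
		attr.batch.in_batch = first ? 0 : (__u64)(unsigned long)&in_token;
		attr.batch.out_batch = (__u64)(unsigned long)&out_token;

		err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
		if (err && errno != ENOENT)
			return -errno;

		for (i = 0; i < attr.batch.count; i++)
			printf("key %u -> value %llu\n", keys[i],
			       (unsigned long long)vals[i]);

		if (err)	/* ENOENT: nothing left after this batch */
			return 0;

		in_token = out_token;
		first = 0;
	}
}

BPF_MAP_LOOKUP_AND_DELETE_BATCH, BPF_MAP_UPDATE_BATCH and BPF_MAP_DELETE_BATCH take the same struct, with elem_flags/flags playing the same role as in the single-element commands.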
struct { /* anonymous struct used by BPF_PROG_LOAD command */
__u32 prog_type; /* one of enum bpf_prog_type */
__u32 insn_cnt;
@@ -442,6 +481,10 @@ union bpf_attr {
__u32 attach_bpf_fd; /* eBPF program to attach */
__u32 attach_type;
__u32 attach_flags;
+ __u32 replace_bpf_fd; /* previously attached eBPF
+ * program to replace if
+ * BPF_F_REPLACE is used
+ */
};
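Together with BPF_F_REPLACE, the new replace_bpf_fd field lets one entry in a cgroup's multi-program list be swapped without detaching the others. A hedged sketch of the raw bpf(2) call follows; the cgroup fd, the two program fds and the BPF_CGROUP_INET_EGRESS attach type are illustrative assumptions.

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Replace old_prog_fd with new_prog_fd in the cgroup's BPF_F_ALLOW_MULTI
 * list, keeping its position and leaving the other programs untouched. */
static int replace_cgroup_prog(int cgroup_fd, int old_prog_fd, int new_prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = cgroup_fd;
	attr.attach_bpf_fd = new_prog_fd;
	attr.replace_bpf_fd = old_prog_fd;
	attr.attach_type = BPF_CGROUP_INET_EGRESS;
	attr.attach_flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE;

	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}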
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
@@ -2693,7 +2736,8 @@ union bpf_attr {
*
* int bpf_send_signal(u32 sig)
* Description
- * Send signal *sig* to the current task.
+ * Send signal *sig* to the process of the current task.
+ * The signal may be delivered to any of this process's threads.
* Return
* 0 on success or successfully queued.
*
@@ -2821,6 +2865,33 @@ union bpf_attr {
* Return
* On success, the strictly positive length of the string, including
* the trailing NUL character. On error, a negative value.
+ *
+ * int bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
+ * Description
+ * Send out a tcp-ack. *tp* is the in-kernel struct tcp_sock.
+ * *rcv_nxt* is the ack_seq to be sent out.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_send_signal_thread(u32 sig)
+ * Description
+ * Send signal *sig* to the thread corresponding to the current task.
+ * Return
+ * 0 on success or successfully queued.
+ *
+ * **-EBUSY** if work queue under nmi is full.
+ *
+ * **-EINVAL** if *sig* is invalid.
+ *
+ * **-EPERM** if no permission to send the *sig*.
+ *
+ * **-EAGAIN** if bpf program can try again.
+ *
+ * u64 bpf_jiffies64(void)
+ * Description
+ * Obtain the 64-bit jiffies counter.
+ * Return
+ * The 64-bit jiffies counter.
*/
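As a rough illustration of the new bpf_jiffies64() helper, the tracepoint program below limits its own output to roughly once per second. It assumes a libbpf whose bpf_helpers.h already declares bpf_jiffies64() and supports global data, and it guesses CONFIG_HZ=250; none of those details come from this header.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define HZ_GUESS 250		/* assumption: CONFIG_HZ of the target kernel */

__u64 last_jiffies;		/* global data, needs a BTF-aware libbpf */

SEC("tracepoint/sched/sched_switch")
int rate_limited(void *ctx)
{
	__u64 now = bpf_jiffies64();

	if (now - last_jiffies < HZ_GUESS)
		return 0;

	last_jiffies = now;
	bpf_printk("about one second (in jiffies) elapsed\n");
	return 0;
}

char LICENSE[] SEC("license") = "GPL";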
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2938,7 +3009,10 @@ union bpf_attr {
FN(probe_read_user), \
FN(probe_read_kernel), \
FN(probe_read_user_str), \
- FN(probe_read_kernel_str),
+ FN(probe_read_kernel_str), \
+ FN(tcp_send_ack), \
+ FN(send_signal_thread), \
+ FN(jiffies64),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -3339,7 +3413,7 @@ struct bpf_map_info {
__u32 map_flags;
char name[BPF_OBJ_NAME_LEN];
__u32 ifindex;
- __u32 :32;
+ __u32 btf_vmlinux_value_type_id;
__u64 netns_dev;
__u64 netns_ino;
__u32 btf_id;
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index c02dec97e1ce..5a667107ad2c 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -142,7 +142,14 @@ struct btf_param {
enum {
BTF_VAR_STATIC = 0,
- BTF_VAR_GLOBAL_ALLOCATED,
+ BTF_VAR_GLOBAL_ALLOCATED = 1,
+ BTF_VAR_GLOBAL_EXTERN = 2,
+};
+
+enum btf_func_linkage {
+ BTF_FUNC_STATIC = 0,
+ BTF_FUNC_GLOBAL = 1,
+ BTF_FUNC_EXTERN = 2,
};
/* BTF_KIND_VAR is followed by a single "struct btf_var" to describe
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index d4591792f0b4..4295ebfa2f91 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -593,6 +593,9 @@ struct ethtool_pauseparam {
* @ETH_SS_RSS_HASH_FUNCS: RSS hush function names
* @ETH_SS_PHY_STATS: Statistic names, for use with %ETHTOOL_GPHYSTATS
* @ETH_SS_PHY_TUNABLES: PHY tunable names
+ * @ETH_SS_LINK_MODES: link mode names
+ * @ETH_SS_MSG_CLASSES: debug message class names
+ * @ETH_SS_WOL_MODES: wake-on-lan modes
*/
enum ethtool_stringset {
ETH_SS_TEST = 0,
@@ -604,6 +607,12 @@ enum ethtool_stringset {
ETH_SS_TUNABLES,
ETH_SS_PHY_STATS,
ETH_SS_PHY_TUNABLES,
+ ETH_SS_LINK_MODES,
+ ETH_SS_MSG_CLASSES,
+ ETH_SS_WOL_MODES,
+
+ /* add new constants above here */
+ ETH_SS_COUNT
};
/**
@@ -1688,6 +1697,8 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */
#define WAKE_FILTER (1 << 7)
+#define WOL_MODE_COUNT 8
+
/* L2-L4 network traffic flow types */
#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */
#define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
new file mode 100644
index 000000000000..7e0b460f872c
--- /dev/null
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * include/uapi/linux/ethtool_netlink.h - netlink interface for ethtool
+ *
+ * See Documentation/networking/ethtool-netlink.txt in kernel source tree for
+ * documentation of the interface.
+ */
+
+#ifndef _UAPI_LINUX_ETHTOOL_NETLINK_H_
+#define _UAPI_LINUX_ETHTOOL_NETLINK_H_
+
+#include <linux/ethtool.h>
+
+/* message types - userspace to kernel */
+enum {
+ ETHTOOL_MSG_USER_NONE,
+ ETHTOOL_MSG_STRSET_GET,
+ ETHTOOL_MSG_LINKINFO_GET,
+ ETHTOOL_MSG_LINKINFO_SET,
+ ETHTOOL_MSG_LINKMODES_GET,
+ ETHTOOL_MSG_LINKMODES_SET,
+ ETHTOOL_MSG_LINKSTATE_GET,
+ ETHTOOL_MSG_DEBUG_GET,
+ ETHTOOL_MSG_DEBUG_SET,
+ ETHTOOL_MSG_WOL_GET,
+ ETHTOOL_MSG_WOL_SET,
+
+ /* add new constants above here */
+ __ETHTOOL_MSG_USER_CNT,
+ ETHTOOL_MSG_USER_MAX = __ETHTOOL_MSG_USER_CNT - 1
+};
+
+/* message types - kernel to userspace */
+enum {
+ ETHTOOL_MSG_KERNEL_NONE,
+ ETHTOOL_MSG_STRSET_GET_REPLY,
+ ETHTOOL_MSG_LINKINFO_GET_REPLY,
+ ETHTOOL_MSG_LINKINFO_NTF,
+ ETHTOOL_MSG_LINKMODES_GET_REPLY,
+ ETHTOOL_MSG_LINKMODES_NTF,
+ ETHTOOL_MSG_LINKSTATE_GET_REPLY,
+ ETHTOOL_MSG_DEBUG_GET_REPLY,
+ ETHTOOL_MSG_DEBUG_NTF,
+ ETHTOOL_MSG_WOL_GET_REPLY,
+ ETHTOOL_MSG_WOL_NTF,
+
+ /* add new constants above here */
+ __ETHTOOL_MSG_KERNEL_CNT,
+ ETHTOOL_MSG_KERNEL_MAX = __ETHTOOL_MSG_KERNEL_CNT - 1
+};
+
+/* request header */
+
+/* use compact bitsets in reply */
+#define ETHTOOL_FLAG_COMPACT_BITSETS (1 << 0)
+/* provide optional reply for SET or ACT requests */
+#define ETHTOOL_FLAG_OMIT_REPLY (1 << 1)
+
+#define ETHTOOL_FLAG_ALL (ETHTOOL_FLAG_COMPACT_BITSETS | \
+ ETHTOOL_FLAG_OMIT_REPLY)
+
+enum {
+ ETHTOOL_A_HEADER_UNSPEC,
+ ETHTOOL_A_HEADER_DEV_INDEX, /* u32 */
+ ETHTOOL_A_HEADER_DEV_NAME, /* string */
+ ETHTOOL_A_HEADER_FLAGS, /* u32 - ETHTOOL_FLAG_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_HEADER_CNT,
+ ETHTOOL_A_HEADER_MAX = __ETHTOOL_A_HEADER_CNT - 1
+};
+
+/* bit sets */
+
+enum {
+ ETHTOOL_A_BITSET_BIT_UNSPEC,
+ ETHTOOL_A_BITSET_BIT_INDEX, /* u32 */
+ ETHTOOL_A_BITSET_BIT_NAME, /* string */
+ ETHTOOL_A_BITSET_BIT_VALUE, /* flag */
+
+ /* add new constants above here */
+ __ETHTOOL_A_BITSET_BIT_CNT,
+ ETHTOOL_A_BITSET_BIT_MAX = __ETHTOOL_A_BITSET_BIT_CNT - 1
+};
+
+enum {
+ ETHTOOL_A_BITSET_BITS_UNSPEC,
+ ETHTOOL_A_BITSET_BITS_BIT, /* nest - _A_BITSET_BIT_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_BITSET_BITS_CNT,
+ ETHTOOL_A_BITSET_BITS_MAX = __ETHTOOL_A_BITSET_BITS_CNT - 1
+};
+
+enum {
+ ETHTOOL_A_BITSET_UNSPEC,
+ ETHTOOL_A_BITSET_NOMASK, /* flag */
+ ETHTOOL_A_BITSET_SIZE, /* u32 */
+ ETHTOOL_A_BITSET_BITS, /* nest - _A_BITSET_BITS_* */
+ ETHTOOL_A_BITSET_VALUE, /* binary */
+ ETHTOOL_A_BITSET_MASK, /* binary */
+
+ /* add new constants above here */
+ __ETHTOOL_A_BITSET_CNT,
+ ETHTOOL_A_BITSET_MAX = __ETHTOOL_A_BITSET_CNT - 1
+};
+
+/* string sets */
+
+enum {
+ ETHTOOL_A_STRING_UNSPEC,
+ ETHTOOL_A_STRING_INDEX, /* u32 */
+ ETHTOOL_A_STRING_VALUE, /* string */
+
+ /* add new constants above here */
+ __ETHTOOL_A_STRING_CNT,
+ ETHTOOL_A_STRING_MAX = __ETHTOOL_A_STRING_CNT - 1
+};
+
+enum {
+ ETHTOOL_A_STRINGS_UNSPEC,
+ ETHTOOL_A_STRINGS_STRING, /* nest - _A_STRINGS_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_STRINGS_CNT,
+ ETHTOOL_A_STRINGS_MAX = __ETHTOOL_A_STRINGS_CNT - 1
+};
+
+enum {
+ ETHTOOL_A_STRINGSET_UNSPEC,
+ ETHTOOL_A_STRINGSET_ID, /* u32 */
+ ETHTOOL_A_STRINGSET_COUNT, /* u32 */
+ ETHTOOL_A_STRINGSET_STRINGS, /* nest - _A_STRINGS_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_STRINGSET_CNT,
+ ETHTOOL_A_STRINGSET_MAX = __ETHTOOL_A_STRINGSET_CNT - 1
+};
+
+enum {
+ ETHTOOL_A_STRINGSETS_UNSPEC,
+ ETHTOOL_A_STRINGSETS_STRINGSET, /* nest - _A_STRINGSET_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_STRINGSETS_CNT,
+ ETHTOOL_A_STRINGSETS_MAX = __ETHTOOL_A_STRINGSETS_CNT - 1
+};
+
+/* STRSET */
+
+enum {
+ ETHTOOL_A_STRSET_UNSPEC,
+ ETHTOOL_A_STRSET_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_STRSET_STRINGSETS, /* nest - _A_STRINGSETS_* */
+ ETHTOOL_A_STRSET_COUNTS_ONLY, /* flag */
+
+ /* add new constants above here */
+ __ETHTOOL_A_STRSET_CNT,
+ ETHTOOL_A_STRSET_MAX = __ETHTOOL_A_STRSET_CNT - 1
+};
+
+/* LINKINFO */
+
+enum {
+ ETHTOOL_A_LINKINFO_UNSPEC,
+ ETHTOOL_A_LINKINFO_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_LINKINFO_PORT, /* u8 */
+ ETHTOOL_A_LINKINFO_PHYADDR, /* u8 */
+ ETHTOOL_A_LINKINFO_TP_MDIX, /* u8 */
+ ETHTOOL_A_LINKINFO_TP_MDIX_CTRL, /* u8 */
+ ETHTOOL_A_LINKINFO_TRANSCEIVER, /* u8 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_LINKINFO_CNT,
+ ETHTOOL_A_LINKINFO_MAX = __ETHTOOL_A_LINKINFO_CNT - 1
+};
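To make the message layout concrete, here is a hedged libmnl sketch that builds (but does not send) an ETHTOOL_MSG_LINKINFO_GET request carrying the common request header nest. The numeric generic netlink family id for "ethtool" is assumed to have been resolved beforehand via nlctrl, and "eth0" is an illustrative device name.

#include <stddef.h>
#include <libmnl/libmnl.h>
#include <linux/genetlink.h>
#include <linux/ethtool_netlink.h>

static size_t build_linkinfo_get(void *buf, __u16 ethtool_family)
{
	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
	struct genlmsghdr *ghdr;
	struct nlattr *hdr;

	nlh->nlmsg_type = ethtool_family;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;

	ghdr = mnl_nlmsg_put_extra_header(nlh, sizeof(*ghdr));
	ghdr->cmd = ETHTOOL_MSG_LINKINFO_GET;
	ghdr->version = ETHTOOL_GENL_VERSION;

	/* every request starts with an ETHTOOL_A_*_HEADER nest */
	hdr = mnl_attr_nest_start(nlh, ETHTOOL_A_LINKINFO_HEADER);
	mnl_attr_put_strz(nlh, ETHTOOL_A_HEADER_DEV_NAME, "eth0");
	mnl_attr_put_u32(nlh, ETHTOOL_A_HEADER_FLAGS, ETHTOOL_FLAG_COMPACT_BITSETS);
	mnl_attr_nest_end(nlh, hdr);

	return nlh->nlmsg_len;
}

The reply arrives as ETHTOOL_MSG_LINKINFO_GET_REPLY with the same header nest followed by the ETHTOOL_A_LINKINFO_* attributes listed above.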
+
+/* LINKMODES */
+
+enum {
+ ETHTOOL_A_LINKMODES_UNSPEC,
+ ETHTOOL_A_LINKMODES_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_LINKMODES_AUTONEG, /* u8 */
+ ETHTOOL_A_LINKMODES_OURS, /* bitset */
+ ETHTOOL_A_LINKMODES_PEER, /* bitset */
+ ETHTOOL_A_LINKMODES_SPEED, /* u32 */
+ ETHTOOL_A_LINKMODES_DUPLEX, /* u8 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_LINKMODES_CNT,
+ ETHTOOL_A_LINKMODES_MAX = __ETHTOOL_A_LINKMODES_CNT - 1
+};
+
+/* LINKSTATE */
+
+enum {
+ ETHTOOL_A_LINKSTATE_UNSPEC,
+ ETHTOOL_A_LINKSTATE_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_LINKSTATE_LINK, /* u8 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_LINKSTATE_CNT,
+ ETHTOOL_A_LINKSTATE_MAX = __ETHTOOL_A_LINKSTATE_CNT - 1
+};
+
+/* DEBUG */
+
+enum {
+ ETHTOOL_A_DEBUG_UNSPEC,
+ ETHTOOL_A_DEBUG_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_DEBUG_MSGMASK, /* bitset */
+
+ /* add new constants above here */
+ __ETHTOOL_A_DEBUG_CNT,
+ ETHTOOL_A_DEBUG_MAX = __ETHTOOL_A_DEBUG_CNT - 1
+};
+
+/* WOL */
+
+enum {
+ ETHTOOL_A_WOL_UNSPEC,
+ ETHTOOL_A_WOL_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_WOL_MODES, /* bitset */
+ ETHTOOL_A_WOL_SOPASS, /* binary */
+
+ /* add new constants above here */
+ __ETHTOOL_A_WOL_CNT,
+ ETHTOOL_A_WOL_MAX = __ETHTOOL_A_WOL_CNT - 1
+};
+
+/* generic netlink info */
+#define ETHTOOL_GENL_NAME "ethtool"
+#define ETHTOOL_GENL_VERSION 1
+
+#define ETHTOOL_MCGRP_MONITOR_NAME "monitor"
+
+#endif /* _UAPI_LINUX_ETHTOOL_NETLINK_H_ */
diff --git a/include/uapi/linux/hdlc/ioctl.h b/include/uapi/linux/hdlc/ioctl.h
index 0fe4238e8246..b06341acab5e 100644
--- a/include/uapi/linux/hdlc/ioctl.h
+++ b/include/uapi/linux/hdlc/ioctl.h
@@ -79,6 +79,15 @@ typedef struct {
unsigned int timeout;
} cisco_proto;
+typedef struct {
+ unsigned short dce; /* 1 for DCE (network side) operation */
+ unsigned int modulo; /* modulo (8 = basic / 128 = extended) */
+ unsigned int window; /* frame window size */
+ unsigned int t1; /* timeout t1 */
+ unsigned int t2; /* timeout t2 */
+ unsigned int n2; /* frame retry counter */
+} x25_hdlc_proto;
+
/* PPP doesn't need any info now - supply length = 0 to ioctl */
#endif /* __ASSEMBLY__ */
diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
index 4bf33344aab1..be714cd8c826 100644
--- a/include/uapi/linux/if.h
+++ b/include/uapi/linux/if.h
@@ -213,6 +213,7 @@ struct if_settings {
fr_proto __user *fr;
fr_proto_pvc __user *fr_pvc;
fr_proto_pvc_info __user *fr_pvc_info;
+ x25_hdlc_proto __user *x25;
/* interface settings */
sync_serial_settings __user *sync;
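A hedged sketch of how the new x25_hdlc_proto parameters would be handed to a generic-HDLC WAN interface through the existing SIOCWANDEV/if_settings path (see the x25 pointer added to struct if_settings above). The IF_PROTO_X25 protocol id and the parameter values are illustrative assumptions rather than anything this patch prescribes; fd is any socket and ifname the WAN device.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/hdlc/ioctl.h>
#include <linux/sockios.h>

static int set_x25_params(int fd, const char *ifname)
{
	x25_hdlc_proto x25 = {
		.dce	= 0,	/* DTE side */
		.modulo	= 8,	/* basic, non-extended sequence numbers */
		.window	= 7,
		.t1	= 3,
		.t2	= 1,
		.n2	= 10,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_settings.type = IF_PROTO_X25;
	ifr.ifr_settings.size = sizeof(x25);
	ifr.ifr_settings.ifs_ifsu.x25 = &x25;

	return ioctl(fd, SIOCWANDEV, &ifr);
}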
diff --git a/include/uapi/linux/if_bonding.h b/include/uapi/linux/if_bonding.h
index 790585f0e61b..45f3750aa861 100644
--- a/include/uapi/linux/if_bonding.h
+++ b/include/uapi/linux/if_bonding.h
@@ -95,6 +95,16 @@
#define BOND_XMIT_POLICY_ENCAP23 3 /* encapsulated layer 2+3 */
#define BOND_XMIT_POLICY_ENCAP34 4 /* encapsulated layer 3+4 */
+/* 802.3ad port state definitions (43.4.2.2 in the 802.3ad standard) */
+#define LACP_STATE_LACP_ACTIVITY 0x1
+#define LACP_STATE_LACP_TIMEOUT 0x2
+#define LACP_STATE_AGGREGATION 0x4
+#define LACP_STATE_SYNCHRONIZATION 0x8
+#define LACP_STATE_COLLECTING 0x10
+#define LACP_STATE_DISTRIBUTING 0x20
+#define LACP_STATE_DEFAULTED 0x40
+#define LACP_STATE_EXPIRED 0x80
+
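A small sketch that decodes an 802.3ad actor/partner port state byte with the new LACP_STATE_* names (the byte itself would come from the bonding driver or a captured LACPDU; headers from this series are assumed):

#include <stdio.h>
#include <linux/if_bonding.h>

static void print_lacp_state(unsigned char state)
{
	printf("activity=%d timeout=%d aggregation=%d sync=%d "
	       "collecting=%d distributing=%d defaulted=%d expired=%d\n",
	       !!(state & LACP_STATE_LACP_ACTIVITY),
	       !!(state & LACP_STATE_LACP_TIMEOUT),
	       !!(state & LACP_STATE_AGGREGATION),
	       !!(state & LACP_STATE_SYNCHRONIZATION),
	       !!(state & LACP_STATE_COLLECTING),
	       !!(state & LACP_STATE_DISTRIBUTING),
	       !!(state & LACP_STATE_DEFAULTED),
	       !!(state & LACP_STATE_EXPIRED));
}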
typedef struct ifbond {
__s32 bond_mode;
__s32 num_slaves;
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 1b3c2b643a02..42f7ca38ad80 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -130,6 +130,7 @@ enum {
#define BRIDGE_VLAN_INFO_RANGE_BEGIN (1<<3) /* VLAN is start of vlan range */
#define BRIDGE_VLAN_INFO_RANGE_END (1<<4) /* VLAN is end of vlan range */
#define BRIDGE_VLAN_INFO_BRENTRY (1<<5) /* Global bridge VLAN entry */
+#define BRIDGE_VLAN_INFO_ONLY_OPTS (1<<6) /* Skip create/delete/flags */
struct bridge_vlan_info {
__u16 flags;
@@ -156,6 +157,45 @@ struct bridge_vlan_xstats {
__u32 pad2;
};
+struct bridge_stp_xstats {
+ __u64 transition_blk;
+ __u64 transition_fwd;
+ __u64 rx_bpdu;
+ __u64 tx_bpdu;
+ __u64 rx_tcn;
+ __u64 tx_tcn;
+};
+
+/* Bridge vlan RTM header */
+struct br_vlan_msg {
+ __u8 family;
+ __u8 reserved1;
+ __u16 reserved2;
+ __u32 ifindex;
+};
+
+/* Bridge vlan RTM attributes
+ * [BRIDGE_VLANDB_ENTRY] = {
+ * [BRIDGE_VLANDB_ENTRY_INFO]
+ * ...
+ * }
+ */
+enum {
+ BRIDGE_VLANDB_UNSPEC,
+ BRIDGE_VLANDB_ENTRY,
+ __BRIDGE_VLANDB_MAX,
+};
+#define BRIDGE_VLANDB_MAX (__BRIDGE_VLANDB_MAX - 1)
+
+enum {
+ BRIDGE_VLANDB_ENTRY_UNSPEC,
+ BRIDGE_VLANDB_ENTRY_INFO,
+ BRIDGE_VLANDB_ENTRY_RANGE,
+ BRIDGE_VLANDB_ENTRY_STATE,
+ __BRIDGE_VLANDB_ENTRY_MAX,
+};
+#define BRIDGE_VLANDB_ENTRY_MAX (__BRIDGE_VLANDB_ENTRY_MAX - 1)
+
/* Bridge multicast database attributes
* [MDBA_MDB] = {
* [MDBA_MDB_ENTRY] = {
@@ -262,6 +302,7 @@ enum {
BRIDGE_XSTATS_VLAN,
BRIDGE_XSTATS_MCAST,
BRIDGE_XSTATS_PAD,
+ BRIDGE_XSTATS_STP,
__BRIDGE_XSTATS_MAX
};
#define BRIDGE_XSTATS_MAX (__BRIDGE_XSTATS_MAX - 1)
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 8aec8769d944..024af2d1d0af 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -169,6 +169,7 @@ enum {
IFLA_MAX_MTU,
IFLA_PROP_LIST,
IFLA_ALT_IFNAME, /* Alternative ifname */
+ IFLA_PERM_ADDRESS,
__IFLA_MAX
};
@@ -485,6 +486,13 @@ enum macsec_validation_type {
MACSEC_VALIDATE_MAX = __MACSEC_VALIDATE_END - 1,
};
+enum macsec_offload {
+ MACSEC_OFFLOAD_OFF = 0,
+ MACSEC_OFFLOAD_PHY = 1,
+ __MACSEC_OFFLOAD_END,
+ MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1,
+};
+
/* IPVLAN section */
enum {
IFLA_IPVLAN_UNSPEC,
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
index 98e4d5d7c45c..1d63c43c38cc 100644
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -45,6 +45,7 @@ enum macsec_attrs {
MACSEC_ATTR_RXSC_LIST, /* dump, nested, macsec_rxsc_attrs for each RXSC */
MACSEC_ATTR_TXSC_STATS, /* dump, nested, macsec_txsc_stats_attr */
MACSEC_ATTR_SECY_STATS, /* dump, nested, macsec_secy_stats_attr */
+ MACSEC_ATTR_OFFLOAD, /* config, nested, macsec_offload_attrs */
__MACSEC_ATTR_END,
NUM_MACSEC_ATTR = __MACSEC_ATTR_END,
MACSEC_ATTR_MAX = __MACSEC_ATTR_END - 1,
@@ -97,6 +98,15 @@ enum macsec_sa_attrs {
MACSEC_SA_ATTR_MAX = __MACSEC_SA_ATTR_END - 1,
};
+enum macsec_offload_attrs {
+ MACSEC_OFFLOAD_ATTR_UNSPEC,
+ MACSEC_OFFLOAD_ATTR_TYPE, /* config/dump, u8 0..2 */
+ MACSEC_OFFLOAD_ATTR_PAD,
+ __MACSEC_OFFLOAD_ATTR_END,
+ NUM_MACSEC_OFFLOAD_ATTR = __MACSEC_OFFLOAD_ATTR_END,
+ MACSEC_OFFLOAD_ATTR_MAX = __MACSEC_OFFLOAD_ATTR_END - 1,
+};
+
enum macsec_nl_commands {
MACSEC_CMD_GET_TXSC,
MACSEC_CMD_ADD_RXSC,
@@ -108,6 +118,7 @@ enum macsec_nl_commands {
MACSEC_CMD_ADD_RXSA,
MACSEC_CMD_DEL_RXSA,
MACSEC_CMD_UPD_RXSA,
+ MACSEC_CMD_UPD_OFFLOAD,
};
/* u64 per-RXSC stats */
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index e7ad9d350a28..1521073b6348 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -76,6 +76,8 @@ enum {
#define IPPROTO_MPLS IPPROTO_MPLS
IPPROTO_RAW = 255, /* Raw IP packets */
#define IPPROTO_RAW IPPROTO_RAW
+ IPPROTO_MPTCP = 262, /* Multipath TCP connection */
+#define IPPROTO_MPTCP IPPROTO_MPTCP
IPPROTO_MAX
};
#endif
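Since IPPROTO_MPTCP is now a valid protocol argument for SOCK_STREAM sockets, creating an MPTCP socket is a one-liner. A minimal sketch with a fallback define for older libc headers and a run-time fallback to plain TCP on kernels without MPTCP:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262	/* value from this header */
#endif

static int open_stream_socket(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

	if (fd < 0) {
		perror("socket(IPPROTO_MPTCP)");
		fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	}
	return fd;
}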
diff --git a/include/uapi/linux/mii.h b/include/uapi/linux/mii.h
index 51b48e4be1f2..0b9c3beda345 100644
--- a/include/uapi/linux/mii.h
+++ b/include/uapi/linux/mii.h
@@ -131,6 +131,18 @@
#define NWAYTEST_LOOPBACK 0x0100 /* Enable loopback for N-way */
#define NWAYTEST_RESV2 0xfe00 /* Unused... */
+/* MAC and PHY tx_config_Reg[15:0] for SGMII in-band auto-negotiation.*/
+#define ADVERTISE_SGMII 0x0001 /* MAC can do SGMII */
+#define LPA_SGMII 0x0001 /* PHY can do SGMII */
+#define LPA_SGMII_DPX_SPD_MASK 0x1C00 /* SGMII duplex and speed bits */
+#define LPA_SGMII_10HALF 0x0000 /* Can do 10mbps half-duplex */
+#define LPA_SGMII_10FULL 0x1000 /* Can do 10mbps full-duplex */
+#define LPA_SGMII_100HALF 0x0400 /* Can do 100mbps half-duplex */
+#define LPA_SGMII_100FULL 0x1400 /* Can do 100mbps full-duplex */
+#define LPA_SGMII_1000HALF 0x0800 /* Can do 1000mbps half-duplex */
+#define LPA_SGMII_1000FULL 0x1800 /* Can do 1000mbps full-duplex */
+#define LPA_SGMII_LINK 0x8000 /* PHY link with copper-side partner */
+
/* 1000BASE-T Control register */
#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index e5b39721c6e4..f96e650d0af9 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -90,6 +90,14 @@ enum hwtstamp_tx_types {
* queue.
*/
HWTSTAMP_TX_ONESTEP_SYNC,
+
+ /*
+ * Same as HWTSTAMP_TX_ONESTEP_SYNC, but also enables time
+ * stamp insertion directly into PDelay_Resp packets. In this
+ * case, neither transmitted Sync nor PDelay_Resp packets will
+ * receive a time stamp via the socket error queue.
+ */
+ HWTSTAMP_TX_ONESTEP_P2P,
};
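A hedged sketch of selecting the new one-step P2P mode through the usual SIOCSHWTSTAMP path. The device name handling, the rx_filter choice and issuing the ioctl on an ordinary AF_INET/UDP socket are illustrative assumptions; only hardware that can insert timestamps into PDelay_Resp frames will accept HWTSTAMP_TX_ONESTEP_P2P.

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_onestep_p2p(int fd, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ONESTEP_P2P,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}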
/* possible values for hwtstamp_config->rx_filter */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index bb9b049310df..065218a20bb7 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -48,6 +48,7 @@ enum nft_registers {
#define NFT_REG_SIZE 16
#define NFT_REG32_SIZE 4
+#define NFT_REG32_COUNT (NFT_REG32_15 - NFT_REG32_00 + 1)
/**
* enum nft_verdicts - nf_tables internal verdicts
@@ -301,15 +302,29 @@ enum nft_set_policies {
* enum nft_set_desc_attributes - set element description
*
* @NFTA_SET_DESC_SIZE: number of elements in set (NLA_U32)
+ * @NFTA_SET_DESC_CONCAT: description of field concatenation (NLA_NESTED)
*/
enum nft_set_desc_attributes {
NFTA_SET_DESC_UNSPEC,
NFTA_SET_DESC_SIZE,
+ NFTA_SET_DESC_CONCAT,
__NFTA_SET_DESC_MAX
};
#define NFTA_SET_DESC_MAX (__NFTA_SET_DESC_MAX - 1)
/**
+ * enum nft_set_field_attributes - attributes of concatenated fields
+ *
+ * @NFTA_SET_FIELD_LEN: length of single field, in bits (NLA_U32)
+ */
+enum nft_set_field_attributes {
+ NFTA_SET_FIELD_UNSPEC,
+ NFTA_SET_FIELD_LEN,
+ __NFTA_SET_FIELD_MAX
+};
+#define NFTA_SET_FIELD_MAX (__NFTA_SET_FIELD_MAX - 1)
+
+/**
* enum nft_set_attributes - nf_tables set netlink attributes
*
* @NFTA_SET_TABLE: table name (NLA_STRING)
@@ -370,6 +385,7 @@ enum nft_set_elem_flags {
* @NFTA_SET_ELEM_USERDATA: user data (NLA_BINARY)
* @NFTA_SET_ELEM_EXPR: expression (NLA_NESTED: nft_expr_attributes)
* @NFTA_SET_ELEM_OBJREF: stateful object reference (NLA_STRING)
+ * @NFTA_SET_ELEM_KEY_END: closing key value (NLA_NESTED: nft_data)
*/
enum nft_set_elem_attributes {
NFTA_SET_ELEM_UNSPEC,
@@ -382,6 +398,7 @@ enum nft_set_elem_attributes {
NFTA_SET_ELEM_EXPR,
NFTA_SET_ELEM_PAD,
NFTA_SET_ELEM_OBJREF,
+ NFTA_SET_ELEM_KEY_END,
__NFTA_SET_ELEM_MAX
};
#define NFTA_SET_ELEM_MAX (__NFTA_SET_ELEM_MAX - 1)
@@ -485,6 +502,20 @@ enum nft_immediate_attributes {
#define NFTA_IMMEDIATE_MAX (__NFTA_IMMEDIATE_MAX - 1)
/**
+ * enum nft_bitwise_ops - nf_tables bitwise operations
+ *
+ * @NFT_BITWISE_BOOL: mask-and-xor operation used to implement NOT, AND, OR and
+ * XOR boolean operations
+ * @NFT_BITWISE_LSHIFT: left-shift operation
+ * @NFT_BITWISE_RSHIFT: right-shift operation
+ */
+enum nft_bitwise_ops {
+ NFT_BITWISE_BOOL,
+ NFT_BITWISE_LSHIFT,
+ NFT_BITWISE_RSHIFT,
+};
+
+/**
* enum nft_bitwise_attributes - nf_tables bitwise expression netlink attributes
*
* @NFTA_BITWISE_SREG: source register (NLA_U32: nft_registers)
@@ -492,16 +523,20 @@ enum nft_immediate_attributes {
* @NFTA_BITWISE_LEN: length of operands (NLA_U32)
* @NFTA_BITWISE_MASK: mask value (NLA_NESTED: nft_data_attributes)
* @NFTA_BITWISE_XOR: xor value (NLA_NESTED: nft_data_attributes)
+ * @NFTA_BITWISE_OP: type of operation (NLA_U32: nft_bitwise_ops)
+ * @NFTA_BITWISE_DATA: argument for non-boolean operations
+ * (NLA_NESTED: nft_data_attributes)
*
- * The bitwise expression performs the following operation:
+ * The bitwise expression supports boolean and shift operations. It implements
+ * the boolean operations by performing the following operation:
*
* dreg = (sreg & mask) ^ xor
*
- * which allow to express all bitwise operations:
+ * with these mask and xor values:
*
* mask xor
* NOT: 1 1
- * OR: 0 x
+ * OR: ~x x
* XOR: 1 x
* AND: x 0
*/
@@ -512,6 +547,8 @@ enum nft_bitwise_attributes {
NFTA_BITWISE_LEN,
NFTA_BITWISE_MASK,
NFTA_BITWISE_XOR,
+ NFTA_BITWISE_OP,
+ NFTA_BITWISE_DATA,
__NFTA_BITWISE_MAX
};
#define NFTA_BITWISE_MAX (__NFTA_BITWISE_MAX - 1)
@@ -805,6 +842,8 @@ enum nft_exthdr_attributes {
* @NFT_META_TIME_NS: time since epoch (in nanoseconds)
* @NFT_META_TIME_DAY: day of week (from 0 = Sunday to 6 = Saturday)
* @NFT_META_TIME_HOUR: hour of day (in seconds)
+ * @NFT_META_SDIF: slave device interface index
+ * @NFT_META_SDIFNAME: slave device interface name
*/
enum nft_meta_keys {
NFT_META_LEN,
@@ -840,6 +879,8 @@ enum nft_meta_keys {
NFT_META_TIME_NS,
NFT_META_TIME_DAY,
NFT_META_TIME_HOUR,
+ NFT_META_SDIF,
+ NFT_META_SDIFNAME,
};
/**
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index a87b44cd5590..ae2bff14e7e1 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -673,6 +673,32 @@ struct ovs_action_push_mpls {
};
/**
+ * struct ovs_action_add_mpls - %OVS_ACTION_ATTR_ADD_MPLS action
+ * argument.
+ * @mpls_lse: MPLS label stack entry to push.
+ * @mpls_ethertype: Ethertype to set in the encapsulating ethernet frame.
+ * @tun_flags: MPLS tunnel attributes.
+ *
+ * The only values @mpls_ethertype should ever be given are %ETH_P_MPLS_UC and
+ * %ETH_P_MPLS_MC, indicating MPLS unicast or multicast. Others are rejected.
+ */
+struct ovs_action_add_mpls {
+ __be32 mpls_lse;
+ __be16 mpls_ethertype; /* Either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC */
+ __u16 tun_flags;
+};
+
+#define OVS_MPLS_L3_TUNNEL_FLAG_MASK (1 << 0) /* Flag to specify the place of
+ * insertion of MPLS header.
+ * When false, the MPLS header
+ * will be inserted at the start
+ * of the packet.
+ * When true, the MPLS header
+ * will be inserted at the start
+ * of the l3 header.
+ */
+
+/**
* struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument.
* @vlan_tpid: Tag protocol identifier (TPID) to push.
* @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set
@@ -892,6 +918,10 @@ struct check_pkt_len_arg {
* @OVS_ACTION_ATTR_CHECK_PKT_LEN: Check the packet length and execute a set
* of actions if greater than the specified packet length, else execute
* another set of actions.
+ * @OVS_ACTION_ATTR_ADD_MPLS: Push a new MPLS label stack entry at the
+ * start of the packet or at the start of the l3 header depending on the value
+ * of l3 tunnel flag in the tun_flags field of OVS_ACTION_ATTR_ADD_MPLS
+ * argument.
*
* Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
* fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -927,6 +957,7 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_METER, /* u32 meter ID. */
OVS_ACTION_ATTR_CLONE, /* Nested OVS_CLONE_ATTR_*. */
OVS_ACTION_ATTR_CHECK_PKT_LEN, /* Nested OVS_CHECK_PKT_LEN_ATTR_*. */
+ OVS_ACTION_ATTR_ADD_MPLS, /* struct ovs_action_add_mpls. */
__OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
* from userspace. */
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 9f1a72876212..bbe791b24168 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -971,6 +971,37 @@ struct tc_pie_xstats {
__u32 ecn_mark; /* packets marked with ecn*/
};
+/* FQ PIE */
+enum {
+ TCA_FQ_PIE_UNSPEC,
+ TCA_FQ_PIE_LIMIT,
+ TCA_FQ_PIE_FLOWS,
+ TCA_FQ_PIE_TARGET,
+ TCA_FQ_PIE_TUPDATE,
+ TCA_FQ_PIE_ALPHA,
+ TCA_FQ_PIE_BETA,
+ TCA_FQ_PIE_QUANTUM,
+ TCA_FQ_PIE_MEMORY_LIMIT,
+ TCA_FQ_PIE_ECN_PROB,
+ TCA_FQ_PIE_ECN,
+ TCA_FQ_PIE_BYTEMODE,
+ TCA_FQ_PIE_DQ_RATE_ESTIMATOR,
+ __TCA_FQ_PIE_MAX
+};
+#define TCA_FQ_PIE_MAX (__TCA_FQ_PIE_MAX - 1)
+
+struct tc_fq_pie_xstats {
+ __u32 packets_in; /* total number of packets enqueued */
+ __u32 dropped; /* packets dropped due to fq_pie_action */
+ __u32 overlimit; /* dropped due to lack of space in queue */
+ __u32 overmemory; /* dropped due to lack of memory in queue */
+ __u32 ecn_mark; /* packets marked with ecn */
+ __u32 new_flow_count; /* count of new flows created by packets */
+ __u32 new_flows_len; /* count of flows in new list */
+ __u32 old_flows_len; /* count of flows in old list */
+ __u32 memory_usage; /* total memory across all queues */
+};
+
/* CBS */
struct tc_cbs_qopt {
__u8 offload;
@@ -1187,4 +1218,21 @@ enum {
#define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
+/* ETS */
+
+#define TCQ_ETS_MAX_BANDS 16
+
+enum {
+ TCA_ETS_UNSPEC,
+ TCA_ETS_NBANDS, /* u8 */
+ TCA_ETS_NSTRICT, /* u8 */
+ TCA_ETS_QUANTA, /* nested TCA_ETS_QUANTA_BAND */
+ TCA_ETS_QUANTA_BAND, /* u32 */
+ TCA_ETS_PRIOMAP, /* nested TCA_ETS_PRIOMAP_BAND */
+ TCA_ETS_PRIOMAP_BAND, /* u8 */
+ __TCA_ETS_MAX,
+};
+
+#define TCA_ETS_MAX (__TCA_ETS_MAX - 1)
+
#endif
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 1418a8362bb7..4a8c5b745157 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -171,6 +171,13 @@ enum {
RTM_GETLINKPROP,
#define RTM_GETLINKPROP RTM_GETLINKPROP
+ RTM_NEWVLAN = 112,
+#define RTM_NEWNVLAN RTM_NEWVLAN
+ RTM_DELVLAN,
+#define RTM_DELVLAN RTM_DELVLAN
+ RTM_GETVLAN,
+#define RTM_GETVLAN RTM_GETVLAN
+
__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};
@@ -309,6 +316,8 @@ enum rt_scope_t {
#define RTM_F_PREFIX 0x800 /* Prefix addresses */
#define RTM_F_LOOKUP_TABLE 0x1000 /* set rtm_table to FIB lookup result */
#define RTM_F_FIB_MATCH 0x2000 /* return full fib lookup match */
+#define RTM_F_OFFLOAD 0x4000 /* route is offloaded */
+#define RTM_F_TRAP 0x8000 /* route is trapping packets */
/* Reserved table identifiers */
@@ -721,6 +730,8 @@ enum rtnetlink_groups {
#define RTNLGRP_IPV6_MROUTE_R RTNLGRP_IPV6_MROUTE_R
RTNLGRP_NEXTHOP,
#define RTNLGRP_NEXTHOP RTNLGRP_NEXTHOP
+ RTNLGRP_BRVLAN,
+#define RTNLGRP_BRVLAN RTNLGRP_BRVLAN
__RTNLGRP_MAX
};
#define RTNLGRP_MAX (__RTNLGRP_MAX - 1)
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 7eee233e78d2..7d91f4debc48 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -285,6 +285,8 @@ enum
LINUX_MIB_TCPRCVQDROP, /* TCPRcvQDrop */
LINUX_MIB_TCPWQUEUETOOBIG, /* TCPWqueueTooBig */
LINUX_MIB_TCPFASTOPENPASSIVEALTKEY, /* TCPFastOpenPassiveAltKey */
+ LINUX_MIB_TCPTIMEOUTREHASH, /* TCPTimeoutRehash */
+ LINUX_MIB_TCPDUPLICATEDATAREHASH, /* TCPDuplicateDataRehash */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 74af1f759cee..fd9eb8f6bcae 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -311,20 +311,22 @@ enum {
TCP_NLA_DSACK_DUPS, /* DSACK blocks received */
TCP_NLA_REORD_SEEN, /* reordering events seen */
TCP_NLA_SRTT, /* smoothed RTT in usecs */
+ TCP_NLA_TIMEOUT_REHASH, /* Timeout-triggered rehash attempts */
};
/* for TCP_MD5SIG socket option */
#define TCP_MD5SIG_MAXKEYLEN 80
/* tcp_md5sig extension flags for TCP_MD5SIG_EXT */
-#define TCP_MD5SIG_FLAG_PREFIX 1 /* address prefix length */
+#define TCP_MD5SIG_FLAG_PREFIX 0x1 /* address prefix length */
+#define TCP_MD5SIG_FLAG_IFINDEX 0x2 /* ifindex set */
struct tcp_md5sig {
struct __kernel_sockaddr_storage tcpm_addr; /* address associated */
__u8 tcpm_flags; /* extension flags */
__u8 tcpm_prefixlen; /* address prefix */
__u16 tcpm_keylen; /* key length */
- __u32 __tcpm_pad; /* zero */
+ int tcpm_ifindex; /* device index for scope */
__u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN]; /* key (binary) */
};
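With TCP_MD5SIG_FLAG_IFINDEX and the new tcpm_ifindex field, an MD5 key can be scoped to a single device (typically an L3 master/VRF). A minimal sketch of the setsockopt call; the peer address, ifindex and key are caller-supplied and purely illustrative.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

/* keylen must not exceed TCP_MD5SIG_MAXKEYLEN */
static int set_md5_key_on_dev(int fd, const struct sockaddr_in *peer,
			      int ifindex, const void *key, int keylen)
{
	struct tcp_md5sig md5;

	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	md5.tcpm_flags = TCP_MD5SIG_FLAG_IFINDEX;
	md5.tcpm_ifindex = ifindex;
	md5.tcpm_keylen = keylen;
	memcpy(md5.tcpm_key, key, keylen);

	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG_EXT, &md5, sizeof(md5));
}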
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index 6c2194ab745b..dc0d23a50e69 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -65,6 +65,7 @@ enum {
TIPC_NL_UDP_GET_REMOTEIP,
TIPC_NL_KEY_SET,
TIPC_NL_KEY_FLUSH,
+ TIPC_NL_ADDR_LEGACY_GET,
__TIPC_NL_CMD_MAX,
TIPC_NL_CMD_MAX = __TIPC_NL_CMD_MAX - 1
@@ -176,6 +177,7 @@ enum {
TIPC_NLA_NET_ADDR, /* u32 */
TIPC_NLA_NET_NODEID, /* u64 */
TIPC_NLA_NET_NODEID_W1, /* u64 */
+ TIPC_NLA_NET_ADDR_LEGACY, /* flag */
__TIPC_NLA_NET_MAX,
TIPC_NLA_NET_MAX = __TIPC_NLA_NET_MAX - 1
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index 30baccb6c9c4..4828794efcf8 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -42,5 +42,6 @@ struct udphdr {
#define UDP_ENCAP_GTP0 4 /* GSM TS 09.60 */
#define UDP_ENCAP_GTP1U 5 /* 3GPP TS 29.060 */
#define UDP_ENCAP_RXRPC 6
+#define TCP_ENCAP_ESPINTCP 7 /* Yikes, this is really xfrm encap types. */
#endif /* _UAPI_LINUX_UDP_H */
diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h
index 68d57c5e99bc..fd0ed7221645 100644
--- a/include/uapi/linux/vm_sockets.h
+++ b/include/uapi/linux/vm_sockets.h
@@ -99,11 +99,13 @@
#define VMADDR_CID_HYPERVISOR 0
-/* This CID is specific to VMCI and can be considered reserved (even VMCI
- * doesn't use it anymore, it's a legacy value from an older release).
+/* Use this as the destination CID in an address when referring to
+ * local communication (loopback).
+ * (This was VMADDR_CID_RESERVED, but even VMCI doesn't use it anymore,
+ * it was a legacy value from an older release).
*/
-#define VMADDR_CID_RESERVED 1
+#define VMADDR_CID_LOCAL 1
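A short sketch of the renamed constant in use: connecting over AF_VSOCK to a service on the same host through loopback. The port number is an illustrative assumption, and a fallback define covers pre-rename headers.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

#ifndef VMADDR_CID_LOCAL
#define VMADDR_CID_LOCAL 1	/* formerly VMADDR_CID_RESERVED */
#endif

static int connect_local_vsock(unsigned int port)
{
	struct sockaddr_vm addr;
	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = VMADDR_CID_LOCAL;
	addr.svm_port = port;

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}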
/* Use this as the destination CID in an address when referring to the host
* (any process other than the hypervisor). VMCI relies on it being 2, but
diff --git a/include/uapi/linux/wireguard.h b/include/uapi/linux/wireguard.h
new file mode 100644
index 000000000000..ae88be14c947
--- /dev/null
+++ b/include/uapi/linux/wireguard.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * Documentation
+ * =============
+ *
+ * The below enums and macros are for interfacing with WireGuard, using generic
+ * netlink, with family WG_GENL_NAME and version WG_GENL_VERSION. It defines two
+ * methods: get and set. Note that while they share many common attributes,
+ * these two functions actually accept a slightly different set of inputs and
+ * outputs.
+ *
+ * WG_CMD_GET_DEVICE
+ * -----------------
+ *
+ * May only be called via NLM_F_REQUEST | NLM_F_DUMP. The command should contain
+ * one but not both of:
+ *
+ * WGDEVICE_A_IFINDEX: NLA_U32
+ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1
+ *
+ * The kernel will then return several messages (NLM_F_MULTI) containing the
+ * following tree of nested items:
+ *
+ * WGDEVICE_A_IFINDEX: NLA_U32
+ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1
+ * WGDEVICE_A_PRIVATE_KEY: NLA_EXACT_LEN, len WG_KEY_LEN
+ * WGDEVICE_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN
+ * WGDEVICE_A_LISTEN_PORT: NLA_U16
+ * WGDEVICE_A_FWMARK: NLA_U32
+ * WGDEVICE_A_PEERS: NLA_NESTED
+ * 0: NLA_NESTED
+ * WGPEER_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN
+ * WGPEER_A_PRESHARED_KEY: NLA_EXACT_LEN, len WG_KEY_LEN
+ * WGPEER_A_ENDPOINT: NLA_MIN_LEN(struct sockaddr), struct sockaddr_in or struct sockaddr_in6
+ * WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL: NLA_U16
+ * WGPEER_A_LAST_HANDSHAKE_TIME: NLA_EXACT_LEN, struct __kernel_timespec
+ * WGPEER_A_RX_BYTES: NLA_U64
+ * WGPEER_A_TX_BYTES: NLA_U64
+ * WGPEER_A_ALLOWEDIPS: NLA_NESTED
+ * 0: NLA_NESTED
+ * WGALLOWEDIP_A_FAMILY: NLA_U16
+ * WGALLOWEDIP_A_IPADDR: NLA_MIN_LEN(struct in_addr), struct in_addr or struct in6_addr
+ * WGALLOWEDIP_A_CIDR_MASK: NLA_U8
+ * 0: NLA_NESTED
+ * ...
+ * 0: NLA_NESTED
+ * ...
+ * ...
+ * WGPEER_A_PROTOCOL_VERSION: NLA_U32
+ * 0: NLA_NESTED
+ * ...
+ * ...
+ *
+ * It is possible that all of the allowed IPs of a single peer will not
+ * fit within a single netlink message. In that case, the same peer will
+ * be written in the following message, except it will only contain
+ * WGPEER_A_PUBLIC_KEY and WGPEER_A_ALLOWEDIPS. This may occur several
+ * times in a row for the same peer. It is then up to the receiver to
+ * coalesce adjacent peers. Likewise, it is possible that all peers will
+ * not fit within a single message. So, subsequent peers will be sent
+ * in following messages, except those will only contain WGDEVICE_A_IFNAME
+ * and WGDEVICE_A_PEERS. It is then up to the receiver to coalesce these
+ * messages to form the complete list of peers.
+ *
+ * Since this is an NLA_F_DUMP command, the final message will always be
+ * NLMSG_DONE, even if an error occurs. However, this NLMSG_DONE message
+ * contains an integer error code. It is either zero or a negative error
+ * code corresponding to the errno.
+ *
+ * WG_CMD_SET_DEVICE
+ * -----------------
+ *
+ * May only be called via NLM_F_REQUEST. The command should contain the
+ * following tree of nested items, containing one but not both of
+ * WGDEVICE_A_IFINDEX and WGDEVICE_A_IFNAME:
+ *
+ * WGDEVICE_A_IFINDEX: NLA_U32
+ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1
+ * WGDEVICE_A_FLAGS: NLA_U32, 0 or WGDEVICE_F_REPLACE_PEERS if all current
+ * peers should be removed prior to adding the list below.
+ * WGDEVICE_A_PRIVATE_KEY: len WG_KEY_LEN, all zeros to remove
+ * WGDEVICE_A_LISTEN_PORT: NLA_U16, 0 to choose randomly
+ * WGDEVICE_A_FWMARK: NLA_U32, 0 to disable
+ * WGDEVICE_A_PEERS: NLA_NESTED
+ * 0: NLA_NESTED
+ * WGPEER_A_PUBLIC_KEY: len WG_KEY_LEN
+ * WGPEER_A_FLAGS: NLA_U32, 0 and/or WGPEER_F_REMOVE_ME if the
+ * specified peer should not exist at the end of the
+ * operation, rather than added/updated and/or
+ * WGPEER_F_REPLACE_ALLOWEDIPS if all current allowed
+ * IPs of this peer should be removed prior to adding
+ * the list below and/or WGPEER_F_UPDATE_ONLY if the
+ * peer should only be set if it already exists.
+ * WGPEER_A_PRESHARED_KEY: len WG_KEY_LEN, all zeros to remove
+ * WGPEER_A_ENDPOINT: struct sockaddr_in or struct sockaddr_in6
+ * WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL: NLA_U16, 0 to disable
+ * WGPEER_A_ALLOWEDIPS: NLA_NESTED
+ * 0: NLA_NESTED
+ * WGALLOWEDIP_A_FAMILY: NLA_U16
+ * WGALLOWEDIP_A_IPADDR: struct in_addr or struct in6_addr
+ * WGALLOWEDIP_A_CIDR_MASK: NLA_U8
+ * 0: NLA_NESTED
+ * ...
+ * 0: NLA_NESTED
+ * ...
+ * ...
+ * WGPEER_A_PROTOCOL_VERSION: NLA_U32, should not be set or used at
+ * all by most users of this API, as the
+ * most recent protocol will be used when
+ * this is unset. Otherwise, must be set
+ * to 1.
+ * 0: NLA_NESTED
+ * ...
+ * ...
+ *
+ * It is possible that the amount of configuration data exceeds that of
+ * the maximum message length accepted by the kernel. In that case, several
+ * messages should be sent one after another, with each successive one
+ * filling in information not contained in the prior. Note that if
+ * WGDEVICE_F_REPLACE_PEERS is specified in the first message, it probably
+ * should not be specified in fragments that come after, so that the list
+ * of peers is only cleared the first time but appended after. Likewise for
+ * peers, if WGPEER_F_REPLACE_ALLOWEDIPS is specified in the first message
+ * of a peer, it likely should not be specified in subsequent fragments.
+ *
+ * If an error occurs, NLMSG_ERROR will reply containing an errno.
+ */
+ */
+
+#ifndef _WG_UAPI_WIREGUARD_H
+#define _WG_UAPI_WIREGUARD_H
+
+#define WG_GENL_NAME "wireguard"
+#define WG_GENL_VERSION 1
+
+#define WG_KEY_LEN 32
+
+enum wg_cmd {
+ WG_CMD_GET_DEVICE,
+ WG_CMD_SET_DEVICE,
+ __WG_CMD_MAX
+};
+#define WG_CMD_MAX (__WG_CMD_MAX - 1)
+
+enum wgdevice_flag {
+ WGDEVICE_F_REPLACE_PEERS = 1U << 0,
+ __WGDEVICE_F_ALL = WGDEVICE_F_REPLACE_PEERS
+};
+enum wgdevice_attribute {
+ WGDEVICE_A_UNSPEC,
+ WGDEVICE_A_IFINDEX,
+ WGDEVICE_A_IFNAME,
+ WGDEVICE_A_PRIVATE_KEY,
+ WGDEVICE_A_PUBLIC_KEY,
+ WGDEVICE_A_FLAGS,
+ WGDEVICE_A_LISTEN_PORT,
+ WGDEVICE_A_FWMARK,
+ WGDEVICE_A_PEERS,
+ __WGDEVICE_A_LAST
+};
+#define WGDEVICE_A_MAX (__WGDEVICE_A_LAST - 1)
+
+enum wgpeer_flag {
+ WGPEER_F_REMOVE_ME = 1U << 0,
+ WGPEER_F_REPLACE_ALLOWEDIPS = 1U << 1,
+ WGPEER_F_UPDATE_ONLY = 1U << 2,
+ __WGPEER_F_ALL = WGPEER_F_REMOVE_ME | WGPEER_F_REPLACE_ALLOWEDIPS |
+ WGPEER_F_UPDATE_ONLY
+};
+enum wgpeer_attribute {
+ WGPEER_A_UNSPEC,
+ WGPEER_A_PUBLIC_KEY,
+ WGPEER_A_PRESHARED_KEY,
+ WGPEER_A_FLAGS,
+ WGPEER_A_ENDPOINT,
+ WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL,
+ WGPEER_A_LAST_HANDSHAKE_TIME,
+ WGPEER_A_RX_BYTES,
+ WGPEER_A_TX_BYTES,
+ WGPEER_A_ALLOWEDIPS,
+ WGPEER_A_PROTOCOL_VERSION,
+ __WGPEER_A_LAST
+};
+#define WGPEER_A_MAX (__WGPEER_A_LAST - 1)
+
+enum wgallowedip_attribute {
+ WGALLOWEDIP_A_UNSPEC,
+ WGALLOWEDIP_A_FAMILY,
+ WGALLOWEDIP_A_IPADDR,
+ WGALLOWEDIP_A_CIDR_MASK,
+ __WGALLOWEDIP_A_LAST
+};
+#define WGALLOWEDIP_A_MAX (__WGALLOWEDIP_A_LAST - 1)
+
+#endif /* _WG_UAPI_WIREGUARD_H */
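To connect the attribute documentation above to actual message construction, here is a hedged libmnl sketch that builds (but does not send) a WG_CMD_GET_DEVICE dump request. The resolved generic netlink family id for WG_GENL_NAME and the "wg0" interface name are assumptions.

#include <stddef.h>
#include <libmnl/libmnl.h>
#include <linux/genetlink.h>
#include <linux/wireguard.h>

static size_t build_wg_get(void *buf, __u16 wg_family)
{
	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
	struct genlmsghdr *ghdr;

	nlh->nlmsg_type = wg_family;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;

	ghdr = mnl_nlmsg_put_extra_header(nlh, sizeof(*ghdr));
	ghdr->cmd = WG_CMD_GET_DEVICE;
	ghdr->version = WG_GENL_VERSION;

	/* exactly one of WGDEVICE_A_IFINDEX / WGDEVICE_A_IFNAME */
	mnl_attr_put_strz(nlh, WGDEVICE_A_IFNAME, "wg0");

	return nlh->nlmsg_len;
}

The kernel answers with one or more NLM_F_MULTI messages laid out as described in the comment block above, terminated by NLMSG_DONE.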