author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-15 09:00:47 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-15 09:00:47 -0700 |
commit | 6c373ca89399c5a3f7ef210ad8f63dc3437da345 (patch) | |
tree | 74d1ec65087df1da1021b43ac51acc1ee8601809 /include | |
parent | bb0fd7ab0986105765d11baa82e619c618a235aa (diff) | |
parent | 9f9151412dd7aae0e3f51a89ae4a1f8755fdb4d0 (diff) | |
download | linux-6c373ca89399c5a3f7ef210ad8f63dc3437da345.tar.bz2 |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
1) Add BQL support to via-rhine, from Tino Reichardt (see the BQL sketch after this list).
2) Integrate SWITCHDEV layer support into the DSA layer, so DSA drivers
can support hw switch offloading. From Florian Fainelli.
3) Allow 'ip address' commands to initiate multicast group join/leave,
from Madhu Challa.
4) Many ipv4 FIB lookup optimizations from Alexander Duyck.
5) Support EBPF in cls_bpf classifier and act_bpf action, from Daniel
Borkmann.
6) Remove the ugly compat support in ARP for ugly layers like ax25,
rose, etc. And use this to clean up the neigh layer, then use it to
implement MPLS support. All from Eric Biederman.
7) Support L3 forwarding offloading in switches, from Scott Feldman.
8) Collapse the LOCAL and MAIN ipv4 FIB tables when possible, to speed
up route lookups even further. From Alexander Duyck.
9) Many improvements and bug fixes to the rhashtable implementation,
from Herbert Xu and Thomas Graf. In particular, in the case where
an rhashtable user bulk adds a large number of items into an empty
table, we expand the table much more sanely.
10) Don't make the tcp_metrics hash table per-namespace, from Eric
Biederman.
11) Extend EBPF to access SKB fields, from Alexei Starovoitov.
12) Split out new connection request sockets so that they can be
established in the main hash table. Much less false sharing since
hash lookups go direct to the request sockets instead of having to
go first to the listener then to the request socks hashed
underneath. From Eric Dumazet.
13) Add async I/O support for crypto AF_ALG sockets, from Tadeusz Struk.
14) Support stable privacy address generation for RFC7217 in IPV6. From
Hannes Frederic Sowa.
15) Hash network namespace into IP frag IDs, also from Hannes Frederic
Sowa.
16) Convert PTP get/set methods to use 64-bit time, from Richard
Cochran.
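Item 1 above wires via-rhine into Byte Queue Limits. That driver change is outside this include/-only diffstat, so what follows is only a minimal sketch of the generic BQL accounting pattern from <linux/netdevice.h>: report bytes queued with netdev_tx_sent_queue() on the transmit path and bytes reclaimed with netdev_tx_completed_queue() on the completion path. The function names and the single-queue assumption are hypothetical, not taken from the via-rhine patch.

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical transmit path: tell BQL how many bytes were queued. */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* ... post the skb to the hardware descriptor ring here ... */

	netdev_tx_sent_queue(txq, skb->len);
	return NETDEV_TX_OK;
}

/* Hypothetical TX-completion path: tell BQL what was reclaimed. */
static void example_tx_clean(struct net_device *dev,
			     unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}
```

BQL uses these byte counts (plus netdev_tx_reset_queue() when the ring is reset) to bound how much data sits in the device queue, trading a little bookkeeping for lower queueing latency.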
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1816 commits)
fm10k: Bump driver version to 0.15.2
fm10k: corrected VF multicast update
fm10k: mbx_update_max_size does not drop all oversized messages
fm10k: reset head instead of calling update_max_size
fm10k: renamed mbx_tx_dropped to mbx_tx_oversized
fm10k: update xcast mode before synchronizing multicast addresses
fm10k: start service timer on probe
fm10k: fix function header comment
fm10k: comment next_vf_mbx flow
fm10k: don't handle mailbox events in iov_event path and always process mailbox
fm10k: use separate workqueue for fm10k driver
fm10k: Set PF queues to unlimited bandwidth during virtualization
fm10k: expose tx_timeout_count as an ethtool stat
fm10k: only increment tx_timeout_count in Tx hang path
fm10k: remove extraneous "Reset interface" message
fm10k: separate PF only stats so that VF does not display them
fm10k: use hw->mac.max_queues for stats
fm10k: only show actual queues, not the maximum in hardware
fm10k: allow creation of VLAN on default vid
fm10k: fix unused warnings
...
Diffstat (limited to 'include')
151 files changed, 3209 insertions, 1319 deletions
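Among the header changes below, include/linux/if_vlan.h gains skb_vlan_tagged(), skb_vlan_tagged_multi() and vlan_features_check(), and include/linux/netdevice.h declares a generic passthru_features_check(). As a hedged illustration, a driver whose hardware cannot offload multiply-tagged frames could call the new helper from its .ndo_features_check callback; the driver and function names here are hypothetical, only the helper and callback signatures come from the hunks below.

```c
#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Hypothetical .ndo_features_check: drop features that are unsafe when
 * the frame carries more than one VLAN tag, via vlan_features_check(). */
static netdev_features_t example_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	return vlan_features_check(skb, features);
}
```

Drivers that can handle anything the stack hands them can instead point .ndo_features_check at passthru_features_check(), declared further down in the netdevice.h hunk.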
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 178525e5f430..018afb264ac2 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -58,8 +58,9 @@ struct af_alg_type { }; struct af_alg_sgl { - struct scatterlist sg[ALG_MAX_PAGES]; + struct scatterlist sg[ALG_MAX_PAGES + 1]; struct page *pages[ALG_MAX_PAGES]; + unsigned int npages; }; int af_alg_register_type(const struct af_alg_type *type); @@ -70,6 +71,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock); int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len); void af_alg_free_sg(struct af_alg_sgl *sgl); +void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new); int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con); diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 994739da827f..e34f906647d3 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -434,6 +434,27 @@ static inline struct bcma_device *bcma_find_core(struct bcma_bus *bus, return bcma_find_core_unit(bus, coreid, 0); } +#ifdef CONFIG_BCMA_HOST_PCI +extern void bcma_host_pci_up(struct bcma_bus *bus); +extern void bcma_host_pci_down(struct bcma_bus *bus); +extern int bcma_host_pci_irq_ctl(struct bcma_bus *bus, + struct bcma_device *core, bool enable); +#else +static inline void bcma_host_pci_up(struct bcma_bus *bus) +{ +} +static inline void bcma_host_pci_down(struct bcma_bus *bus) +{ +} +static inline int bcma_host_pci_irq_ctl(struct bcma_bus *bus, + struct bcma_device *core, bool enable) +{ + if (bus->hosttype == BCMA_HOSTTYPE_PCI) + return -ENOTSUPP; + return 0; +} +#endif + extern bool bcma_core_is_enabled(struct bcma_device *core); extern void bcma_core_disable(struct bcma_device *core, u32 flags); extern int bcma_core_enable(struct bcma_device *core, u32 flags); diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index db6fa217f98b..6cceedf65ca2 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -663,14 +663,6 @@ struct bcma_drv_cc_b { #define bcma_cc_maskset32(cc, offset, mask, set) \ bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set)) -extern void bcma_core_chipcommon_init(struct bcma_drv_cc *cc); -extern void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc); - -extern void bcma_chipco_suspend(struct bcma_drv_cc *cc); -extern void bcma_chipco_resume(struct bcma_drv_cc *cc); - -void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable); - extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks); extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc); @@ -690,9 +682,6 @@ u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value); u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value); /* PMU support */ -extern void bcma_pmu_init(struct bcma_drv_cc *cc); -extern void bcma_pmu_early_init(struct bcma_drv_cc *cc); - extern void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value); extern void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, diff --git a/include/linux/bcma/bcma_driver_gmac_cmn.h b/include/linux/bcma/bcma_driver_gmac_cmn.h index 4dd1f33e36a2..4354d4ea6713 100644 --- a/include/linux/bcma/bcma_driver_gmac_cmn.h +++ b/include/linux/bcma/bcma_driver_gmac_cmn.h @@ -91,10 +91,4 @@ struct bcma_drv_gmac_cmn { #define gmac_cmn_write16(gc, offset, val) bcma_write16((gc)->core, offset, val) 
#define gmac_cmn_write32(gc, offset, val) bcma_write32((gc)->core, offset, val) -#ifdef CONFIG_BCMA_DRIVER_GMAC_CMN -extern void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc); -#else -static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc) { } -#endif - #endif /* LINUX_BCMA_DRIVER_GMAC_CMN_H_ */ diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h index 0b3b32aeeb8a..8eea7f9e33b4 100644 --- a/include/linux/bcma/bcma_driver_mips.h +++ b/include/linux/bcma/bcma_driver_mips.h @@ -39,21 +39,6 @@ struct bcma_drv_mips { u8 early_setup_done:1; }; -#ifdef CONFIG_BCMA_DRIVER_MIPS -extern void bcma_core_mips_init(struct bcma_drv_mips *mcore); -extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore); - -extern unsigned int bcma_core_mips_irq(struct bcma_device *dev); -#else -static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { } -static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { } - -static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev) -{ - return 0; -} -#endif - extern u32 bcma_cpu_clock(struct bcma_drv_mips *mcore); #endif /* LINUX_BCMA_DRIVER_MIPS_H_ */ diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h index 3f809ae372c4..5ba6918ca20b 100644 --- a/include/linux/bcma/bcma_driver_pci.h +++ b/include/linux/bcma/bcma_driver_pci.h @@ -238,13 +238,13 @@ struct bcma_drv_pci { #define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val) #define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val) -extern void bcma_core_pci_early_init(struct bcma_drv_pci *pc); -extern void bcma_core_pci_init(struct bcma_drv_pci *pc); -extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, - struct bcma_device *core, bool enable); -extern void bcma_core_pci_up(struct bcma_bus *bus); -extern void bcma_core_pci_down(struct bcma_bus *bus); +#ifdef CONFIG_BCMA_DRIVER_PCI extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up); +#else +static inline void bcma_core_pci_power_save(struct bcma_bus *bus, bool up) +{ +} +#endif extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev); extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev); diff --git a/include/linux/bcma/bcma_driver_pcie2.h b/include/linux/bcma/bcma_driver_pcie2.h index 5988b05781c3..31e6d17ab798 100644 --- a/include/linux/bcma/bcma_driver_pcie2.h +++ b/include/linux/bcma/bcma_driver_pcie2.h @@ -143,6 +143,8 @@ struct bcma_drv_pcie2 { struct bcma_device *core; + + u16 reqsize; }; #define pcie2_read16(pcie2, offset) bcma_read16((pcie2)->core, offset) @@ -153,6 +155,4 @@ struct bcma_drv_pcie2 { #define pcie2_set32(pcie2, offset, set) bcma_set32((pcie2)->core, offset, set) #define pcie2_mask32(pcie2, offset, mask) bcma_mask32((pcie2)->core, offset, mask) -void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2); - #endif /* LINUX_BCMA_DRIVER_PCIE2_H_ */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c2e21113ecc0..d5cda067115a 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -32,23 +32,19 @@ struct bpf_map { u32 key_size; u32 value_size; u32 max_entries; - struct bpf_map_ops *ops; + const struct bpf_map_ops *ops; struct work_struct work; }; struct bpf_map_type_list { struct list_head list_node; - struct bpf_map_ops *ops; + const struct bpf_map_ops *ops; enum bpf_map_type type; }; -void bpf_register_map_type(struct bpf_map_type_list *tl); -void bpf_map_put(struct bpf_map *map); -struct bpf_map 
*bpf_map_get(struct fd f); - /* function argument constraints */ enum bpf_arg_type { - ARG_ANYTHING = 0, /* any argument is ok */ + ARG_DONTCARE = 0, /* unused argument in helper function */ /* the following constraints used to prototype * bpf_map_lookup/update/delete_elem() functions @@ -62,6 +58,9 @@ enum bpf_arg_type { */ ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */ ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */ + + ARG_PTR_TO_CTX, /* pointer to context */ + ARG_ANYTHING, /* any (initialized) argument is ok */ }; /* type of values returned from helper functions */ @@ -105,11 +104,14 @@ struct bpf_verifier_ops { * with 'type' (read or write) is allowed */ bool (*is_valid_access)(int off, int size, enum bpf_access_type type); + + u32 (*convert_ctx_access)(int dst_reg, int src_reg, int ctx_off, + struct bpf_insn *insn); }; struct bpf_prog_type_list { struct list_head list_node; - struct bpf_verifier_ops *ops; + const struct bpf_verifier_ops *ops; enum bpf_prog_type type; }; @@ -117,20 +119,25 @@ struct bpf_prog; struct bpf_prog_aux { atomic_t refcnt; - bool is_gpl_compatible; - enum bpf_prog_type prog_type; - struct bpf_verifier_ops *ops; - struct bpf_map **used_maps; u32 used_map_cnt; + const struct bpf_verifier_ops *ops; + struct bpf_map **used_maps; struct bpf_prog *prog; struct work_struct work; }; #ifdef CONFIG_BPF_SYSCALL void bpf_register_prog_type(struct bpf_prog_type_list *tl); +void bpf_register_map_type(struct bpf_map_type_list *tl); -void bpf_prog_put(struct bpf_prog *prog); struct bpf_prog *bpf_prog_get(u32 ufd); +void bpf_prog_put(struct bpf_prog *prog); + +struct bpf_map *bpf_map_get(struct fd f); +void bpf_map_put(struct bpf_map *map); + +/* verify correctness of eBPF program */ +int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); #else static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl) { @@ -144,14 +151,14 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd) static inline void bpf_prog_put(struct bpf_prog *prog) { } -#endif - -/* verify correctness of eBPF program */ -int bpf_check(struct bpf_prog *fp, union bpf_attr *attr); +#endif /* CONFIG_BPF_SYSCALL */ /* verifier prototypes for helper functions called from eBPF programs */ -extern struct bpf_func_proto bpf_map_lookup_elem_proto; -extern struct bpf_func_proto bpf_map_update_elem_proto; -extern struct bpf_func_proto bpf_map_delete_elem_proto; +extern const struct bpf_func_proto bpf_map_lookup_elem_proto; +extern const struct bpf_func_proto bpf_map_update_elem_proto; +extern const struct bpf_func_proto bpf_map_delete_elem_proto; + +extern const struct bpf_func_proto bpf_get_prandom_u32_proto; +extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; #endif /* _LINUX_BPF_H */ diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 7ccd928cc1f2..ae2982c0f7a6 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -11,6 +11,7 @@ #define PHY_ID_BCM5421 0x002060e0 #define PHY_ID_BCM5464 0x002060b0 #define PHY_ID_BCM5461 0x002060c0 +#define PHY_ID_BCM54616S 0x03625d10 #define PHY_ID_BCM57780 0x03625d90 #define PHY_ID_BCM7250 0xae025280 @@ -19,6 +20,7 @@ #define PHY_ID_BCM7425 0x03625e60 #define PHY_ID_BCM7429 0x600d8730 #define PHY_ID_BCM7439 0x600d8480 +#define PHY_ID_BCM7439_2 0xae025080 #define PHY_ID_BCM7445 0x600d8510 #define PHY_BCM_OUI_MASK 0xfffffc00 diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index c05ff0f9f9a5..c3a9c8fc60fa 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -61,6 
+61,8 @@ struct can_priv { char tx_led_trig_name[CAN_LED_NAME_SZ]; struct led_trigger *rx_led_trig; char rx_led_trig_name[CAN_LED_NAME_SZ]; + struct led_trigger *rxtx_led_trig; + char rxtx_led_trig_name[CAN_LED_NAME_SZ]; #endif }; diff --git a/include/linux/can/led.h b/include/linux/can/led.h index e0475c5cbb92..146de4506d21 100644 --- a/include/linux/can/led.h +++ b/include/linux/can/led.h @@ -21,8 +21,10 @@ enum can_led_event { #ifdef CONFIG_CAN_LEDS -/* keep space for interface name + "-tx"/"-rx" suffix and null terminator */ -#define CAN_LED_NAME_SZ (IFNAMSIZ + 4) +/* keep space for interface name + "-tx"/"-rx"/"-rxtx" + * suffix and null terminator + */ +#define CAN_LED_NAME_SZ (IFNAMSIZ + 6) void can_led_event(struct net_device *netdev, enum can_led_event event); void devm_can_led_init(struct net_device *netdev); diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index cc00d15c6107..b6a52a4b457a 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h @@ -44,16 +44,11 @@ static inline void can_skb_reserve(struct sk_buff *skb) skb_reserve(skb, sizeof(struct can_skb_priv)); } -static inline void can_skb_destructor(struct sk_buff *skb) -{ - sock_put(skb->sk); -} - static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) { if (sk) { sock_hold(sk); - skb->destructor = can_skb_destructor; + skb->destructor = sock_efree; skb->sk = sk; } } diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 439ff698000a..221025423e6c 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -43,6 +43,7 @@ enum dccp_state { DCCP_CLOSING = TCP_CLOSING, DCCP_TIME_WAIT = TCP_TIME_WAIT, DCCP_CLOSED = TCP_CLOSE, + DCCP_NEW_SYN_RECV = TCP_NEW_SYN_RECV, DCCP_PARTOPEN = TCP_MAX_STATES, DCCP_PASSIVE_CLOSEREQ, /* clients receiving CloseReq */ DCCP_MAX_STATES @@ -57,6 +58,7 @@ enum { DCCPF_CLOSING = TCPF_CLOSING, DCCPF_TIME_WAIT = TCPF_TIME_WAIT, DCCPF_CLOSED = TCPF_CLOSE, + DCCPF_NEW_SYN_RECV = TCPF_NEW_SYN_RECV, DCCPF_PARTOPEN = (1 << DCCP_PARTOPEN), }; @@ -317,6 +319,6 @@ static inline const char *dccp_role(const struct sock *sk) return NULL; } -extern void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req); +extern void dccp_syn_ack_timeout(const struct request_sock *req); #endif /* _LINUX_DCCP_H */ diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 1d869d185a0d..606563ef8a72 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -35,7 +35,6 @@ extern const struct header_ops eth_header_ops; int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len); -int eth_rebuild_header(struct sk_buff *skb); int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); diff --git a/include/linux/filter.h b/include/linux/filter.h index caac2087a4d5..fa11b3a367be 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -145,8 +145,6 @@ struct bpf_prog_aux; .off = 0, \ .imm = ((__u64) (IMM)) >> 32 }) -#define BPF_PSEUDO_MAP_FD 1 - /* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */ #define BPF_LD_MAP_FD(DST, MAP_FD) \ BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD) @@ -310,9 +308,11 @@ struct bpf_binary_header { struct bpf_prog { u16 pages; /* Number of allocated pages */ bool jited; /* Is our filter JIT'ed? */ + bool gpl_compatible; /* Is our filter GPL compatible? 
*/ u32 len; /* Number of filter blocks */ - struct sock_fprog_kern *orig_prog; /* Original BPF program */ + enum bpf_prog_type type; /* Type of BPF program */ struct bpf_prog_aux *aux; /* Auxiliary fields */ + struct sock_fprog_kern *orig_prog; /* Original BPF program */ unsigned int (*bpf_func)(const struct sk_buff *skb, const struct bpf_insn *filter); /* Instructions for interpreter */ @@ -454,6 +454,7 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest) BPF_ANCILLARY(VLAN_TAG_PRESENT); BPF_ANCILLARY(PAY_OFFSET); BPF_ANCILLARY(RANDOM); + BPF_ANCILLARY(VLAN_TPID); } /* Fallthrough. */ default: diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h index 6e82d888287c..8872ca103d06 100644 --- a/include/linux/ieee802154.h +++ b/include/linux/ieee802154.h @@ -28,7 +28,9 @@ #include <asm/byteorder.h> #define IEEE802154_MTU 127 -#define IEEE802154_MIN_PSDU_LEN 5 +#define IEEE802154_ACK_PSDU_LEN 5 +#define IEEE802154_MIN_PSDU_LEN 9 +#define IEEE802154_FCS_LEN 2 #define IEEE802154_PAN_ID_BROADCAST 0xffff #define IEEE802154_ADDR_SHORT_BROADCAST 0xffff @@ -38,6 +40,7 @@ #define IEEE802154_LIFS_PERIOD 40 #define IEEE802154_SIFS_PERIOD 12 +#define IEEE802154_MAX_SIFS_FRAME_SIZE 18 #define IEEE802154_MAX_CHANNEL 26 #define IEEE802154_MAX_PAGE 31 @@ -204,11 +207,18 @@ enum { /** * ieee802154_is_valid_psdu_len - check if psdu len is valid + * available lengths: + * 0-4 Reserved + * 5 MPDU (Acknowledgment) + * 6-8 Reserved + * 9-127 MPDU + * * @len: psdu len with (MHR + payload + MFR) */ static inline bool ieee802154_is_valid_psdu_len(const u8 len) { - return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU); + return (len == IEEE802154_ACK_PSDU_LEN || + (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU)); } /** diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index a57bca2ea97e..dad8b00beed2 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -44,6 +44,7 @@ struct br_ip_list { #define BR_PROMISC BIT(7) #define BR_PROXYARP BIT(8) #define BR_LEARNING_SYNC BIT(9) +#define BR_PROXYARP_WIFI BIT(10) extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); diff --git a/include/linux/if_link.h b/include/linux/if_link.h index 119130e9298b..da4929927f69 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -14,5 +14,6 @@ struct ifla_vf_info { __u32 linkstate; __u32 min_tx_rate; __u32 max_tx_rate; + __u32 rss_query_en; }; #endif /* _LINUX_IF_LINK_H */ diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index aff7ad8a4ea3..66a7d7600f43 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -19,6 +19,7 @@ #include <linux/netdevice.h> #include <linux/ppp_channel.h> #include <linux/skbuff.h> +#include <linux/workqueue.h> #include <uapi/linux/if_pppox.h> static inline struct pppoe_hdr *pppoe_hdr(const struct sk_buff *skb) @@ -32,6 +33,7 @@ struct pppoe_opt { struct pppoe_addr pa; /* what this socket is bound to*/ struct sockaddr_pppox relay; /* what socket data will be relayed to (PPPoE relaying) */ + struct work_struct padt_work;/* Work item for handling PADT */ }; struct pptp_opt { diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index b11b28a30b9e..920e4457ce6e 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -561,4 +561,71 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb, skb->protocol = htons(ETH_P_802_2); } +/** + * skb_vlan_tagged - check if skb is vlan tagged. 
+ * @skb: skbuff to query + * + * Returns true if the skb is tagged, regardless of whether it is hardware + * accelerated or not. + */ +static inline bool skb_vlan_tagged(const struct sk_buff *skb) +{ + if (!skb_vlan_tag_present(skb) && + likely(skb->protocol != htons(ETH_P_8021Q) && + skb->protocol != htons(ETH_P_8021AD))) + return false; + + return true; +} + +/** + * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers. + * @skb: skbuff to query + * + * Returns true if the skb is tagged with multiple vlan headers, regardless + * of whether it is hardware accelerated or not. + */ +static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) +{ + __be16 protocol = skb->protocol; + + if (!skb_vlan_tag_present(skb)) { + struct vlan_ethhdr *veh; + + if (likely(protocol != htons(ETH_P_8021Q) && + protocol != htons(ETH_P_8021AD))) + return false; + + veh = (struct vlan_ethhdr *)skb->data; + protocol = veh->h_vlan_encapsulated_proto; + } + + if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD)) + return false; + + return true; +} + +/** + * vlan_features_check - drop unsafe features for skb with multiple tags. + * @skb: skbuff to query + * @features: features to be checked + * + * Returns features without unsafe ones if the skb has multiple tags. + */ +static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, + netdev_features_t features) +{ + if (skb_vlan_tagged_multi(skb)) + features = netdev_intersect_features(features, + NETIF_F_SG | + NETIF_F_HIGHDMA | + NETIF_F_FRAGLIST | + NETIF_F_GEN_CSUM | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); + + return features; +} + #endif /* !(_LINUX_IF_VLAN_H_) */ diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h index 46da02410a09..ac48b10c9395 100644 --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h @@ -11,33 +11,34 @@ struct sk_buff; struct netlink_callback; struct inet_diag_handler { - void (*dump)(struct sk_buff *skb, - struct netlink_callback *cb, - struct inet_diag_req_v2 *r, - struct nlattr *bc); - - int (*dump_one)(struct sk_buff *in_skb, - const struct nlmsghdr *nlh, - struct inet_diag_req_v2 *req); - - void (*idiag_get_info)(struct sock *sk, - struct inet_diag_msg *r, - void *info); - __u16 idiag_type; + void (*dump)(struct sk_buff *skb, + struct netlink_callback *cb, + const struct inet_diag_req_v2 *r, + struct nlattr *bc); + + int (*dump_one)(struct sk_buff *in_skb, + const struct nlmsghdr *nlh, + const struct inet_diag_req_v2 *req); + + void (*idiag_get_info)(struct sock *sk, + struct inet_diag_msg *r, + void *info); + __u16 idiag_type; }; struct inet_connection_sock; int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, - struct sk_buff *skb, struct inet_diag_req_v2 *req, - struct user_namespace *user_ns, - u32 pid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh); + struct sk_buff *skb, const struct inet_diag_req_v2 *req, + struct user_namespace *user_ns, + u32 pid, u32 seq, u16 nlmsg_flags, + const struct nlmsghdr *unlh); void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb, - struct netlink_callback *cb, struct inet_diag_req_v2 *r, - struct nlattr *bc); + struct netlink_callback *cb, + const struct inet_diag_req_v2 *r, + struct nlattr *bc); int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, - struct sk_buff *in_skb, const struct nlmsghdr *nlh, - struct inet_diag_req_v2 *req); + struct sk_buff *in_skb, const struct nlmsghdr *nlh, + const struct inet_diag_req_v2 *req); int 
inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk); diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 4d5169f5d7d1..82806c60aa42 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -53,6 +53,10 @@ struct ipv6_devconf { __s32 ndisc_notify; __s32 suppress_frag_ndisc; __s32 accept_ra_mtu; + struct ipv6_stable_secret { + bool initialized; + struct in6_addr secret; + } stable_secret; void *sysctl; }; diff --git a/include/linux/jhash.h b/include/linux/jhash.h index 47cb09edec1a..348c6f47e4cc 100644 --- a/include/linux/jhash.h +++ b/include/linux/jhash.h @@ -145,11 +145,11 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval) } -/* jhash_3words - hash exactly 3, 2 or 1 word(s) */ -static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) +/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */ +static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) { - a += JHASH_INITVAL; - b += JHASH_INITVAL; + a += initval; + b += initval; c += initval; __jhash_final(a, b, c); @@ -157,14 +157,19 @@ static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) return c; } +static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) +{ + return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2)); +} + static inline u32 jhash_2words(u32 a, u32 b, u32 initval) { - return jhash_3words(a, b, 0, initval); + return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2)); } static inline u32 jhash_1word(u32 a, u32 initval) { - return jhash_3words(a, 0, 0, initval); + return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2)); } #endif /* _LINUX_JHASH_H */ diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 7b6d4e9ff603..f62e7cf227c6 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -68,6 +68,8 @@ enum { MLX4_CMD_UNMAP_ICM_AUX = 0xffb, MLX4_CMD_SET_ICM_SIZE = 0xffd, MLX4_CMD_ACCESS_REG = 0x3b, + MLX4_CMD_ALLOCATE_VPP = 0x80, + MLX4_CMD_SET_VPORT_QOS = 0x81, /*master notify fw on finish for slave's flr*/ MLX4_CMD_INFORM_FLR_DONE = 0x5b, @@ -163,6 +165,9 @@ enum { MLX4_QP_FLOW_STEERING_ATTACH = 0x65, MLX4_QP_FLOW_STEERING_DETACH = 0x66, MLX4_FLOW_STEERING_IB_UC_QP_RANGE = 0x64, + + /* Update and read QCN parameters */ + MLX4_CMD_CONGESTION_CTRL_OPCODE = 0x68, }; enum { @@ -183,7 +188,14 @@ enum { }; enum { - /* set port opcode modifiers */ + /* Set port opcode modifiers */ + MLX4_SET_PORT_IB_OPCODE = 0x0, + MLX4_SET_PORT_ETH_OPCODE = 0x1, + MLX4_SET_PORT_BEACON_OPCODE = 0x4, +}; + +enum { + /* Set port Ethernet input modifiers */ MLX4_SET_PORT_GENERAL = 0x0, MLX4_SET_PORT_RQP_CALC = 0x1, MLX4_SET_PORT_MAC_TABLE = 0x2, @@ -233,6 +245,16 @@ struct mlx4_config_dev_params { u8 rx_csum_flags_port_2; }; +enum mlx4_en_congestion_control_algorithm { + MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT = 0, +}; + +enum mlx4_en_congestion_control_opmod { + MLX4_CONGESTION_CONTROL_GET_PARAMS, + MLX4_CONGESTION_CONTROL_GET_STATISTICS, + MLX4_CONGESTION_CONTROL_SET_PARAMS = 4, +}; + struct mlx4_dev; struct mlx4_cmd_mailbox { @@ -281,6 +303,8 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbo u32 mlx4_comm_get_version(void); int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); +int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate, + int max_tx_rate); int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); int mlx4_get_vf_config(struct mlx4_dev 
*dev, int port, int vf, struct ifla_vf_info *ivf); int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index e4ebff7e9d02..f9ce34bec45b 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -49,8 +49,6 @@ #define MSIX_LEGACY_SZ 4 #define MIN_MSIX_P_PORT 5 -#define MLX4_NUM_UP 8 -#define MLX4_NUM_TC 8 #define MLX4_MAX_100M_UNITS_VAL 255 /* * work around: can't set values * greater then this value when @@ -174,6 +172,7 @@ enum { MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48, + MLX4_DEV_CAP_FLAG_RSS_IP_FRAG = 1LL << 52, MLX4_DEV_CAP_FLAG_SET_ETH_SCHED = 1LL << 53, MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55, MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59, @@ -203,7 +202,14 @@ enum { MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18, MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19, MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20, - MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21 + MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21, + MLX4_DEV_CAP_FLAG2_QCN = 1LL << 22, + MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT = 1LL << 23, + MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN = 1LL << 24, + MLX4_DEV_CAP_FLAG2_QOS_VPP = 1LL << 25, + MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 26, + MLX4_DEV_CAP_FLAG2_PORT_BEACON = 1LL << 27, + MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28, }; enum { @@ -449,6 +455,21 @@ enum mlx4_module_id { MLX4_MODULE_ID_QSFP28 = 0x11, }; +enum { /* rl */ + MLX4_QP_RATE_LIMIT_NONE = 0, + MLX4_QP_RATE_LIMIT_KBS = 1, + MLX4_QP_RATE_LIMIT_MBS = 2, + MLX4_QP_RATE_LIMIT_GBS = 3 +}; + +struct mlx4_rate_limit_caps { + u16 num_rates; /* Number of different rates */ + u8 min_unit; + u16 min_val; + u8 max_unit; + u16 max_val; +}; + static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) { return (major << 32) | (minor << 16) | subminor; @@ -564,6 +585,7 @@ struct mlx4_caps { u32 dmfs_high_rate_qpn_base; u32 dmfs_high_rate_qpn_range; u32 vf_caps; + struct mlx4_rate_limit_caps rl_caps; }; struct mlx4_buf_list { @@ -982,6 +1004,11 @@ static inline int mlx4_is_slave(struct mlx4_dev *dev) return dev->flags & MLX4_FLAG_SLAVE; } +static inline int mlx4_is_eth(struct mlx4_dev *dev, int port) +{ + return dev->caps.port_type[port] == MLX4_PORT_TYPE_IB ? 
0 : 1; +} + int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, struct mlx4_buf *buf, gfp_t gfp); void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); @@ -1282,14 +1309,13 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac); void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port); int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); -void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap); int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx); int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, u8 promisc); -int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc); -int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, - u8 *pg, u16 *ratelimit); +int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time); +int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, + u8 ignore_fcs_value); int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable); int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx); int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 551f85456c11..6fed539e5456 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -207,14 +207,17 @@ struct mlx4_qp_context { __be32 msn; __be16 rq_wqe_counter; __be16 sq_wqe_counter; - u32 reserved3[2]; + u32 reserved3; + __be16 rate_limit_params; + u8 reserved4; + u8 qos_vport; __be32 param3; __be32 nummmcpeers_basemkey; u8 log_page_size; - u8 reserved4[2]; + u8 reserved5[2]; u8 mtt_base_addr_h; __be32 mtt_base_addr_l; - u32 reserved5[10]; + u32 reserved6[10]; }; struct mlx4_update_qp_context { @@ -229,6 +232,8 @@ struct mlx4_update_qp_context { enum { MLX4_UPD_QP_MASK_PM_STATE = 32, MLX4_UPD_QP_MASK_VSD = 33, + MLX4_UPD_QP_MASK_QOS_VPP = 34, + MLX4_UPD_QP_MASK_RATE_LIMIT = 35, }; enum { @@ -428,7 +433,9 @@ struct mlx4_wqe_inline_seg { enum mlx4_update_qp_attr { MLX4_UPDATE_QP_SMAC = 1 << 0, MLX4_UPDATE_QP_VSD = 1 << 1, - MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1 + MLX4_UPDATE_QP_RATE_LIMIT = 1 << 2, + MLX4_UPDATE_QP_QOS_VPORT = 1 << 3, + MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 4) - 1 }; enum mlx4_update_qp_params_flags { @@ -437,7 +444,10 @@ enum mlx4_update_qp_params_flags { struct mlx4_update_qp_params { u8 smac_index; + u8 qos_vport; u32 flags; + u16 rate_unit; + u16 rate_val; }; int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h index 2826a4b6071e..68cd08f02c2f 100644 --- a/include/linux/mlx5/cmd.h +++ b/include/linux/mlx5/cmd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index f6b17ac601bd..2695ced222df 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -137,14 +137,15 @@ enum { static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, void __iomem *uar_page, - spinlock_t *doorbell_lock) + spinlock_t *doorbell_lock, + u32 cons_index) { __be32 doorbell[2]; u32 sn; u32 ci; sn = cq->arm_sn & 3; - ci = cq->cons_index & 0xffffff; + ci = cons_index & 0xffffff; *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 4e5bd813bb9a..abf65c790421 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h index 163a818411e7..afc78a3f4462 100644 --- a/include/linux/mlx5/doorbell.h +++ b/include/linux/mlx5/doorbell.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 166d9315fe4b..9a90e7523dc2 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -232,6 +232,9 @@ struct mlx5_cmd_stats { }; struct mlx5_cmd { + void *cmd_alloc_buf; + dma_addr_t alloc_dma; + int alloc_size; void *cmd_buf; dma_addr_t dma; u16 cmdif_rev; @@ -407,7 +410,7 @@ struct mlx5_core_srq { struct mlx5_eq_table { void __iomem *update_ci; void __iomem *update_arm_ci; - struct list_head *comp_eq_head; + struct list_head comp_eqs_list; struct mlx5_eq pages_eq; struct mlx5_eq async_eq; struct mlx5_eq cmd_eq; @@ -722,6 +725,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_start_eqs(struct mlx5_core_dev *dev); int mlx5_stop_eqs(struct mlx5_core_dev *dev); +int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); @@ -777,14 +781,22 @@ enum { MAX_MR_CACHE_ENTRIES = 16, }; +enum { + MLX5_INTERFACE_PROTOCOL_IB = 0, + MLX5_INTERFACE_PROTOCOL_ETH = 1, +}; + struct mlx5_interface { void * (*add)(struct mlx5_core_dev *dev); void (*remove)(struct mlx5_core_dev *dev, void *context); void (*event)(struct mlx5_core_dev *dev, void *context, enum mlx5_dev_event event, unsigned long param); + void * (*get_dev)(void *context); + int protocol; struct list_head list; }; +void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); int mlx5_register_interface(struct mlx5_interface *intf); void mlx5_unregister_interface(struct mlx5_interface *intf); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 5f48b8f592c5..cb3ad17edd1f 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 61f7a342d1bf..310b5f7fd6ae 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h index e1a363a33663..f43ed054a3e0 100644 --- a/include/linux/mlx5/srq.h +++ b/include/linux/mlx5/srq.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 996807963716..83430f2ea757 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -33,6 +33,8 @@ #define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d #define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 #define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 +#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 +#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 #define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 #define SDIO_VENDOR_ID_INTEL 0x0089 diff --git a/include/linux/net.h b/include/linux/net.h index 17d83393afcc..738ea48be889 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -120,7 +120,6 @@ struct socket { struct vm_area_struct; struct page; -struct kiocb; struct sockaddr; struct msghdr; struct module; @@ -162,8 +161,8 @@ struct proto_ops { int (*compat_getsockopt)(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); #endif - int (*sendmsg) (struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len); + int (*sendmsg) (struct socket *sock, struct msghdr *m, + size_t total_len); /* Notes for implementing recvmsg: * =============================== * msg->msg_namelen should get updated by the recvmsg handlers @@ -172,9 +171,8 @@ struct proto_ops { * handlers can assume that msg.msg_name is either NULL or has * a minimum size of sizeof(struct sockaddr_storage). */ - int (*recvmsg) (struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t total_len, - int flags); + int (*recvmsg) (struct socket *sock, struct msghdr *m, + size_t total_len, int flags); int (*mmap) (struct file *file, struct socket *sock, struct vm_area_struct * vma); ssize_t (*sendpage) (struct socket *sock, struct page *page, @@ -213,7 +211,7 @@ int sock_create(int family, int type, int proto, struct socket **res); int sock_create_kern(int family, int type, int proto, struct socket **res); int sock_create_lite(int family, int type, int proto, struct socket **res); void sock_release(struct socket *sock); -int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len); +int sock_sendmsg(struct socket *sock, struct msghdr *msg); int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags); struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 278738873703..b5679aed660b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -261,7 +261,6 @@ struct header_ops { unsigned short type, const void *daddr, const void *saddr, unsigned int len); int (*parse)(const struct sk_buff *skb, unsigned char *haddr); - int (*rebuild)(struct sk_buff *skb); int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); void (*cache_update)(struct hh_cache *hh, const struct net_device *dev, @@ -588,6 +587,7 @@ struct netdev_queue { #ifdef CONFIG_BQL struct dql dql; #endif + unsigned long tx_maxrate; } ____cacheline_aligned_in_smp; static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) @@ -795,7 +795,10 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, * struct net_device *dev); * Called when a packet needs to be transmitted. - * Must return NETDEV_TX_OK , NETDEV_TX_BUSY. + * Returns NETDEV_TX_OK. 
Can return NETDEV_TX_BUSY, but you should stop + * the queue before that can happen; it's for obsolete devices and weird + * corner cases, but the stack really does a non-trivial amount + * of useless work if you return NETDEV_TX_BUSY. * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) * Required can not be NULL. * @@ -875,6 +878,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); * int (*ndo_set_vf_port)(struct net_device *dev, int vf, * struct nlattr *port[]); + * + * Enable or disable the VF ability to query its RSS Redirection Table and + * Hash Key. This is needed since on some devices VF share this information + * with PF and querying it may adduce a theoretical security risk. + * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) * Called to setup 'tc' number of traffic classes in the net device. This @@ -1026,15 +1034,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * be otherwise expressed by feature flags. The check is called with * the set of features that the stack has calculated and it returns * those the driver believes to be appropriate. - * - * int (*ndo_switch_parent_id_get)(struct net_device *dev, - * struct netdev_phys_item_id *psid); - * Called to get an ID of the switch chip this port is part of. - * If driver implements this, it indicates that it represents a port - * of a switch chip. - * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state); - * Called to notify switch device port of bridge port STP - * state change. + * int (*ndo_set_tx_maxrate)(struct net_device *dev, + * int queue_index, u32 maxrate); + * Called when a user wants to set a max-rate limitation of specific + * TX queue. + * int (*ndo_get_iflink)(const struct net_device *dev); + * Called to get the iflink value of this device. 
*/ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1099,6 +1104,9 @@ struct net_device_ops { struct nlattr *port[]); int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); + int (*ndo_set_vf_rss_query_en)( + struct net_device *dev, + int vf, bool setting); int (*ndo_setup_tc)(struct net_device *dev, u8 tc); #if IS_ENABLED(CONFIG_FCOE) int (*ndo_fcoe_enable)(struct net_device *dev); @@ -1172,6 +1180,8 @@ struct net_device_ops { bool new_carrier); int (*ndo_get_phys_port_id)(struct net_device *dev, struct netdev_phys_item_id *ppid); + int (*ndo_get_phys_port_name)(struct net_device *dev, + char *name, size_t len); void (*ndo_add_vxlan_port)(struct net_device *dev, sa_family_t sa_family, __be16 port); @@ -1191,12 +1201,10 @@ struct net_device_ops { netdev_features_t (*ndo_features_check) (struct sk_buff *skb, struct net_device *dev, netdev_features_t features); -#ifdef CONFIG_NET_SWITCHDEV - int (*ndo_switch_parent_id_get)(struct net_device *dev, - struct netdev_phys_item_id *psid); - int (*ndo_switch_port_stp_update)(struct net_device *dev, - u8 state); -#endif + int (*ndo_set_tx_maxrate)(struct net_device *dev, + int queue_index, + u32 maxrate); + int (*ndo_get_iflink)(const struct net_device *dev); }; /** @@ -1305,6 +1313,8 @@ enum netdev_priv_flags { * @base_addr: Device I/O address * @irq: Device IRQ number * + * @carrier_changes: Stats to monitor carrier on<->off transitions + * * @state: Generic network queuing layer state, see netdev_state_t * @dev_list: The global list of network devices * @napi_list: List entry, that is used for polling napi devices @@ -1328,7 +1338,7 @@ enum netdev_priv_flags { * @mpls_features: Mask of features inheritable by MPLS * * @ifindex: interface index - * @iflink: unique device identifier + * @group: The group, that the device belongs to * * @stats: Statistics struct, which was left as a legacy, use * rtnl_link_stats64 instead @@ -1338,8 +1348,6 @@ enum netdev_priv_flags { * @tx_dropped: Dropped packets by core network, * do not use this in drivers * - * @carrier_changes: Stats to monitor carrier on<->off transitions - * * @wireless_handlers: List of functions to handle Wireless Extensions, * instead of ioctl, * see <net/iw_handler.h> for details. @@ -1348,8 +1356,7 @@ enum netdev_priv_flags { * @netdev_ops: Includes several pointers to callbacks, * if one wants to override the ndo_*() functions * @ethtool_ops: Management operations - * @fwd_ops: Management operations - * @header_ops: Includes callbacks for creating,parsing,rebuilding,etc + * @header_ops: Includes callbacks for creating,parsing,caching,etc * of Layer 2 headers. 
* * @flags: Interface flags (a la BSD) @@ -1383,14 +1390,14 @@ enum netdev_priv_flags { * @dev_port: Used to differentiate devices that share * the same function * @addr_list_lock: XXX: need comments on this one - * @uc: unicast mac addresses - * @mc: multicast mac addresses - * @dev_addrs: list of device hw addresses - * @queues_kset: Group of all Kobjects in the Tx and RX queues * @uc_promisc: Counter, that indicates, that promiscuous mode * has been enabled due to the need to listen to * additional unicast addresses in a device that * does not implement ndo_set_rx_mode() + * @uc: unicast mac addresses + * @mc: multicast mac addresses + * @dev_addrs: list of device hw addresses + * @queues_kset: Group of all Kobjects in the Tx and RX queues * @promiscuity: Number of times, the NIC is told to work in * Promiscuous mode, if it becomes 0 the NIC will * exit from working in Promiscuous mode @@ -1420,6 +1427,12 @@ enum netdev_priv_flags { * @ingress_queue: XXX: need comments on this one * @broadcast: hw bcast address * + * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, + * indexed by RX queue number. Assigned by driver. + * This must only be set if the ndo_rx_flow_steer + * operation is defined + * @index_hlist: Device index hash chain + * * @_tx: Array of TX queues * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time * @real_num_tx_queues: Number of TX queues currently active in device @@ -1429,11 +1442,6 @@ enum netdev_priv_flags { * * @xps_maps: XXX: need comments on this one * - * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, - * indexed by RX queue number. Assigned by driver. - * This must only be set if the ndo_rx_flow_steer - * operation is defined - * * @trans_start: Time (in jiffies) of last Tx * @watchdog_timeo: Represents the timeout that is used by * the watchdog ( see dev_watchdog() ) @@ -1441,7 +1449,6 @@ enum netdev_priv_flags { * * @pcpu_refcnt: Number of references to this device * @todo_list: Delayed register/unregister - * @index_hlist: Device index hash chain * @link_watch_list: XXX: need comments on this one * * @reg_state: Register/unregister state machine @@ -1489,7 +1496,6 @@ enum netdev_priv_flags { * * @qdisc_tx_busylock: XXX: need comments on this one * - * @group: The group, that the device belongs to * @pm_qos_req: Power Management QoS object * * FIXME: cleanup struct net_device such that network protocol info @@ -1509,6 +1515,8 @@ struct net_device { unsigned long base_addr; int irq; + atomic_t carrier_changes; + /* * Some hardware also needs these fields (state,dev_list, * napi_list,unreg_list,close_list) but they are not @@ -1542,22 +1550,22 @@ struct net_device { netdev_features_t mpls_features; int ifindex; - int iflink; + int group; struct net_device_stats stats; atomic_long_t rx_dropped; atomic_long_t tx_dropped; - atomic_t carrier_changes; - #ifdef CONFIG_WIRELESS_EXT const struct iw_handler_def * wireless_handlers; struct iw_public_data * wireless_data; #endif const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; - const struct forwarding_accel_ops *fwd_ops; +#ifdef CONFIG_NET_SWITCHDEV + const struct swdev_ops *swdev_ops; +#endif const struct header_ops *header_ops; @@ -1588,6 +1596,8 @@ struct net_device { unsigned short dev_id; unsigned short dev_port; spinlock_t addr_list_lock; + unsigned char name_assign_type; + bool uc_promisc; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; @@ -1595,10 +1605,6 @@ struct net_device { 
#ifdef CONFIG_SYSFS struct kset *queues_kset; #endif - - unsigned char name_assign_type; - - bool uc_promisc; unsigned int promiscuity; unsigned int allmulti; @@ -1645,7 +1651,10 @@ struct net_device { struct netdev_queue __rcu *ingress_queue; unsigned char broadcast[MAX_ADDR_LEN]; - +#ifdef CONFIG_RFS_ACCEL + struct cpu_rmap *rx_cpu_rmap; +#endif + struct hlist_node index_hlist; /* * Cache lines mostly used on transmit path @@ -1656,13 +1665,11 @@ struct net_device { struct Qdisc *qdisc; unsigned long tx_queue_len; spinlock_t tx_global_lock; + int watchdog_timeo; #ifdef CONFIG_XPS struct xps_dev_maps __rcu *xps_maps; #endif -#ifdef CONFIG_RFS_ACCEL - struct cpu_rmap *rx_cpu_rmap; -#endif /* These may be needed for future network-power-down code. */ @@ -1672,13 +1679,11 @@ struct net_device { */ unsigned long trans_start; - int watchdog_timeo; struct timer_list watchdog_timer; int __percpu *pcpu_refcnt; struct list_head todo_list; - struct hlist_node index_hlist; struct list_head link_watch_list; enum { NETREG_UNINITIALIZED=0, @@ -1702,9 +1707,7 @@ struct net_device { struct netpoll_info __rcu *npinfo; #endif -#ifdef CONFIG_NET_NS - struct net *nd_net; -#endif + possible_net_t nd_net; /* mid-layer private */ union { @@ -1745,8 +1748,6 @@ struct net_device { #endif struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; - int group; - struct pm_qos_request pm_qos_req; }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -1844,10 +1845,7 @@ struct net *dev_net(const struct net_device *dev) static inline void dev_net_set(struct net_device *dev, struct net *net) { -#ifdef CONFIG_NET_NS - release_net(dev->nd_net); - dev->nd_net = hold_net(net); -#endif + write_pnet(&dev->nd_net, net); } static inline bool netdev_uses_dsa(struct net_device *dev) @@ -2159,6 +2157,7 @@ void __dev_remove_pack(struct packet_type *pt); void dev_add_offload(struct packet_offload *po); void dev_remove_offload(struct packet_offload *po); +int dev_get_iflink(const struct net_device *dev); struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, unsigned short mask); struct net_device *dev_get_by_name(struct net *net, const char *name); @@ -2167,9 +2166,14 @@ struct net_device *__dev_get_by_name(struct net *net, const char *name); int dev_alloc_name(struct net_device *dev, const char *name); int dev_open(struct net_device *dev); int dev_close(struct net_device *dev); +int dev_close_many(struct list_head *head, bool unlink); void dev_disable_lro(struct net_device *dev); -int dev_loopback_xmit(struct sk_buff *newskb); -int dev_queue_xmit(struct sk_buff *skb); +int dev_loopback_xmit(struct sock *sk, struct sk_buff *newskb); +int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb); +static inline int dev_queue_xmit(struct sk_buff *skb) +{ + return dev_queue_xmit_sk(skb->sk, skb); +} int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); int register_netdevice(struct net_device *dev); void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); @@ -2409,15 +2413,6 @@ static inline int dev_parse_header(const struct sk_buff *skb, return dev->header_ops->parse(skb, haddr); } -static inline int dev_rebuild_header(struct sk_buff *skb) -{ - const struct net_device *dev = skb->dev; - - if (!dev->header_ops || !dev->header_ops->rebuild) - return 0; - return dev->header_ops->rebuild(skb); -} - typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); int register_gifconf(unsigned int family, gifconf_func_t *gifconf); static 
inline int unregister_gifconf(unsigned int family) @@ -2939,7 +2934,11 @@ static inline void dev_consume_skb_any(struct sk_buff *skb) int netif_rx(struct sk_buff *skb); int netif_rx_ni(struct sk_buff *skb); -int netif_receive_skb(struct sk_buff *skb); +int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb); +static inline int netif_receive_skb(struct sk_buff *skb) +{ + return netif_receive_skb_sk(skb->sk, skb); +} gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); void napi_gro_flush(struct napi_struct *napi, bool flush_old); struct sk_buff *napi_get_frags(struct napi_struct *napi); @@ -2975,6 +2974,8 @@ int dev_set_mac_address(struct net_device *, struct sockaddr *); int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_item_id *ppid); +int dev_get_phys_port_name(struct net_device *dev, + char *name, size_t len); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); @@ -3679,6 +3680,9 @@ void netdev_change_features(struct net_device *dev); void netif_stacked_transfer_operstate(const struct net_device *rootdev, struct net_device *dev); +netdev_features_t passthru_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); netdev_features_t netif_skb_features(struct sk_buff *skb); static inline bool net_gso_ok(netdev_features_t features, int gso_type) diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 2517ece98820..63560d0a8dfe 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -44,11 +44,39 @@ int netfilter_init(void); struct sk_buff; struct nf_hook_ops; + +struct sock; + +struct nf_hook_state { + unsigned int hook; + int thresh; + u_int8_t pf; + struct net_device *in; + struct net_device *out; + struct sock *sk; + int (*okfn)(struct sock *, struct sk_buff *); +}; + +static inline void nf_hook_state_init(struct nf_hook_state *p, + unsigned int hook, + int thresh, u_int8_t pf, + struct net_device *indev, + struct net_device *outdev, + struct sock *sk, + int (*okfn)(struct sock *, struct sk_buff *)) +{ + p->hook = hook; + p->thresh = thresh; + p->pf = pf; + p->in = indev; + p->out = outdev; + p->sk = sk; + p->okfn = okfn; +} + typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)); + const struct nf_hook_state *state); struct nf_hook_ops { struct list_head list; @@ -118,9 +146,7 @@ static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) } #endif -int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, - struct net_device *indev, struct net_device *outdev, - int (*okfn)(struct sk_buff *), int thresh); +int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state); /** * nf_hook_thresh - call a netfilter hook @@ -130,21 +156,29 @@ int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, * value indicates the packet has been consumed by the hook. 
*/ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, + struct sock *sk, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, - int (*okfn)(struct sk_buff *), int thresh) + int (*okfn)(struct sock *, struct sk_buff *), + int thresh) { - if (nf_hooks_active(pf, hook)) - return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh); + if (nf_hooks_active(pf, hook)) { + struct nf_hook_state state; + + nf_hook_state_init(&state, hook, thresh, pf, + indev, outdev, sk, okfn); + return nf_hook_slow(skb, &state); + } return 1; } -static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, - struct net_device *indev, struct net_device *outdev, - int (*okfn)(struct sk_buff *)) +static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk, + struct sk_buff *skb, struct net_device *indev, + struct net_device *outdev, + int (*okfn)(struct sock *, struct sk_buff *)) { - return nf_hook_thresh(pf, hook, skb, indev, outdev, okfn, INT_MIN); + return nf_hook_thresh(pf, hook, sk, skb, indev, outdev, okfn, INT_MIN); } /* Activate hook; either okfn or kfree_skb called, unless a hook @@ -165,35 +199,36 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, */ static inline int -NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sk_buff *skb, - struct net_device *in, struct net_device *out, - int (*okfn)(struct sk_buff *), int thresh) +NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sock *sk, + struct sk_buff *skb, struct net_device *in, + struct net_device *out, + int (*okfn)(struct sock *, struct sk_buff *), int thresh) { - int ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, thresh); + int ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, thresh); if (ret == 1) - ret = okfn(skb); + ret = okfn(sk, skb); return ret; } static inline int -NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sk_buff *skb, - struct net_device *in, struct net_device *out, - int (*okfn)(struct sk_buff *), bool cond) +NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sock *sk, + struct sk_buff *skb, struct net_device *in, struct net_device *out, + int (*okfn)(struct sock *, struct sk_buff *), bool cond) { int ret; if (!cond || - ((ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN)) == 1)) - ret = okfn(skb); + ((ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, INT_MIN)) == 1)) + ret = okfn(sk, skb); return ret; } static inline int -NF_HOOK(uint8_t pf, unsigned int hook, struct sk_buff *skb, +NF_HOOK(uint8_t pf, unsigned int hook, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, - int (*okfn)(struct sk_buff *)) + int (*okfn)(struct sock *, struct sk_buff *)) { - return NF_HOOK_THRESH(pf, hook, skb, in, out, okfn, INT_MIN); + return NF_HOOK_THRESH(pf, hook, sk, skb, in, out, okfn, INT_MIN); } /* Call setsockopt() */ @@ -293,19 +328,21 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) } #else /* !CONFIG_NETFILTER */ -#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb) -#define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) (okfn)(skb) +#define NF_HOOK(pf, hook, sk, skb, indev, outdev, okfn) (okfn)(sk, skb) +#define NF_HOOK_COND(pf, hook, sk, skb, indev, outdev, okfn, cond) (okfn)(sk, skb) static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, + struct sock *sk, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, - int (*okfn)(struct sk_buff *), int thresh) + int (*okfn)(struct sock *sk, struct sk_buff 
*), int thresh) { - return okfn(skb); + return okfn(sk, skb); } -static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, - struct net_device *indev, struct net_device *outdev, - int (*okfn)(struct sk_buff *)) +static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk, + struct sk_buff *skb, struct net_device *indev, + struct net_device *outdev, + int (*okfn)(struct sock *, struct sk_buff *)) { return 1; } diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index f1606fa6132d..34b172301558 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -483,7 +483,7 @@ static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr) if (!__nested) return -EMSGSIZE; - ret = nla_put_net32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr); + ret = nla_put_in_addr(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr); if (!ret) ipset_nest_end(skb, __nested); return ret; @@ -497,8 +497,7 @@ static inline int nla_put_ipaddr6(struct sk_buff *skb, int type, if (!__nested) return -EMSGSIZE; - ret = nla_put(skb, IPSET_ATTR_IPADDR_IPV6, - sizeof(struct in6_addr), ipaddrptr); + ret = nla_put_in6_addr(skb, IPSET_ATTR_IPADDR_IPV6, ipaddrptr); if (!ret) ipset_nest_end(skb, __nested); return ret; diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index cfb7191e6efa..c22a7fb8d0df 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h @@ -54,8 +54,7 @@ extern struct xt_table *arpt_register_table(struct net *net, extern void arpt_unregister_table(struct xt_table *table); extern unsigned int arpt_do_table(struct sk_buff *skb, unsigned int hook, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, struct xt_table *table); #ifdef CONFIG_COMPAT diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index c755e4971fa3..ab8f76dba668 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -2,7 +2,7 @@ #define __LINUX_BRIDGE_NETFILTER_H #include <uapi/linux/netfilter_bridge.h> - +#include <linux/skbuff.h> enum nf_br_hook_priorities { NF_BR_PRI_FIRST = INT_MIN, @@ -17,110 +17,48 @@ enum nf_br_hook_priorities { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) -#define BRNF_PKT_TYPE 0x01 #define BRNF_BRIDGED_DNAT 0x02 -#define BRNF_BRIDGED 0x04 #define BRNF_NF_BRIDGE_PREROUTING 0x08 -#define BRNF_8021Q 0x10 -#define BRNF_PPPoE 0x20 -static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb) +static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) { - switch (skb->protocol) { - case __cpu_to_be16(ETH_P_8021Q): - return VLAN_HLEN; - case __cpu_to_be16(ETH_P_PPP_SES): + if (skb->nf_bridge->orig_proto == BRNF_PROTO_PPPOE) return PPPOE_SES_HLEN; - default: - return 0; - } + return 0; } -static inline void nf_bridge_update_protocol(struct sk_buff *skb) -{ - if (skb->nf_bridge->mask & BRNF_8021Q) - skb->protocol = htons(ETH_P_8021Q); - else if (skb->nf_bridge->mask & BRNF_PPPoE) - skb->protocol = htons(ETH_P_PPP_SES); -} +int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb); -/* Fill in the header for fragmented IP packets handled by - * the IPv4 connection tracking code. 
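Callers of NF_HOOK()/nf_hook() change in step with the hook prototype: the socket is passed explicitly and the continuation (okfn) now takes (struct sock *, struct sk_buff *), just as the xt table entry points such as arpt_do_table() above now take the state. A hedged sketch of an updated output-path caller; sample_output*() and the direct hand-off to dev_queue_xmit_sk() are simplifications for illustration, not code from the patch.

#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/skbuff.h>
#include <net/dst.h>

static int sample_output_finish(struct sock *sk, struct sk_buff *skb)
{
	/* the socket now arrives as an explicit argument */
	return dev_queue_xmit_sk(sk, skb);
}

static int sample_output(struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb,
		       NULL, skb_dst(skb)->dev, sample_output_finish);
}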
- * - * Only used in br_forward.c - */ -static inline int nf_bridge_copy_header(struct sk_buff *skb) +static inline void br_drop_fake_rtable(struct sk_buff *skb) { - int err; - unsigned int header_size; - - nf_bridge_update_protocol(skb); - header_size = ETH_HLEN + nf_bridge_encap_header_len(skb); - err = skb_cow_head(skb, header_size); - if (err) - return err; + struct dst_entry *dst = skb_dst(skb); - skb_copy_to_linear_data_offset(skb, -header_size, - skb->nf_bridge->data, header_size); - __skb_push(skb, nf_bridge_encap_header_len(skb)); - return 0; + if (dst && (dst->flags & DST_FAKE_RTABLE)) + skb_dst_drop(skb); } -static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb) +static inline int nf_bridge_get_physinif(const struct sk_buff *skb) { - if (skb->nf_bridge && - skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT)) - return nf_bridge_copy_header(skb); - return 0; + return skb->nf_bridge ? skb->nf_bridge->physindev->ifindex : 0; } -static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) +static inline int nf_bridge_get_physoutif(const struct sk_buff *skb) { - if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE)) - return PPPOE_SES_HLEN; - return 0; + return skb->nf_bridge ? skb->nf_bridge->physoutdev->ifindex : 0; } -int br_handle_frame_finish(struct sk_buff *skb); -/* Only used in br_device.c */ -static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb) +static inline struct net_device * +nf_bridge_get_physindev(const struct sk_buff *skb) { - struct nf_bridge_info *nf_bridge = skb->nf_bridge; - - skb_pull(skb, ETH_HLEN); - nf_bridge->mask ^= BRNF_BRIDGED_DNAT; - skb_copy_to_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), - skb->nf_bridge->data, ETH_HLEN-ETH_ALEN); - skb->dev = nf_bridge->physindev; - return br_handle_frame_finish(skb); + return skb->nf_bridge ? skb->nf_bridge->physindev : NULL; } -/* This is called by the IP fragmenting code and it ensures there is - * enough room for the encapsulating header (if there is one). */ -static inline unsigned int nf_bridge_pad(const struct sk_buff *skb) +static inline struct net_device * +nf_bridge_get_physoutdev(const struct sk_buff *skb) { - if (skb->nf_bridge) - return nf_bridge_encap_header_len(skb); - return 0; + return skb->nf_bridge ? 
skb->nf_bridge->physoutdev : NULL; } - -struct bridge_skb_cb { - union { - __be32 ipv4; - } daddr; -}; - -static inline void br_drop_fake_rtable(struct sk_buff *skb) -{ - struct dst_entry *dst = skb_dst(skb); - - if (dst && (dst->flags & DST_FAKE_RTABLE)) - skb_dst_drop(skb); -} - #else -#define nf_bridge_maybe_copy_header(skb) (0) -#define nf_bridge_pad(skb) (0) #define br_drop_fake_rtable(skb) do { } while (0) #endif /* CONFIG_BRIDGE_NETFILTER */ diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index 34e7a2b7f867..f1bd3962e6b6 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -12,9 +12,10 @@ #ifndef __LINUX_BRIDGE_EFF_H #define __LINUX_BRIDGE_EFF_H +#include <linux/if.h> +#include <linux/if_ether.h> #include <uapi/linux/netfilter_bridge/ebtables.h> - /* return values for match() functions */ #define EBT_MATCH 0 #define EBT_NOMATCH 1 diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index 901e84db847d..4073510da485 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -65,8 +65,7 @@ struct ipt_error { extern void *ipt_alloc_initial_table(const struct xt_table *); extern unsigned int ipt_do_table(struct sk_buff *skb, unsigned int hook, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, struct xt_table *table); #ifdef CONFIG_COMPAT diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 610208b18c05..b40d2b635778 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -31,8 +31,7 @@ extern struct xt_table *ip6t_register_table(struct net *net, extern void ip6t_unregister_table(struct net *net, struct xt_table *table); extern unsigned int ip6t_do_table(struct sk_buff *skb, unsigned int hook, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, struct xt_table *table); /* Check for an extension */ diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 02fc86d2348e..6835c1279df7 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -134,7 +134,7 @@ struct netlink_callback { struct netlink_notify { struct net *net; - int portid; + u32 portid; int protocol; }; diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index d449018d0726..8f2237eb3485 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -24,6 +24,7 @@ struct phy_device *of_phy_attach(struct net_device *dev, phy_interface_t iface); extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); +extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np); #else /* CONFIG_OF */ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) @@ -60,6 +61,12 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np) { return NULL; } + +static inline int of_mdio_parse_addr(struct device *dev, + const struct device_node *np) +{ + return -ENOSYS; +} #endif /* CONFIG_OF */ #if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY) diff --git a/include/linux/of_net.h b/include/linux/of_net.h index 34597c8c1a4c..9cd72aab76fe 100644 --- a/include/linux/of_net.h +++ b/include/linux/of_net.h @@ -9,8 +9,11 @@ #ifdef CONFIG_OF_NET #include <linux/of.h> + +struct net_device; extern int of_get_phy_mode(struct device_node *np); extern const void 
*of_get_mac_address(struct device_node *np); +extern struct net_device *of_find_net_device_by_node(struct device_node *np); #else static inline int of_get_phy_mode(struct device_node *np) { @@ -21,6 +24,11 @@ static inline const void *of_get_mac_address(struct device_node *np) { return NULL; } + +static inline struct net_device *of_find_net_device_by_node(struct device_node *np) +{ + return NULL; +} #endif #endif /* __LINUX_OF_NET_H */ diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index 7e75bfe37cc7..fe5732d53eda 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -21,6 +21,9 @@ extern void fixed_phy_del(int phy_addr); extern int fixed_phy_set_link_update(struct phy_device *phydev, int (*link_update)(struct net_device *, struct fixed_phy_status *)); +extern int fixed_phy_update_state(struct phy_device *phydev, + const struct fixed_phy_status *status, + const struct fixed_phy_status *changed); #else static inline int fixed_phy_add(unsigned int irq, int phy_id, struct fixed_phy_status *status) @@ -43,6 +46,12 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev, { return -ENODEV; } +static inline int fixed_phy_update_state(struct phy_device *phydev, + const struct fixed_phy_status *status, + const struct fixed_phy_status *changed) +{ + return -ENODEV; +} #endif /* CONFIG_FIXED_PHY */ #endif /* __PHY_FIXED_H */ diff --git a/include/linux/platform_data/nxp-nci.h b/include/linux/platform_data/nxp-nci.h new file mode 100644 index 000000000000..d6ed28679bb2 --- /dev/null +++ b/include/linux/platform_data/nxp-nci.h @@ -0,0 +1,27 @@ +/* + * Generic platform data for the NXP NCI NFC chips. + * + * Copyright (C) 2014 NXP Semiconductors All rights reserved. + * + * Authors: Clément Perrochaud <clement.perrochaud@nxp.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _NXP_NCI_H_ +#define _NXP_NCI_H_ + +struct nxp_nci_nfc_platform_data { + unsigned int gpio_en; + unsigned int gpio_fw; + unsigned int irq; +}; + +#endif /* _NXP_NCI_H_ */ diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index 0d8ff3fb84ba..b8b73066d137 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -64,11 +64,11 @@ struct ptp_clock_request { * @adjtime: Shifts the time of the hardware clock. * parameter delta: Desired change in nanoseconds. * - * @gettime: Reads the current time from the hardware clock. - * parameter ts: Holds the result. + * @gettime64: Reads the current time from the hardware clock. + * parameter ts: Holds the result. * - * @settime: Set the current time on the hardware clock. - * parameter ts: Time value to set. + * @settime64: Set the current time on the hardware clock. + * parameter ts: Time value to set. * * @enable: Request driver to enable or disable an ancillary feature. * parameter request: Desired resource to enable or disable. 
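For the ptp_clock_kernel.h change documented above, drivers move from timespec to timespec64 in their clock callbacks. A hedged sketch of the converted pair follows; the sample_phc_* names, the zeroed counter read and the pr_debug are placeholders, not taken from any driver.

#include <linux/kernel.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/time64.h>

static int sample_phc_gettime64(struct ptp_clock_info *ptp,
				struct timespec64 *ts)
{
	u64 ns = 0;	/* read the hardware counter here */

	*ts = ns_to_timespec64(ns);
	return 0;
}

static int sample_phc_settime64(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	/* program the hardware counter from the 64-bit value */
	pr_debug("set PHC to %lld ns\n",
		 (long long)timespec64_to_ns(ts));
	return 0;
}

static struct ptp_clock_info sample_phc_info = {
	/* .owner, .name, .max_adj etc. omitted for brevity */
	.gettime64	= sample_phc_gettime64,
	.settime64	= sample_phc_settime64,
};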
@@ -104,8 +104,8 @@ struct ptp_clock_info { struct ptp_pin_desc *pin_config; int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta); int (*adjtime)(struct ptp_clock_info *ptp, s64 delta); - int (*gettime)(struct ptp_clock_info *ptp, struct timespec *ts); - int (*settime)(struct ptp_clock_info *ptp, const struct timespec *ts); + int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts); + int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts); int (*enable)(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on); int (*verify)(struct ptp_clock_info *ptp, unsigned int pin, diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index d438eeb08bff..e23d242d1230 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -1,14 +1,13 @@ /* * Resizable, Scalable, Concurrent Hash Table * - * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch> + * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> + * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch> * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> * - * Based on the following paper by Josh Triplett, Paul E. McKenney - * and Jonathan Walpole: - * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf - * * Code partially derived from nft_hash + * Rewritten with rehash code from br_multicast plus single list + * pointer as suggested by Josh Triplett * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -19,9 +18,12 @@ #define _LINUX_RHASHTABLE_H #include <linux/compiler.h> +#include <linux/errno.h> +#include <linux/jhash.h> #include <linux/list_nulls.h> #include <linux/workqueue.h> #include <linux/mutex.h> +#include <linux/rcupdate.h> /* * The end of the chain is marked with a special nulls marks which has @@ -42,6 +44,9 @@ #define RHT_HASH_BITS 27 #define RHT_BASE_SHIFT RHT_HASH_BITS +/* Base bits plus 1 bit for nulls marker */ +#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1) + struct rhash_head { struct rhash_head __rcu *next; }; @@ -49,20 +54,43 @@ struct rhash_head { /** * struct bucket_table - Table of hash buckets * @size: Number of hash buckets + * @rehash: Current bucket being rehashed + * @hash_rnd: Random seed to fold into hash * @locks_mask: Mask to apply before accessing locks[] * @locks: Array of spinlocks protecting individual buckets + * @walkers: List of active walkers + * @rcu: RCU structure for freeing the table + * @future_tbl: Table under construction during rehashing * @buckets: size * hash buckets */ struct bucket_table { - size_t size; + unsigned int size; + unsigned int rehash; + u32 hash_rnd; unsigned int locks_mask; spinlock_t *locks; + struct list_head walkers; + struct rcu_head rcu; + + struct bucket_table __rcu *future_tbl; struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; +/** + * struct rhashtable_compare_arg - Key for the function rhashtable_compare + * @ht: Hash table + * @key: Key to compare against + */ +struct rhashtable_compare_arg { + struct rhashtable *ht; + const void *key; +}; + typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed); -typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed); +typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed); +typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg, + const void *obj); struct rhashtable; @@ -72,60 +100,62 @@ struct rhashtable; * @key_len: Length of key * @key_offset: Offset of key in struct to be hashed * 
@head_offset: Offset of rhash_head in struct to be hashed - * @hash_rnd: Seed to use while hashing - * @max_shift: Maximum number of shifts while expanding - * @min_shift: Minimum number of shifts while shrinking + * @max_size: Maximum size while expanding + * @min_size: Minimum size while shrinking * @nulls_base: Base value to generate nulls marker + * @insecure_elasticity: Set to true to disable chain length checks + * @automatic_shrinking: Enable automatic shrinking of tables * @locks_mul: Number of bucket locks to allocate per cpu (default: 128) - * @hashfn: Function to hash key + * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash) * @obj_hashfn: Function to hash object + * @obj_cmpfn: Function to compare key with object */ struct rhashtable_params { size_t nelem_hint; size_t key_len; size_t key_offset; size_t head_offset; - u32 hash_rnd; - size_t max_shift; - size_t min_shift; + unsigned int max_size; + unsigned int min_size; u32 nulls_base; + bool insecure_elasticity; + bool automatic_shrinking; size_t locks_mul; rht_hashfn_t hashfn; rht_obj_hashfn_t obj_hashfn; + rht_obj_cmpfn_t obj_cmpfn; }; /** * struct rhashtable - Hash table handle * @tbl: Bucket table - * @future_tbl: Table under construction during expansion/shrinking * @nelems: Number of elements in table - * @shift: Current size (1 << shift) + * @key_len: Key length for hashfn + * @elasticity: Maximum chain length before rehash * @p: Configuration parameters * @run_work: Deferred worker to expand/shrink asynchronously * @mutex: Mutex to protect current/future table swapping - * @walkers: List of active walkers - * @being_destroyed: True if table is set up for destruction + * @lock: Spin lock to protect walker list */ struct rhashtable { struct bucket_table __rcu *tbl; - struct bucket_table __rcu *future_tbl; atomic_t nelems; - atomic_t shift; + unsigned int key_len; + unsigned int elasticity; struct rhashtable_params p; struct work_struct run_work; struct mutex mutex; - struct list_head walkers; - bool being_destroyed; + spinlock_t lock; }; /** * struct rhashtable_walker - Hash table walker * @list: List entry on list of walkers - * @resize: Resize event occured + * @tbl: The table that we were walking over */ struct rhashtable_walker { struct list_head list; - bool resize; + struct bucket_table *tbl; }; /** @@ -162,6 +192,118 @@ static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr) return ((unsigned long) ptr) >> 1; } +static inline void *rht_obj(const struct rhashtable *ht, + const struct rhash_head *he) +{ + return (char *)he - ht->p.head_offset; +} + +static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, + unsigned int hash) +{ + return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1); +} + +static inline unsigned int rht_key_hashfn( + struct rhashtable *ht, const struct bucket_table *tbl, + const void *key, const struct rhashtable_params params) +{ + unsigned int hash; + + /* params must be equal to ht->p if it isn't constant. 
*/ + if (!__builtin_constant_p(params.key_len)) + hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd); + else if (params.key_len) { + unsigned int key_len = params.key_len; + + if (params.hashfn) + hash = params.hashfn(key, key_len, tbl->hash_rnd); + else if (key_len & (sizeof(u32) - 1)) + hash = jhash(key, key_len, tbl->hash_rnd); + else + hash = jhash2(key, key_len / sizeof(u32), + tbl->hash_rnd); + } else { + unsigned int key_len = ht->p.key_len; + + if (params.hashfn) + hash = params.hashfn(key, key_len, tbl->hash_rnd); + else + hash = jhash(key, key_len, tbl->hash_rnd); + } + + return rht_bucket_index(tbl, hash); +} + +static inline unsigned int rht_head_hashfn( + struct rhashtable *ht, const struct bucket_table *tbl, + const struct rhash_head *he, const struct rhashtable_params params) +{ + const char *ptr = rht_obj(ht, he); + + return likely(params.obj_hashfn) ? + rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?: + ht->p.key_len, + tbl->hash_rnd)) : + rht_key_hashfn(ht, tbl, ptr + params.key_offset, params); +} + +/** + * rht_grow_above_75 - returns true if nelems > 0.75 * table-size + * @ht: hash table + * @tbl: current table + */ +static inline bool rht_grow_above_75(const struct rhashtable *ht, + const struct bucket_table *tbl) +{ + /* Expand table when exceeding 75% load */ + return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) && + (!ht->p.max_size || tbl->size < ht->p.max_size); +} + +/** + * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size + * @ht: hash table + * @tbl: current table + */ +static inline bool rht_shrink_below_30(const struct rhashtable *ht, + const struct bucket_table *tbl) +{ + /* Shrink table beneath 30% load */ + return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) && + tbl->size > ht->p.min_size; +} + +/** + * rht_grow_above_100 - returns true if nelems > table-size + * @ht: hash table + * @tbl: current table + */ +static inline bool rht_grow_above_100(const struct rhashtable *ht, + const struct bucket_table *tbl) +{ + return atomic_read(&ht->nelems) > tbl->size; +} + +/* The bucket lock is selected based on the hash and protects mutations + * on a group of hash buckets. + * + * A maximum of tbl->size/2 bucket locks is allocated. This ensures that + * a single lock always covers both buckets which may both contains + * entries which link to the same bucket of the old table during resizing. + * This allows to simplify the locking as locking the bucket in both + * tables during resize always guarantee protection. + * + * IMPORTANT: When holding the bucket lock of both the old and new table + * during expansions and shrinking, the old bucket lock must always be + * acquired first. 
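The rewritten rhashtable API above moves the hashing decisions into a constant rhashtable_params block so the inlined fast paths can specialize at compile time; with a 4-byte key and no explicit .hashfn, the new rht_key_hashfn() falls back to jhash2(). A minimal, hedged setup sketch (struct sample_obj and the other sample_* names are illustrative, not from the patch):

#include <linux/rhashtable.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct sample_obj {
	u32			key;
	struct rhash_head	node;
	/* payload would follow here */
};

static const struct rhashtable_params sample_params = {
	.key_len		= sizeof(u32),
	.key_offset		= offsetof(struct sample_obj, key),
	.head_offset		= offsetof(struct sample_obj, node),
	.automatic_shrinking	= true,
};

static struct rhashtable sample_ht;

static int sample_table_setup(void)
{
	/* rhashtable_init() now takes the parameter block by const pointer */
	return rhashtable_init(&sample_ht, &sample_params);
}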
+ */ +static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl, + unsigned int hash) +{ + return &tbl->locks[hash & tbl->locks_mask]; +} + #ifdef CONFIG_PROVE_LOCKING int lockdep_rht_mutex_is_held(struct rhashtable *ht); int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash); @@ -178,23 +320,13 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, } #endif /* CONFIG_PROVE_LOCKING */ -int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); - -void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); -bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); - -int rhashtable_expand(struct rhashtable *ht); -int rhashtable_shrink(struct rhashtable *ht); +int rhashtable_init(struct rhashtable *ht, + const struct rhashtable_params *params); -void *rhashtable_lookup(struct rhashtable *ht, const void *key); -void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key, - bool (*compare)(void *, void *), void *arg); - -bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj); -bool rhashtable_lookup_compare_insert(struct rhashtable *ht, - struct rhash_head *obj, - bool (*compare)(void *, void *), - void *arg); +int rhashtable_insert_slow(struct rhashtable *ht, const void *key, + struct rhash_head *obj, + struct bucket_table *old_tbl); +int rhashtable_insert_rehash(struct rhashtable *ht); int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter); void rhashtable_walk_exit(struct rhashtable_iter *iter); @@ -202,6 +334,9 @@ int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); void *rhashtable_walk_next(struct rhashtable_iter *iter); void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU); +void rhashtable_free_and_destroy(struct rhashtable *ht, + void (*free_fn)(void *ptr, void *arg), + void *arg); void rhashtable_destroy(struct rhashtable *ht); #define rht_dereference(p, ht) \ @@ -352,4 +487,316 @@ void rhashtable_destroy(struct rhashtable *ht); rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\ tbl, hash, member) +static inline int rhashtable_compare(struct rhashtable_compare_arg *arg, + const void *obj) +{ + struct rhashtable *ht = arg->ht; + const char *ptr = obj; + + return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len); +} + +/** + * rhashtable_lookup_fast - search hash table, inlined version + * @ht: hash table + * @key: the pointer to the key + * @params: hash table parameters + * + * Computes the hash value for the key and traverses the bucket chain looking + * for a entry with an identical key. The first matching entry is returned. + * + * Returns the first entry on which the compare function returned true. + */ +static inline void *rhashtable_lookup_fast( + struct rhashtable *ht, const void *key, + const struct rhashtable_params params) +{ + struct rhashtable_compare_arg arg = { + .ht = ht, + .key = key, + }; + const struct bucket_table *tbl; + struct rhash_head *he; + unsigned int hash; + + rcu_read_lock(); + + tbl = rht_dereference_rcu(ht->tbl, ht); +restart: + hash = rht_key_hashfn(ht, tbl, key, params); + rht_for_each_rcu(he, tbl, hash) { + if (params.obj_cmpfn ? + params.obj_cmpfn(&arg, rht_obj(ht, he)) : + rhashtable_compare(&arg, rht_obj(ht, he))) + continue; + rcu_read_unlock(); + return rht_obj(ht, he); + } + + /* Ensure we see any new tables. 
*/ + smp_rmb(); + + tbl = rht_dereference_rcu(tbl->future_tbl, ht); + if (unlikely(tbl)) + goto restart; + rcu_read_unlock(); + + return NULL; +} + +/* Internal function, please use rhashtable_insert_fast() instead */ +static inline int __rhashtable_insert_fast( + struct rhashtable *ht, const void *key, struct rhash_head *obj, + const struct rhashtable_params params) +{ + struct rhashtable_compare_arg arg = { + .ht = ht, + .key = key, + }; + struct bucket_table *tbl, *new_tbl; + struct rhash_head *head; + spinlock_t *lock; + unsigned int elasticity; + unsigned int hash; + int err; + +restart: + rcu_read_lock(); + + tbl = rht_dereference_rcu(ht->tbl, ht); + + /* All insertions must grab the oldest table containing + * the hashed bucket that is yet to be rehashed. + */ + for (;;) { + hash = rht_head_hashfn(ht, tbl, obj, params); + lock = rht_bucket_lock(tbl, hash); + spin_lock_bh(lock); + + if (tbl->rehash <= hash) + break; + + spin_unlock_bh(lock); + tbl = rht_dereference_rcu(tbl->future_tbl, ht); + } + + new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); + if (unlikely(new_tbl)) { + err = rhashtable_insert_slow(ht, key, obj, new_tbl); + if (err == -EAGAIN) + goto slow_path; + goto out; + } + + if (unlikely(rht_grow_above_100(ht, tbl))) { +slow_path: + spin_unlock_bh(lock); + err = rhashtable_insert_rehash(ht); + rcu_read_unlock(); + if (err) + return err; + + goto restart; + } + + err = -EEXIST; + elasticity = ht->elasticity; + rht_for_each(head, tbl, hash) { + if (key && + unlikely(!(params.obj_cmpfn ? + params.obj_cmpfn(&arg, rht_obj(ht, head)) : + rhashtable_compare(&arg, rht_obj(ht, head))))) + goto out; + if (!--elasticity) + goto slow_path; + } + + err = 0; + + head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); + + RCU_INIT_POINTER(obj->next, head); + + rcu_assign_pointer(tbl->buckets[hash], obj); + + atomic_inc(&ht->nelems); + if (rht_grow_above_75(ht, tbl)) + schedule_work(&ht->run_work); + +out: + spin_unlock_bh(lock); + rcu_read_unlock(); + + return err; +} + +/** + * rhashtable_insert_fast - insert object into hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * @params: hash table parameters + * + * Will take a per bucket spinlock to protect against mutual mutations + * on the same bucket. Multiple insertions may occur in parallel unless + * they map to the same bucket lock. + * + * It is safe to call this function from atomic context. + * + * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). + */ +static inline int rhashtable_insert_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params) +{ + return __rhashtable_insert_fast(ht, NULL, obj, params); +} + +/** + * rhashtable_lookup_insert_fast - lookup and insert object into hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * @params: hash table parameters + * + * Locks down the bucket chain in both the old and new table if a resize + * is in progress to ensure that writers can't remove from the old table + * and can't insert to the new table during the atomic operation of search + * and insertion. Searches for duplicates in both the old and new table if + * a resize is in progress. + * + * This lookup function may only be used for fixed key hash table (key_len + * parameter set). It will BUG() if used inappropriately. + * + * It is safe to call this function from atomic context. 
+ * + * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). + */ +static inline int rhashtable_lookup_insert_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params) +{ + const char *key = rht_obj(ht, obj); + + BUG_ON(ht->p.obj_hashfn); + + return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, + params); +} + +/** + * rhashtable_lookup_insert_key - search and insert object to hash table + * with explicit key + * @ht: hash table + * @key: key + * @obj: pointer to hash head inside object + * @params: hash table parameters + * + * Locks down the bucket chain in both the old and new table if a resize + * is in progress to ensure that writers can't remove from the old table + * and can't insert to the new table during the atomic operation of search + * and insertion. Searches for duplicates in both the old and new table if + * a resize is in progress. + * + * Lookups may occur in parallel with hashtable mutations and resizing. + * + * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). + * + * Returns zero on success. + */ +static inline int rhashtable_lookup_insert_key( + struct rhashtable *ht, const void *key, struct rhash_head *obj, + const struct rhashtable_params params) +{ + BUG_ON(!ht->p.obj_hashfn || !key); + + return __rhashtable_insert_fast(ht, key, obj, params); +} + +/* Internal function, please use rhashtable_remove_fast() instead */ +static inline int __rhashtable_remove_fast( + struct rhashtable *ht, struct bucket_table *tbl, + struct rhash_head *obj, const struct rhashtable_params params) +{ + struct rhash_head __rcu **pprev; + struct rhash_head *he; + spinlock_t * lock; + unsigned int hash; + int err = -ENOENT; + + hash = rht_head_hashfn(ht, tbl, obj, params); + lock = rht_bucket_lock(tbl, hash); + + spin_lock_bh(lock); + + pprev = &tbl->buckets[hash]; + rht_for_each(he, tbl, hash) { + if (he != obj) { + pprev = &he->next; + continue; + } + + rcu_assign_pointer(*pprev, obj->next); + err = 0; + break; + } + + spin_unlock_bh(lock); + + return err; +} + +/** + * rhashtable_remove_fast - remove object from hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * @params: hash table parameters + * + * Since the hash chain is single linked, the removal operation needs to + * walk the bucket chain upon removal. The removal operation is thus + * considerable slow if the hash table is not correctly sized. + * + * Will automatically shrink the table via rhashtable_expand() if the + * shrink_decision function specified at rhashtable_init() returns true. + * + * Returns zero on success, -ENOENT if the entry could not be found. + */ +static inline int rhashtable_remove_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params) +{ + struct bucket_table *tbl; + int err; + + rcu_read_lock(); + + tbl = rht_dereference_rcu(ht->tbl, ht); + + /* Because we have already taken (and released) the bucket + * lock in old_tbl, if we find that future_tbl is not yet + * visible then that guarantees the entry to still be in + * the old tbl if it exists. 
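Continuing the hedged sketch started further up (sample_obj and sample_params remain illustrative), the inlined fast paths take the same constant parameter block on every call, which is what lets the compare and hash choices constant-fold; teardown would go through the rhashtable_free_and_destroy() declared above.

static int sample_add(struct rhashtable *ht, struct sample_obj *obj)
{
	return rhashtable_insert_fast(ht, &obj->node, sample_params);
}

static struct sample_obj *sample_find(struct rhashtable *ht, u32 key)
{
	return rhashtable_lookup_fast(ht, &key, sample_params);
}

static int sample_del(struct rhashtable *ht, struct sample_obj *obj)
{
	return rhashtable_remove_fast(ht, &obj->node, sample_params);
}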
+ */ + while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) && + (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) + ; + + if (err) + goto out; + + atomic_dec(&ht->nelems); + if (unlikely(ht->p.automatic_shrinking && + rht_shrink_below_30(ht, tbl))) + schedule_work(&ht->run_work); + +out: + rcu_read_unlock(); + + return err; +} + #endif /* _LINUX_RHASHTABLE_H */ diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 5db76a32fcab..2da5d1081ad9 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -77,7 +77,20 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev) return rtnl_dereference(dev->ingress_queue); } -extern struct netdev_queue *dev_ingress_queue_create(struct net_device *dev); +struct netdev_queue *dev_ingress_queue_create(struct net_device *dev); + +#ifdef CONFIG_NET_CLS_ACT +void net_inc_ingress_queue(void); +void net_dec_ingress_queue(void); +#else +static inline void net_inc_ingress_queue(void) +{ +} + +static inline void net_dec_ingress_queue(void) +{ +} +#endif extern void rtnetlink_init(void); extern void __rtnl_unlock(void); diff --git a/include/linux/security.h b/include/linux/security.h index 4e14e3d6309f..18264ea9e314 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1716,7 +1716,6 @@ struct security_operations { int (*tun_dev_attach_queue) (void *security); int (*tun_dev_attach) (struct sock *sk, void *security); int (*tun_dev_open) (void *security); - void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk); #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM @@ -2734,8 +2733,6 @@ int security_tun_dev_attach_queue(void *security); int security_tun_dev_attach(struct sock *sk, void *security); int security_tun_dev_open(void *security); -void security_skb_owned_by(struct sk_buff *skb, struct sock *sk); - #else /* CONFIG_SECURITY_NETWORK */ static inline int security_unix_stream_connect(struct sock *sock, struct sock *other, @@ -2927,11 +2924,6 @@ static inline int security_tun_dev_open(void *security) { return 0; } - -static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) -{ -} - #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f54d6659713a..0991259643d6 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -166,10 +166,16 @@ struct nf_conntrack { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) struct nf_bridge_info { atomic_t use; + enum { + BRNF_PROTO_UNCHANGED, + BRNF_PROTO_8021Q, + BRNF_PROTO_PPPOE + } orig_proto; + bool pkt_otherhost; unsigned int mask; struct net_device *physindev; struct net_device *physoutdev; - unsigned long data[32 / sizeof(unsigned long)]; + char neigh_header[8]; }; #endif @@ -492,7 +498,6 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking * @mark: Generic packet mark - * @dropcount: total number of sk_receive_queue overflows * @vlan_proto: vlan encapsulation protocol * @vlan_tci: vlan tag control information * @inner_protocol: Protocol (encapsulation) @@ -641,7 +646,6 @@ struct sk_buff { #endif union { __u32 mark; - __u32 dropcount; __u32 reserved_tailroom; }; @@ -870,8 +874,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data, void skb_abort_seq_read(struct skb_seq_state *st); unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, - unsigned int to, struct ts_config *config, - 
struct ts_state *state); + unsigned int to, struct ts_config *config); /* * Packet hash types specify the type of hash in skb_set_hash. diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h index 46cca4c06848..083ac388098e 100644 --- a/include/linux/sock_diag.h +++ b/include/linux/sock_diag.h @@ -19,8 +19,8 @@ void sock_diag_unregister(const struct sock_diag_handler *h); void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); -int sock_diag_check_cookie(void *sk, __u32 *cookie); -void sock_diag_save_cookie(void *sk, __u32 *cookie); +int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie); +void sock_diag_save_cookie(struct sock *sk, __u32 *cookie); int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr); int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk, diff --git a/include/linux/socket.h b/include/linux/socket.h index 5c19cba34dce..5bf59c8493b7 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -51,6 +51,7 @@ struct msghdr { void *msg_control; /* ancillary data */ __kernel_size_t msg_controllen; /* ancillary data buffer length */ unsigned int msg_flags; /* flags on received message */ + struct kiocb *msg_iocb; /* ptr to iocb for async requests */ }; struct user_msghdr { @@ -138,6 +139,11 @@ static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg); } +static inline size_t msg_data_left(struct msghdr *msg) +{ + return iov_iter_count(&msg->msg_iter); +} + /* "Socket"-level control message types: */ #define SCM_RIGHTS 0x01 /* rw: access rights (array of int) */ @@ -181,6 +187,7 @@ struct ucred { #define AF_WANPIPE 25 /* Wanpipe API Sockets */ #define AF_LLC 26 /* Linux LLC */ #define AF_IB 27 /* Native InfiniBand address */ +#define AF_MPLS 28 /* MPLS */ #define AF_CAN 29 /* Controller Area Network */ #define AF_TIPC 30 /* TIPC sockets */ #define AF_BLUETOOTH 31 /* Bluetooth sockets */ @@ -226,6 +233,7 @@ struct ucred { #define PF_WANPIPE AF_WANPIPE #define PF_LLC AF_LLC #define PF_IB AF_IB +#define PF_MPLS AF_MPLS #define PF_CAN AF_CAN #define PF_TIPC AF_TIPC #define PF_BLUETOOTH AF_BLUETOOTH diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h index cd519a11c2c6..b63fe6f5fdc8 100644 --- a/include/linux/spi/at86rf230.h +++ b/include/linux/spi/at86rf230.h @@ -22,6 +22,7 @@ struct at86rf230_platform_data { int rstn; int slp_tr; int dig2; + u8 xtal_trim; }; #endif diff --git a/include/linux/spi/cc2520.h b/include/linux/spi/cc2520.h index 85b8ee67e937..e741e8baad92 100644 --- a/include/linux/spi/cc2520.h +++ b/include/linux/spi/cc2520.h @@ -21,6 +21,7 @@ struct cc2520_platform_data { int sfd; int reset; int vreg; + bool amplified; }; #endif diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 1a7adb411647..0caa3a2d4106 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -58,6 +58,7 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb) struct tcp_fastopen_cookie { s8 len; u8 val[TCP_FASTOPEN_COOKIE_MAX]; + bool exp; /* In RFC6994 experimental option format */ }; /* This defines a selective acknowledgement block. 
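The new msg_data_left() helper above wraps iov_iter_count() on msg->msg_iter, which makes "how much is left to copy" loops in sendmsg paths easier to read. A hedged sketch; sample_sendmsg(), the 256-byte chunk size and the omitted transport hand-off are assumptions for illustration.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int sample_sendmsg(struct msghdr *msg)
{
	while (msg_data_left(msg)) {
		char buf[256];
		size_t len = min(msg_data_left(msg), sizeof(buf));

		/* copy_from_iter() advances msg->msg_iter as it copies */
		if (copy_from_iter(buf, len, &msg->msg_iter) != len)
			return -EFAULT;

		/* hand buf/len to the transport here */
	}
	return 0;
}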
*/ @@ -111,7 +112,7 @@ struct tcp_request_sock_ops; struct tcp_request_sock { struct inet_request_sock req; const struct tcp_request_sock_ops *af_specific; - struct sock *listener; /* needed for TFO */ + bool tfo_listener; u32 rcv_isn; u32 snt_isn; u32 snt_synack; /* synack sent time */ @@ -188,6 +189,7 @@ struct tcp_sock { u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */ syn_data:1, /* SYN includes data */ syn_fastopen:1, /* SYN includes Fast Open option */ + syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */ syn_data_acked:1,/* data in SYN is acked by SYN-ACK */ is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */ u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ @@ -236,7 +238,6 @@ struct tcp_sock { u32 lost_out; /* Lost packets */ u32 sacked_out; /* SACK'd packets */ u32 fackets_out; /* FACK'd packets */ - u32 tso_deferred; /* from STCP, retrans queue hinting */ struct sk_buff* lost_skb_hint; diff --git a/include/linux/udp.h b/include/linux/udp.h index 247cfdcc4b08..87c094961bd5 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -34,7 +34,7 @@ static inline struct udphdr *inner_udp_hdr(const struct sk_buff *skb) #define UDP_HTABLE_SIZE_MIN (CONFIG_BASE_SMALL ? 128 : 256) -static inline int udp_hashfn(struct net *net, unsigned num, unsigned mask) +static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask) { return (num + net_hash_mix(net)) & mask; } diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index 0d87674fb775..172632dd9930 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -100,8 +100,8 @@ struct vsock_transport { /* DGRAM. */ int (*dgram_bind)(struct vsock_sock *, struct sockaddr_vm *); - int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk, - struct msghdr *msg, size_t len, int flags); + int (*dgram_dequeue)(struct vsock_sock *vsk, struct msghdr *msg, + size_t len, int flags); int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *, struct msghdr *, size_t len); bool (*dgram_allow)(u32 cid, u32 port); diff --git a/include/net/arp.h b/include/net/arp.h index 73c49864076b..5e0f891d476c 100644 --- a/include/net/arp.h +++ b/include/net/arp.h @@ -9,28 +9,17 @@ extern struct neigh_table arp_tbl; -static inline u32 arp_hashfn(u32 key, const struct net_device *dev, u32 hash_rnd) +static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32 *hash_rnd) { + u32 key = *(const u32 *)pkey; u32 val = key ^ hash32_ptr(dev); - return val * hash_rnd; + return val * hash_rnd[0]; } static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key) { - struct neigh_hash_table *nht = rcu_dereference_bh(arp_tbl.nht); - struct neighbour *n; - u32 hash_val; - - hash_val = arp_hashfn(key, dev, nht->hash_rnd[0]) >> (32 - nht->hash_shift); - for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); - n != NULL; - n = rcu_dereference_bh(n->next)) { - if (n->dev == dev && *(u32 *)n->primary_key == key) - return n; - } - - return NULL; + return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev); } static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key) @@ -47,7 +36,6 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 } void arp_init(void); -int arp_find(unsigned char *haddr, struct sk_buff *skb); int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg); void arp_send(int type, int ptype, __be32 dest_ip, struct net_device *dev, __be32 src_ip, diff --git 
a/include/net/ax25.h b/include/net/ax25.h index bf0396e9a5d3..16a923a3a43a 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -12,6 +12,7 @@ #include <linux/list.h> #include <linux/slab.h> #include <linux/atomic.h> +#include <net/neighbour.h> #define AX25_T1CLAMPLO 1 #define AX25_T1CLAMPHI (30 * HZ) @@ -366,9 +367,7 @@ int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); /* ax25_ip.c */ -int ax25_hard_header(struct sk_buff *, struct net_device *, unsigned short, - const void *, const void *, unsigned int); -int ax25_rebuild_header(struct sk_buff *); +netdev_tx_t ax25_ip_xmit(struct sk_buff *skb); extern const struct header_ops ax25_header_ops; /* ax25_out.c */ diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index e00455aab18c..7dba80546f16 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -245,10 +245,10 @@ int bt_sock_register(int proto, const struct net_proto_family *ops); void bt_sock_unregister(int proto); void bt_sock_link(struct bt_sock_list *l, struct sock *s); void bt_sock_unlink(struct bt_sock_list *l, struct sock *s); -int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags); -int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags); +int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags); +int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg, + size_t len, int flags); uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait); int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo); @@ -269,29 +269,34 @@ struct l2cap_ctrl { __u16 reqseq; __u16 txseq; __u8 retries; + __le16 psm; + bdaddr_t bdaddr; + struct l2cap_chan *chan; }; struct hci_dev; typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode); - -struct hci_req_ctrl { - bool start; - u8 event; - hci_req_complete_t complete; +typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status, + u16 opcode, struct sk_buff *skb); + +struct req_ctrl { + bool start; + u8 event; + hci_req_complete_t complete; + hci_req_complete_skb_t complete_skb; }; struct bt_skb_cb { __u8 pkt_type; - __u8 incoming; + __u8 force_active; __u16 opcode; __u16 expect; - __u8 force_active; - struct l2cap_chan *chan; - struct l2cap_ctrl control; - struct hci_req_ctrl req; - bdaddr_t bdaddr; - __le16 psm; + __u8 incoming:1; + union { + struct l2cap_ctrl l2cap; + struct req_ctrl req; + }; }; #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb)) @@ -339,6 +344,11 @@ out: int bt_to_errno(__u16 code); +void hci_sock_set_flag(struct sock *sk, int nr); +void hci_sock_clear_flag(struct sock *sk, int nr); +int hci_sock_test_flag(struct sock *sk, int nr); +unsigned short hci_sock_get_channel(struct sock *sk); + int hci_sock_init(void); void hci_sock_cleanup(void); @@ -358,6 +368,9 @@ void l2cap_exit(void); int sco_init(void); void sco_exit(void); +int mgmt_init(void); +void mgmt_exit(void); + void bt_sock_reclassify_lock(struct sock *sk, int proto); #endif /* __BLUETOOTH_H */ diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 8e54f825153c..d95da83cb1b0 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -160,6 +160,14 @@ enum { * during the hdev->setup vendor callback. 
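The hci.h hunk here adds HCI_QUIRK_SIMULTANEOUS_DISCOVERY (see the lines that follow); per its comment, a vendor driver may set it before hci_register_dev() or from its setup callback. A hedged sketch, assuming the usual hdev->quirks bitmap and an illustrative sample_setup() callback name:

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int sample_setup(struct hci_dev *hdev)
{
	/* opt in to simultaneous LE scan and BR/EDR inquiry */
	set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);

	return 0;
}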
*/ HCI_QUIRK_STRICT_DUPLICATE_FILTER, + + /* When this quirk is set, LE scan and BR/EDR inquiry is done + * simultaneously, otherwise it's interleaved. + * + * This quirk can be set before hci_register_dev is called or + * during the hdev->setup vendor callback. + */ + HCI_QUIRK_SIMULTANEOUS_DISCOVERY, }; /* HCI device flags */ @@ -179,13 +187,14 @@ enum { HCI_RESET, }; -/* BR/EDR and/or LE controller flags: the flags defined here should represent - * states configured via debugfs for debugging and testing purposes only. - */ +/* HCI socket flags */ enum { - HCI_DUT_MODE, - HCI_FORCE_BREDR_SMP, - HCI_FORCE_STATIC_ADDR, + HCI_SOCK_TRUSTED, + HCI_MGMT_INDEX_EVENTS, + HCI_MGMT_UNCONF_INDEX_EVENTS, + HCI_MGMT_EXT_INDEX_EVENTS, + HCI_MGMT_GENERIC_EVENTS, + HCI_MGMT_OOB_DATA_EVENTS, }; /* @@ -217,6 +226,8 @@ enum { HCI_HS_ENABLED, HCI_LE_ENABLED, HCI_ADVERTISING, + HCI_ADVERTISING_CONNECTABLE, + HCI_ADVERTISING_INSTANCE, HCI_CONNECTABLE, HCI_DISCOVERABLE, HCI_LIMITED_DISCOVERABLE, @@ -225,13 +236,13 @@ enum { HCI_FAST_CONNECTABLE, HCI_BREDR_ENABLED, HCI_LE_SCAN_INTERRUPTED, -}; -/* A mask for the flags that are supposed to remain when a reset happens - * or the HCI device is closed. - */ -#define HCI_PERSISTENT_MASK (BIT(HCI_LE_SCAN) | BIT(HCI_PERIODIC_INQ) | \ - BIT(HCI_FAST_CONNECTABLE) | BIT(HCI_LE_ADV)) + HCI_DUT_MODE, + HCI_FORCE_BREDR_SMP, + HCI_FORCE_STATIC_ADDR, + + __HCI_NUM_FLAGS, +}; /* HCI timeouts */ #define HCI_DISCONN_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ @@ -363,6 +374,7 @@ enum { /* LE features */ #define HCI_LE_ENCRYPTION 0x01 #define HCI_LE_CONN_PARAM_REQ_PROC 0x02 +#define HCI_LE_SLAVE_FEATURES 0x08 #define HCI_LE_PING 0x10 #define HCI_LE_DATA_LEN_EXT 0x20 #define HCI_LE_EXT_SCAN_POLICY 0x80 @@ -452,9 +464,16 @@ enum { #define EIR_NAME_COMPLETE 0x09 /* complete local name */ #define EIR_TX_POWER 0x0A /* transmit power level */ #define EIR_CLASS_OF_DEV 0x0D /* Class of Device */ -#define EIR_SSP_HASH_C 0x0E /* Simple Pairing Hash C */ -#define EIR_SSP_RAND_R 0x0F /* Simple Pairing Randomizer R */ +#define EIR_SSP_HASH_C192 0x0E /* Simple Pairing Hash C-192 */ +#define EIR_SSP_RAND_R192 0x0F /* Simple Pairing Randomizer R-192 */ #define EIR_DEVICE_ID 0x10 /* device ID */ +#define EIR_APPEARANCE 0x19 /* Device appearance */ +#define EIR_LE_BDADDR 0x1B /* LE Bluetooth device address */ +#define EIR_LE_ROLE 0x1C /* LE role */ +#define EIR_SSP_HASH_C256 0x1D /* Simple Pairing Hash C-256 */ +#define EIR_SSP_RAND_R256 0x1E /* Simple Pairing Rand R-256 */ +#define EIR_LE_SC_CONFIRM 0x22 /* LE SC Confirmation Value */ +#define EIR_LE_SC_RANDOM 0x23 /* LE SC Random Value */ /* Low Energy Advertising Flags */ #define LE_AD_LIMITED 0x01 /* Limited Discoverable */ @@ -1358,6 +1377,11 @@ struct hci_cp_le_conn_update { __le16 max_ce_len; } __packed; +#define HCI_OP_LE_READ_REMOTE_FEATURES 0x2016 +struct hci_cp_le_read_remote_features { + __le16 handle; +} __packed; + #define HCI_OP_LE_START_ENC 0x2019 struct hci_cp_le_start_enc { __le16 handle; @@ -1850,6 +1874,13 @@ struct hci_ev_le_conn_update_complete { __le16 supervision_timeout; } __packed; +#define HCI_EV_LE_REMOTE_FEAT_COMPLETE 0x04 +struct hci_ev_le_remote_feat_complete { + __u8 status; + __le16 handle; + __u8 features[8]; +} __packed; + #define HCI_EV_LE_LTK_REQ 0x05 struct hci_ev_le_ltk_req { __le16 handle; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 52863c3e0b13..a056c2bfeb81 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -76,6 +76,7 @@ 
struct discovery_state { u8 last_adv_data[HCI_MAX_AD_LENGTH]; u8 last_adv_data_len; bool report_invalid_rssi; + bool result_filtering; s8 rssi; u16 uuid_count; u8 (*uuids)[16]; @@ -108,7 +109,7 @@ struct bt_uuid { struct smp_csrk { bdaddr_t bdaddr; u8 bdaddr_type; - u8 master; + u8 type; u8 val[16]; }; @@ -154,6 +155,17 @@ struct oob_data { u8 rand256[16]; }; +struct adv_info { + struct delayed_work timeout_exp; + __u8 instance; + __u32 flags; + __u16 timeout; + __u16 adv_data_len; + __u8 adv_data[HCI_MAX_AD_LENGTH]; + __u16 scan_rsp_len; + __u8 scan_rsp_data[HCI_MAX_AD_LENGTH]; +}; + #define HCI_MAX_SHORT_NAME_LENGTH 10 /* Default LE RPA expiry time, 15 minutes */ @@ -173,7 +185,6 @@ struct amp_assoc { #define HCI_MAX_PAGES 3 -#define NUM_REASSEMBLY 4 struct hci_dev { struct list_head list; struct mutex lock; @@ -314,14 +325,13 @@ struct hci_dev { struct sk_buff_head raw_q; struct sk_buff_head cmd_q; - struct sk_buff *recv_evt; struct sk_buff *sent_cmd; - struct sk_buff *reassembly[NUM_REASSEMBLY]; struct mutex req_lock; wait_queue_head_t req_wait_q; __u32 req_status; __u32 req_result; + struct sk_buff *req_skb; void *smp_data; void *smp_bredr_data; @@ -352,8 +362,7 @@ struct hci_dev { struct rfkill *rfkill; - unsigned long dbg_flags; - unsigned long dev_flags; + DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS); struct delayed_work le_scan_disable; struct delayed_work le_scan_restart; @@ -364,6 +373,8 @@ struct hci_dev { __u8 scan_rsp_data[HCI_MAX_AD_LENGTH]; __u8 scan_rsp_data_len; + struct adv_info adv_instance; + __u8 irk[16]; __u32 rpa_timeout; struct delayed_work rpa_expired; @@ -373,6 +384,7 @@ struct hci_dev { int (*close)(struct hci_dev *hdev); int (*flush)(struct hci_dev *hdev); int (*setup)(struct hci_dev *hdev); + int (*shutdown)(struct hci_dev *hdev); int (*send)(struct hci_dev *hdev, struct sk_buff *skb); void (*notify)(struct hci_dev *hdev, unsigned int evt); void (*hw_error)(struct hci_dev *hdev, u8 code); @@ -498,19 +510,29 @@ struct hci_conn_params { extern struct list_head hci_dev_list; extern struct list_head hci_cb_list; extern rwlock_t hci_dev_list_lock; -extern rwlock_t hci_cb_list_lock; +extern struct mutex hci_cb_list_lock; + +#define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags) +#define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags) +#define hci_dev_change_flag(hdev, nr) change_bit((nr), (hdev)->dev_flags) +#define hci_dev_test_flag(hdev, nr) test_bit((nr), (hdev)->dev_flags) +#define hci_dev_test_and_set_flag(hdev, nr) test_and_set_bit((nr), (hdev)->dev_flags) +#define hci_dev_test_and_clear_flag(hdev, nr) test_and_clear_bit((nr), (hdev)->dev_flags) +#define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags) + +#define hci_dev_clear_volatile_flags(hdev) \ + do { \ + hci_dev_clear_flag(hdev, HCI_LE_SCAN); \ + hci_dev_clear_flag(hdev, HCI_LE_ADV); \ + hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \ + } while (0) /* ----- HCI interface to upper protocols ----- */ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); -void l2cap_connect_cfm(struct hci_conn *hcon, u8 status); int l2cap_disconn_ind(struct hci_conn *hcon); -void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason); -int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt); int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags); -void sco_connect_cfm(struct hci_conn *hcon, __u8 status); -void sco_disconn_cfm(struct hci_conn *hcon, 
__u8 reason); int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb); /* ----- Inquiry cache ----- */ @@ -529,6 +551,7 @@ static inline void discovery_init(struct hci_dev *hdev) static inline void hci_discovery_filter_clear(struct hci_dev *hdev) { + hdev->discovery.result_filtering = false; hdev->discovery.report_invalid_rssi = true; hdev->discovery.rssi = HCI_RSSI_INVALID; hdev->discovery.uuid_count = 0; @@ -538,6 +561,11 @@ static inline void hci_discovery_filter_clear(struct hci_dev *hdev) hdev->discovery.scan_duration = 0; } +static inline void adv_info_init(struct hci_dev *hdev) +{ + memset(&hdev->adv_instance, 0, sizeof(struct adv_info)); +} + bool hci_discovery_active(struct hci_dev *hdev); void hci_discovery_set_state(struct hci_dev *hdev, int state); @@ -584,7 +612,6 @@ enum { HCI_CONN_SC_ENABLED, HCI_CONN_AES_CCM, HCI_CONN_POWER_SAVE, - HCI_CONN_REMOTE_OOB, HCI_CONN_FLUSH_KEY, HCI_CONN_ENCRYPT, HCI_CONN_AUTH, @@ -600,14 +627,14 @@ enum { static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; - return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && + return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && test_bit(HCI_CONN_SSP_ENABLED, &conn->flags); } static inline bool hci_conn_sc_enabled(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; - return test_bit(HCI_SC_ENABLED, &hdev->dev_flags) && + return hci_dev_test_flag(hdev, HCI_SC_ENABLED) && test_bit(HCI_CONN_SC_ENABLED, &conn->flags); } @@ -969,6 +996,8 @@ struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type); void hci_smp_irks_clear(struct hci_dev *hdev); +bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); + void hci_remote_oob_data_clear(struct hci_dev *hdev); struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type); @@ -981,7 +1010,6 @@ int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb); -int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count); void hci_init_sysfs(struct hci_dev *hdev); void hci_conn_init_sysfs(struct hci_conn *conn); @@ -1025,10 +1053,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE)) #define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR)) -#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \ - !test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) -#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \ - test_bit(HCI_SC_ENABLED, &(dev)->dev_flags)) +#define hdev_is_powered(dev) (test_bit(HCI_UP, &(dev)->flags) && \ + !hci_dev_test_flag(dev, HCI_AUTO_OFF)) +#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \ + hci_dev_test_flag(dev, HCI_SC_ENABLED)) /* ----- HCI protocols ----- */ #define HCI_PROTO_DEFER 0x01 @@ -1050,28 +1078,6 @@ static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, } } -static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status) -{ - switch (conn->type) { - case ACL_LINK: - case LE_LINK: - l2cap_connect_cfm(conn, status); - break; - - case SCO_LINK: - case ESCO_LINK: - sco_connect_cfm(conn, status); - break; - - default: - BT_ERR("unknown link type %d", conn->type); - break; - } - - if (conn->connect_cfm_cb) - conn->connect_cfm_cb(conn, status); -} - static 
inline int hci_proto_disconn_ind(struct hci_conn *conn) { if (conn->type != ACL_LINK && conn->type != LE_LINK) @@ -1080,91 +1086,69 @@ static inline int hci_proto_disconn_ind(struct hci_conn *conn) return l2cap_disconn_ind(conn); } -static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason) -{ - switch (conn->type) { - case ACL_LINK: - case LE_LINK: - l2cap_disconn_cfm(conn, reason); - break; - - case SCO_LINK: - case ESCO_LINK: - sco_disconn_cfm(conn, reason); - break; - - /* L2CAP would be handled for BREDR chan */ - case AMP_LINK: - break; +/* ----- HCI callbacks ----- */ +struct hci_cb { + struct list_head list; - default: - BT_ERR("unknown link type %d", conn->type); - break; - } + char *name; - if (conn->disconn_cfm_cb) - conn->disconn_cfm_cb(conn, reason); -} + void (*connect_cfm) (struct hci_conn *conn, __u8 status); + void (*disconn_cfm) (struct hci_conn *conn, __u8 status); + void (*security_cfm) (struct hci_conn *conn, __u8 status, + __u8 encrypt); + void (*key_change_cfm) (struct hci_conn *conn, __u8 status); + void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role); +}; -static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status) +static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status) { - __u8 encrypt; - - if (conn->type != ACL_LINK && conn->type != LE_LINK) - return; - - if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) - return; + struct hci_cb *cb; - encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00; - l2cap_security_cfm(conn, status, encrypt); + mutex_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { + if (cb->connect_cfm) + cb->connect_cfm(conn, status); + } + mutex_unlock(&hci_cb_list_lock); - if (conn->security_cfm_cb) - conn->security_cfm_cb(conn, status); + if (conn->connect_cfm_cb) + conn->connect_cfm_cb(conn, status); } -static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, - __u8 encrypt) +static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason) { - if (conn->type != ACL_LINK && conn->type != LE_LINK) - return; + struct hci_cb *cb; - l2cap_security_cfm(conn, status, encrypt); + mutex_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { + if (cb->disconn_cfm) + cb->disconn_cfm(conn, reason); + } + mutex_unlock(&hci_cb_list_lock); - if (conn->security_cfm_cb) - conn->security_cfm_cb(conn, status); + if (conn->disconn_cfm_cb) + conn->disconn_cfm_cb(conn, reason); } -/* ----- HCI callbacks ----- */ -struct hci_cb { - struct list_head list; - - char *name; - - void (*security_cfm) (struct hci_conn *conn, __u8 status, - __u8 encrypt); - void (*key_change_cfm) (struct hci_conn *conn, __u8 status); - void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role); -}; - static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) { struct hci_cb *cb; __u8 encrypt; - hci_proto_auth_cfm(conn, status); - if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) return; encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 
0x01 : 0x00; - read_lock(&hci_cb_list_lock); + mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { if (cb->security_cfm) cb->security_cfm(conn, status, encrypt); } - read_unlock(&hci_cb_list_lock); + mutex_unlock(&hci_cb_list_lock); + + if (conn->security_cfm_cb) + conn->security_cfm_cb(conn, status); } static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, @@ -1178,26 +1162,27 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, if (conn->pending_sec_level > conn->sec_level) conn->sec_level = conn->pending_sec_level; - hci_proto_encrypt_cfm(conn, status, encrypt); - - read_lock(&hci_cb_list_lock); + mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { if (cb->security_cfm) cb->security_cfm(conn, status, encrypt); } - read_unlock(&hci_cb_list_lock); + mutex_unlock(&hci_cb_list_lock); + + if (conn->security_cfm_cb) + conn->security_cfm_cb(conn, status); } static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status) { struct hci_cb *cb; - read_lock(&hci_cb_list_lock); + mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { if (cb->key_change_cfm) cb->key_change_cfm(conn, status); } - read_unlock(&hci_cb_list_lock); + mutex_unlock(&hci_cb_list_lock); } static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, @@ -1205,12 +1190,12 @@ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, { struct hci_cb *cb; - read_lock(&hci_cb_list_lock); + mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { if (cb->role_switch_cfm) cb->role_switch_cfm(conn, status, role); } - read_unlock(&hci_cb_list_lock); + mutex_unlock(&hci_cb_list_lock); } static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type) @@ -1296,8 +1281,6 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency, int hci_register_cb(struct hci_cb *hcb); int hci_unregister_cb(struct hci_cb *hcb); -bool hci_req_pending(struct hci_dev *hdev); - struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u32 timeout); struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, @@ -1312,11 +1295,35 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); /* ----- HCI Sockets ----- */ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb); -void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk); +void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, + int flag, struct sock *skip_sk); void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb); void hci_sock_dev_event(struct hci_dev *hdev, int event); +#define HCI_MGMT_VAR_LEN BIT(0) +#define HCI_MGMT_NO_HDEV BIT(1) +#define HCI_MGMT_UNTRUSTED BIT(2) +#define HCI_MGMT_UNCONFIGURED BIT(3) + +struct hci_mgmt_handler { + int (*func) (struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len); + size_t data_len; + unsigned long flags; +}; + +struct hci_mgmt_chan { + struct list_head list; + unsigned short channel; + size_t handler_count; + const struct hci_mgmt_handler *handlers; + void (*hdev_init) (struct sock *sk, struct hci_dev *hdev); +}; + +int hci_mgmt_chan_register(struct hci_mgmt_chan *c); +void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c); + /* Management interface */ #define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR)) #define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \ @@ -1336,7 +1343,6 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event); #define 
DISCOV_BREDR_INQUIRY_LEN 0x08 #define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */ -int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len); int mgmt_new_settings(struct hci_dev *hdev); void mgmt_index_added(struct hci_dev *hdev); void mgmt_index_removed(struct hci_dev *hdev); @@ -1382,9 +1388,6 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status); void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, u8 status); void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status); -void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192, - u8 *rand192, u8 *hash256, u8 *rand256, - u8 status); void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len); diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index e218a30f2061..b831242d48a4 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -43,6 +43,8 @@ #define MGMT_STATUS_CANCELLED 0x10 #define MGMT_STATUS_INVALID_INDEX 0x11 #define MGMT_STATUS_RFKILLED 0x12 +#define MGMT_STATUS_ALREADY_PAIRED 0x13 +#define MGMT_STATUS_PERMISSION_DENIED 0x14 struct mgmt_hdr { __le16 opcode; @@ -98,6 +100,7 @@ struct mgmt_rp_read_index_list { #define MGMT_SETTING_DEBUG_KEYS 0x00001000 #define MGMT_SETTING_PRIVACY 0x00002000 #define MGMT_SETTING_CONFIGURATION 0x00004000 +#define MGMT_SETTING_STATIC_ADDRESS 0x00008000 #define MGMT_OP_READ_INFO 0x0004 #define MGMT_READ_INFO_SIZE 0 @@ -503,6 +506,71 @@ struct mgmt_cp_start_service_discovery { } __packed; #define MGMT_START_SERVICE_DISCOVERY_SIZE 4 +#define MGMT_OP_READ_LOCAL_OOB_EXT_DATA 0x003B +struct mgmt_cp_read_local_oob_ext_data { + __u8 type; +} __packed; +#define MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE 1 +struct mgmt_rp_read_local_oob_ext_data { + __u8 type; + __le16 eir_len; + __u8 eir[0]; +} __packed; + +#define MGMT_OP_READ_EXT_INDEX_LIST 0x003C +#define MGMT_READ_EXT_INDEX_LIST_SIZE 0 +struct mgmt_rp_read_ext_index_list { + __le16 num_controllers; + struct { + __le16 index; + __u8 type; + __u8 bus; + } entry[0]; +} __packed; + +#define MGMT_OP_READ_ADV_FEATURES 0x0003D +#define MGMT_READ_ADV_FEATURES_SIZE 0 +struct mgmt_rp_read_adv_features { + __le32 supported_flags; + __u8 max_adv_data_len; + __u8 max_scan_rsp_len; + __u8 max_instances; + __u8 num_instances; + __u8 instance[0]; +} __packed; + +#define MGMT_OP_ADD_ADVERTISING 0x003E +struct mgmt_cp_add_advertising { + __u8 instance; + __le32 flags; + __le16 duration; + __le16 timeout; + __u8 adv_data_len; + __u8 scan_rsp_len; + __u8 data[0]; +} __packed; +#define MGMT_ADD_ADVERTISING_SIZE 11 +struct mgmt_rp_add_advertising { + __u8 instance; +} __packed; + +#define MGMT_ADV_FLAG_CONNECTABLE BIT(0) +#define MGMT_ADV_FLAG_DISCOV BIT(1) +#define MGMT_ADV_FLAG_LIMITED_DISCOV BIT(2) +#define MGMT_ADV_FLAG_MANAGED_FLAGS BIT(3) +#define MGMT_ADV_FLAG_TX_POWER BIT(4) +#define MGMT_ADV_FLAG_APPEARANCE BIT(5) +#define MGMT_ADV_FLAG_LOCAL_NAME BIT(6) + +#define MGMT_OP_REMOVE_ADVERTISING 0x003F +struct mgmt_cp_remove_advertising { + __u8 instance; +} __packed; +#define MGMT_REMOVE_ADVERTISING_SIZE 1 +struct mgmt_rp_remove_advertising { + __u8 instance; +} __packed; + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; @@ -647,9 +715,14 @@ struct mgmt_ev_new_irk { struct mgmt_irk_info irk; } __packed; +#define MGMT_CSRK_LOCAL_UNAUTHENTICATED 0x00 +#define 
MGMT_CSRK_REMOTE_UNAUTHENTICATED 0x01 +#define MGMT_CSRK_LOCAL_AUTHENTICATED 0x02 +#define MGMT_CSRK_REMOTE_AUTHENTICATED 0x03 + struct mgmt_csrk_info { struct mgmt_addr_info addr; - __u8 master; + __u8 type; __u8 val[16]; } __packed; @@ -685,3 +758,29 @@ struct mgmt_ev_new_conn_param { #define MGMT_EV_UNCONF_INDEX_REMOVED 0x001e #define MGMT_EV_NEW_CONFIG_OPTIONS 0x001f + +struct mgmt_ev_ext_index { + __u8 type; + __u8 bus; +} __packed; + +#define MGMT_EV_EXT_INDEX_ADDED 0x0020 + +#define MGMT_EV_EXT_INDEX_REMOVED 0x0021 + +#define MGMT_EV_LOCAL_OOB_DATA_UPDATED 0x0022 +struct mgmt_ev_local_oob_data_updated { + __u8 type; + __le16 eir_len; + __u8 eir[0]; +} __packed; + +#define MGMT_EV_ADVERTISING_ADDED 0x0023 +struct mgmt_ev_advertising_added { + __u8 instance; +} __packed; + +#define MGMT_EV_ADVERTISING_REMOVED 0x0024 +struct mgmt_ev_advertising_removed { + __u8 instance; +} __packed; diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index f04cdbb7848e..c2a40a172fcd 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -82,6 +82,13 @@ typedef enum { AD_TRANSMIT /* tx Machine */ } tx_states_t; +/* churn machine states(43.4.17 in the 802.3ad standard) */ +typedef enum { + AD_CHURN_MONITOR, /* monitoring for churn */ + AD_CHURN, /* churn detected (error) */ + AD_NO_CHURN /* no churn (no error) */ +} churn_state_t; + /* rx indication types */ typedef enum { AD_TYPE_LACPDU = 1, /* type lacpdu */ @@ -229,6 +236,12 @@ typedef struct port { u16 sm_mux_timer_counter; /* state machine mux timer counter */ tx_states_t sm_tx_state; /* state machine tx state */ u16 sm_tx_timer_counter; /* state machine tx timer counter(allways on - enter to transmit state 3 time per second) */ + u16 sm_churn_actor_timer_counter; + u16 sm_churn_partner_timer_counter; + u32 churn_actor_count; + u32 churn_partner_count; + churn_state_t sm_churn_actor_state; + churn_state_t sm_churn_partner_state; struct slave *slave; /* pointer to the bond slave that this port belongs to */ struct aggregator *aggregator; /* pointer to an aggregator that this port related to */ struct port *next_port_in_aggregator; /* Next port on the linked list of the parent aggregator */ @@ -262,6 +275,22 @@ struct ad_slave_info { u16 id; }; +static inline const char *bond_3ad_churn_desc(churn_state_t state) +{ + static const char *const churn_description[] = { + "monitoring", + "churned", + "none", + "unknown" + }; + int max_size = sizeof(churn_description) / sizeof(churn_description[0]); + + if (state >= max_size) + state = max_size - 1; + + return churn_description[state]; +} + /* ========== AD Exported functions to the main bonding code ========== */ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution); void bond_3ad_bind_slave(struct slave *slave); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 64e09e1e8099..f8d6813cd5b2 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -215,6 +215,39 @@ enum ieee80211_rate_flags { }; /** + * enum ieee80211_bss_type - BSS type filter + * + * @IEEE80211_BSS_TYPE_ESS: Infrastructure BSS + * @IEEE80211_BSS_TYPE_PBSS: Personal BSS + * @IEEE80211_BSS_TYPE_IBSS: Independent BSS + * @IEEE80211_BSS_TYPE_MBSS: Mesh BSS + * @IEEE80211_BSS_TYPE_ANY: Wildcard value for matching any BSS type + */ +enum ieee80211_bss_type { + IEEE80211_BSS_TYPE_ESS, + IEEE80211_BSS_TYPE_PBSS, + IEEE80211_BSS_TYPE_IBSS, + IEEE80211_BSS_TYPE_MBSS, + IEEE80211_BSS_TYPE_ANY +}; + +/** + * enum ieee80211_privacy - BSS privacy filter + * + * @IEEE80211_PRIVACY_ON: privacy 
bit set + * @IEEE80211_PRIVACY_OFF: privacy bit clear + * @IEEE80211_PRIVACY_ANY: Wildcard value for matching any privacy setting + */ +enum ieee80211_privacy { + IEEE80211_PRIVACY_ON, + IEEE80211_PRIVACY_OFF, + IEEE80211_PRIVACY_ANY +}; + +#define IEEE80211_PRIVACY(x) \ + ((x) ? IEEE80211_PRIVACY_ON : IEEE80211_PRIVACY_OFF) + +/** * struct ieee80211_rate - bitrate definition * * This structure describes a bitrate that an 802.11 PHY can @@ -2423,6 +2456,7 @@ struct cfg80211_ops { struct wireless_dev * (*add_virtual_intf)(struct wiphy *wiphy, const char *name, + unsigned char name_assign_type, enum nl80211_iftype type, u32 *flags, struct vif_params *params); @@ -3183,10 +3217,8 @@ struct wiphy { const struct ieee80211_ht_cap *ht_capa_mod_mask; const struct ieee80211_vht_cap *vht_capa_mod_mask; -#ifdef CONFIG_NET_NS /* the network namespace this phy lives in currently */ - struct net *_net; -#endif + possible_net_t _net; #ifdef CONFIG_CFG80211_WEXT const struct iw_handler_def *wext; @@ -4012,14 +4044,16 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, struct ieee80211_channel *channel, const u8 *bssid, const u8 *ssid, size_t ssid_len, - u16 capa_mask, u16 capa_val); + enum ieee80211_bss_type bss_type, + enum ieee80211_privacy); static inline struct cfg80211_bss * cfg80211_get_ibss(struct wiphy *wiphy, struct ieee80211_channel *channel, const u8 *ssid, size_t ssid_len) { return cfg80211_get_bss(wiphy, channel, NULL, ssid, ssid_len, - WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS); + IEEE80211_BSS_TYPE_IBSS, + IEEE80211_PRIVACY_ANY); } /** @@ -4260,6 +4294,7 @@ struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy, int approxlen); struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy, + struct wireless_dev *wdev, enum nl80211_commands cmd, enum nl80211_attrs attr, int vendor_event_idx, @@ -4314,6 +4349,7 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb); /** * cfg80211_vendor_event_alloc - allocate vendor-specific event skb * @wiphy: the wiphy + * @wdev: the wireless device * @event_idx: index of the vendor event in the wiphy's vendor_events * @approxlen: an upper bound of the length of the data that will * be put into the skb @@ -4322,16 +4358,20 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb); * This function allocates and pre-fills an skb for an event on the * vendor-specific multicast group. * + * If wdev != NULL, both the ifindex and identifier of the specified + * wireless device are added to the event message before the vendor data + * attribute. + * * When done filling the skb, call cfg80211_vendor_event() with the * skb to send the event. * * Return: An allocated and pre-filled skb. %NULL if any errors happen. 
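Note on the hunk above: cfg80211_get_bss() now takes enum ieee80211_bss_type and enum ieee80211_privacy filters instead of a raw capability mask, with the _ANY values acting as wildcards and IEEE80211_PRIVACY() mapping a capability bit to a filter value. Below is a minimal stand-alone C sketch of how such filters could match a scan result; struct scan_entry, bss_matches() and PRIVACY_FILTER() are invented for the illustration and only mirror the kernel definitions.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the new cfg80211 filter enums (values are illustrative). */
enum bss_type { BSS_TYPE_ESS, BSS_TYPE_PBSS, BSS_TYPE_IBSS, BSS_TYPE_MBSS, BSS_TYPE_ANY };
enum privacy  { PRIVACY_ON, PRIVACY_OFF, PRIVACY_ANY };

/* Same idea as IEEE80211_PRIVACY(x): map a capability bit to a filter value. */
#define PRIVACY_FILTER(x) ((x) ? PRIVACY_ON : PRIVACY_OFF)

struct scan_entry {
	enum bss_type type;
	bool privacy_bit;	/* privacy capability bit from the beacon */
};

/* The _ANY values act as wildcards, like the new bss_type/privacy filters. */
static bool bss_matches(const struct scan_entry *e,
			enum bss_type want_type, enum privacy want_priv)
{
	if (want_type != BSS_TYPE_ANY && want_type != e->type)
		return false;
	if (want_priv != PRIVACY_ANY && want_priv != PRIVACY_FILTER(e->privacy_bit))
		return false;
	return true;
}

int main(void)
{
	struct scan_entry ap = { .type = BSS_TYPE_ESS, .privacy_bit = true };

	printf("%d\n", bss_matches(&ap, BSS_TYPE_ESS, PRIVACY_ANY));	/* 1 */
	printf("%d\n", bss_matches(&ap, BSS_TYPE_IBSS, PRIVACY_ON));	/* 0 */
	return 0;
}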
*/ static inline struct sk_buff * -cfg80211_vendor_event_alloc(struct wiphy *wiphy, int approxlen, - int event_idx, gfp_t gfp) +cfg80211_vendor_event_alloc(struct wiphy *wiphy, struct wireless_dev *wdev, + int approxlen, int event_idx, gfp_t gfp) { - return __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_VENDOR, + return __cfg80211_alloc_event_skb(wiphy, wdev, NL80211_CMD_VENDOR, NL80211_ATTR_VENDOR_DATA, event_idx, approxlen, gfp); } @@ -4432,7 +4472,7 @@ static inline int cfg80211_testmode_reply(struct sk_buff *skb) static inline struct sk_buff * cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, int approxlen, gfp_t gfp) { - return __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_TESTMODE, + return __cfg80211_alloc_event_skb(wiphy, NULL, NL80211_CMD_TESTMODE, NL80211_ATTR_TESTDATA, -1, approxlen, gfp); } @@ -4862,6 +4902,17 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev, bool ieee80211_operating_class_to_band(u8 operating_class, enum ieee80211_band *band); +/** + * ieee80211_chandef_to_operating_class - convert chandef to operation class + * + * @chandef: the chandef to convert + * @op_class: a pointer to the resulting operating class + * + * Returns %true if the conversion was successful, %false otherwise. + */ +bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef, + u8 *op_class); + /* * cfg80211_tdls_oper_request - request userspace to perform TDLS operation * @dev: the device on which the operation is requested @@ -4950,6 +5001,64 @@ int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len, u8 *buf, unsigned int bufsize); /** + * ieee80211_ie_split_ric - split an IE buffer according to ordering (with RIC) + * @ies: the IE buffer + * @ielen: the length of the IE buffer + * @ids: an array with element IDs that are allowed before + * the split + * @n_ids: the size of the element ID array + * @after_ric: array IE types that come after the RIC element + * @n_after_ric: size of the @after_ric array + * @offset: offset where to start splitting in the buffer + * + * This function splits an IE buffer by updating the @offset + * variable to point to the location where the buffer should be + * split. + * + * It assumes that the given IE buffer is well-formed, this + * has to be guaranteed by the caller! + * + * It also assumes that the IEs in the buffer are ordered + * correctly, if not the result of using this function will not + * be ordered correctly either, i.e. it does no reordering. + * + * The function returns the offset where the next part of the + * buffer starts, which may be @ielen if the entire (remainder) + * of the buffer should be used. + */ +size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen, + const u8 *ids, int n_ids, + const u8 *after_ric, int n_after_ric, + size_t offset); + +/** + * ieee80211_ie_split - split an IE buffer according to ordering + * @ies: the IE buffer + * @ielen: the length of the IE buffer + * @ids: an array with element IDs that are allowed before + * the split + * @n_ids: the size of the element ID array + * @offset: offset where to start splitting in the buffer + * + * This function splits an IE buffer by updating the @offset + * variable to point to the location where the buffer should be + * split. + * + * It assumes that the given IE buffer is well-formed, this + * has to be guaranteed by the caller! + * + * It also assumes that the IEs in the buffer are ordered + * correctly, if not the result of using this function will not + * be ordered correctly either, i.e. it does no reordering. 
+ * + * The function returns the offset where the next part of the + * buffer starts, which may be @ielen if the entire (remainder) + * of the buffer should be used. + */ +size_t ieee80211_ie_split(const u8 *ies, size_t ielen, + const u8 *ids, int n_ids, size_t offset); + +/** * cfg80211_report_wowlan_wakeup - report wakeup from WoWLAN * @wdev: the wireless device reporting the wakeup * @wakeup: the wakeup report diff --git a/include/net/compat.h b/include/net/compat.h index 42a9c8431177..48103cf94e97 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -40,7 +40,7 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *); #define compat_mmsghdr mmsghdr #endif /* defined(CONFIG_COMPAT) */ -ssize_t get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *, +int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *, struct sockaddr __user **, struct iovec **); asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *, unsigned int); diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h index 597b88a94332..207d9ba1f92c 100644 --- a/include/net/dcbnl.h +++ b/include/net/dcbnl.h @@ -49,6 +49,9 @@ struct dcbnl_rtnl_ops { int (*ieee_setets) (struct net_device *, struct ieee_ets *); int (*ieee_getmaxrate) (struct net_device *, struct ieee_maxrate *); int (*ieee_setmaxrate) (struct net_device *, struct ieee_maxrate *); + int (*ieee_getqcn) (struct net_device *, struct ieee_qcn *); + int (*ieee_setqcn) (struct net_device *, struct ieee_qcn *); + int (*ieee_getqcnstats) (struct net_device *, struct ieee_qcn_stats *); int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *); int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *); int (*ieee_getapp) (struct net_device *, struct dcb_app *); diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h index fac4e3f4a6d3..d0424269313f 100644 --- a/include/net/dn_neigh.h +++ b/include/net/dn_neigh.h @@ -18,10 +18,11 @@ struct dn_neigh { void dn_neigh_init(void); void dn_neigh_cleanup(void); -int dn_neigh_router_hello(struct sk_buff *skb); -int dn_neigh_endnode_hello(struct sk_buff *skb); +int dn_neigh_router_hello(struct sock *sk, struct sk_buff *skb); +int dn_neigh_endnode_hello(struct sock *sk, struct sk_buff *skb); void dn_neigh_pointopoint_hello(struct sk_buff *skb); int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n); +int dn_to_neigh_output(struct sock *sk, struct sk_buff *skb); extern struct neigh_table dn_neigh_table; diff --git a/include/net/dsa.h b/include/net/dsa.h index ed3c34bbb67a..fbca63ba8f73 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -72,6 +72,7 @@ struct dsa_platform_data { * to the root switch chip of the tree. */ struct device *netdev; + struct net_device *of_netdev; /* * Info structs describing each of the switch chips @@ -128,6 +129,11 @@ struct dsa_switch { int index; /* + * Tagging protocol understood by this switch + */ + enum dsa_tag_protocol tag_protocol; + + /* * Configuration data for this switch. 
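Note on the ieee80211_ie_split() kernel-doc above: it describes walking the (ID, length, payload) triplets from @offset and stopping at the first element whose ID is not in the allowed list, returning that offset. A self-contained sketch of that walk under the standard one-byte-ID, one-byte-length IE encoding; ie_split() and id_allowed() below are illustrative re-implementations of the documented behaviour, not the mac80211 code.

#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool id_allowed(uint8_t id, const uint8_t *ids, int n_ids)
{
	for (int i = 0; i < n_ids; i++)
		if (ids[i] == id)
			return true;
	return false;
}

/* Return the offset where the buffer should be split: the start of the
 * first element whose ID is not in @ids, or ielen if all are allowed.
 * Assumes a well-formed buffer, as the kernel-doc above requires. */
static size_t ie_split(const uint8_t *ies, size_t ielen,
		       const uint8_t *ids, int n_ids, size_t offset)
{
	while (offset + 2 <= ielen && id_allowed(ies[offset], ids, n_ids))
		offset += 2 + ies[offset + 1];	/* skip ID, length, payload */
	return offset;
}

int main(void)
{
	/* SSID (ID 0, len 3), Supported Rates (ID 1, len 2), RSN (ID 48, len 2) */
	const uint8_t ies[] = { 0, 3, 'f', 'o', 'o',  1, 2, 0x82, 0x84,  48, 2, 1, 0 };
	const uint8_t before[] = { 0, 1 };	/* IDs allowed before the split */

	printf("split at %zu\n", ie_split(ies, sizeof(ies), before, 2, 0));	/* 9 */
	return 0;
}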
*/ struct dsa_chip_data *pd; @@ -165,6 +171,11 @@ static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p) return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port); } +static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p) +{ + return ds->phys_port_mask & (1 << p) && ds->ports[p]; +} + static inline u8 dsa_upstream_port(struct dsa_switch *ds) { struct dsa_switch_tree *dst = ds->dst; @@ -275,6 +286,22 @@ struct dsa_switch_driver { int (*get_regs_len)(struct dsa_switch *ds, int port); void (*get_regs)(struct dsa_switch *ds, int port, struct ethtool_regs *regs, void *p); + + /* + * Bridge integration + */ + int (*port_join_bridge)(struct dsa_switch *ds, int port, + u32 br_port_mask); + int (*port_leave_bridge)(struct dsa_switch *ds, int port, + u32 br_port_mask); + int (*port_stp_update)(struct dsa_switch *ds, int port, + u8 state); + int (*fdb_add)(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid); + int (*fdb_del)(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid); + int (*fdb_getnext)(struct dsa_switch *ds, int port, + unsigned char *addr, bool *is_static); }; void register_switch_driver(struct dsa_switch_driver *type); diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index 1f99a1de0e4f..d64253914a6a 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -12,7 +12,6 @@ struct sock; struct dst_ops { unsigned short family; - __be16 protocol; unsigned int gc_thresh; int (*gc)(struct dst_ops *ops); diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index e584de16e4c3..6d67383a5114 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -58,7 +58,7 @@ struct fib_rules_ops { struct sk_buff *, struct fib_rule_hdr *, struct nlattr **); - void (*delete)(struct fib_rule *); + int (*delete)(struct fib_rule *); int (*compare)(struct fib_rule *, struct fib_rule_hdr *, struct nlattr **); @@ -95,17 +95,10 @@ static inline void fib_rule_get(struct fib_rule *rule) atomic_inc(&rule->refcnt); } -static inline void fib_rule_put_rcu(struct rcu_head *head) -{ - struct fib_rule *rule = container_of(head, struct fib_rule, rcu); - release_net(rule->fr_net); - kfree(rule); -} - static inline void fib_rule_put(struct fib_rule *rule) { if (atomic_dec_and_test(&rule->refcnt)) - call_rcu(&rule->rcu, fib_rule_put_rcu); + kfree_rcu(rule, rcu); } static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla) diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 0574abd3db86..a9af1cc8c1bc 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -92,9 +92,7 @@ struct genl_info { struct genlmsghdr * genlhdr; void * userhdr; struct nlattr ** attrs; -#ifdef CONFIG_NET_NS - struct net * _net; -#endif + possible_net_t _net; void * user_ptr[2]; struct sock * dst_sk; }; diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 98e5f9578f86..1c8b6820b694 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -41,18 +41,18 @@ enum { struct inet6_ifaddr { struct in6_addr addr; __u32 prefix_len; - + /* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. 
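Note on the fib_rules.h hunk above: fib_rule_put() now frees the rule with kfree_rcu() once the reference count drops to zero instead of open-coding a call_rcu() callback. A user-space sketch of the same drop-to-zero pattern with C11 atomics; rule_alloc()/rule_get()/rule_put() are invented names, and the plain free() only stands in for kfree_rcu(), which in the kernel additionally defers the free past an RCU grace period.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct rule {
	atomic_int refcnt;
	int table;
};

static struct rule *rule_alloc(int table)
{
	struct rule *r = malloc(sizeof(*r));

	if (!r)
		abort();
	atomic_init(&r->refcnt, 1);
	r->table = table;
	return r;
}

static void rule_get(struct rule *r)
{
	atomic_fetch_add(&r->refcnt, 1);
}

static void rule_put(struct rule *r)
{
	/* The last put frees the object; the kernel defers this past an
	 * RCU grace period so concurrent readers stay safe. */
	if (atomic_fetch_sub(&r->refcnt, 1) == 1)
		free(r);
}

int main(void)
{
	struct rule *r = rule_alloc(254);

	rule_get(r);	/* second reference */
	rule_put(r);	/* still alive */
	rule_put(r);	/* freed here */
	return 0;
}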
*/ __u32 valid_lft; __u32 prefered_lft; atomic_t refcnt; spinlock_t lock; - spinlock_t state_lock; int state; __u32 flags; __u8 dad_probes; + __u8 stable_privacy_retry; __u16 scope; diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h index 74af137304be..6d539e4e5ba7 100644 --- a/include/net/inet6_connection_sock.h +++ b/include/net/inet6_connection_sock.h @@ -28,8 +28,7 @@ int inet6_csk_bind_conflict(const struct sock *sk, struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6, const struct request_sock *req); -struct request_sock *inet6_csk_search_req(const struct sock *sk, - struct request_sock ***prevp, +struct request_sock *inet6_csk_search_req(struct sock *sk, const __be16 rport, const struct in6_addr *raddr, const struct in6_addr *laddr, diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index 9201afe083fa..7ff588ca6817 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -38,8 +38,6 @@ static inline unsigned int __inet6_ehashfn(const u32 lhash, return jhash_3words(lhash, fhash, ports, initval); } -int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp); - /* * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM diff --git a/include/net/inet_common.h b/include/net/inet_common.h index b2828a06a5a6..4a92423eefa5 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -21,12 +21,11 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags); int inet_accept(struct socket *sock, struct socket *newsock, int flags); -int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, - size_t size); +int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size); ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); -int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, - size_t size, int flags); +int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int flags); int inet_shutdown(struct socket *sock, int how); int inet_listen(struct socket *sock, int backlog); void inet_sock_destruct(struct sock *sk); diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 5976bdecf58b..7b5887cd1172 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -126,6 +126,8 @@ struct inet_connection_sock { /* Information on the current probe. 
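Note on the inet6_ifaddr comment above: valid_lft and prefered_lft are kept in seconds relative to tstamp, with expiry at tstamp + HZ * lft. A small worked example of that conversion; the HZ value and the sample numbers are made up for illustration.

#include <stdio.h>

#define HZ 100	/* illustrative tick rate */

int main(void)
{
	unsigned long tstamp = 500000;	/* jiffies when the address was refreshed */
	unsigned long valid_lft = 7200;	/* seconds */

	/* Lifetimes are converted from seconds to jiffies before being
	 * added to the timestamp: expiry is at tstamp + HZ * lft. */
	unsigned long expires = tstamp + (unsigned long)HZ * valid_lft;

	printf("valid until jiffy %lu\n", expires);	/* 1220000 */
	return 0;
}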
*/ int probe_size; + + u32 probe_timestamp; } icsk_mtup; u32 icsk_ca_priv[16]; u32 icsk_user_timeout; @@ -254,8 +256,7 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk, struct sock *inet_csk_accept(struct sock *sk, int flags, int *err); -struct request_sock *inet_csk_search_req(const struct sock *sk, - struct request_sock ***prevp, +struct request_sock *inet_csk_search_req(struct sock *sk, const __be16 rport, const __be32 raddr, const __be32 laddr); @@ -281,15 +282,13 @@ void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, static inline void inet_csk_reqsk_queue_removed(struct sock *sk, struct request_sock *req) { - if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0) - inet_csk_delete_keepalive_timer(sk); + reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); } static inline void inet_csk_reqsk_queue_added(struct sock *sk, const unsigned long timeout) { - if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0) - inet_csk_reset_keepalive_timer(sk, timeout); + reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue); } static inline int inet_csk_reqsk_queue_len(const struct sock *sk) @@ -308,26 +307,19 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) } static inline void inet_csk_reqsk_queue_unlink(struct sock *sk, - struct request_sock *req, - struct request_sock **prev) + struct request_sock *req) { - reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req, prev); + reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req); } static inline void inet_csk_reqsk_queue_drop(struct sock *sk, - struct request_sock *req, - struct request_sock **prev) + struct request_sock *req) { - inet_csk_reqsk_queue_unlink(sk, req, prev); + inet_csk_reqsk_queue_unlink(sk, req); inet_csk_reqsk_queue_removed(sk, req); - reqsk_free(req); + reqsk_put(req); } -void inet_csk_reqsk_queue_prune(struct sock *parent, - const unsigned long interval, - const unsigned long timeout, - const unsigned long max_rto); - void inet_csk_destroy_sock(struct sock *sk); void inet_csk_prepare_forced_close(struct sock *sk); diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index dd1950a7e273..73fe0f9525d9 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -76,9 +76,7 @@ struct inet_ehash_bucket { * ports are created in O(1) time? I thought so. ;-) -DaveM */ struct inet_bind_bucket { -#ifdef CONFIG_NET_NS - struct net *ib_net; -#endif + possible_net_t ib_net; unsigned short port; signed char fastreuse; signed char fastreuseport; @@ -223,8 +221,8 @@ inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net, void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb); -static inline int inet_bhashfn(struct net *net, const __u16 lport, - const int bhash_size) +static inline u32 inet_bhashfn(const struct net *net, const __u16 lport, + const u32 bhash_size) { return (lport + net_hash_mix(net)) & (bhash_size - 1); } @@ -233,7 +231,7 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, const unsigned short snum); /* These can have wildcards, don't try too hard. 
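Note on inet_bhashfn()/inet_lhashfn() above: the switch to u32 keeps the same hashing scheme, adding a per-namespace mix to the local port and masking the sum into a power-of-two table. A stand-alone sketch of that formula; bind_hashfn() is an invented name and the mix constant merely stands in for net_hash_mix(net).

#include <stdint.h>
#include <stdio.h>

/* (lport + per-netns mix) masked into a power-of-two sized table,
 * as in inet_bhashfn()/inet_lhashfn() above. */
static uint32_t bind_hashfn(uint32_t net_mix, uint16_t lport, uint32_t bhash_size)
{
	return (lport + net_mix) & (bhash_size - 1);	/* bhash_size must be 2^n */
}

int main(void)
{
	uint32_t size = 1024;		/* table size, power of two */
	uint32_t mix = 0x9e3779b9;	/* stand-in for net_hash_mix(net) */

	printf("port 80   -> bucket %u\n", bind_hashfn(mix, 80, size));
	printf("port 8080 -> bucket %u\n", bind_hashfn(mix, 8080, size));
	return 0;
}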
*/ -static inline int inet_lhashfn(struct net *net, const unsigned short num) +static inline u32 inet_lhashfn(const struct net *net, const unsigned short num) { return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1); } @@ -251,6 +249,7 @@ void inet_put_port(struct sock *sk); void inet_hashinfo_init(struct inet_hashinfo *h); int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw); +int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw); void inet_hash(struct sock *sk); void inet_unhash(struct sock *sk); @@ -385,13 +384,32 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo, iph->daddr, dport, inet_iif(skb)); } +u32 sk_ehashfn(const struct sock *sk); +u32 inet6_ehashfn(const struct net *net, + const struct in6_addr *laddr, const u16 lport, + const struct in6_addr *faddr, const __be16 fport); + +static inline void sk_daddr_set(struct sock *sk, __be32 addr) +{ + sk->sk_daddr = addr; /* alias of inet_daddr */ +#if IS_ENABLED(CONFIG_IPV6) + ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr); +#endif +} + +static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr) +{ + sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */ +#if IS_ENABLED(CONFIG_IPV6) + ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr); +#endif +} + int __inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk, u32 port_offset, int (*check_established)(struct inet_timewait_death_row *, struct sock *, __u16, - struct inet_timewait_sock **), - int (*hash)(struct sock *sk, - struct inet_timewait_sock *twp)); + struct inet_timewait_sock **)); int inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk); diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index eb16c7beed1e..b6c3737da4e9 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -27,6 +27,7 @@ #include <net/sock.h> #include <net/request_sock.h> #include <net/netns/hash.h> +#include <net/tcp_states.h> /** struct ip_options - IP Options * @@ -77,6 +78,10 @@ struct inet_request_sock { #define ir_v6_rmt_addr req.__req_common.skc_v6_daddr #define ir_v6_loc_addr req.__req_common.skc_v6_rcv_saddr #define ir_iif req.__req_common.skc_bound_dev_if +#define ir_cookie req.__req_common.skc_cookie +#define ireq_net req.__req_common.skc_net +#define ireq_state req.__req_common.skc_state +#define ireq_family req.__req_common.skc_family kmemcheck_bitfield_begin(flags); u16 snd_wscale : 4, @@ -88,11 +93,11 @@ struct inet_request_sock { acked : 1, no_srccheck: 1; kmemcheck_bitfield_end(flags); + u32 ir_mark; union { struct ip_options_rcu *opt; struct sk_buff *pktopts; }; - u32 ir_mark; }; static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk) @@ -100,13 +105,12 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk) return (struct inet_request_sock *)sk; } -static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb) +static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) { - if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) { + if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) return skb->mark; - } else { - return sk->sk_mark; - } + + return sk->sk_mark; } struct inet_cork { @@ -239,18 +243,8 @@ static inline unsigned int __inet_ehashfn(const __be32 laddr, initval); } -static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops) -{ - struct request_sock *req = reqsk_alloc(ops); - struct inet_request_sock *ireq = 
inet_rsk(req); - - if (req != NULL) { - kmemcheck_annotate_bitfield(ireq, flags); - ireq->opt = NULL; - } - - return req; -} +struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops, + struct sock *sk_listener); static inline __u8 inet_sk_flowi_flags(const struct sock *sk) { diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 6c566034e26d..360c4802288d 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -31,67 +31,14 @@ struct inet_hashinfo; -#define INET_TWDR_RECYCLE_SLOTS_LOG 5 -#define INET_TWDR_RECYCLE_SLOTS (1 << INET_TWDR_RECYCLE_SLOTS_LOG) - -/* - * If time > 4sec, it is "slow" path, no recycling is required, - * so that we select tick to get range about 4 seconds. - */ -#if HZ <= 16 || HZ > 4096 -# error Unsupported: HZ <= 16 or HZ > 4096 -#elif HZ <= 32 -# define INET_TWDR_RECYCLE_TICK (5 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) -#elif HZ <= 64 -# define INET_TWDR_RECYCLE_TICK (6 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) -#elif HZ <= 128 -# define INET_TWDR_RECYCLE_TICK (7 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) -#elif HZ <= 256 -# define INET_TWDR_RECYCLE_TICK (8 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) -#elif HZ <= 512 -# define INET_TWDR_RECYCLE_TICK (9 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) -#elif HZ <= 1024 -# define INET_TWDR_RECYCLE_TICK (10 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) -#elif HZ <= 2048 -# define INET_TWDR_RECYCLE_TICK (11 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) -#else -# define INET_TWDR_RECYCLE_TICK (12 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) -#endif - -static inline u32 inet_tw_time_stamp(void) -{ - return jiffies; -} - -/* TIME_WAIT reaping mechanism. */ -#define INET_TWDR_TWKILL_SLOTS 8 /* Please keep this a power of 2. */ - -#define INET_TWDR_TWKILL_QUOTA 100 - struct inet_timewait_death_row { - /* Short-time timewait calendar */ - int twcal_hand; - unsigned long twcal_jiffie; - struct timer_list twcal_timer; - struct hlist_head twcal_row[INET_TWDR_RECYCLE_SLOTS]; - - spinlock_t death_lock; - int tw_count; - int period; - u32 thread_slots; - struct work_struct twkill_work; - struct timer_list tw_timer; - int slot; - struct hlist_head cells[INET_TWDR_TWKILL_SLOTS]; - struct inet_hashinfo *hashinfo; + atomic_t tw_count; + + struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp; int sysctl_tw_recycle; int sysctl_max_tw_buckets; }; -void inet_twdr_hangman(unsigned long data); -void inet_twdr_twkill_work(struct work_struct *work); -void inet_twdr_twcal_tick(unsigned long data); - struct inet_bind_bucket; /* @@ -122,6 +69,7 @@ struct inet_timewait_sock { #define tw_v6_rcv_saddr __tw_common.skc_v6_rcv_saddr #define tw_dport __tw_common.skc_dport #define tw_num __tw_common.skc_num +#define tw_cookie __tw_common.skc_cookie int tw_timeout; volatile unsigned char tw_substate; @@ -132,52 +80,18 @@ struct inet_timewait_sock { __be16 tw_sport; kmemcheck_bitfield_begin(flags); /* And these are ours. 
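Note on the sk_daddr_set()/sk_rcv_saddr_set() helpers added a little earlier: they store an IPv4 address in the socket and, when IPv6 is built in, mirror it into the IPv6 fields as a v4-mapped address (::ffff:a.b.c.d). A user-space sketch of that mapping; set_v4mapped() is an invented stand-in for the kernel's ipv6_addr_set_v4mapped().

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Build the ::ffff:a.b.c.d form used when an IPv4 address is stored
 * in an IPv6 field, as the sk_daddr_set() helper above does. */
static void set_v4mapped(uint32_t v4_be, struct in6_addr *v6)
{
	memset(v6, 0, sizeof(*v6));
	v6->s6_addr[10] = 0xff;
	v6->s6_addr[11] = 0xff;
	memcpy(&v6->s6_addr[12], &v4_be, 4);
}

int main(void)
{
	struct in_addr v4;
	struct in6_addr v6;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "192.0.2.1", &v4);
	set_v4mapped(v4.s_addr, &v6);
	printf("%s\n", inet_ntop(AF_INET6, &v6, buf, sizeof(buf)));	/* ::ffff:192.0.2.1 */
	return 0;
}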
*/ - unsigned int tw_pad0 : 1, /* 1 bit hole */ + unsigned int tw_kill : 1, tw_transparent : 1, tw_flowlabel : 20, tw_pad : 2, /* 2 bits hole */ tw_tos : 8; kmemcheck_bitfield_end(flags); - u32 tw_ttd; + struct timer_list tw_timer; struct inet_bind_bucket *tw_tb; - struct hlist_node tw_death_node; + struct inet_timewait_death_row *tw_dr; }; #define tw_tclass tw_tos -static inline int inet_twsk_dead_hashed(const struct inet_timewait_sock *tw) -{ - return !hlist_unhashed(&tw->tw_death_node); -} - -static inline void inet_twsk_dead_node_init(struct inet_timewait_sock *tw) -{ - tw->tw_death_node.pprev = NULL; -} - -static inline void __inet_twsk_del_dead_node(struct inet_timewait_sock *tw) -{ - __hlist_del(&tw->tw_death_node); - inet_twsk_dead_node_init(tw); -} - -static inline int inet_twsk_del_dead_node(struct inet_timewait_sock *tw) -{ - if (inet_twsk_dead_hashed(tw)) { - __inet_twsk_del_dead_node(tw); - return 1; - } - return 0; -} - -#define inet_twsk_for_each(tw, node, head) \ - hlist_nulls_for_each_entry(tw, node, head, tw_node) - -#define inet_twsk_for_each_inmate(tw, jail) \ - hlist_for_each_entry(tw, jail, tw_death_node) - -#define inet_twsk_for_each_inmate_safe(tw, safe, jail) \ - hlist_for_each_entry_safe(tw, safe, jail, tw_death_node) - static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk) { return (struct inet_timewait_sock *)sk; @@ -192,16 +106,14 @@ int inet_twsk_bind_unhash(struct inet_timewait_sock *tw, struct inet_hashinfo *hashinfo); struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, + struct inet_timewait_death_row *dr, const int state); void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, struct inet_hashinfo *hashinfo); -void inet_twsk_schedule(struct inet_timewait_sock *tw, - struct inet_timewait_death_row *twdr, - const int timeo, const int timewait_len); -void inet_twsk_deschedule(struct inet_timewait_sock *tw, - struct inet_timewait_death_row *twdr); +void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo); +void inet_twsk_deschedule(struct inet_timewait_sock *tw); void inet_twsk_purge(struct inet_hashinfo *hashinfo, struct inet_timewait_death_row *twdr, int family); diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 80479abddf73..d5332ddcea3f 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h @@ -19,6 +19,7 @@ struct inetpeer_addr_base { union { __be32 a4; __be32 a6[4]; + struct in6_addr in6; }; }; @@ -151,7 +152,7 @@ static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base, { struct inetpeer_addr daddr; - *(struct in6_addr *)daddr.addr.a6 = *v6daddr; + daddr.addr.in6 = *v6daddr; daddr.family = AF_INET6; return inet_getpeer(base, &daddr, create); } diff --git a/include/net/ip.h b/include/net/ip.h index 6cc1eafb153a..d14af7edd197 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -108,7 +108,8 @@ int ip_local_deliver(struct sk_buff *skb); int ip_mr_input(struct sk_buff *skb); int ip_output(struct sock *sk, struct sk_buff *skb); int ip_mc_output(struct sock *sk, struct sk_buff *skb); -int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); +int ip_fragment(struct sock *sk, struct sk_buff *skb, + int (*output)(struct sock *, struct sk_buff *)); int ip_do_nat(struct sk_buff *skb); void ip_send_check(struct iphdr *ip); int __ip_local_out(struct sk_buff *skb); @@ -318,9 +319,10 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) } u32 ip_idents_reserve(u32 hash, int segs); -void __ip_select_ident(struct iphdr 
*iph, int segs); +void __ip_select_ident(struct net *net, struct iphdr *iph, int segs); -static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs) +static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb, + struct sock *sk, int segs) { struct iphdr *iph = ip_hdr(skb); @@ -337,13 +339,14 @@ static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, in iph->id = 0; } } else { - __ip_select_ident(iph, segs); + __ip_select_ident(net, iph, segs); } } -static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk) +static inline void ip_select_ident(struct net *net, struct sk_buff *skb, + struct sock *sk) { - ip_select_ident_segs(skb, sk, 1); + ip_select_ident_segs(net, skb, sk, 1); } static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto) diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index eda131d179d9..5e192068e6cb 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -170,7 +170,8 @@ static inline bool ipv6_anycast_destination(const struct sk_buff *skb) return rt->rt6i_flags & RTF_ANYCAST; } -int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); +int ip6_fragment(struct sock *sk, struct sk_buff *skb, + int (*output)(struct sock *, struct sk_buff *)); static inline int ip6_skb_dst_mtu(struct sk_buff *skb) { diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index 76c091b53dae..b8529aa1dae7 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -71,14 +71,16 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw); __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr, const struct in6_addr *raddr); struct net *ip6_tnl_get_link_net(const struct net_device *dev); +int ip6_tnl_get_iflink(const struct net_device *dev); -static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev) +static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, + struct net_device *dev) { struct net_device_stats *stats = &dev->stats; int pkt_len, err; pkt_len = skb->len; - err = ip6_local_out(skb); + err = ip6_local_out_sk(sk, skb); if (net_xmit_eval(err) == 0) { struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 5bd120e4bc0a..54271ed0ed45 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -136,7 +136,7 @@ struct fib_result { u32 tclassid; struct fib_info *fi; struct fib_table *table; - struct list_head *fa_head; + struct hlist_head *fa_head; }; struct fib_result_nl { @@ -185,7 +185,9 @@ struct fib_table { u32 tb_id; int tb_default; int tb_num_default; - unsigned long tb_data[0]; + struct rcu_head rcu; + unsigned long *tb_data; + unsigned long __data[0]; }; int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, @@ -195,10 +197,10 @@ int fib_table_delete(struct fib_table *, struct fib_config *); int fib_table_dump(struct fib_table *table, struct sk_buff *skb, struct netlink_callback *cb); int fib_table_flush(struct fib_table *table); +struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); +void fib_table_flush_external(struct fib_table *table); void fib_free_table(struct fib_table *tb); - - #ifndef CONFIG_IP_MULTIPLE_TABLES #define TABLE_LOCAL_INDEX (RT_TABLE_LOCAL & (FIB_TABLE_HASHSZ - 1)) @@ -206,12 +208,16 @@ void fib_free_table(struct fib_table *tb); static inline struct fib_table *fib_get_table(struct net *net, u32 id) { + struct hlist_node *tb_hlist; struct hlist_head *ptr; ptr 
= id == RT_TABLE_LOCAL ? &net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX] : &net->ipv4.fib_table_hash[TABLE_MAIN_INDEX]; - return hlist_entry(ptr->first, struct fib_table, tb_hlist); + + tb_hlist = rcu_dereference_rtnl(hlist_first_rcu(ptr)); + + return hlist_entry(tb_hlist, struct fib_table, tb_hlist); } static inline struct fib_table *fib_new_table(struct net *net, u32 id) @@ -222,14 +228,13 @@ static inline struct fib_table *fib_new_table(struct net *net, u32 id) static inline int fib_lookup(struct net *net, const struct flowi4 *flp, struct fib_result *res) { + struct fib_table *tb; int err = -ENETUNREACH; rcu_read_lock(); - if (!fib_table_lookup(fib_get_table(net, RT_TABLE_LOCAL), flp, res, - FIB_LOOKUP_NOREF) || - !fib_table_lookup(fib_get_table(net, RT_TABLE_MAIN), flp, res, - FIB_LOOKUP_NOREF)) + tb = fib_get_table(net, RT_TABLE_MAIN); + if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) err = 0; rcu_read_unlock(); @@ -249,28 +254,29 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res); static inline int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res) { - if (!net->ipv4.fib_has_custom_rules) { - int err = -ENETUNREACH; - - rcu_read_lock(); - - res->tclassid = 0; - if ((net->ipv4.fib_local && - !fib_table_lookup(net->ipv4.fib_local, flp, res, - FIB_LOOKUP_NOREF)) || - (net->ipv4.fib_main && - !fib_table_lookup(net->ipv4.fib_main, flp, res, - FIB_LOOKUP_NOREF)) || - (net->ipv4.fib_default && - !fib_table_lookup(net->ipv4.fib_default, flp, res, - FIB_LOOKUP_NOREF))) - err = 0; - - rcu_read_unlock(); - - return err; + struct fib_table *tb; + int err; + + if (net->ipv4.fib_has_custom_rules) + return __fib_lookup(net, flp, res); + + rcu_read_lock(); + + res->tclassid = 0; + + for (err = 0; !err; err = -ENETUNREACH) { + tb = rcu_dereference_rtnl(net->ipv4.fib_main); + if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) + break; + + tb = rcu_dereference_rtnl(net->ipv4.fib_default); + if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) + break; } - return __fib_lookup(net, flp, res); + + rcu_read_unlock(); + + return err; } #endif /* CONFIG_IP_MULTIPLE_TABLES */ @@ -294,6 +300,8 @@ static inline int fib_num_tclassid_users(struct net *net) return 0; } #endif +int fib_unmerge(struct net *net); +void fib_flush_external(struct net *net); /* Exported by fib_semantics.c */ int ip_fib_check_default(__be32 gw, struct net_device *dev); @@ -304,7 +312,7 @@ void fib_select_multipath(struct fib_result *res); /* Exported by fib_trie.c */ void fib_trie_init(void); -struct fib_table *fib_trie_table(u32 id); +struct fib_table *fib_trie_table(u32 id, struct fib_table *alias); static inline void fib_combine_itag(u32 *itag, const struct fib_result *res) { diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 2c47061a6954..d8214cb88bbc 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -142,6 +142,7 @@ int ip_tunnel_init(struct net_device *dev); void ip_tunnel_uninit(struct net_device *dev); void ip_tunnel_dellink(struct net_device *dev, struct list_head *head); struct net *ip_tunnel_get_link_net(const struct net_device *dev); +int ip_tunnel_get_iflink(const struct net_device *dev); int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, struct rtnl_link_ops *ops, char *devname); diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 615b20b58545..4e3731ee4eac 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -47,13 +47,13 @@ static inline struct net *skb_net(const struct sk_buff 
*skb) * Start with the most likely hit * End with BUG */ - if (likely(skb->dev && skb->dev->nd_net)) + if (likely(skb->dev && dev_net(skb->dev))) return dev_net(skb->dev); if (skb_dst(skb) && skb_dst(skb)->dev) return dev_net(skb_dst(skb)->dev); WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n", __func__, __LINE__); - if (likely(skb->sk && skb->sk->sk_net)) + if (likely(skb->sk && sock_net(skb->sk))) return sock_net(skb->sk); pr_err("There is no net ptr to find in the skb in %s() line:%d\n", __func__, __LINE__); @@ -71,11 +71,11 @@ static inline struct net *skb_sknet(const struct sk_buff *skb) #ifdef CONFIG_NET_NS #ifdef CONFIG_IP_VS_DEBUG /* Start with the most likely hit */ - if (likely(skb->sk && skb->sk->sk_net)) + if (likely(skb->sk && sock_net(skb->sk))) return sock_net(skb->sk); WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n", __func__, __LINE__); - if (likely(skb->dev && skb->dev->nd_net)) + if (likely(skb->dev && dev_net(skb->dev))) return dev_net(skb->dev); pr_err("There is no net ptr to find in the skb in %s() line:%d\n", __func__, __LINE__); @@ -365,15 +365,15 @@ struct ip_vs_seq { /* counters per cpu */ struct ip_vs_counters { - __u32 conns; /* connections scheduled */ - __u32 inpkts; /* incoming packets */ - __u32 outpkts; /* outgoing packets */ + __u64 conns; /* connections scheduled */ + __u64 inpkts; /* incoming packets */ + __u64 outpkts; /* outgoing packets */ __u64 inbytes; /* incoming bytes */ __u64 outbytes; /* outgoing bytes */ }; /* Stats per cpu */ struct ip_vs_cpu_stats { - struct ip_vs_counters ustats; + struct ip_vs_counters cnt; struct u64_stats_sync syncp; }; @@ -383,23 +383,40 @@ struct ip_vs_estimator { u64 last_inbytes; u64 last_outbytes; - u32 last_conns; - u32 last_inpkts; - u32 last_outpkts; - - u32 cps; - u32 inpps; - u32 outpps; - u32 inbps; - u32 outbps; + u64 last_conns; + u64 last_inpkts; + u64 last_outpkts; + + u64 cps; + u64 inpps; + u64 outpps; + u64 inbps; + u64 outbps; +}; + +/* + * IPVS statistics object, 64-bit kernel version of struct ip_vs_stats_user + */ +struct ip_vs_kstats { + u64 conns; /* connections scheduled */ + u64 inpkts; /* incoming packets */ + u64 outpkts; /* outgoing packets */ + u64 inbytes; /* incoming bytes */ + u64 outbytes; /* outgoing bytes */ + + u64 cps; /* current connection rate */ + u64 inpps; /* current in packet rate */ + u64 outpps; /* current out packet rate */ + u64 inbps; /* current in byte rate */ + u64 outbps; /* current out byte rate */ }; struct ip_vs_stats { - struct ip_vs_stats_user ustats; /* statistics */ + struct ip_vs_kstats kstats; /* kernel statistics */ struct ip_vs_estimator est; /* estimator */ struct ip_vs_cpu_stats __percpu *cpustats; /* per cpu counters */ spinlock_t lock; /* spin lock */ - struct ip_vs_stats_user ustats0; /* reset values */ + struct ip_vs_kstats kstats0; /* reset values */ }; struct dst_entry; @@ -924,6 +941,7 @@ struct netns_ipvs { int sysctl_nat_icmp_send; int sysctl_pmtu_disc; int sysctl_backup_only; + int sysctl_conn_reuse_mode; /* ip_vs_lblc */ int sysctl_lblc_expiration; @@ -1042,6 +1060,11 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs) ipvs->sysctl_backup_only; } +static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_conn_reuse_mode; +} + #else static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) @@ -1109,6 +1132,11 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs) return 0; } +static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs) +{ 
+ return 1; +} + #endif /* IPVS core functions @@ -1388,8 +1416,7 @@ void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts); void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats); void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats); void ip_vs_zero_estimator(struct ip_vs_stats *stats); -void ip_vs_read_estimator(struct ip_vs_stats_user *dst, - struct ip_vs_stats *stats); +void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats); /* Various IPVS packet transmitters (from ip_vs_xmit.c) */ int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 4c9fe224d73b..eec8ad3c9843 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -47,8 +47,6 @@ #define NEXTHDR_MAX 255 - - #define IPV6_DEFAULT_HOPLIMIT 64 #define IPV6_DEFAULT_MCASTHOPS 1 @@ -671,8 +669,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); } -void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt); -void ipv6_proxy_select_ident(struct sk_buff *skb); +void ipv6_select_ident(struct net *net, struct frag_hdr *fhdr, + struct rt6_info *rt); +void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb); int ip6_dst_hoplimit(struct dst_entry *dst); @@ -768,7 +767,7 @@ static inline u8 ip6_tclass(__be32 flowinfo) int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); -int ip6_rcv_finish(struct sk_buff *skb); +int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb); /* * upper-layer output functions @@ -826,6 +825,7 @@ int ip6_input(struct sk_buff *skb); int ip6_mc_input(struct sk_buff *skb); int __ip6_local_out(struct sk_buff *skb); +int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb); int ip6_local_out(struct sk_buff *skb); /* @@ -940,4 +940,8 @@ int ipv6_sysctl_register(void); void ipv6_sysctl_unregister(void); #endif +int ipv6_sock_mc_join(struct sock *sk, int ifindex, + const struct in6_addr *addr); +int ipv6_sock_mc_drop(struct sock *sk, int ifindex, + const struct in6_addr *addr); #endif /* _NET_IPV6_H */ diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h index a830b01baba4..8f81bbbc38fc 100644 --- a/include/net/iw_handler.h +++ b/include/net/iw_handler.h @@ -519,6 +519,17 @@ iwe_stream_add_event(struct iw_request_info *info, char *stream, char *ends, return stream; } +static inline char * +iwe_stream_add_event_check(struct iw_request_info *info, char *stream, + char *ends, struct iw_event *iwe, int event_len) +{ + char *res = iwe_stream_add_event(info, stream, ends, iwe, event_len); + + if (res == stream) + return ERR_PTR(-E2BIG); + return res; +} + /*------------------------------------------------------------------*/ /* * Wrapper to add an short Wireless Event containing a pointer to a @@ -545,6 +556,17 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends, return stream; } +static inline char * +iwe_stream_add_point_check(struct iw_request_info *info, char *stream, + char *ends, struct iw_event *iwe, char *extra) +{ + char *res = iwe_stream_add_point(info, stream, ends, iwe, extra); + + if (res == stream) + return ERR_PTR(-E2BIG); + return res; +} + /*------------------------------------------------------------------*/ /* * Wrapper to add a value to a Wireless Event in a stream of events. 
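Note on the iwe_stream_add_event_check()/iwe_stream_add_point_check() wrappers above: they rely on the convention that the underlying helper returns the stream pointer unchanged when the event does not fit, and turn that case into ERR_PTR(-E2BIG). A user-space sketch of the same pattern with a toy append helper; stream_add()/stream_add_check() are invented, and ERR_PTR()/IS_ERR() are crude local stand-ins for the kernel macros.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Crude stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers. */
#define ERR_PTR(err)	((char *)(long)(err))
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-4095)

/* Toy append: returns the advanced pointer, or @stream unchanged if the
 * item does not fit, the same convention iwe_stream_add_event() uses. */
static char *stream_add(char *stream, char *end, const char *item, size_t len)
{
	if ((size_t)(end - stream) < len)
		return stream;
	memcpy(stream, item, len);
	return stream + len;
}

/* The _check variant: map "did not advance" to an error pointer. */
static char *stream_add_check(char *stream, char *end, const char *item, size_t len)
{
	char *res = stream_add(stream, end, item, len);

	if (res == stream)
		return ERR_PTR(-E2BIG);
	return res;
}

int main(void)
{
	char buf[8];
	char *pos = buf;

	pos = stream_add_check(pos, buf + sizeof(buf), "abcd", 4);
	printf("first add %s\n", IS_ERR(pos) ? "failed" : "ok");	/* ok */
	pos = stream_add_check(pos, buf + sizeof(buf), "toolong", 7);
	printf("second add %s\n", IS_ERR(pos) ? "failed" : "ok");	/* failed */
	return 0;
}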
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index d52914b75331..b4bef1152c05 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -84,6 +84,39 @@ * */ +/** + * DOC: mac80211 software tx queueing + * + * mac80211 provides an optional intermediate queueing implementation designed + * to allow the driver to keep hardware queues short and provide some fairness + * between different stations/interfaces. + * In this model, the driver pulls data frames from the mac80211 queue instead + * of letting mac80211 push them via drv_tx(). + * Other frames (e.g. control or management) are still pushed using drv_tx(). + * + * Drivers indicate that they use this model by implementing the .wake_tx_queue + * driver operation. + * + * Intermediate queues (struct ieee80211_txq) are kept per-sta per-tid, with a + * single per-vif queue for multicast data frames. + * + * The driver is expected to initialize its private per-queue data for stations + * and interfaces in the .add_interface and .sta_add ops. + * + * The driver can't access the queue directly. To dequeue a frame, it calls + * ieee80211_tx_dequeue(). Whenever mac80211 adds a new frame to a queue, it + * calls the .wake_tx_queue driver op. + * + * For AP powersave TIM handling, the driver only needs to indicate if it has + * buffered packets in the driver specific data structures by calling + * ieee80211_sta_set_buffered(). For frames buffered in the ieee80211_txq + * struct, mac80211 sets the appropriate TIM PVB bits and calls + * .release_buffered_frames(). + * In that callback the driver is therefore expected to release its own + * buffered frames and afterwards also frames from the ieee80211_txq (obtained + * via the usual ieee80211_tx_dequeue). + */ + struct device; /** @@ -301,17 +334,86 @@ enum ieee80211_bss_change { #define IEEE80211_BSS_ARP_ADDR_LIST_LEN 4 /** - * enum ieee80211_rssi_event - RSSI threshold event - * An indicator for when RSSI goes below/above a certain threshold. - * @RSSI_EVENT_HIGH: AP's rssi crossed the high threshold set by the driver. - * @RSSI_EVENT_LOW: AP's rssi crossed the low threshold set by the driver. + * enum ieee80211_event_type - event to be notified to the low level driver + * @RSSI_EVENT: AP's rssi crossed the a threshold set by the driver. + * @MLME_EVENT: event related to MLME */ -enum ieee80211_rssi_event { +enum ieee80211_event_type { + RSSI_EVENT, + MLME_EVENT, +}; + +/** + * enum ieee80211_rssi_event_data - relevant when event type is %RSSI_EVENT + * @RSSI_EVENT_HIGH: AP's rssi went below the threshold set by the driver. + * @RSSI_EVENT_LOW: AP's rssi went above the threshold set by the driver. + */ +enum ieee80211_rssi_event_data { RSSI_EVENT_HIGH, RSSI_EVENT_LOW, }; /** + * enum ieee80211_rssi_event - data attached to an %RSSI_EVENT + * @data: See &enum ieee80211_rssi_event_data + */ +struct ieee80211_rssi_event { + enum ieee80211_rssi_event_data data; +}; + +/** + * enum ieee80211_mlme_event_data - relevant when event type is %MLME_EVENT + * @AUTH_EVENT: the MLME operation is authentication + * @ASSOC_EVENT: the MLME operation is association + * @DEAUTH_RX_EVENT: deauth received.. + * @DEAUTH_TX_EVENT: deauth sent. + */ +enum ieee80211_mlme_event_data { + AUTH_EVENT, + ASSOC_EVENT, + DEAUTH_RX_EVENT, + DEAUTH_TX_EVENT, +}; + +/** + * enum ieee80211_mlme_event_status - relevant when event type is %MLME_EVENT + * @MLME_SUCCESS: the MLME operation completed successfully. + * @MLME_DENIED: the MLME operation was denied by the peer. 
+ * @MLME_TIMEOUT: the MLME operation timed out. + */ +enum ieee80211_mlme_event_status { + MLME_SUCCESS, + MLME_DENIED, + MLME_TIMEOUT, +}; + +/** + * struct ieee80211_mlme_event - data attached to an %MLME_EVENT + * @data: See &enum ieee80211_mlme_event_data + * @status: See &enum ieee80211_mlme_event_status + * @reason: the reason code if applicable + */ +struct ieee80211_mlme_event { + enum ieee80211_mlme_event_data data; + enum ieee80211_mlme_event_status status; + u16 reason; +}; + +/** + * struct ieee80211_event - event to be sent to the driver + * @type: The event itself. See &enum ieee80211_event_type. + * @rssi: relevant if &type is %RSSI_EVENT + * @mlme: relevant if &type is %MLME_EVENT + */ +struct ieee80211_event { + enum ieee80211_event_type type; + union { + struct ieee80211_rssi_event rssi; + struct ieee80211_mlme_event mlme; + } u; +}; + +/** * struct ieee80211_bss_conf - holds the BSS's changing parameters * * This structure keeps information about a BSS (and an association @@ -337,12 +439,15 @@ enum ieee80211_rssi_event { * HW flag %IEEE80211_HW_TIMING_BEACON_ONLY is set, then this can * only come from a beacon, but might not become valid until after * association when a beacon is received (which is notified with the - * %BSS_CHANGED_DTIM flag.) + * %BSS_CHANGED_DTIM flag.) See also the important notice about sync_dtim_count. * @sync_device_ts: the device timestamp corresponding to the sync_tsf, * the driver/device can use this to calculate synchronisation - * (see @sync_tsf) + * (see @sync_tsf). See also the important notice about sync_dtim_count. * @sync_dtim_count: Only valid when %IEEE80211_HW_TIMING_BEACON_ONLY * is requested, see @sync_tsf/@sync_device_ts. + * IMPORTANT: These three sync_* parameters may be out of sync + * by the time the driver uses them. The synchronized view is currently + * guaranteed only in certain callbacks. * @beacon_int: beacon interval * @assoc_capability: capabilities taken from assoc resp * @basic_rates: bitmap of basic rates, each bit stands for an @@ -1234,6 +1339,7 @@ enum ieee80211_vif_flags { * monitor interface (if that is requested.) * @drv_priv: data area for driver use, will always be aligned to * sizeof(void *). + * @txq: the multicast data TX queue (if driver uses the TXQ abstraction) */ struct ieee80211_vif { enum nl80211_iftype type; @@ -1245,6 +1351,8 @@ struct ieee80211_vif { u8 cab_queue; u8 hw_queue[IEEE80211_NUM_ACS]; + struct ieee80211_txq *txq; + struct ieee80211_chanctx_conf __rcu *chanctx_conf; u32 driver_flags; @@ -1279,6 +1387,19 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif) struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); /** + * ieee80211_vif_to_wdev - return a wdev struct from a vif + * @vif: the vif to get the wdev for + * + * This can be used by mac80211 drivers with direct cfg80211 APIs + * (like the vendor commands) that need to get the wdev for a vif. + * + * Note that this function may return %NULL if the given vif isn't + * associated with a wdev that the driver knows about (e.g. monitor + * or AP_VLAN interfaces.)
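To make the union layout above concrete, here is a hedged, driver-side sketch of consuming the new event structure; the mydrv_* helpers are hypothetical, and drivers receive such events through the event callback introduced further below in this diff:

#include <net/mac80211.h>

/* Hypothetical driver helpers, not part of mac80211. */
void mydrv_enable_low_rssi_mode(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif);
void mydrv_note_deauth(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		       u16 reason);

static void mydrv_event_callback(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 const struct ieee80211_event *event)
{
	switch (event->type) {
	case RSSI_EVENT:
		/* event->u.rssi carries struct ieee80211_rssi_event */
		if (event->u.rssi.data == RSSI_EVENT_LOW)
			mydrv_enable_low_rssi_mode(hw, vif);
		break;
	case MLME_EVENT:
		/* event->u.mlme carries data, status and reason code */
		if (event->u.mlme.data == DEAUTH_RX_EVENT)
			mydrv_note_deauth(hw, vif, event->u.mlme.reason);
		break;
	}
}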
+ */ +struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif); + +/** * enum ieee80211_key_flags - key flags * * These flags are used for communication about keys between the driver @@ -1472,7 +1593,8 @@ struct ieee80211_sta_rates { * @supp_rates: Bitmap of supported rates (per band) * @ht_cap: HT capabilities of this STA; restricted to our own capabilities * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities - * @wme: indicates whether the STA supports QoS/WME. + * @wme: indicates whether the STA supports QoS/WME (if local devices does, + * otherwise always false) * @drv_priv: data area for driver use, will always be aligned to * sizeof(void *), size is determined in hw information. * @uapsd_queues: bitmap of queues configured for uapsd. Only valid @@ -1488,6 +1610,8 @@ struct ieee80211_sta_rates { * @tdls: indicates whether the STA is a TDLS peer * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only * valid if the STA is a TDLS peer in the first place. + * @mfp: indicates whether the STA uses management frame protection or not. + * @txq: per-TID data TX queues (if driver uses the TXQ abstraction) */ struct ieee80211_sta { u32 supp_rates[IEEE80211_NUM_BANDS]; @@ -1504,6 +1628,9 @@ struct ieee80211_sta { struct ieee80211_sta_rates __rcu *rates; bool tdls; bool tdls_initiator; + bool mfp; + + struct ieee80211_txq *txq[IEEE80211_NUM_TIDS]; /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); @@ -1533,6 +1660,27 @@ struct ieee80211_tx_control { }; /** + * struct ieee80211_txq - Software intermediate tx queue + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @sta: station table entry, %NULL for per-vif queue + * @tid: the TID for this queue (unused for per-vif queue) + * @ac: the AC for this queue + * + * The driver can obtain packets from this queue by calling + * ieee80211_tx_dequeue(). + */ +struct ieee80211_txq { + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; + u8 tid; + u8 ac; + + /* must be last */ + u8 drv_priv[0] __aligned(sizeof(void *)); +}; + +/** * enum ieee80211_hw_flags - hardware flags * * These flags are used to indicate hardware capabilities to @@ -1756,6 +1904,8 @@ enum ieee80211_hw_flags { * within &struct ieee80211_sta. * @chanctx_data_size: size (in bytes) of the drv_priv data area * within &struct ieee80211_chanctx_conf. + * @txq_data_size: size (in bytes) of the drv_priv data area + * within @struct ieee80211_txq. * * @max_rates: maximum number of alternate rate retry stages the hw * can handle. @@ -1804,6 +1954,9 @@ enum ieee80211_hw_flags { * @n_cipher_schemes: a size of an array of cipher schemes definitions. * @cipher_schemes: a pointer to an array of cipher scheme definitions * supported by HW. + * + * @txq_ac_max_pending: maximum number of frames per AC pending in all txq + * entries for a vif. */ struct ieee80211_hw { struct ieee80211_conf conf; @@ -1816,6 +1969,7 @@ struct ieee80211_hw { int vif_data_size; int sta_data_size; int chanctx_data_size; + int txq_data_size; u16 queues; u16 max_listen_interval; s8 max_signal; @@ -1832,6 +1986,7 @@ struct ieee80211_hw { u8 uapsd_max_sp_len; u8 n_cipher_schemes; const struct ieee80211_cipher_scheme *cipher_schemes; + int txq_ac_max_pending; }; /** @@ -2844,8 +2999,9 @@ enum ieee80211_reconfig_type { * @set_bitrate_mask: Set a mask of rates to be used for rate control selection * when transmitting a frame. Currently only legacy rates are handled. * The callback can sleep. 
- * @rssi_callback: Notify driver when the average RSSI goes above/below - * thresholds that were registered previously. The callback can sleep. + * @event_callback: Notify driver about any event in mac80211. See + * &enum ieee80211_event_type for the different types. + * The callback can sleep. * * @release_buffered_frames: Release buffered frames according to the given * parameters. In the case where the driver buffers some frames for @@ -2993,6 +3149,8 @@ enum ieee80211_reconfig_type { * response template is provided, together with the location of the * switch-timing IE within the template. The skb can only be used within * the function call. + * + * @wake_tx_queue: Called when new packets have been added to the queue. */ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, @@ -3141,9 +3299,9 @@ struct ieee80211_ops { bool (*tx_frames_pending)(struct ieee80211_hw *hw); int (*set_bitrate_mask)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask); - void (*rssi_callback)(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum ieee80211_rssi_event rssi_event); + void (*event_callback)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const struct ieee80211_event *event); void (*allow_buffered_frames)(struct ieee80211_hw *hw, struct ieee80211_sta *sta, @@ -3224,6 +3382,9 @@ struct ieee80211_ops { void (*tdls_recv_channel_switch)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_tdls_ch_sw_params *params); + + void (*wake_tx_queue)(struct ieee80211_hw *hw, + struct ieee80211_txq *txq); }; /** @@ -4343,13 +4504,33 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw); * haven't been re-added to the driver yet. * @IEEE80211_IFACE_ITER_RESUME_ALL: During resume, iterate over all * interfaces, even if they haven't been re-added to the driver yet. + * @IEEE80211_IFACE_ITER_ACTIVE: Iterate only active interfaces (netdev is up). */ enum ieee80211_interface_iteration_flags { IEEE80211_IFACE_ITER_NORMAL = 0, IEEE80211_IFACE_ITER_RESUME_ALL = BIT(0), + IEEE80211_IFACE_ITER_ACTIVE = BIT(1), }; /** + * ieee80211_iterate_interfaces - iterate interfaces + * + * This function iterates over the interfaces associated with a given + * hardware and calls the callback for them. This includes active as well as + * inactive interfaces. This function allows the iterator function to sleep. + * Will iterate over a new interface during add_interface(). 
+ * + * @hw: the hardware struct of which the interfaces should be iterated over + * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags + * @iterator: the iterator function to call + * @data: first argument of the iterator function + */ +void ieee80211_iterate_interfaces(struct ieee80211_hw *hw, u32 iter_flags, + void (*iterator)(void *data, u8 *mac, + struct ieee80211_vif *vif), + void *data); + +/** * ieee80211_iterate_active_interfaces - iterate active interfaces * * This function iterates over the interfaces associated with a given @@ -4364,11 +4545,16 @@ enum ieee80211_interface_iteration_flags { * @iterator: the iterator function to call * @data: first argument of the iterator function */ -void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw, - u32 iter_flags, - void (*iterator)(void *data, u8 *mac, - struct ieee80211_vif *vif), - void *data); +static inline void +ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw, u32 iter_flags, + void (*iterator)(void *data, u8 *mac, + struct ieee80211_vif *vif), + void *data) +{ + ieee80211_iterate_interfaces(hw, + iter_flags | IEEE80211_IFACE_ITER_ACTIVE, + iterator, data); +} /** * ieee80211_iterate_active_interfaces_atomic - iterate active interfaces @@ -5194,30 +5380,13 @@ int ieee80211_reserve_tid(struct ieee80211_sta *sta, u8 tid); void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid); /** - * ieee80211_ie_split - split an IE buffer according to ordering - * - * @ies: the IE buffer - * @ielen: the length of the IE buffer - * @ids: an array with element IDs that are allowed before - * the split - * @n_ids: the size of the element ID array - * @offset: offset where to start splitting in the buffer - * - * This function splits an IE buffer by updating the @offset - * variable to point to the location where the buffer should be - * split. + * ieee80211_tx_dequeue - dequeue a packet from a software tx queue * - * It assumes that the given IE buffer is well-formed, this - * has to be guaranteed by the caller! - * - * It also assumes that the IEs in the buffer are ordered - * correctly, if not the result of using this function will not - * be ordered correctly either, i.e. it does no reordering. + * @hw: pointer as obtained from ieee80211_alloc_hw() + * @txq: pointer obtained from station or virtual interface * - * The function returns the offset where the next part of the - * buffer starts, which may be @ielen if the entire (remainder) - * of the buffer should be used. + * Returns the skb if successful, %NULL if no frame was available. 
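A rough sketch of the pull model described in the DOC section above, i.e. a .wake_tx_queue implementation that drains the software queue with ieee80211_tx_dequeue(); the mydrv_* helpers for hardware-ring bookkeeping are hypothetical:

#include <net/mac80211.h>

/* Hypothetical driver helpers, not part of mac80211. */
bool mydrv_hw_has_room(struct ieee80211_hw *hw, u8 ac);
void mydrv_submit_frame(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, struct sk_buff *skb);

static void mydrv_wake_tx_queue(struct ieee80211_hw *hw,
				struct ieee80211_txq *txq)
{
	struct sk_buff *skb;

	/* Pull frames while the hardware queue for this AC has room. */
	while (mydrv_hw_has_room(hw, txq->ac)) {
		skb = ieee80211_tx_dequeue(hw, txq);
		if (!skb)
			break;
		/* txq->sta is %NULL for the per-vif multicast queue. */
		mydrv_submit_frame(hw, txq->vif, txq->sta, skb);
	}
}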
*/ -size_t ieee80211_ie_split(const u8 *ies, size_t ielen, - const u8 *ids, int n_ids, size_t offset); +struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, + struct ieee80211_txq *txq); #endif /* MAC80211_H */ diff --git a/include/net/mac802154.h b/include/net/mac802154.h index 850647811749..e18e7fd43f47 100644 --- a/include/net/mac802154.h +++ b/include/net/mac802154.h @@ -19,6 +19,7 @@ #include <net/af_ieee802154.h> #include <linux/ieee802154.h> #include <linux/skbuff.h> +#include <linux/unaligned/memmove.h> #include <net/cfg802154.h> @@ -212,7 +213,7 @@ struct ieee802154_ops { int (*set_hw_addr_filt)(struct ieee802154_hw *hw, struct ieee802154_hw_addr_filt *filt, unsigned long changed); - int (*set_txpower)(struct ieee802154_hw *hw, int db); + int (*set_txpower)(struct ieee802154_hw *hw, s8 dbm); int (*set_lbt)(struct ieee802154_hw *hw, bool on); int (*set_cca_mode)(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca); @@ -233,9 +234,7 @@ struct ieee802154_ops { */ static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src) { - __le64 tmp = (__force __le64)swab64p(be64_src); - - memcpy(le64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN); + __put_unaligned_memmove64(swab64p(be64_src), le64_dst); } /** @@ -245,12 +244,10 @@ static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src) */ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src) { - __be64 tmp = (__force __be64)swab64p(le64_src); - - memcpy(be64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN); + __put_unaligned_memmove64(swab64p(le64_src), be64_dst); } -/* Basic interface to register ieee802154 hwice */ +/* Basic interface to register ieee802154 device */ struct ieee802154_hw * ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops); void ieee802154_free_hw(struct ieee802154_hw *hw); diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 6bbda34d5e59..b3a7751251b4 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -156,24 +156,7 @@ static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, _ static inline struct neighbour *__ipv6_neigh_lookup_noref(struct net_device *dev, const void *pkey) { - struct neigh_hash_table *nht; - const u32 *p32 = pkey; - struct neighbour *n; - u32 hash_val; - - nht = rcu_dereference_bh(nd_tbl.nht); - hash_val = ndisc_hashfn(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift); - for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); - n != NULL; - n = rcu_dereference_bh(n->next)) { - u32 *n32 = (u32 *) n->primary_key; - if (n->dev == dev && - ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) | - (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0) - return n; - } - - return NULL; + return ___neigh_lookup_noref(&nd_tbl, neigh_key_eq128, ndisc_hashfn, pkey, dev); } static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, const void *pkey) diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 76f708486aae..bd33e66f49aa 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -42,6 +42,7 @@ enum { NEIGH_VAR_MCAST_PROBES, NEIGH_VAR_UCAST_PROBES, NEIGH_VAR_APP_PROBES, + NEIGH_VAR_MCAST_REPROBES, NEIGH_VAR_RETRANS_TIME, NEIGH_VAR_BASE_REACHABLE_TIME, NEIGH_VAR_DELAY_PROBE_TIME, @@ -65,9 +66,7 @@ enum { }; struct neigh_parms { -#ifdef CONFIG_NET_NS - struct net *net; -#endif + possible_net_t net; struct net_device *dev; struct list_head list; int (*neigh_setup)(struct neighbour *); @@ -167,9 +166,7 @@ struct neigh_ops { struct pneigh_entry { struct 
pneigh_entry *next; -#ifdef CONFIG_NET_NS - struct net *net; -#endif + possible_net_t net; struct net_device *dev; u8 flags; u8 key[0]; @@ -193,9 +190,11 @@ struct neigh_table { int family; int entry_size; int key_len; + __be16 protocol; __u32 (*hash)(const void *pkey, const struct net_device *dev, __u32 *hash_rnd); + bool (*key_eq)(const struct neighbour *, const void *pkey); int (*constructor)(struct neighbour *); int (*pconstructor)(struct pneigh_entry *); void (*pdestructor)(struct pneigh_entry *); @@ -224,6 +223,7 @@ enum { NEIGH_ND_TABLE = 1, NEIGH_DN_TABLE = 2, NEIGH_NR_TABLES, + NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */ }; static inline int neigh_parms_family(struct neigh_parms *p) @@ -246,6 +246,57 @@ static inline void *neighbour_priv(const struct neighbour *n) #define NEIGH_UPDATE_F_ISROUTER 0x40000000 #define NEIGH_UPDATE_F_ADMIN 0x80000000 + +static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey) +{ + return *(const u16 *)n->primary_key == *(const u16 *)pkey; +} + +static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey) +{ + return *(const u32 *)n->primary_key == *(const u32 *)pkey; +} + +static inline bool neigh_key_eq128(const struct neighbour *n, const void *pkey) +{ + const u32 *n32 = (const u32 *)n->primary_key; + const u32 *p32 = pkey; + + return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) | + (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0; +} + +static inline struct neighbour *___neigh_lookup_noref( + struct neigh_table *tbl, + bool (*key_eq)(const struct neighbour *n, const void *pkey), + __u32 (*hash)(const void *pkey, + const struct net_device *dev, + __u32 *hash_rnd), + const void *pkey, + struct net_device *dev) +{ + struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht); + struct neighbour *n; + u32 hash_val; + + hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift); + for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); + n != NULL; + n = rcu_dereference_bh(n->next)) { + if (n->dev == dev && key_eq(n, pkey)) + return n; + } + + return NULL; +} + +static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl, + const void *pkey, + struct net_device *dev) +{ + return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev); +} + void neigh_table_init(int index, struct neigh_table *tbl); int neigh_table_clear(int index, struct neigh_table *tbl); struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, @@ -268,7 +319,6 @@ void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev); int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb); int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb); -int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb); int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb); struct neighbour *neigh_event_ns(struct neigh_table *tbl, u8 *lladdr, void *saddr, @@ -306,6 +356,7 @@ void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie); void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *)); +int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *); void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *)); @@ -459,4 +510,6 @@ static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, memcpy(dst, n->ha, dev->addr_len); } while 
(read_seqretry(&n->ha_lock, seq)); } + + #endif diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 36faf4990c4b..f733656404de 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -26,6 +26,7 @@ #endif #include <net/netns/nftables.h> #include <net/netns/xfrm.h> +#include <net/netns/mpls.h> #include <linux/ns_common.h> struct user_namespace; @@ -48,13 +49,10 @@ struct net { atomic_t count; /* To decided when the network * namespace should be shut down. */ -#ifdef NETNS_REFCNT_DEBUG - atomic_t use_count; /* To track references we - * destroy on demand - */ -#endif spinlock_t rules_mod_lock; + atomic64_t cookie_gen; + struct list_head list; /* list of network namespaces */ struct list_head cleanup_list; /* namespaces on death row */ struct list_head exit_list; /* Use only net_mutex */ @@ -130,6 +128,9 @@ struct net { #if IS_ENABLED(CONFIG_IP_VS) struct netns_ipvs *ipvs; #endif +#if IS_ENABLED(CONFIG_MPLS) + struct netns_mpls mpls; +#endif struct sock *diag_nlsk; atomic_t fnhe_genid; }; @@ -230,48 +231,27 @@ int net_eq(const struct net *net1, const struct net *net2) #endif -#ifdef NETNS_REFCNT_DEBUG -static inline struct net *hold_net(struct net *net) -{ - if (net) - atomic_inc(&net->use_count); - return net; -} - -static inline void release_net(struct net *net) -{ - if (net) - atomic_dec(&net->use_count); -} -#else -static inline struct net *hold_net(struct net *net) -{ - return net; -} - -static inline void release_net(struct net *net) -{ -} -#endif - +typedef struct { #ifdef CONFIG_NET_NS + struct net *net; +#endif +} possible_net_t; -static inline void write_pnet(struct net **pnet, struct net *net) +static inline void write_pnet(possible_net_t *pnet, struct net *net) { - *pnet = net; +#ifdef CONFIG_NET_NS + pnet->net = net; +#endif } -static inline struct net *read_pnet(struct net * const *pnet) +static inline struct net *read_pnet(const possible_net_t *pnet) { - return *pnet; -} - +#ifdef CONFIG_NET_NS + return pnet->net; #else - -#define write_pnet(pnet, net) do { (void)(net);} while (0) -#define read_pnet(pnet) (&init_net) - + return &init_net; #endif +} #define for_each_net(VAR) \ list_for_each_entry(VAR, &net_namespace_list, list) diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h index 03e928a55229..77862c3645f0 100644 --- a/include/net/netfilter/ipv4/nf_reject.h +++ b/include/net/netfilter/ipv4/nf_reject.h @@ -5,18 +5,14 @@ #include <net/ip.h> #include <net/icmp.h> -static inline void nf_send_unreach(struct sk_buff *skb_in, int code) -{ - icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0); -} - +void nf_send_unreach(struct sk_buff *skb_in, int code, int hook); void nf_send_reset(struct sk_buff *oldskb, int hook); const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb, struct tcphdr *_oth, int hook); struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb, - __be16 protocol, int ttl); + __u8 protocol, int ttl); void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb, const struct tcphdr *oth); diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h index 23216d48abf9..0ea4fa37db16 100644 --- a/include/net/netfilter/ipv6/nf_reject.h +++ b/include/net/netfilter/ipv6/nf_reject.h @@ -3,15 +3,8 @@ #include <linux/icmpv6.h> -static inline void -nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code, - unsigned int hooknum) -{ - if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == 
NULL) - skb_in->dev = net->loopback_dev; - - icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0); -} +void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code, + unsigned int hooknum); void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook); @@ -20,7 +13,7 @@ const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb, unsigned int *otcplen, int hook); struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb, - __be16 protocol, int hoplimit); + __u8 protocol, int hoplimit); void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb, const struct tcphdr *oth, unsigned int otcplen); diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 74f271a172dd..095433b8a8b0 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -95,9 +95,8 @@ struct nf_conn { /* Timer function; drops refcnt when it goes off. */ struct timer_list timeout; -#ifdef CONFIG_NET_NS - struct net *ct_net; -#endif + possible_net_t ct_net; + /* all members below initialized via memset */ u8 __nfct_init_offset[0]; diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h index 340c013795a4..a3127325f624 100644 --- a/include/net/netfilter/nf_nat_l3proto.h +++ b/include/net/netfilter/nf_nat_l3proto.h @@ -44,40 +44,32 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct, unsigned int hooknum); unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, unsigned int (*do_chain)(const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, struct nf_conn *ct)); unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, unsigned int (*do_chain)(const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, struct nf_conn *ct)); unsigned int nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, unsigned int (*do_chain)(const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, struct nf_conn *ct)); unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, unsigned int (*do_chain)(const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, struct nf_conn *ct)); int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct, @@ -85,40 +77,32 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct, unsigned int hooknum, unsigned int hdrlen); unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, unsigned int (*do_chain)(const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct 
net_device *out, + const struct nf_hook_state *state, struct nf_conn *ct)); unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, unsigned int (*do_chain)(const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, struct nf_conn *ct)); unsigned int nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, unsigned int (*do_chain)(const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, struct nf_conn *ct)); unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, unsigned int (*do_chain)(const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, + const struct nf_hook_state *state, struct nf_conn *ct)); #endif /* _NF_NAT_L3PROTO_H */ diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h index 84a53d780306..d81d584157e1 100644 --- a/include/net/netfilter/nf_queue.h +++ b/include/net/netfilter/nf_queue.h @@ -12,12 +12,8 @@ struct nf_queue_entry { unsigned int id; struct nf_hook_ops *elem; - u_int8_t pf; + struct nf_hook_state state; u16 size; /* sizeof(entry) + saved route keys */ - unsigned int hook; - struct net_device *indev; - struct net_device *outdev; - int (*okfn)(struct sk_buff *); /* extra space to store route keys */ }; diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index decb9a095ae7..e6bcf55dcf20 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1,6 +1,7 @@ #ifndef _NET_NF_TABLES_H #define _NET_NF_TABLES_H +#include <linux/module.h> #include <linux/list.h> #include <linux/netfilter.h> #include <linux/netfilter/nfnetlink.h> @@ -26,40 +27,53 @@ struct nft_pktinfo { static inline void nft_set_pktinfo(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out) + const struct nf_hook_state *state) { pkt->skb = skb; - pkt->in = pkt->xt.in = in; - pkt->out = pkt->xt.out = out; + pkt->in = pkt->xt.in = state->in; + pkt->out = pkt->xt.out = state->out; pkt->ops = ops; pkt->xt.hooknum = ops->hooknum; pkt->xt.family = ops->pf; } +/** + * struct nft_verdict - nf_tables verdict + * + * @code: nf_tables/netfilter verdict code + * @chain: destination chain for NFT_JUMP/NFT_GOTO + */ +struct nft_verdict { + u32 code; + struct nft_chain *chain; +}; + struct nft_data { union { - u32 data[4]; - struct { - u32 verdict; - struct nft_chain *chain; - }; + u32 data[4]; + struct nft_verdict verdict; }; } __attribute__((aligned(__alignof__(u64)))); -static inline int nft_data_cmp(const struct nft_data *d1, - const struct nft_data *d2, - unsigned int len) -{ - return memcmp(d1->data, d2->data, len); -} +/** + * struct nft_regs - nf_tables register set + * + * @data: data registers + * @verdict: verdict register + * + * The first four data registers alias to the verdict register. 
+ */ +struct nft_regs { + union { + u32 data[20]; + struct nft_verdict verdict; + }; +}; -static inline void nft_data_copy(struct nft_data *dst, - const struct nft_data *src) +static inline void nft_data_copy(u32 *dst, const struct nft_data *src, + unsigned int len) { - BUILD_BUG_ON(__alignof__(*dst) != __alignof__(u64)); - *(u64 *)&dst->data[0] = *(u64 *)&src->data[0]; - *(u64 *)&dst->data[2] = *(u64 *)&src->data[2]; + memcpy(dst, src, len); } static inline void nft_data_debug(const struct nft_data *data) @@ -97,7 +111,8 @@ struct nft_data_desc { unsigned int len; }; -int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data, +int nft_data_init(const struct nft_ctx *ctx, + struct nft_data *data, unsigned int size, struct nft_data_desc *desc, const struct nlattr *nla); void nft_data_uninit(const struct nft_data *data, enum nft_data_types type); int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data, @@ -113,12 +128,14 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type) return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1; } -int nft_validate_input_register(enum nft_registers reg); -int nft_validate_output_register(enum nft_registers reg); -int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg, - const struct nft_data *data, - enum nft_data_types type); +unsigned int nft_parse_register(const struct nlattr *attr); +int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg); +int nft_validate_register_load(enum nft_registers reg, unsigned int len); +int nft_validate_register_store(const struct nft_ctx *ctx, + enum nft_registers reg, + const struct nft_data *data, + enum nft_data_types type, unsigned int len); /** * struct nft_userdata - user defined data associated with an object @@ -138,19 +155,15 @@ struct nft_userdata { /** * struct nft_set_elem - generic representation of set elements * - * @cookie: implementation specific element cookie * @key: element key - * @data: element data (maps only) - * @flags: element flags (end of interval) - * - * The cookie can be used to store a handle to the element for subsequent - * removal. 
+ * @priv: element private data and extensions */ struct nft_set_elem { - void *cookie; - struct nft_data key; - struct nft_data data; - u32 flags; + union { + u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)]; + struct nft_data val; + } key; + void *priv; }; struct nft_set; @@ -202,11 +215,16 @@ struct nft_set_estimate { enum nft_set_class class; }; +struct nft_set_ext; +struct nft_expr; + /** * struct nft_set_ops - nf_tables set operations * * @lookup: look up an element within the set * @insert: insert new element into set + * @activate: activate new element in the next generation + * @deactivate: deactivate element in the next generation * @remove: remove element from set * @walk: iterate over all set elemeennts * @privsize: function to return size of set private data @@ -214,16 +232,28 @@ struct nft_set_estimate { * @destroy: destroy private data of set instance * @list: nf_tables_set_ops list node * @owner: module reference + * @elemsize: element private size * @features: features supported by the implementation */ struct nft_set_ops { bool (*lookup)(const struct nft_set *set, - const struct nft_data *key, - struct nft_data *data); - int (*get)(const struct nft_set *set, - struct nft_set_elem *elem); + const u32 *key, + const struct nft_set_ext **ext); + bool (*update)(struct nft_set *set, + const u32 *key, + void *(*new)(struct nft_set *, + const struct nft_expr *, + struct nft_regs *), + const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_set_ext **ext); + int (*insert)(const struct nft_set *set, const struct nft_set_elem *elem); + void (*activate)(const struct nft_set *set, + const struct nft_set_elem *elem); + void * (*deactivate)(const struct nft_set *set, + const struct nft_set_elem *elem); void (*remove)(const struct nft_set *set, const struct nft_set_elem *elem); void (*walk)(const struct nft_ctx *ctx, @@ -241,6 +271,7 @@ struct nft_set_ops { struct list_head list; struct module *owner; + unsigned int elemsize; u32 features; }; @@ -257,8 +288,12 @@ void nft_unregister_set(struct nft_set_ops *ops); * @dtype: data type (verdict or numeric type defined by userspace) * @size: maximum set size * @nelems: number of elements + * @ndeact: number of deactivated elements queued for removal + * @timeout: default timeout value in msecs + * @gc_int: garbage collection interval in msecs * @policy: set parameterization (see enum nft_set_policies) * @ops: set ops + * @pnet: network namespace * @flags: set flags * @klen: key length * @dlen: data length @@ -271,10 +306,14 @@ struct nft_set { u32 ktype; u32 dtype; u32 size; - u32 nelems; + atomic_t nelems; + u32 ndeact; + u64 timeout; + u32 gc_int; u16 policy; /* runtime data below here */ const struct nft_set_ops *ops ____cacheline_aligned; + possible_net_t pnet; u16 flags; u8 klen; u8 dlen; @@ -287,16 +326,27 @@ static inline void *nft_set_priv(const struct nft_set *set) return (void *)set->data; } +static inline struct nft_set *nft_set_container_of(const void *priv) +{ + return (void *)priv - offsetof(struct nft_set, data); +} + struct nft_set *nf_tables_set_lookup(const struct nft_table *table, const struct nlattr *nla); struct nft_set *nf_tables_set_lookup_byid(const struct net *net, const struct nlattr *nla); +static inline unsigned long nft_set_gc_interval(const struct nft_set *set) +{ + return set->gc_int ? 
msecs_to_jiffies(set->gc_int) : HZ; +} + /** * struct nft_set_binding - nf_tables set binding * * @list: set bindings list node * @chain: chain containing the rule bound to the set + * @flags: set action flags * * A set binding contains all information necessary for validation * of new elements added to a bound set. @@ -304,6 +354,7 @@ struct nft_set *nf_tables_set_lookup_byid(const struct net *net, struct nft_set_binding { struct list_head list; const struct nft_chain *chain; + u32 flags; }; int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, @@ -311,6 +362,215 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_binding *binding); +/** + * enum nft_set_extensions - set extension type IDs + * + * @NFT_SET_EXT_KEY: element key + * @NFT_SET_EXT_DATA: mapping data + * @NFT_SET_EXT_FLAGS: element flags + * @NFT_SET_EXT_TIMEOUT: element timeout + * @NFT_SET_EXT_EXPIRATION: element expiration time + * @NFT_SET_EXT_USERDATA: user data associated with the element + * @NFT_SET_EXT_EXPR: expression assiociated with the element + * @NFT_SET_EXT_NUM: number of extension types + */ +enum nft_set_extensions { + NFT_SET_EXT_KEY, + NFT_SET_EXT_DATA, + NFT_SET_EXT_FLAGS, + NFT_SET_EXT_TIMEOUT, + NFT_SET_EXT_EXPIRATION, + NFT_SET_EXT_USERDATA, + NFT_SET_EXT_EXPR, + NFT_SET_EXT_NUM +}; + +/** + * struct nft_set_ext_type - set extension type + * + * @len: fixed part length of the extension + * @align: alignment requirements of the extension + */ +struct nft_set_ext_type { + u8 len; + u8 align; +}; + +extern const struct nft_set_ext_type nft_set_ext_types[]; + +/** + * struct nft_set_ext_tmpl - set extension template + * + * @len: length of extension area + * @offset: offsets of individual extension types + */ +struct nft_set_ext_tmpl { + u16 len; + u8 offset[NFT_SET_EXT_NUM]; +}; + +/** + * struct nft_set_ext - set extensions + * + * @genmask: generation mask + * @offset: offsets of individual extension types + * @data: beginning of extension data + */ +struct nft_set_ext { + u8 genmask; + u8 offset[NFT_SET_EXT_NUM]; + char data[0]; +}; + +static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl) +{ + memset(tmpl, 0, sizeof(*tmpl)); + tmpl->len = sizeof(struct nft_set_ext); +} + +static inline void nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id, + unsigned int len) +{ + tmpl->len = ALIGN(tmpl->len, nft_set_ext_types[id].align); + BUG_ON(tmpl->len > U8_MAX); + tmpl->offset[id] = tmpl->len; + tmpl->len += nft_set_ext_types[id].len + len; +} + +static inline void nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id) +{ + nft_set_ext_add_length(tmpl, id, 0); +} + +static inline void nft_set_ext_init(struct nft_set_ext *ext, + const struct nft_set_ext_tmpl *tmpl) +{ + memcpy(ext->offset, tmpl->offset, sizeof(ext->offset)); +} + +static inline bool __nft_set_ext_exists(const struct nft_set_ext *ext, u8 id) +{ + return !!ext->offset[id]; +} + +static inline bool nft_set_ext_exists(const struct nft_set_ext *ext, u8 id) +{ + return ext && __nft_set_ext_exists(ext, id); +} + +static inline void *nft_set_ext(const struct nft_set_ext *ext, u8 id) +{ + return (void *)ext + ext->offset[id]; +} + +static inline struct nft_data *nft_set_ext_key(const struct nft_set_ext *ext) +{ + return nft_set_ext(ext, NFT_SET_EXT_KEY); +} + +static inline struct nft_data *nft_set_ext_data(const struct nft_set_ext *ext) +{ + return nft_set_ext(ext, NFT_SET_EXT_DATA); +} + +static inline 
u8 *nft_set_ext_flags(const struct nft_set_ext *ext) +{ + return nft_set_ext(ext, NFT_SET_EXT_FLAGS); +} + +static inline u64 *nft_set_ext_timeout(const struct nft_set_ext *ext) +{ + return nft_set_ext(ext, NFT_SET_EXT_TIMEOUT); +} + +static inline unsigned long *nft_set_ext_expiration(const struct nft_set_ext *ext) +{ + return nft_set_ext(ext, NFT_SET_EXT_EXPIRATION); +} + +static inline struct nft_userdata *nft_set_ext_userdata(const struct nft_set_ext *ext) +{ + return nft_set_ext(ext, NFT_SET_EXT_USERDATA); +} + +static inline struct nft_expr *nft_set_ext_expr(const struct nft_set_ext *ext) +{ + return nft_set_ext(ext, NFT_SET_EXT_EXPR); +} + +static inline bool nft_set_elem_expired(const struct nft_set_ext *ext) +{ + return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) && + time_is_before_eq_jiffies(*nft_set_ext_expiration(ext)); +} + +static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set, + void *elem) +{ + return elem + set->ops->elemsize; +} + +void *nft_set_elem_init(const struct nft_set *set, + const struct nft_set_ext_tmpl *tmpl, + const u32 *key, const u32 *data, + u64 timeout, gfp_t gfp); +void nft_set_elem_destroy(const struct nft_set *set, void *elem); + +/** + * struct nft_set_gc_batch_head - nf_tables set garbage collection batch + * + * @rcu: rcu head + * @set: set the elements belong to + * @cnt: count of elements + */ +struct nft_set_gc_batch_head { + struct rcu_head rcu; + const struct nft_set *set; + unsigned int cnt; +}; + +#define NFT_SET_GC_BATCH_SIZE ((PAGE_SIZE - \ + sizeof(struct nft_set_gc_batch_head)) / \ + sizeof(void *)) + +/** + * struct nft_set_gc_batch - nf_tables set garbage collection batch + * + * @head: GC batch head + * @elems: garbage collection elements + */ +struct nft_set_gc_batch { + struct nft_set_gc_batch_head head; + void *elems[NFT_SET_GC_BATCH_SIZE]; +}; + +struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set, + gfp_t gfp); +void nft_set_gc_batch_release(struct rcu_head *rcu); + +static inline void nft_set_gc_batch_complete(struct nft_set_gc_batch *gcb) +{ + if (gcb != NULL) + call_rcu(&gcb->head.rcu, nft_set_gc_batch_release); +} + +static inline struct nft_set_gc_batch * +nft_set_gc_batch_check(const struct nft_set *set, struct nft_set_gc_batch *gcb, + gfp_t gfp) +{ + if (gcb != NULL) { + if (gcb->head.cnt + 1 < ARRAY_SIZE(gcb->elems)) + return gcb; + nft_set_gc_batch_complete(gcb); + } + return nft_set_gc_batch_alloc(set, gfp); +} + +static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb, + void *elem) +{ + gcb->elems[gcb->head.cnt++] = elem; +} /** * struct nft_expr_type - nf_tables expression type @@ -323,6 +583,7 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, * @policy: netlink attribute policy * @maxattr: highest netlink attribute number * @family: address family for AF-specific types + * @flags: expression type flags */ struct nft_expr_type { const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *, @@ -334,8 +595,11 @@ struct nft_expr_type { const struct nla_policy *policy; unsigned int maxattr; u8 family; + u8 flags; }; +#define NFT_EXPR_STATEFUL 0x1 + /** * struct nft_expr_ops - nf_tables expression operations * @@ -351,7 +615,7 @@ struct nft_expr_type { struct nft_expr; struct nft_expr_ops { void (*eval)(const struct nft_expr *expr, - struct nft_data data[NFT_REG_MAX + 1], + struct nft_regs *regs, const struct nft_pktinfo *pkt); unsigned int size; @@ -389,6 +653,18 @@ static inline void *nft_expr_priv(const struct nft_expr *expr) 
return (void *)expr->data; } +struct nft_expr *nft_expr_init(const struct nft_ctx *ctx, + const struct nlattr *nla); +void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr); +int nft_expr_dump(struct sk_buff *skb, unsigned int attr, + const struct nft_expr *expr); + +static inline void nft_expr_clone(struct nft_expr *dst, struct nft_expr *src) +{ + __module_get(src->ops->type->owner); + memcpy(dst, src, src->ops->size); +} + /** * struct nft_rule - nf_tables rule * @@ -409,74 +685,6 @@ struct nft_rule { __attribute__((aligned(__alignof__(struct nft_expr)))); }; -/** - * struct nft_trans - nf_tables object update in transaction - * - * @list: used internally - * @msg_type: message type - * @ctx: transaction context - * @data: internal information related to the transaction - */ -struct nft_trans { - struct list_head list; - int msg_type; - struct nft_ctx ctx; - char data[0]; -}; - -struct nft_trans_rule { - struct nft_rule *rule; -}; - -#define nft_trans_rule(trans) \ - (((struct nft_trans_rule *)trans->data)->rule) - -struct nft_trans_set { - struct nft_set *set; - u32 set_id; -}; - -#define nft_trans_set(trans) \ - (((struct nft_trans_set *)trans->data)->set) -#define nft_trans_set_id(trans) \ - (((struct nft_trans_set *)trans->data)->set_id) - -struct nft_trans_chain { - bool update; - char name[NFT_CHAIN_MAXNAMELEN]; - struct nft_stats __percpu *stats; - u8 policy; -}; - -#define nft_trans_chain_update(trans) \ - (((struct nft_trans_chain *)trans->data)->update) -#define nft_trans_chain_name(trans) \ - (((struct nft_trans_chain *)trans->data)->name) -#define nft_trans_chain_stats(trans) \ - (((struct nft_trans_chain *)trans->data)->stats) -#define nft_trans_chain_policy(trans) \ - (((struct nft_trans_chain *)trans->data)->policy) - -struct nft_trans_table { - bool update; - bool enable; -}; - -#define nft_trans_table_update(trans) \ - (((struct nft_trans_table *)trans->data)->update) -#define nft_trans_table_enable(trans) \ - (((struct nft_trans_table *)trans->data)->enable) - -struct nft_trans_elem { - struct nft_set *set; - struct nft_set_elem elem; -}; - -#define nft_trans_elem_set(trans) \ - (((struct nft_trans_elem *)trans->data)->set) -#define nft_trans_elem(trans) \ - (((struct nft_trans_elem *)trans->data)->elem) - static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule) { return (struct nft_expr *)&rule->data[0]; @@ -517,7 +725,6 @@ enum nft_chain_flags { * * @rules: list of rules in the chain * @list: used internally - * @net: net namespace that this chain belongs to * @table: table that this chain belongs to * @handle: chain handle * @use: number of jump references to this chain @@ -528,7 +735,6 @@ enum nft_chain_flags { struct nft_chain { struct list_head rules; struct list_head list; - struct net *net; struct nft_table *table; u64 handle; u32 use; @@ -544,6 +750,25 @@ enum nft_chain_type { NFT_CHAIN_T_MAX }; +/** + * struct nf_chain_type - nf_tables chain type info + * + * @name: name of the type + * @type: numeric identifier + * @family: address family + * @owner: module owner + * @hook_mask: mask of valid hooks + * @hooks: hookfn overrides + */ +struct nf_chain_type { + const char *name; + enum nft_chain_type type; + int family; + struct module *owner; + unsigned int hook_mask; + nf_hookfn *hooks[NF_MAX_HOOKS]; +}; + int nft_chain_validate_dependency(const struct nft_chain *chain, enum nft_chain_type type); int nft_chain_validate_hooks(const struct nft_chain *chain, @@ -561,6 +786,7 @@ struct nft_stats { * struct nft_base_chain - 
nf_tables base chain * * @ops: netfilter hook ops + * @pnet: net namespace that this chain belongs to * @type: chain type * @policy: default policy * @stats: per-cpu chain stats @@ -568,6 +794,7 @@ struct nft_stats { */ struct nft_base_chain { struct nf_hook_ops ops[NFT_HOOK_OPS_MAX]; + possible_net_t pnet; const struct nf_chain_type *type; u8 policy; struct nft_stats __percpu *stats; @@ -600,7 +827,7 @@ struct nft_table { u64 hgenerator; u32 use; u16 flags; - char name[]; + char name[NFT_TABLE_MAXNAMELEN]; }; /** @@ -630,25 +857,6 @@ struct nft_af_info { int nft_register_afinfo(struct net *, struct nft_af_info *); void nft_unregister_afinfo(struct nft_af_info *); -/** - * struct nf_chain_type - nf_tables chain type info - * - * @name: name of the type - * @type: numeric identifier - * @family: address family - * @owner: module owner - * @hook_mask: mask of valid hooks - * @hooks: hookfn overrides - */ -struct nf_chain_type { - const char *name; - enum nft_chain_type type; - int family; - struct module *owner; - unsigned int hook_mask; - nf_hookfn *hooks[NF_MAX_HOOKS]; -}; - int nft_register_chain_type(const struct nf_chain_type *); void nft_unregister_chain_type(const struct nf_chain_type *); @@ -673,4 +881,153 @@ void nft_unregister_expr(struct nft_expr_type *); #define MODULE_ALIAS_NFT_SET() \ MODULE_ALIAS("nft-set") +/* + * The gencursor defines two generations, the currently active and the + * next one. Objects contain a bitmask of 2 bits specifying the generations + * they're active in. A set bit means they're inactive in the generation + * represented by that bit. + * + * New objects start out as inactive in the current and active in the + * next generation. When committing the ruleset the bitmask is cleared, + * meaning they're active in all generations. When removing an object, + * it is set inactive in the next generation. After committing the ruleset, + * the objects are removed. + */ +static inline unsigned int nft_gencursor_next(const struct net *net) +{ + return net->nft.gencursor + 1 == 1 ? 1 : 0; +} + +static inline u8 nft_genmask_next(const struct net *net) +{ + return 1 << nft_gencursor_next(net); +} + +static inline u8 nft_genmask_cur(const struct net *net) +{ + /* Use ACCESS_ONCE() to prevent refetching the value for atomicity */ + return 1 << ACCESS_ONCE(net->nft.gencursor); +} + +#define NFT_GENMASK_ANY ((1 << 0) | (1 << 1)) + +/* + * Set element transaction helpers + */ + +static inline bool nft_set_elem_active(const struct nft_set_ext *ext, + u8 genmask) +{ + return !(ext->genmask & genmask); +} + +static inline void nft_set_elem_change_active(const struct nft_set *set, + struct nft_set_ext *ext) +{ + ext->genmask ^= nft_genmask_next(read_pnet(&set->pnet)); +} + +/* + * We use a free bit in the genmask field to indicate the element + * is busy, meaning it is currently being processed either by + * the netlink API or GC. + * + * Even though the genmask is only a single byte wide, this works + * because the extension structure if fully constant once initialized, + * so there are no non-atomic write accesses unless it is already + * marked busy. 
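As a hedged illustration of how the generation mask, the expiration extension and the busy bit can interact, here is a sketch of a per-element garbage-collection step for a hypothetical set backend (my_set_gc_elem() and elem_priv are illustrative only; it shows the helper interplay, not a complete GC pass):

#include <net/netfilter/nf_tables.h>

static void my_set_gc_elem(struct nft_set *set, void *elem_priv,
			   struct nft_set_gc_batch **gcb)
{
	struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
	u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));

	if (!nft_set_elem_active(ext, genmask))
		return;		/* already inactive in the current generation */
	if (!nft_set_elem_expired(ext))
		return;		/* nothing to collect yet */
	if (nft_set_elem_mark_busy(ext))
		return;		/* netlink or another GC run owns it */

	*gcb = nft_set_gc_batch_check(set, *gcb, GFP_ATOMIC);
	if (*gcb)
		nft_set_gc_batch_add(*gcb, elem_priv);
	else
		nft_set_elem_clear_busy(ext);	/* allocation failed, retry later */
}

Once the walk finishes, nft_set_gc_batch_complete() hands the collected elements to RCU for release.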
+ */ +#define NFT_SET_ELEM_BUSY_MASK (1 << 2) + +#if defined(__LITTLE_ENDIAN_BITFIELD) +#define NFT_SET_ELEM_BUSY_BIT 2 +#elif defined(__BIG_ENDIAN_BITFIELD) +#define NFT_SET_ELEM_BUSY_BIT (BITS_PER_LONG - BITS_PER_BYTE + 2) +#else +#error +#endif + +static inline int nft_set_elem_mark_busy(struct nft_set_ext *ext) +{ + unsigned long *word = (unsigned long *)ext; + + BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0); + return test_and_set_bit(NFT_SET_ELEM_BUSY_BIT, word); +} + +static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext) +{ + unsigned long *word = (unsigned long *)ext; + + clear_bit(NFT_SET_ELEM_BUSY_BIT, word); +} + +/** + * struct nft_trans - nf_tables object update in transaction + * + * @list: used internally + * @msg_type: message type + * @ctx: transaction context + * @data: internal information related to the transaction + */ +struct nft_trans { + struct list_head list; + int msg_type; + struct nft_ctx ctx; + char data[0]; +}; + +struct nft_trans_rule { + struct nft_rule *rule; +}; + +#define nft_trans_rule(trans) \ + (((struct nft_trans_rule *)trans->data)->rule) + +struct nft_trans_set { + struct nft_set *set; + u32 set_id; +}; + +#define nft_trans_set(trans) \ + (((struct nft_trans_set *)trans->data)->set) +#define nft_trans_set_id(trans) \ + (((struct nft_trans_set *)trans->data)->set_id) + +struct nft_trans_chain { + bool update; + char name[NFT_CHAIN_MAXNAMELEN]; + struct nft_stats __percpu *stats; + u8 policy; +}; + +#define nft_trans_chain_update(trans) \ + (((struct nft_trans_chain *)trans->data)->update) +#define nft_trans_chain_name(trans) \ + (((struct nft_trans_chain *)trans->data)->name) +#define nft_trans_chain_stats(trans) \ + (((struct nft_trans_chain *)trans->data)->stats) +#define nft_trans_chain_policy(trans) \ + (((struct nft_trans_chain *)trans->data)->policy) + +struct nft_trans_table { + bool update; + bool enable; +}; + +#define nft_trans_table_update(trans) \ + (((struct nft_trans_table *)trans->data)->update) +#define nft_trans_table_enable(trans) \ + (((struct nft_trans_table *)trans->data)->enable) + +struct nft_trans_elem { + struct nft_set *set; + struct nft_set_elem elem; +}; + +#define nft_trans_elem_set(trans) \ + (((struct nft_trans_elem *)trans->data)->set) +#define nft_trans_elem(trans) \ + (((struct nft_trans_elem *)trans->data)->elem) + #endif /* _NET_NF_TABLES_H */ diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h index a75fc8e27cd6..c6f400cfaac8 100644 --- a/include/net/netfilter/nf_tables_core.h +++ b/include/net/netfilter/nf_tables_core.h @@ -31,6 +31,9 @@ void nft_cmp_module_exit(void); int nft_lookup_module_init(void); void nft_lookup_module_exit(void); +int nft_dynset_module_init(void); +void nft_dynset_module_exit(void); + int nft_bitwise_module_init(void); void nft_bitwise_module_exit(void); diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h index cba143fbd2e4..2df7f96902ee 100644 --- a/include/net/netfilter/nf_tables_ipv4.h +++ b/include/net/netfilter/nf_tables_ipv4.h @@ -8,12 +8,11 @@ static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out) + const struct nf_hook_state *state) { struct iphdr *ip; - nft_set_pktinfo(pkt, ops, skb, in, out); + nft_set_pktinfo(pkt, ops, skb, state); ip = ip_hdr(pkt->skb); pkt->tprot = ip->protocol; diff --git a/include/net/netfilter/nf_tables_ipv6.h 
b/include/net/netfilter/nf_tables_ipv6.h index 74d976137658..97db2e3a5e65 100644 --- a/include/net/netfilter/nf_tables_ipv6.h +++ b/include/net/netfilter/nf_tables_ipv6.h @@ -8,13 +8,12 @@ static inline int nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops, struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out) + const struct nf_hook_state *state) { int protohdr, thoff = 0; unsigned short frag_off; - nft_set_pktinfo(pkt, ops, skb, in, out); + nft_set_pktinfo(pkt, ops, skb, state); protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL); /* If malformed, drop it */ diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h index 0ee47c3e2e31..711887a09e91 100644 --- a/include/net/netfilter/nft_meta.h +++ b/include/net/netfilter/nft_meta.h @@ -26,11 +26,11 @@ int nft_meta_set_dump(struct sk_buff *skb, const struct nft_expr *expr); void nft_meta_get_eval(const struct nft_expr *expr, - struct nft_data data[NFT_REG_MAX + 1], + struct nft_regs *regs, const struct nft_pktinfo *pkt); void nft_meta_set_eval(const struct nft_expr *expr, - struct nft_data data[NFT_REG_MAX + 1], + struct nft_regs *regs, const struct nft_pktinfo *pkt); #endif diff --git a/include/net/netlink.h b/include/net/netlink.h index e010ee8da41d..2a5dbcc90d1c 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -4,6 +4,7 @@ #include <linux/types.h> #include <linux/netlink.h> #include <linux/jiffies.h> +#include <linux/in6.h> /* ======================================================================== * Netlink Messages and Attributes Interface (As Seen On TV) @@ -105,6 +106,8 @@ * nla_put_string(skb, type, str) add string attribute to skb * nla_put_flag(skb, type) add flag attribute to skb * nla_put_msecs(skb, type, jiffies) add msecs attribute to skb + * nla_put_in_addr(skb, type, addr) add IPv4 address attribute to skb + * nla_put_in6_addr(skb, type, addr) add IPv6 address attribute to skb * * Nested Attributes Construction: * nla_nest_start(skb, type) start a nested attribute @@ -957,6 +960,32 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype, } /** + * nla_put_in_addr - Add an IPv4 address netlink attribute to a socket + * buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @addr: IPv4 address + */ +static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype, + __be32 addr) +{ + return nla_put_be32(skb, attrtype, addr); +} + +/** + * nla_put_in6_addr - Add an IPv6 address netlink attribute to a socket + * buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @addr: IPv6 address + */ +static inline int nla_put_in6_addr(struct sk_buff *skb, int attrtype, + const struct in6_addr *addr) +{ + return nla_put(skb, attrtype, sizeof(*addr), addr); +} + +/** * nla_get_u32 - return payload of u32 attribute * @nla: u32 netlink attribute */ @@ -1099,6 +1128,27 @@ static inline unsigned long nla_get_msecs(const struct nlattr *nla) } /** + * nla_get_in_addr - return payload of IPv4 address attribute + * @nla: IPv4 address netlink attribute + */ +static inline __be32 nla_get_in_addr(const struct nlattr *nla) +{ + return *(__be32 *) nla_data(nla); +} + +/** + * nla_get_in6_addr - return payload of IPv6 address attribute + * @nla: IPv6 address netlink attribute + */ +static inline struct in6_addr nla_get_in6_addr(const struct nlattr *nla) +{ + struct in6_addr tmp; + + nla_memcpy(&tmp, nla, sizeof(tmp)); + return tmp; +} + +/** * nla_nest_start - Start a new 
level of nested attributes * @skb: socket buffer to add attributes to * @attrtype: attribute type of container diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h index c06ac58ca107..69a6715d9f3f 100644 --- a/include/net/netns/hash.h +++ b/include/net/netns/hash.h @@ -5,7 +5,7 @@ struct net; -static inline unsigned int net_hash_mix(struct net *net) +static inline u32 net_hash_mix(const struct net *net) { #ifdef CONFIG_NET_NS /* @@ -13,7 +13,7 @@ static inline unsigned int net_hash_mix(struct net *net) * always zeroed */ - return (unsigned)(((unsigned long)net) >> L1_CACHE_SHIFT); + return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT); #else return 0; #endif diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index dbe225478adb..614a49be68a9 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -7,6 +7,7 @@ #include <linux/uidgid.h> #include <net/inet_frag.h> +#include <linux/rcupdate.h> struct tcpm_hash_bucket; struct ctl_table_header; @@ -38,21 +39,21 @@ struct netns_ipv4 { #ifdef CONFIG_IP_MULTIPLE_TABLES struct fib_rules_ops *rules_ops; bool fib_has_custom_rules; - struct fib_table *fib_local; - struct fib_table *fib_main; - struct fib_table *fib_default; + struct fib_table __rcu *fib_local; + struct fib_table __rcu *fib_main; + struct fib_table __rcu *fib_default; #endif #ifdef CONFIG_IP_ROUTE_CLASSID int fib_num_tclassid_users; #endif struct hlist_head *fib_table_hash; + bool fib_offload_disabled; struct sock *fibnl; struct sock * __percpu *icmp_sk; + struct sock *mc_autojoin_sk; struct inet_peer_base *peers; - struct tcpm_hash_bucket *tcp_metrics_hash; - unsigned int tcp_metrics_hash_log; struct sock * __percpu *tcp_sk; struct netns_frags frags; #ifdef CONFIG_NETFILTER @@ -84,6 +85,8 @@ struct netns_ipv4 { int sysctl_tcp_fwmark_accept; int sysctl_tcp_mtu_probing; int sysctl_tcp_base_mss; + int sysctl_tcp_probe_threshold; + u32 sysctl_tcp_probe_interval; struct ping_group_range ping_group_range; diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 69ae41f2098c..d2527bf81142 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -32,6 +32,8 @@ struct netns_sysctl_ipv6 { int icmpv6_time; int anycast_src_echo_reply; int fwmark_reflect; + int idgen_retries; + int idgen_delay; }; struct netns_ipv6 { @@ -67,6 +69,7 @@ struct netns_ipv6 { struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; + struct sock *mc_autojoin_sk; #ifdef CONFIG_IPV6_MROUTE #ifndef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES struct mr6_table *mrt6; diff --git a/include/net/netns/mpls.h b/include/net/netns/mpls.h new file mode 100644 index 000000000000..d29203651c01 --- /dev/null +++ b/include/net/netns/mpls.h @@ -0,0 +1,17 @@ +/* + * mpls in net namespaces + */ + +#ifndef __NETNS_MPLS_H__ +#define __NETNS_MPLS_H__ + +struct mpls_route; +struct ctl_table_header; + +struct netns_mpls { + size_t platform_labels; + struct mpls_route __rcu * __rcu *platform_label; + struct ctl_table_header *ctl; +}; + +#endif /* __NETNS_MPLS_H__ */ diff --git a/include/net/netns/x_tables.h b/include/net/netns/x_tables.h index c24060ee411e..4d6597ad6067 100644 --- a/include/net/netns/x_tables.h +++ b/include/net/netns/x_tables.h @@ -9,6 +9,7 @@ struct ebt_table; struct netns_xt { struct list_head tables[NFPROTO_NUMPROTO]; bool notrack_deprecated_warning; + bool clusterip_deprecated_warning; #if defined(CONFIG_BRIDGE_NF_EBTABLES) || \ defined(CONFIG_BRIDGE_NF_EBTABLES_MODULE) struct ebt_table *broute_table; diff --git a/include/net/nfc/hci.h 
b/include/net/nfc/hci.h index ab672b537dd4..020a814bc8ed 100644 --- a/include/net/nfc/hci.h +++ b/include/net/nfc/hci.h @@ -83,6 +83,10 @@ struct nfc_hci_pipe { }; #define NFC_HCI_MAX_CUSTOM_GATES 50 +/* + * According to specification 102 622 chapter 4.4 Pipes, + * the pipe identifier is 7 bits long. + */ #define NFC_HCI_MAX_PIPES 127 struct nfc_hci_init_data { u8 gate_count; diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index ff87f8611fa3..d4dcc7199fd7 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -71,6 +71,7 @@ struct nci_ops { int (*close)(struct nci_dev *ndev); int (*send)(struct nci_dev *ndev, struct sk_buff *skb); int (*setup)(struct nci_dev *ndev); + int (*fw_download)(struct nci_dev *ndev, const char *firmware_name); __u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol); int (*discover_se)(struct nci_dev *ndev); int (*disable_se)(struct nci_dev *ndev, u32 se_idx); @@ -137,6 +138,10 @@ struct nci_conn_info { #define NCI_HCI_INVALID_HOST 0x80 #define NCI_HCI_MAX_CUSTOM_GATES 50 +/* + * According to specification 102 622 chapter 4.4 Pipes, + * the pipe identifier is 7 bits long. + */ #define NCI_HCI_MAX_PIPES 127 struct nci_hci_gate { diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 73190e65d5c1..7ac029c07546 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -157,7 +157,7 @@ struct nfc_evt_transaction { u32 aid_len; u8 aid[NFC_MAX_AID_LENGTH]; u8 params_len; - u8 params[NFC_MAX_PARAMS_LENGTH]; + u8 params[0]; } __packed; struct nfc_genl_data { diff --git a/include/net/ping.h b/include/net/ping.h index cc16d413f681..ac80cb45e630 100644 --- a/include/net/ping.h +++ b/include/net/ping.h @@ -75,12 +75,11 @@ void ping_err(struct sk_buff *skb, int offset, u32 info); int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd, struct sk_buff *); -int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len, int noblock, int flags, int *addr_len); +int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, + int flags, int *addr_len); int ping_common_sendmsg(int family, struct msghdr *msg, size_t len, void *user_icmph, size_t icmph_len); -int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len); +int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); bool ping_rcv(struct sk_buff *skb); diff --git a/include/net/request_sock.h b/include/net/request_sock.h index 7f830ff67f08..fe41f3ceb008 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -39,8 +39,7 @@ struct request_sock_ops { void (*send_reset)(struct sock *sk, struct sk_buff *skb); void (*destructor)(struct request_sock *req); - void (*syn_ack_timeout)(struct sock *sk, - struct request_sock *req); + void (*syn_ack_timeout)(const struct request_sock *req); }; int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req); @@ -49,7 +48,11 @@ int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req); */ struct request_sock { struct sock_common __req_common; +#define rsk_refcnt __req_common.skc_refcnt +#define rsk_hash __req_common.skc_hash + struct request_sock *dl_next; + struct sock *rsk_listener; u16 mss; u8 num_retrans; /* number of retransmits */ u8 cookie_ts:1; /* syncookie: encode tcpopts in timestamp */ @@ -58,32 +61,56 @@ struct request_sock { u32 window_clamp; /* window clamp at creation time */ u32 rcv_wnd; /* rcv_wnd offered first 
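With params[] in struct nfc_evt_transaction turned into a zero-length array, code that builds such an event must size the allocation from params_len instead of relying on a fixed maximum. A sketch of a hypothetical driver-side constructor, assuming the caller has already bounded aid_len by NFC_MAX_AID_LENGTH:

#include <linux/slab.h>
#include <linux/string.h>
#include <net/nfc/nfc.h>

/* hypothetical: assemble a transaction event from received AID/params data */
static struct nfc_evt_transaction *
demo_build_evt_transaction(const u8 *aid, u8 aid_len,
                           const u8 *params, u8 params_len)
{
        struct nfc_evt_transaction *trans;

        /* params[] is now a flexible array: account for it explicitly */
        trans = kzalloc(sizeof(*trans) + params_len, GFP_KERNEL);
        if (!trans)
                return NULL;

        trans->aid_len = aid_len;
        memcpy(trans->aid, aid, aid_len);
        trans->params_len = params_len;
        memcpy(trans->params, params, params_len);
        return trans;
}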
time */ u32 ts_recent; - unsigned long expires; + struct timer_list rsk_timer; const struct request_sock_ops *rsk_ops; struct sock *sk; u32 secid; u32 peer_secid; }; -static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops) +static inline struct request_sock * +reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener) { struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC); - if (req != NULL) + if (req) { req->rsk_ops = ops; - + sock_hold(sk_listener); + req->rsk_listener = sk_listener; + + /* Following is temporary. It is coupled with debugging + * helpers in reqsk_put() & reqsk_free() + */ + atomic_set(&req->rsk_refcnt, 0); + } return req; } -static inline void __reqsk_free(struct request_sock *req) +static inline struct request_sock *inet_reqsk(struct sock *sk) { - kmem_cache_free(req->rsk_ops->slab, req); + return (struct request_sock *)sk; +} + +static inline struct sock *req_to_sk(struct request_sock *req) +{ + return (struct sock *)req; } static inline void reqsk_free(struct request_sock *req) { + /* temporary debugging */ + WARN_ON_ONCE(atomic_read(&req->rsk_refcnt) != 0); + req->rsk_ops->destructor(req); - __reqsk_free(req); + if (req->rsk_listener) + sock_put(req->rsk_listener); + kmem_cache_free(req->rsk_ops->slab, req); +} + +static inline void reqsk_put(struct request_sock *req) +{ + if (atomic_dec_and_test(&req->rsk_refcnt)) + reqsk_free(req); } extern int sysctl_max_syn_backlog; @@ -93,12 +120,16 @@ extern int sysctl_max_syn_backlog; * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs */ struct listen_sock { - u8 max_qlen_log; + int qlen_inc; /* protected by listener lock */ + int young_inc;/* protected by listener lock */ + + /* following fields can be updated by timer */ + atomic_t qlen_dec; /* qlen = qlen_inc - qlen_dec */ + atomic_t young_dec; + + u8 max_qlen_log ____cacheline_aligned_in_smp; u8 synflood_warned; /* 2 bytes hole, try to use */ - int qlen; - int qlen_young; - int clock_hand; u32 hash_rnd; u32 nr_table_entries; struct request_sock *syn_table[0]; @@ -142,18 +173,11 @@ struct fastopen_queue { * %syn_wait_lock is necessary only to avoid proc interface having to grab the main * lock sock while browsing the listening hash (otherwise it's deadlock prone). * - * This lock is acquired in read mode only from listening_get_next() seq_file - * op and it's acquired in write mode _only_ from code that is actively - * changing rskq_accept_head. All readers that are holding the master sock lock - * don't need to grab this lock in read mode too as rskq_accept_head. writes - * are always protected from the main sock lock. */ struct request_sock_queue { struct request_sock *rskq_accept_head; struct request_sock *rskq_accept_tail; - rwlock_t syn_wait_lock; u8 rskq_defer_accept; - /* 3 bytes hole, try to pack */ struct listen_sock *listen_opt; struct fastopen_queue *fastopenq; /* This is non-NULL iff TFO has been * enabled on this listener. Check @@ -161,6 +185,9 @@ struct request_sock_queue { * to determine if TFO is enabled * right at this moment. 
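request_sock objects are now reference counted (rsk_refcnt) and pin their listener through rsk_listener, with a per-request rsk_timer replacing the old expires field. A sketch of the intended lifetime rules, using hypothetical demo_* wrappers:

#include <net/request_sock.h>

/* hypothetical: allocate a request attached to a listener */
static struct request_sock *
demo_req_create(const struct request_sock_ops *ops, struct sock *listener)
{
        struct request_sock *req = reqsk_alloc(ops, listener);

        /* before the request is hashed and refcounted, errors use
         * reqsk_free(); once other contexts can see it, drop references
         * with reqsk_put() instead
         */
        return req;
}

/* lookup side: pin the request while using it, then release the reference */
static void demo_req_use(struct request_sock *req)
{
        if (atomic_inc_not_zero(&req->rsk_refcnt)) {
                /* ... req and req->rsk_listener are safe to touch here ... */
                reqsk_put(req);         /* last reqsk_put() frees the request */
        }
}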
*/ + + /* temporary alignment, our goal is to get rid of this lock */ + spinlock_t syn_wait_lock ____cacheline_aligned_in_smp; }; int reqsk_queue_alloc(struct request_sock_queue *queue, @@ -186,12 +213,21 @@ static inline int reqsk_queue_empty(struct request_sock_queue *queue) } static inline void reqsk_queue_unlink(struct request_sock_queue *queue, - struct request_sock *req, - struct request_sock **prev_req) + struct request_sock *req) { - write_lock(&queue->syn_wait_lock); - *prev_req = req->dl_next; - write_unlock(&queue->syn_wait_lock); + struct listen_sock *lopt = queue->listen_opt; + struct request_sock **prev; + + spin_lock(&queue->syn_wait_lock); + + prev = &lopt->syn_table[req->rsk_hash]; + while (*prev != req) + prev = &(*prev)->dl_next; + *prev = req->dl_next; + + spin_unlock(&queue->syn_wait_lock); + if (del_timer(&req->rsk_timer)) + reqsk_put(req); } static inline void reqsk_queue_add(struct request_sock_queue *queue, @@ -224,57 +260,53 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue return req; } -static inline int reqsk_queue_removed(struct request_sock_queue *queue, - struct request_sock *req) +static inline void reqsk_queue_removed(struct request_sock_queue *queue, + const struct request_sock *req) { struct listen_sock *lopt = queue->listen_opt; if (req->num_timeout == 0) - --lopt->qlen_young; - - return --lopt->qlen; + atomic_inc(&lopt->young_dec); + atomic_inc(&lopt->qlen_dec); } -static inline int reqsk_queue_added(struct request_sock_queue *queue) +static inline void reqsk_queue_added(struct request_sock_queue *queue) { struct listen_sock *lopt = queue->listen_opt; - const int prev_qlen = lopt->qlen; - lopt->qlen_young++; - lopt->qlen++; - return prev_qlen; + lopt->young_inc++; + lopt->qlen_inc++; } -static inline int reqsk_queue_len(const struct request_sock_queue *queue) +static inline int listen_sock_qlen(const struct listen_sock *lopt) { - return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0; + return lopt->qlen_inc - atomic_read(&lopt->qlen_dec); } -static inline int reqsk_queue_len_young(const struct request_sock_queue *queue) +static inline int listen_sock_young(const struct listen_sock *lopt) { - return queue->listen_opt->qlen_young; + return lopt->young_inc - atomic_read(&lopt->young_dec); } -static inline int reqsk_queue_is_full(const struct request_sock_queue *queue) +static inline int reqsk_queue_len(const struct request_sock_queue *queue) { - return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log; + const struct listen_sock *lopt = queue->listen_opt; + + return lopt ? 
listen_sock_qlen(lopt) : 0; } -static inline void reqsk_queue_hash_req(struct request_sock_queue *queue, - u32 hash, struct request_sock *req, - unsigned long timeout) +static inline int reqsk_queue_len_young(const struct request_sock_queue *queue) { - struct listen_sock *lopt = queue->listen_opt; - - req->expires = jiffies + timeout; - req->num_retrans = 0; - req->num_timeout = 0; - req->sk = NULL; - req->dl_next = lopt->syn_table[hash]; + return listen_sock_young(queue->listen_opt); +} - write_lock(&queue->syn_wait_lock); - lopt->syn_table[hash] = req; - write_unlock(&queue->syn_wait_lock); +static inline int reqsk_queue_is_full(const struct request_sock_queue *queue) +{ + return reqsk_queue_len(queue) >> queue->listen_opt->max_qlen_log; } +void reqsk_queue_hash_req(struct request_sock_queue *queue, + u32 hash, struct request_sock *req, + unsigned long timeout); + #endif /* _REQUEST_SOCK_H */ diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 6c6d5393fc34..343d922d15c2 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -137,7 +137,7 @@ void rtnl_af_register(struct rtnl_af_ops *ops); void rtnl_af_unregister(struct rtnl_af_ops *ops); struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]); -struct net_device *rtnl_create_link(struct net *net, char *ifname, +struct net_device *rtnl_create_link(struct net *net, const char *ifname, unsigned char name_assign_type, const struct rtnl_link_ops *ops, struct nlattr *tb[]); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index c605d305c577..6d778efcfdfd 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -213,7 +213,7 @@ struct tcf_proto_ops { const struct tcf_proto *, struct tcf_result *); int (*init)(struct tcf_proto*); - void (*destroy)(struct tcf_proto*); + bool (*destroy)(struct tcf_proto*, bool); unsigned long (*get)(struct tcf_proto*, u32 handle); int (*change)(struct net *net, struct sk_buff *, @@ -399,7 +399,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, const struct Qdisc_ops *ops, u32 parentid); void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab); -void tcf_destroy(struct tcf_proto *tp); +bool tcf_destroy(struct tcf_proto *tp, bool force); void tcf_destroy_chain(struct tcf_proto __rcu **fl); /* Reset all TX qdiscs greater then index of a device. 
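The listen backlog is now tracked as plain increment counters updated under the listener lock (qlen_inc/young_inc) plus atomic decrement counters that the per-request timer can touch without that lock; the effective length is the difference, as listen_sock_qlen() computes. A sketch of how a SYN-processing path might consult the accessors; the admission helper itself is hypothetical, while inet_csk() and icsk_accept_queue come from the existing connection-sock code:

#include <net/inet_connection_sock.h>
#include <net/request_sock.h>

/* hypothetical admission check on a listening socket */
static bool demo_backlog_has_room(struct sock *listener)
{
        struct request_sock_queue *queue =
                &inet_csk(listener)->icsk_accept_queue;

        /* qlen >> max_qlen_log being non-zero means the SYN backlog is full */
        return !reqsk_queue_is_full(queue);
}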
*/ diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 856f01cb51dd..c56a438c3a1e 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -166,6 +166,9 @@ void sctp_remaddr_proc_exit(struct net *net); */ extern struct kmem_cache *sctp_chunk_cachep __read_mostly; extern struct kmem_cache *sctp_bucket_cachep __read_mostly; +extern long sysctl_sctp_mem[3]; +extern int sysctl_sctp_rmem[3]; +extern int sysctl_sctp_wmem[3]; /* * Section: Macros, externs, and inlines diff --git a/include/net/sock.h b/include/net/sock.h index 81c81ead9a35..3a4898ec8c67 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -66,6 +66,7 @@ #include <linux/atomic.h> #include <net/dst.h> #include <net/checksum.h> +#include <net/tcp_states.h> #include <linux/net_tstamp.h> struct cgroup; @@ -189,15 +190,15 @@ struct sock_common { struct hlist_nulls_node skc_portaddr_node; }; struct proto *skc_prot; -#ifdef CONFIG_NET_NS - struct net *skc_net; -#endif + possible_net_t skc_net; #if IS_ENABLED(CONFIG_IPV6) struct in6_addr skc_v6_daddr; struct in6_addr skc_v6_rcv_saddr; #endif + atomic64_t skc_cookie; + /* * fields between dontcopy_begin/dontcopy_end * are not copied in sock_copy() @@ -328,6 +329,7 @@ struct sock { #define sk_net __sk_common.skc_net #define sk_v6_daddr __sk_common.skc_v6_daddr #define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr +#define sk_cookie __sk_common.skc_cookie socket_lock_t sk_lock; struct sk_buff_head sk_receive_queue; @@ -402,8 +404,8 @@ struct sock { rwlock_t sk_callback_lock; int sk_err, sk_err_soft; - unsigned short sk_ack_backlog; - unsigned short sk_max_ack_backlog; + u32 sk_ack_backlog; + u32 sk_max_ack_backlog; __u32 sk_priority; #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) __u32 sk_cgrp_prioidx; @@ -957,10 +959,9 @@ struct proto { int (*compat_ioctl)(struct sock *sk, unsigned int cmd, unsigned long arg); #endif - int (*sendmsg)(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t len); - int (*recvmsg)(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, + int (*sendmsg)(struct sock *sk, struct msghdr *msg, + size_t len); + int (*recvmsg)(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len); int (*sendpage)(struct sock *sk, struct page *page, @@ -1561,9 +1562,8 @@ int sock_no_listen(struct socket *, int); int sock_no_shutdown(struct socket *, int); int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *); int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int); -int sock_no_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t); -int sock_no_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t, - int); +int sock_no_sendmsg(struct socket *, struct msghdr *, size_t); +int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int); int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma); ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, @@ -1575,8 +1575,8 @@ ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, */ int sock_common_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); -int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, int flags); +int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int flags); int sock_common_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen); int 
compat_sock_common_getsockopt(struct socket *sock, int level, @@ -1625,7 +1625,7 @@ static inline void sock_put(struct sock *sk) sk_free(sk); } /* Generic version of sock_put(), dealing with all sockets - * (TCP_TIMEWAIT, ESTABLISHED...) + * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...) */ void sock_gen_put(struct sock *sk); @@ -2079,6 +2079,29 @@ static inline int sock_intr_errno(long timeo) return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR; } +struct sock_skb_cb { + u32 dropcount; +}; + +/* Store sock_skb_cb at the end of skb->cb[] so protocol families + * using skb->cb[] would keep using it directly and utilize its + * alignement guarantee. + */ +#define SOCK_SKB_CB_OFFSET ((FIELD_SIZEOF(struct sk_buff, cb) - \ + sizeof(struct sock_skb_cb))) + +#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \ + SOCK_SKB_CB_OFFSET)) + +#define sock_skb_cb_check_size(size) \ + BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET) + +static inline void +sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb) +{ + SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops); +} + void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb); void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, @@ -2181,7 +2204,7 @@ static inline void sk_change_net(struct sock *sk, struct net *net) if (!net_eq(current_net, net)) { put_net(current_net); - sock_net_set(sk, hold_net(net)); + sock_net_set(sk, net); } } @@ -2197,6 +2220,14 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb) return NULL; } +/* This helper checks if a socket is a full socket, + * ie _not_ a timewait or request socket. + */ +static inline bool sk_fullsock(const struct sock *sk) +{ + return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV); +} + void sock_enable_timestamp(struct sock *sk, int flag); int sock_get_timestamp(struct sock *, struct timeval __user *); int sock_get_timestampns(struct sock *, struct timespec __user *); diff --git a/include/net/switchdev.h b/include/net/switchdev.h index cfcdac2e5d25..d2e69ee3019a 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -1,6 +1,7 @@ /* * include/net/switchdev.h - Switch device API * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us> + * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -13,6 +14,35 @@ #include <linux/netdevice.h> #include <linux/notifier.h> +struct fib_info; + +/** + * struct switchdev_ops - switchdev operations + * + * @swdev_parent_id_get: Called to get an ID of the switch chip this port + * is part of. If driver implements this, it indicates that it + * represents a port of a switch chip. + * + * @swdev_port_stp_update: Called to notify switch device port of bridge + * port STP state change. + * + * @swdev_fib_ipv4_add: Called to add/modify IPv4 route to switch device. + * + * @swdev_fib_ipv4_del: Called to delete IPv4 route from switch device. 
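sk_fullsock() lets generic code tell full sockets apart from timewait and the new TCP_NEW_SYN_RECV request sockets, and SOCK_SKB_CB() carves a small shared area at the tail of skb->cb[] so the drop count can be stored without clashing with protocol-private cb usage. A sketch; both demo_* helpers are hypothetical:

#include <net/sock.h>

/* only full sockets carry fields like sk_mark */
static u32 demo_skb_mark(const struct sock *sk)
{
        return sk_fullsock(sk) ? sk->sk_mark : 0;
}

/* record the current drop count before queueing, e.g. for SO_RXQ_OVFL */
static void demo_queue_rcv(struct sock *sk, struct sk_buff *skb)
{
        sock_skb_set_dropcount(sk, skb);
        skb_queue_tail(&sk->sk_receive_queue, skb);
}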
+ */ +struct swdev_ops { + int (*swdev_parent_id_get)(struct net_device *dev, + struct netdev_phys_item_id *psid); + int (*swdev_port_stp_update)(struct net_device *dev, u8 state); + int (*swdev_fib_ipv4_add)(struct net_device *dev, __be32 dst, + int dst_len, struct fib_info *fi, + u8 tos, u8 type, u32 nlflags, + u32 tb_id); + int (*swdev_fib_ipv4_del)(struct net_device *dev, __be32 dst, + int dst_len, struct fib_info *fi, + u8 tos, u8 type, u32 tb_id); +}; + enum netdev_switch_notifier_type { NETDEV_SWITCH_FDB_ADD = 1, NETDEV_SWITCH_FDB_DEL, @@ -51,6 +81,12 @@ int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags); int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags); +int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi, + u8 tos, u8 type, u32 nlflags, u32 tb_id); +int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi, + u8 tos, u8 type, u32 tb_id); +void netdev_switch_fib_ipv4_abort(struct fib_info *fi); + #else static inline int netdev_switch_parent_id_get(struct net_device *dev, @@ -109,6 +145,25 @@ static inline int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device * return 0; } +static inline int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, + struct fib_info *fi, + u8 tos, u8 type, + u32 nlflags, u32 tb_id) +{ + return 0; +} + +static inline int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, + struct fib_info *fi, + u8 tos, u8 type, u32 tb_id) +{ + return 0; +} + +static inline void netdev_switch_fib_ipv4_abort(struct fib_info *fi) +{ +} + #endif #endif /* _LINUX_SWITCHDEV_H_ */ diff --git a/include/net/tc_act/tc_bpf.h b/include/net/tc_act/tc_bpf.h index 86a070ffc930..a152e9858b2c 100644 --- a/include/net/tc_act/tc_bpf.h +++ b/include/net/tc_act/tc_bpf.h @@ -16,8 +16,12 @@ struct tcf_bpf { struct tcf_common common; struct bpf_prog *filter; + union { + u32 bpf_fd; + u16 bpf_num_ops; + }; struct sock_filter *bpf_ops; - u16 bpf_num_ops; + const char *bpf_name; }; #define to_bpf(a) \ container_of(a->priv, struct tcf_bpf, common) diff --git a/include/net/tcp.h b/include/net/tcp.h index 8d6b983d5099..9598871485ce 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -65,7 +65,13 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCP_MIN_MSS 88U /* The least MTU to use for probing */ -#define TCP_BASE_MSS 512 +#define TCP_BASE_MSS 1024 + +/* probing interval, default to 10 minutes as per RFC4821 */ +#define TCP_PROBE_INTERVAL 600 + +/* Specify interval when tcp mtu probing will stop */ +#define TCP_PROBE_THRESHOLD 8 /* After receiving this amount of duplicate ACKs fast retransmit starts. */ #define TCP_FASTRETRANS_THRESH 3 @@ -173,6 +179,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCPOPT_SACK 5 /* SACK Block */ #define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */ #define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */ +#define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */ #define TCPOPT_EXP 254 /* Experimental */ /* Magic number to be after the option value for sharing TCP * experimental options. See draft-ietf-tcpm-experimental-options-00.txt @@ -188,6 +195,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCPOLEN_SACK_PERM 2 #define TCPOLEN_TIMESTAMP 10 #define TCPOLEN_MD5SIG 18 +#define TCPOLEN_FASTOPEN_BASE 2 #define TCPOLEN_EXP_FASTOPEN_BASE 4 /* But this is what stacks really send out. 
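A driver that represents ports of a switch ASIC implements these operations and points its net_device at the ops table; implementing swdev_parent_id_get is what marks the port as belonging to a switch. A minimal sketch of a hypothetical driver; the ID value and the stub bodies are illustrative only:

#include <linux/netdevice.h>
#include <linux/string.h>
#include <net/switchdev.h>

static int demo_parent_id_get(struct net_device *dev,
                              struct netdev_phys_item_id *psid)
{
        /* hypothetical: every port of this device reports the same switch ID */
        static const u8 demo_id[] = { 0xde, 0xad, 0xbe, 0xef };

        psid->id_len = sizeof(demo_id);
        memcpy(psid->id, demo_id, psid->id_len);
        return 0;
}

static int demo_stp_update(struct net_device *dev, u8 state)
{
        /* hypothetical: program the bridge port STP state into hardware */
        return 0;
}

static const struct swdev_ops demo_swdev_ops = {
        .swdev_parent_id_get   = demo_parent_id_get,
        .swdev_port_stp_update = demo_stp_update,
};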
*/ @@ -349,8 +357,7 @@ void tcp_v4_early_demux(struct sk_buff *skb); int tcp_v4_rcv(struct sk_buff *skb); int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); -int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t size); +int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); void tcp_release_cb(struct sock *sk); @@ -401,8 +408,7 @@ enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, const struct tcphdr *th); struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, - struct request_sock *req, struct request_sock **prev, - bool fastopen); + struct request_sock *req, bool fastopen); int tcp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb); void tcp_enter_loss(struct sock *sk); @@ -429,9 +435,9 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname, int compat_tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); void tcp_set_keepalive(struct sock *sk, int val); -void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req); -int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len, int nonblock, int flags, int *addr_len); +void tcp_syn_ack_timeout(const struct request_sock *req); +int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, + int flags, int *addr_len); void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab, struct tcp_fastopen_cookie *foc); @@ -443,6 +449,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); void tcp_v4_mtu_reduced(struct sock *sk); +void tcp_req_err(struct sock *sk, u32 seq); int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, @@ -524,8 +531,6 @@ int tcp_write_wakeup(struct sock *); void tcp_send_fin(struct sock *sk); void tcp_send_active_reset(struct sock *sk, gfp_t priority); int tcp_send_synack(struct sock *); -bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb, - const char *proto); void tcp_push_one(struct sock *, unsigned int mss_now); void tcp_send_ack(struct sock *sk); void tcp_send_delayed_ack(struct sock *sk); @@ -1132,31 +1137,6 @@ static inline int tcp_full_space(const struct sock *sk) return tcp_win_from_space(sk->sk_rcvbuf); } -static inline void tcp_openreq_init(struct request_sock *req, - struct tcp_options_received *rx_opt, - struct sk_buff *skb, struct sock *sk) -{ - struct inet_request_sock *ireq = inet_rsk(req); - - req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */ - req->cookie_ts = 0; - tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; - tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; - tcp_rsk(req)->snt_synack = tcp_time_stamp; - tcp_rsk(req)->last_oow_ack_time = 0; - req->mss = rx_opt->mss_clamp; - req->ts_recent = rx_opt->saw_tstamp ? 
rx_opt->rcv_tsval : 0; - ireq->tstamp_ok = rx_opt->tstamp_ok; - ireq->sack_ok = rx_opt->sack_ok; - ireq->snd_wscale = rx_opt->snd_wscale; - ireq->wscale_ok = rx_opt->wscale_ok; - ireq->acked = 0; - ireq->ecn_ok = 0; - ireq->ir_rmt_port = tcp_hdr(skb)->source; - ireq->ir_num = ntohs(tcp_hdr(skb)->dest); - ireq->ir_mark = inet_request_mark(sk, skb); -} - extern void tcp_openreq_init_rwin(struct request_sock *req, struct sock *sk, struct dst_entry *dst); @@ -1236,36 +1216,8 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt, return true; } -/* Return true if we're currently rate-limiting out-of-window ACKs and - * thus shouldn't send a dupack right now. We rate-limit dupacks in - * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS - * attacks that send repeated SYNs or ACKs for the same connection. To - * do this, we do not send a duplicate SYNACK or ACK if the remote - * endpoint is sending out-of-window SYNs or pure ACKs at a high rate. - */ -static inline bool tcp_oow_rate_limited(struct net *net, - const struct sk_buff *skb, - int mib_idx, u32 *last_oow_ack_time) -{ - /* Data packets without SYNs are not likely part of an ACK loop. */ - if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) && - !tcp_hdr(skb)->syn) - goto not_rate_limited; - - if (*last_oow_ack_time) { - s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time); - - if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) { - NET_INC_STATS_BH(net, mib_idx); - return true; /* rate-limited: don't send yet! */ - } - } - - *last_oow_ack_time = tcp_time_stamp; - -not_rate_limited: - return false; /* not rate-limited: go ahead, send dupack now! */ -} +bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, + int mib_idx, u32 *last_oow_ack_time); static inline void tcp_mib_init(struct net *net) { @@ -1344,15 +1296,14 @@ struct tcp_md5sig_pool { }; /* - functions */ -int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key, - const struct sock *sk, const struct request_sock *req, - const struct sk_buff *skb); +int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, + const struct sock *sk, const struct sk_buff *skb); int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, int family, const u8 *newkey, u8 newkeylen, gfp_t gfp); int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family); struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, - struct sock *addr_sk); + const struct sock *addr_sk); #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, @@ -1388,7 +1339,8 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss, struct tcp_fastopen_cookie *cookie, int *syn_loss, unsigned long *last_syn_loss); void tcp_fastopen_cache_set(struct sock *sk, u16 mss, - struct tcp_fastopen_cookie *cookie, bool syn_lost); + struct tcp_fastopen_cookie *cookie, bool syn_lost, + u16 try_exp); struct tcp_fastopen_request { /* Fast Open cookie. 
Size 0 means a cookie request */ struct tcp_fastopen_cookie cookie; @@ -1663,28 +1615,26 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, struct tcp_sock_af_ops { #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk, - struct sock *addr_sk); - int (*calc_md5_hash) (char *location, - struct tcp_md5sig_key *md5, - const struct sock *sk, - const struct request_sock *req, - const struct sk_buff *skb); - int (*md5_parse) (struct sock *sk, - char __user *optval, - int optlen); + const struct sock *addr_sk); + int (*calc_md5_hash)(char *location, + const struct tcp_md5sig_key *md5, + const struct sock *sk, + const struct sk_buff *skb); + int (*md5_parse)(struct sock *sk, + char __user *optval, + int optlen); #endif }; struct tcp_request_sock_ops { u16 mss_clamp; #ifdef CONFIG_TCP_MD5SIG - struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk, - struct request_sock *req); - int (*calc_md5_hash) (char *location, - struct tcp_md5sig_key *md5, - const struct sock *sk, - const struct request_sock *req, - const struct sk_buff *skb); + struct tcp_md5sig_key *(*req_md5_lookup)(struct sock *sk, + const struct sock *addr_sk); + int (*calc_md5_hash) (char *location, + const struct tcp_md5sig_key *md5, + const struct sock *sk, + const struct sk_buff *skb); #endif void (*init_req)(struct request_sock *req, struct sock *sk, struct sk_buff *skb); diff --git a/include/net/tcp_states.h b/include/net/tcp_states.h index b0b645988bd8..50e78a74d0df 100644 --- a/include/net/tcp_states.h +++ b/include/net/tcp_states.h @@ -25,6 +25,7 @@ enum { TCP_LAST_ACK, TCP_LISTEN, TCP_CLOSING, /* Now a valid state */ + TCP_NEW_SYN_RECV, TCP_MAX_STATES /* Leave at the end! */ }; @@ -44,7 +45,8 @@ enum { TCPF_CLOSE_WAIT = (1 << 8), TCPF_LAST_ACK = (1 << 9), TCPF_LISTEN = (1 << 10), - TCPF_CLOSING = (1 << 11) + TCPF_CLOSING = (1 << 11), + TCPF_NEW_SYN_RECV = (1 << 12), }; #endif /* _LINUX_TCP_STATES_H */ diff --git a/include/net/udp.h b/include/net/udp.h index 07f9b70962f6..6d4ed18e1427 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -194,6 +194,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, int (*)(const struct sock *, const struct sock *), unsigned int hash2_nulladdr); +u32 udp_flow_hashrnd(void); + static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb, int min, int max, bool use_eth) { @@ -205,12 +207,19 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb, } hash = skb_get_hash(skb); - if (unlikely(!hash) && use_eth) { - /* Can't find a normal hash, caller has indicated an Ethernet - * packet so use that to compute a hash. - */ - hash = jhash(skb->data, 2 * ETH_ALEN, - (__force u32) skb->protocol); + if (unlikely(!hash)) { + if (use_eth) { + /* Can't find a normal hash, caller has indicated an + * Ethernet packet so use that to compute a hash. + */ + hash = jhash(skb->data, 2 * ETH_ALEN, + (__force u32) skb->protocol); + } else { + /* Can't derive any sort of hash for the packet, set + * to some consistent random value. 
+ */ + hash = udp_flow_hashrnd(); + } } /* Since this is being sent on the wire obfuscate hash a bit @@ -229,8 +238,7 @@ int udp_get_port(struct sock *sk, unsigned short snum, int (*saddr_cmp)(const struct sock *, const struct sock *)); void udp_err(struct sk_buff *, u32); -int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len); +int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); int udp_push_pending_frames(struct sock *sk); void udp_flush_pending_frames(struct sock *sk); void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst); diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index 1a20d33d56bc..c491c1221606 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -77,13 +77,14 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock, struct udp_tunnel_sock_cfg *sock_cfg); /* Transmit the skb using UDP encapsulation. */ -int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb, +int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, bool xnet, bool nocheck); #if IS_ENABLED(CONFIG_IPV6) -int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb, +int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, + struct sk_buff *skb, struct net_device *dev, struct in6_addr *saddr, struct in6_addr *daddr, __u8 prio, __u8 ttl, __be16 src_port, diff --git a/include/net/vxlan.h b/include/net/vxlan.h index c73e7abbbaa5..0082b5d33d7d 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -131,7 +131,7 @@ struct vxlan_sock { #define VXLAN_F_GBP 0x800 #define VXLAN_F_REMCSUM_NOPARTIAL 0x1000 -/* Flags that are used in the receive patch. These flags must match in +/* Flags that are used in the receive path. These flags must match in * order for a socket to be shareable */ #define VXLAN_F_RCV_FLAGS (VXLAN_F_GBP | \ @@ -145,7 +145,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, void vxlan_sock_release(struct vxlan_sock *vs); -int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb, +int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, struct vxlan_metadata *md, bool xnet, u32 vxflags); diff --git a/include/net/xfrm.h b/include/net/xfrm.h index dc4865e90fe4..36ac102c97c7 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -126,9 +126,7 @@ struct xfrm_state_walk { /* Full description of state of transformer. 
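With the udp_flow_hashrnd() fallback above, udp_flow_src_port() always yields a usable source port even when no flow hash can be derived from the packet. A sketch of how a UDP-based tunnel transmit path typically picks its source port; the wrapper itself is hypothetical:

#include <net/udp.h>

/* hash the inner flow into a UDP source port for a tunnel header */
static __be16 demo_tunnel_src_port(struct net *net, struct sk_buff *skb)
{
        /* min >= max means "use the local ephemeral port range";
         * use_eth = true allows falling back to the Ethernet header hash
         */
        return udp_flow_src_port(net, skb, 0, 0, true);
}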
*/ struct xfrm_state { -#ifdef CONFIG_NET_NS - struct net *xs_net; -#endif + possible_net_t xs_net; union { struct hlist_node gclist; struct hlist_node bydst; @@ -334,7 +332,7 @@ struct xfrm_state_afinfo { int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); int (*output)(struct sock *sk, struct sk_buff *skb); - int (*output_finish)(struct sk_buff *skb); + int (*output_finish)(struct sock *sk, struct sk_buff *skb); int (*extract_input)(struct xfrm_state *x, struct sk_buff *skb); int (*extract_output)(struct xfrm_state *x, @@ -522,9 +520,7 @@ struct xfrm_policy_queue { }; struct xfrm_policy { -#ifdef CONFIG_NET_NS - struct net *xp_net; -#endif + possible_net_t xp_net; struct hlist_node bydst; struct hlist_node byidx; @@ -1029,7 +1025,7 @@ xfrm_addr_any(const xfrm_address_t *addr, unsigned short family) case AF_INET: return addr->a4 == 0; case AF_INET6: - return ipv6_addr_any((struct in6_addr *)&addr->a6); + return ipv6_addr_any(&addr->in6); } return 0; } @@ -1242,8 +1238,8 @@ void xfrm_flowi_addr_get(const struct flowi *fl, memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4)); break; case AF_INET6: - *(struct in6_addr *)saddr->a6 = fl->u.ip6.saddr; - *(struct in6_addr *)daddr->a6 = fl->u.ip6.daddr; + saddr->in6 = fl->u.ip6.saddr; + daddr->in6 = fl->u.ip6.daddr; break; } } @@ -1507,7 +1503,7 @@ int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb); int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); int xfrm_input_resume(struct sk_buff *skb, int nexthdr); int xfrm_output_resume(struct sk_buff *skb, int err); -int xfrm_output(struct sk_buff *skb); +int xfrm_output(struct sock *sk, struct sk_buff *skb); int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb); void xfrm_local_error(struct sk_buff *skb, int mtu); int xfrm4_extract_header(struct sk_buff *skb); @@ -1528,7 +1524,7 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); int xfrm4_output(struct sock *sk, struct sk_buff *skb); -int xfrm4_output_finish(struct sk_buff *skb); +int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb); int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err); int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol); int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol); @@ -1553,7 +1549,7 @@ __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr); int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); int xfrm6_output(struct sock *sk, struct sk_buff *skb); -int xfrm6_output_finish(struct sk_buff *skb); +int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb); int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr); diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h index f2902ef7ab75..4dce116bfd80 100644 --- a/include/rxrpc/packet.h +++ b/include/rxrpc/packet.h @@ -47,7 +47,8 @@ struct rxrpc_header { #define RXRPC_PACKET_TYPE_CHALLENGE 6 /* connection security challenge (SRVR->CLNT) */ #define RXRPC_PACKET_TYPE_RESPONSE 7 /* connection secutity response (CLNT->SRVR) */ #define RXRPC_PACKET_TYPE_DEBUG 8 /* debug info request */ -#define RXRPC_N_PACKET_TYPES 9 /* number of packet types 
(incl type 0) */ +#define RXRPC_PACKET_TYPE_VERSION 13 /* version string request */ +#define RXRPC_N_PACKET_TYPES 14 /* number of packet types (incl type 0) */ uint8_t flags; /* packet flags */ #define RXRPC_CLIENT_INITIATED 0x01 /* signifies a packet generated by a client */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index cc47ef41076a..5c1cee11f777 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -119,8 +119,12 @@ enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC, BPF_PROG_TYPE_SOCKET_FILTER, BPF_PROG_TYPE_KPROBE, + BPF_PROG_TYPE_SCHED_CLS, + BPF_PROG_TYPE_SCHED_ACT, }; +#define BPF_PSEUDO_MAP_FD 1 + /* flags for BPF_MAP_UPDATE_ELEM command */ #define BPF_ANY 0 /* create new element or update existing */ #define BPF_NOEXIST 1 /* create new element if it didn't exist */ @@ -167,7 +171,61 @@ enum bpf_func_id { BPF_FUNC_probe_read, /* int bpf_probe_read(void *dst, int size, void *src) */ BPF_FUNC_ktime_get_ns, /* u64 bpf_ktime_get_ns(void) */ BPF_FUNC_trace_printk, /* int bpf_trace_printk(const char *fmt, int fmt_size, ...) */ + BPF_FUNC_get_prandom_u32, /* u32 prandom_u32(void) */ + BPF_FUNC_get_smp_processor_id, /* u32 raw_smp_processor_id(void) */ + + /** + * skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet + * @skb: pointer to skb + * @offset: offset within packet from skb->data + * @from: pointer where to copy bytes from + * @len: number of bytes to store into packet + * @flags: bit 0 - if true, recompute skb->csum + * other bits - reserved + * Return: 0 on success + */ + BPF_FUNC_skb_store_bytes, + + /** + * l3_csum_replace(skb, offset, from, to, flags) - recompute IP checksum + * @skb: pointer to skb + * @offset: offset within packet where IP checksum is located + * @from: old value of header field + * @to: new value of header field + * @flags: bits 0-3 - size of header field + * other bits - reserved + * Return: 0 on success + */ + BPF_FUNC_l3_csum_replace, + + /** + * l4_csum_replace(skb, offset, from, to, flags) - recompute TCP/UDP checksum + * @skb: pointer to skb + * @offset: offset within packet where TCP/UDP checksum is located + * @from: old value of header field + * @to: new value of header field + * @flags: bits 0-3 - size of header field + * bit 4 - is pseudo header + * other bits - reserved + * Return: 0 on success + */ + BPF_FUNC_l4_csum_replace, __BPF_FUNC_MAX_ID, }; +/* user accessible mirror of in-kernel sk_buff. 
+ * new fields can only be added to the end of this structure + */ +struct __sk_buff { + __u32 len; + __u32 pkt_type; + __u32 mark; + __u32 queue_mapping; + __u32 protocol; + __u32 vlan_present; + __u32 vlan_tci; + __u32 vlan_proto; + __u32 priority; +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h index 78ec76fd89a6..8735f1080385 100644 --- a/include/uapi/linux/can/raw.h +++ b/include/uapi/linux/can/raw.h @@ -57,6 +57,7 @@ enum { CAN_RAW_LOOPBACK, /* local loopback (default:on) */ CAN_RAW_RECV_OWN_MSGS, /* receive my own msgs (default:off) */ CAN_RAW_FD_FRAMES, /* allow CAN FD frames (default:off) */ + CAN_RAW_JOIN_FILTERS, /* all filters must match to trigger */ }; #endif /* !_UAPI_CAN_RAW_H */ diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h index e711f20dc522..6497d7933d5b 100644 --- a/include/uapi/linux/dcbnl.h +++ b/include/uapi/linux/dcbnl.h @@ -78,6 +78,70 @@ struct ieee_maxrate { __u64 tc_maxrate[IEEE_8021QAZ_MAX_TCS]; }; +enum dcbnl_cndd_states { + DCB_CNDD_RESET = 0, + DCB_CNDD_EDGE, + DCB_CNDD_INTERIOR, + DCB_CNDD_INTERIOR_READY, +}; + +/* This structure contains the IEEE 802.1Qau QCN managed object. + * + *@rpg_enable: enable QCN RP + *@rppp_max_rps: maximum number of RPs allowed for this CNPV on this port + *@rpg_time_reset: time between rate increases if no CNMs received. + * given in u-seconds + *@rpg_byte_reset: transmitted data between rate increases if no CNMs received. + * given in Bytes + *@rpg_threshold: The number of times rpByteStage or rpTimeStage can count + * before RP rate control state machine advances states + *@rpg_max_rate: the maxinun rate, in Mbits per second, + * at which an RP can transmit + *@rpg_ai_rate: The rate, in Mbits per second, + * used to increase rpTargetRate in the RPR_ACTIVE_INCREASE + *@rpg_hai_rate: The rate, in Mbits per second, + * used to increase rpTargetRate in the RPR_HYPER_INCREASE state + *@rpg_gd: Upon CNM receive, flow rate is limited to (Fb/Gd)*CurrentRate. + * rpgGd is given as log2(Gd), where Gd may only be powers of 2 + *@rpg_min_dec_fac: The minimum factor by which the current transmit rate + * can be changed by reception of a CNM. + * value is given as percentage (1-100) + *@rpg_min_rate: The minimum value, in bits per second, for rate to limit + *@cndd_state_machine: The state of the congestion notification domain + * defense state machine, as defined by IEEE 802.3Qau + * section 32.1.1. In the interior ready state, + * the QCN capable hardware may add CN-TAG TLV to the + * outgoing traffic, to specifically identify outgoing + * flows. + */ + +struct ieee_qcn { + __u8 rpg_enable[IEEE_8021QAZ_MAX_TCS]; + __u32 rppp_max_rps[IEEE_8021QAZ_MAX_TCS]; + __u32 rpg_time_reset[IEEE_8021QAZ_MAX_TCS]; + __u32 rpg_byte_reset[IEEE_8021QAZ_MAX_TCS]; + __u32 rpg_threshold[IEEE_8021QAZ_MAX_TCS]; + __u32 rpg_max_rate[IEEE_8021QAZ_MAX_TCS]; + __u32 rpg_ai_rate[IEEE_8021QAZ_MAX_TCS]; + __u32 rpg_hai_rate[IEEE_8021QAZ_MAX_TCS]; + __u32 rpg_gd[IEEE_8021QAZ_MAX_TCS]; + __u32 rpg_min_dec_fac[IEEE_8021QAZ_MAX_TCS]; + __u32 rpg_min_rate[IEEE_8021QAZ_MAX_TCS]; + __u32 cndd_state_machine[IEEE_8021QAZ_MAX_TCS]; +}; + +/* This structure contains the IEEE 802.1Qau QCN statistics. 
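BPF_PROG_TYPE_SCHED_CLS and BPF_PROG_TYPE_SCHED_ACT programs see packets through the read-only struct __sk_buff mirror above rather than the kernel's sk_buff. A sketch of a classifier in the restricted C that LLVM's eBPF backend compiles; the section name and return-value convention are shown illustratively, and such an object would be handed to the kernel via the new TCA_BPF_FD/TCA_BPF_NAME attributes:

#include <linux/bpf.h>

/* hypothetical classifier: match packets with mark 1 or a VLAN tag */
__attribute__((section("classifier"), used))
int demo_cls(struct __sk_buff *skb)
{
        if (skb->mark == 1 || skb->vlan_present)
                return 1;       /* non-zero: match (value used as classid) */
        return 0;               /* 0: no match, fall through to next filter */
}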
+ * + *@rppp_rp_centiseconds: the number of RP-centiseconds accumulated + * by RPs at this priority level on this Port + *@rppp_created_rps: number of active RPs(flows) that react to CNMs + */ + +struct ieee_qcn_stats { + __u64 rppp_rp_centiseconds[IEEE_8021QAZ_MAX_TCS]; + __u32 rppp_created_rps[IEEE_8021QAZ_MAX_TCS]; +}; + /* This structure contains the IEEE 802.1Qaz PFC managed object * * @pfc_cap: Indicates the number of traffic classes on the local device @@ -334,6 +398,8 @@ enum ieee_attrs { DCB_ATTR_IEEE_PEER_PFC, DCB_ATTR_IEEE_PEER_APP, DCB_ATTR_IEEE_MAXRATE, + DCB_ATTR_IEEE_QCN, + DCB_ATTR_IEEE_QCN_STATS, __DCB_ATTR_IEEE_MAX }; #define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1) diff --git a/include/uapi/linux/filter.h b/include/uapi/linux/filter.h index 47785d5ecf17..34c7936ca114 100644 --- a/include/uapi/linux/filter.h +++ b/include/uapi/linux/filter.h @@ -77,7 +77,8 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */ #define SKF_AD_VLAN_TAG_PRESENT 48 #define SKF_AD_PAY_OFFSET 52 #define SKF_AD_RANDOM 56 -#define SKF_AD_MAX 60 +#define SKF_AD_VLAN_TPID 60 +#define SKF_AD_MAX 64 #define SKF_NET_OFF (-0x100000) #define SKF_LL_OFF (-0x200000) diff --git a/include/uapi/linux/fou.h b/include/uapi/linux/fou.h index c303588bb767..d2947c52dc67 100644 --- a/include/uapi/linux/fou.h +++ b/include/uapi/linux/fou.h @@ -25,6 +25,7 @@ enum { FOU_CMD_UNSPEC, FOU_CMD_ADD, FOU_CMD_DEL, + FOU_CMD_GET, __FOU_CMD_MAX, }; diff --git a/include/uapi/linux/if_addr.h b/include/uapi/linux/if_addr.h index dea10a87dfd1..4318ab1635ce 100644 --- a/include/uapi/linux/if_addr.h +++ b/include/uapi/linux/if_addr.h @@ -50,6 +50,8 @@ enum { #define IFA_F_PERMANENT 0x80 #define IFA_F_MANAGETEMPADDR 0x100 #define IFA_F_NOPREFIXROUTE 0x200 +#define IFA_F_MCAUTOJOIN 0x400 +#define IFA_F_STABLE_PRIVACY 0x800 struct ifa_cacheinfo { __u32 ifa_prefered; diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index dfd0bb22e554..d9cd19214b98 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -147,6 +147,7 @@ enum { IFLA_CARRIER_CHANGES, IFLA_PHYS_SWITCH_ID, IFLA_LINK_NETNSID, + IFLA_PHYS_PORT_NAME, __IFLA_MAX }; @@ -215,6 +216,7 @@ enum { enum in6_addr_gen_mode { IN6_ADDR_GEN_MODE_EUI64, IN6_ADDR_GEN_MODE_NONE, + IN6_ADDR_GEN_MODE_STABLE_PRIVACY, }; /* Bridge section */ @@ -224,6 +226,9 @@ enum { IFLA_BR_FORWARD_DELAY, IFLA_BR_HELLO_TIME, IFLA_BR_MAX_AGE, + IFLA_BR_AGEING_TIME, + IFLA_BR_STP_STATE, + IFLA_BR_PRIORITY, __IFLA_BR_MAX, }; @@ -247,6 +252,7 @@ enum { IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */ IFLA_BRPORT_PROXYARP, /* proxy ARP */ IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */ + IFLA_BRPORT_PROXYARP_WIFI, /* proxy ARP for Wi-Fi */ __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -459,6 +465,9 @@ enum { IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */ IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */ IFLA_VF_RATE, /* Min and Max TX Bandwidth Allocation */ + IFLA_VF_RSS_QUERY_EN, /* RSS Redirection Table and Hash Key query + * on/off switch + */ __IFLA_VF_MAX, }; @@ -503,6 +512,11 @@ struct ifla_vf_link_state { __u32 link_state; }; +struct ifla_vf_rss_query_en { + __u32 vf; + __u32 setting; +}; + /* VF ports management section * * Nested layout of set/get msg is: diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h index da2d668b8cf1..053bd102fbe0 100644 --- a/include/uapi/linux/if_packet.h +++ b/include/uapi/linux/if_packet.h @@ -99,6 +99,7 @@ struct tpacket_auxdata { 
#define TP_STATUS_VLAN_VALID (1 << 4) /* auxdata has valid tp_vlan_tci */ #define TP_STATUS_BLK_TMO (1 << 5) #define TP_STATUS_VLAN_TPID_VALID (1 << 6) /* auxdata has valid tp_vlan_tpid */ +#define TP_STATUS_CSUM_VALID (1 << 7) /* Tx ring - header status */ #define TP_STATUS_AVAILABLE 0 diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h index cabe95d5b461..3199243f2028 100644 --- a/include/uapi/linux/ip_vs.h +++ b/include/uapi/linux/ip_vs.h @@ -358,6 +358,8 @@ enum { IPVS_SVC_ATTR_PE_NAME, /* name of ct retriever */ + IPVS_SVC_ATTR_STATS64, /* nested attribute for service stats */ + __IPVS_SVC_ATTR_MAX, }; @@ -387,6 +389,8 @@ enum { IPVS_DEST_ATTR_ADDR_FAMILY, /* Address family of address */ + IPVS_DEST_ATTR_STATS64, /* nested attribute for dest stats */ + __IPVS_DEST_ATTR_MAX, }; @@ -410,7 +414,8 @@ enum { /* * Attributes used to describe service or destination entry statistics * - * Used inside nested attributes IPVS_SVC_ATTR_STATS and IPVS_DEST_ATTR_STATS + * Used inside nested attributes IPVS_SVC_ATTR_STATS, IPVS_DEST_ATTR_STATS, + * IPVS_SVC_ATTR_STATS64 and IPVS_DEST_ATTR_STATS64. */ enum { IPVS_STATS_ATTR_UNSPEC = 0, diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index 437a6a4b125a..5efa54ae567c 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -170,6 +170,7 @@ enum { DEVCONF_ACCEPT_RA_FROM_LOCAL, DEVCONF_USE_OPTIMISTIC, DEVCONF_ACCEPT_RA_MTU, + DEVCONF_STABLE_SECRET, DEVCONF_MAX }; diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 3873a35509aa..2e35c61bbdd1 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -126,6 +126,7 @@ enum { NDTPA_PROXY_QLEN, /* u32 */ NDTPA_LOCKTIME, /* u64, msecs */ NDTPA_QUEUE_LENBYTES, /* u32 */ + NDTPA_MCAST_REPROBES, /* u32 */ __NDTPA_MAX }; #define NDTPA_MAX (__NDTPA_MAX - 1) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 832bc46db78b..5fa1cd04762e 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -1,19 +1,49 @@ #ifndef _LINUX_NF_TABLES_H #define _LINUX_NF_TABLES_H +#define NFT_TABLE_MAXNAMELEN 32 #define NFT_CHAIN_MAXNAMELEN 32 #define NFT_USERDATA_MAXLEN 256 +/** + * enum nft_registers - nf_tables registers + * + * nf_tables used to have five registers: a verdict register and four data + * registers of size 16. The data registers have been changed to 16 registers + * of size 4. For compatibility reasons, the NFT_REG_[1-4] registers still + * map to areas of size 16, the 4 byte registers are addressed using + * NFT_REG32_00 - NFT_REG32_15. 
+ */ enum nft_registers { NFT_REG_VERDICT, NFT_REG_1, NFT_REG_2, NFT_REG_3, NFT_REG_4, - __NFT_REG_MAX + __NFT_REG_MAX, + + NFT_REG32_00 = 8, + MFT_REG32_01, + NFT_REG32_02, + NFT_REG32_03, + NFT_REG32_04, + NFT_REG32_05, + NFT_REG32_06, + NFT_REG32_07, + NFT_REG32_08, + NFT_REG32_09, + NFT_REG32_10, + NFT_REG32_11, + NFT_REG32_12, + NFT_REG32_13, + NFT_REG32_14, + NFT_REG32_15, }; #define NFT_REG_MAX (__NFT_REG_MAX - 1) +#define NFT_REG_SIZE 16 +#define NFT_REG32_SIZE 4 + /** * enum nft_verdicts - nf_tables internal verdicts * @@ -207,12 +237,16 @@ enum nft_rule_compat_attributes { * @NFT_SET_CONSTANT: set contents may not change while bound * @NFT_SET_INTERVAL: set contains intervals * @NFT_SET_MAP: set is used as a dictionary + * @NFT_SET_TIMEOUT: set uses timeouts + * @NFT_SET_EVAL: set contains expressions for evaluation */ enum nft_set_flags { NFT_SET_ANONYMOUS = 0x1, NFT_SET_CONSTANT = 0x2, NFT_SET_INTERVAL = 0x4, NFT_SET_MAP = 0x8, + NFT_SET_TIMEOUT = 0x10, + NFT_SET_EVAL = 0x20, }; /** @@ -251,6 +285,8 @@ enum nft_set_desc_attributes { * @NFTA_SET_POLICY: selection policy (NLA_U32) * @NFTA_SET_DESC: set description (NLA_NESTED) * @NFTA_SET_ID: uniquely identifies a set in a transaction (NLA_U32) + * @NFTA_SET_TIMEOUT: default timeout value (NLA_U64) + * @NFTA_SET_GC_INTERVAL: garbage collection interval (NLA_U32) */ enum nft_set_attributes { NFTA_SET_UNSPEC, @@ -264,6 +300,8 @@ enum nft_set_attributes { NFTA_SET_POLICY, NFTA_SET_DESC, NFTA_SET_ID, + NFTA_SET_TIMEOUT, + NFTA_SET_GC_INTERVAL, __NFTA_SET_MAX }; #define NFTA_SET_MAX (__NFTA_SET_MAX - 1) @@ -283,12 +321,20 @@ enum nft_set_elem_flags { * @NFTA_SET_ELEM_KEY: key value (NLA_NESTED: nft_data) * @NFTA_SET_ELEM_DATA: data value of mapping (NLA_NESTED: nft_data_attributes) * @NFTA_SET_ELEM_FLAGS: bitmask of nft_set_elem_flags (NLA_U32) + * @NFTA_SET_ELEM_TIMEOUT: timeout value (NLA_U64) + * @NFTA_SET_ELEM_EXPIRATION: expiration time (NLA_U64) + * @NFTA_SET_ELEM_USERDATA: user data (NLA_BINARY) + * @NFTA_SET_ELEM_EXPR: expression (NLA_NESTED: nft_expr_attributes) */ enum nft_set_elem_attributes { NFTA_SET_ELEM_UNSPEC, NFTA_SET_ELEM_KEY, NFTA_SET_ELEM_DATA, NFTA_SET_ELEM_FLAGS, + NFTA_SET_ELEM_TIMEOUT, + NFTA_SET_ELEM_EXPIRATION, + NFTA_SET_ELEM_USERDATA, + NFTA_SET_ELEM_EXPR, __NFTA_SET_ELEM_MAX }; #define NFTA_SET_ELEM_MAX (__NFTA_SET_ELEM_MAX - 1) @@ -346,6 +392,9 @@ enum nft_data_attributes { }; #define NFTA_DATA_MAX (__NFTA_DATA_MAX - 1) +/* Maximum length of a value */ +#define NFT_DATA_VALUE_MAXLEN 64 + /** * enum nft_verdict_attributes - nf_tables verdict netlink attributes * @@ -504,6 +553,35 @@ enum nft_lookup_attributes { }; #define NFTA_LOOKUP_MAX (__NFTA_LOOKUP_MAX - 1) +enum nft_dynset_ops { + NFT_DYNSET_OP_ADD, + NFT_DYNSET_OP_UPDATE, +}; + +/** + * enum nft_dynset_attributes - dynset expression attributes + * + * @NFTA_DYNSET_SET_NAME: name of set the to add data to (NLA_STRING) + * @NFTA_DYNSET_SET_ID: uniquely identifier of the set in the transaction (NLA_U32) + * @NFTA_DYNSET_OP: operation (NLA_U32) + * @NFTA_DYNSET_SREG_KEY: source register of the key (NLA_U32) + * @NFTA_DYNSET_SREG_DATA: source register of the data (NLA_U32) + * @NFTA_DYNSET_TIMEOUT: timeout value for the new element (NLA_U64) + * @NFTA_DYNSET_EXPR: expression (NLA_NESTED: nft_expr_attributes) + */ +enum nft_dynset_attributes { + NFTA_DYNSET_UNSPEC, + NFTA_DYNSET_SET_NAME, + NFTA_DYNSET_SET_ID, + NFTA_DYNSET_OP, + NFTA_DYNSET_SREG_KEY, + NFTA_DYNSET_SREG_DATA, + NFTA_DYNSET_TIMEOUT, + NFTA_DYNSET_EXPR, + __NFTA_DYNSET_MAX, +}; +#define 
NFTA_DYNSET_MAX (__NFTA_DYNSET_MAX - 1) + /** * enum nft_payload_bases - nf_tables payload expression offset bases * diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h index ba993360dbe9..773dfe8924c7 100644 --- a/include/uapi/linux/netfilter_bridge/ebtables.h +++ b/include/uapi/linux/netfilter_bridge/ebtables.h @@ -12,9 +12,7 @@ #ifndef _UAPI__LINUX_BRIDGE_EFF_H #define _UAPI__LINUX_BRIDGE_EFF_H -#include <linux/if.h> #include <linux/netfilter_bridge.h> -#include <linux/if_ether.h> #define EBT_TABLE_MAXNAMELEN 32 #define EBT_CHAIN_MAXNAMELEN EBT_TABLE_MAXNAMELEN diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 68b294e83944..241220c43e86 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -25,6 +25,19 @@ * */ +/* + * This header file defines the userspace API to the wireless stack. Please + * be careful not to break things - i.e. don't move anything around or so + * unless you can demonstrate that it breaks neither API nor ABI. + * + * Additions to the API should be accompanied by actual implementations in + * an upstream driver, so that example implementations exist in case there + * are ever concerns about the precise semantics of the API or changes are + * needed, and to ensure that code for dead (no longer implemented) API + * can actually be identified and removed. + * Nonetheless, semantics should also be documented carefully in this file. + */ + #include <linux/types.h> #define NL80211_GENL_NAME "nl80211" @@ -1684,6 +1697,10 @@ enum nl80211_commands { * If set during scheduled scan start then the new scan req will be * owned by the netlink socket that created it and the scheduled scan will * be stopped when the socket is closed. + * If set during configuration of regulatory indoor operation then the + * regulatory indoor configuration would be owned by the netlink socket + * that configured the indoor setting, and the indoor operation would be + * cleared when the socket is closed. * * @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is * the TDLS link initiator. @@ -1737,8 +1754,12 @@ enum nl80211_commands { * should be contained in the result as the sum of the respective counters * over all channels. * - * @NL80211_ATTR_SCHED_SCAN_DELAY: delay before a scheduled scan (or a - * WoWLAN net-detect scan) is started, u32 in seconds. + * @NL80211_ATTR_SCHED_SCAN_DELAY: delay before the first cycle of a + * scheduled scan (or a WoWLAN net-detect scan) is started, u32 + * in seconds. + + * @NL80211_ATTR_REG_INDOOR: flag attribute, if set indicates that the device + * is operating in an indoor environment. * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined @@ -2107,6 +2128,8 @@ enum nl80211_attrs { NL80211_ATTR_SCHED_SCAN_DELAY, + NL80211_ATTR_REG_INDOOR, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3092,7 +3115,8 @@ enum nl80211_mesh_power_mode { * * @NL80211_MESHCONF_PLINK_TIMEOUT: If no tx activity is seen from a STA we've * established peering with for longer than this time (in seconds), then - * remove it from the STA's list of peers. Default is 30 minutes. + * remove it from the STA's list of peers. You may set this to 0 to disable + * the removal of the STA. Default is 30 minutes. 
* * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use */ @@ -3694,6 +3718,8 @@ struct nl80211_pattern_support { * @NL80211_WOWLAN_TRIG_ANY: wake up on any activity, do not really put * the chip into a special state -- works best with chips that have * support for low-power operation already (flag) + * Note that this mode is incompatible with all of the others, if + * any others are even supported by the device. * @NL80211_WOWLAN_TRIG_DISCONNECT: wake up on disconnect, the way disconnect * is detected is implementation-specific (flag) * @NL80211_WOWLAN_TRIG_MAGIC_PKT: wake up on magic packet (6x 0xff, followed @@ -4327,11 +4353,13 @@ enum nl80211_feature_flags { /** * enum nl80211_ext_feature_index - bit index of extended features. + * @NL80211_EXT_FEATURE_VHT_IBSS: This driver supports IBSS with VHT datarates. * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. */ enum nl80211_ext_feature_index { + NL80211_EXT_FEATURE_VHT_IBSS, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 25731dfb3fcc..bf08e76bf505 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -397,6 +397,8 @@ enum { TCA_BPF_CLASSID, TCA_BPF_OPS_LEN, TCA_BPF_OPS, + TCA_BPF_FD, + TCA_BPF_NAME, __TCA_BPF_MAX, }; diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 5cc5d66bf519..974db03f7b1a 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -134,6 +134,8 @@ enum { RTM_NEWNSID = 88, #define RTM_NEWNSID RTM_NEWNSID + RTM_DELNSID = 89, +#define RTM_DELNSID RTM_DELNSID RTM_GETNSID = 90, #define RTM_GETNSID RTM_GETNSID @@ -303,6 +305,9 @@ enum rtattr_type_t { RTA_TABLE, RTA_MARK, RTA_MFC_STATS, + RTA_VIA, + RTA_NEWDST, + RTA_PREF, __RTA_MAX }; @@ -332,6 +337,7 @@ struct rtnexthop { #define RTNH_F_DEAD 1 /* Nexthop is dead (used by multipath) */ #define RTNH_F_PERVASIVE 2 /* Do recursive gateway lookup */ #define RTNH_F_ONLINK 4 /* Gateway is forced on link */ +#define RTNH_F_EXTERNAL 8 /* Route installed externally */ /* Macros to handle hexthops */ @@ -344,6 +350,12 @@ struct rtnexthop { #define RTNH_SPACE(len) RTNH_ALIGN(RTNH_LENGTH(len)) #define RTNH_DATA(rtnh) ((struct rtattr*)(((char*)(rtnh)) + RTNH_LENGTH(0))) +/* RTA_VIA */ +struct rtvia { + __kernel_sa_family_t rtvia_family; + __u8 rtvia_addr[0]; +}; + /* RTM_CACHEINFO */ struct rta_cacheinfo { @@ -623,6 +635,10 @@ enum rtnetlink_groups { #define RTNLGRP_IPV6_NETCONF RTNLGRP_IPV6_NETCONF RTNLGRP_MDB, #define RTNLGRP_MDB RTNLGRP_MDB + RTNLGRP_MPLS_ROUTE, +#define RTNLGRP_MPLS_ROUTE RTNLGRP_MPLS_ROUTE + RTNLGRP_NSID, +#define RTNLGRP_NSID RTNLGRP_NSID __RTNLGRP_MAX }; #define RTNLGRP_MAX (__RTNLGRP_MAX - 1) diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h index 5288bd77e63b..07f17cc70bb3 100644 --- a/include/uapi/linux/tc_act/tc_bpf.h +++ b/include/uapi/linux/tc_act/tc_bpf.h @@ -24,6 +24,8 @@ enum { TCA_ACT_BPF_PARMS, TCA_ACT_BPF_OPS_LEN, TCA_ACT_BPF_OPS, + TCA_ACT_BPF_FD, + TCA_ACT_BPF_NAME, __TCA_ACT_BPF_MAX, }; #define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1) diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h index 8d723824ad69..d4c8f142ba63 100644 --- a/include/uapi/linux/tipc_netlink.h +++ b/include/uapi/linux/tipc_netlink.h @@ -83,11 +83,20 @@ enum { TIPC_NLA_BEARER_NAME, /* string */ TIPC_NLA_BEARER_PROP, /* nest */ TIPC_NLA_BEARER_DOMAIN, /* u32 
*/ + TIPC_NLA_BEARER_UDP_OPTS, /* nest */ __TIPC_NLA_BEARER_MAX, TIPC_NLA_BEARER_MAX = __TIPC_NLA_BEARER_MAX - 1 }; +enum { + TIPC_NLA_UDP_UNSPEC, + TIPC_NLA_UDP_LOCAL, /* sockaddr_storage */ + TIPC_NLA_UDP_REMOTE, /* sockaddr_storage */ + + __TIPC_NLA_UDP_MAX, + TIPC_NLA_UDP_MAX = __TIPC_NLA_UDP_MAX - 1 +}; /* Socket info */ enum { TIPC_NLA_SOCK_UNSPEC, diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index 02d5125a5ee8..2cd9e608d0d1 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -1,6 +1,7 @@ #ifndef _LINUX_XFRM_H #define _LINUX_XFRM_H +#include <linux/in6.h> #include <linux/types.h> /* All of the structures in this file may not change size as they are @@ -13,6 +14,7 @@ typedef union { __be32 a4; __be32 a6[4]; + struct in6_addr in6; } xfrm_address_t; /* Ident of a specific xfrm_state. It is used on input to lookup |
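Adding a struct in6_addr member to xfrm_address_t lets callers assign IPv6 addresses directly instead of casting a6[], as the xfrm_flowi_addr_get() and xfrm_addr_any() hunks above now do. A small sketch; the helper is hypothetical:

#include <linux/in6.h>
#include <linux/xfrm.h>

/* fill an xfrm address from an IPv6 address without casting */
static void demo_set_daddr(xfrm_address_t *daddr, const struct in6_addr *a)
{
        daddr->in6 = *a;        /* previously: *(struct in6_addr *)daddr->a6 = *a; */
}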