author     Pablo Neira Ayuso <pablo@netfilter.org>    2015-10-17 14:11:08 +0200
committer  Pablo Neira Ayuso <pablo@netfilter.org>    2015-10-17 14:28:03 +0200
commit     f0a0a978b66fea782a52b0a7075b3fa9ab27ad0a (patch)
tree       52ecc0eafbac697c6afaa542efe324984484120c /net
parent     c8d71d08aa23679f56e7072358383442c6ede352 (diff)
parent     4be3158abe1e02d24f82b34101e41d662fae2185 (diff)
download   linux-f0a0a978b66fea782a52b0a7075b3fa9ab27ad0a.tar.bz2
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
This merge resolves a conflict with 75aec9df3a78 ("bridge: Remove
br_nf_push_frag_xmit_sk"), part of Eric Biederman's effort to improve
netns support in the network stack, which reached upstream via David's
net-next tree.

Conflicts:
	net/bridge/br_netfilter_hooks.c

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
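For context on the conflict: Eric Biederman's series threads an explicit `struct net *` through the fragmentation output callbacks, so `br_nf_ip_fragment()` can pass `br_nf_push_frag_xmit` directly and the `br_nf_push_frag_xmit_sk` wrapper (which had to rediscover the netns from the skb) goes away; the br_netfilter_hooks.c hunk below shows exactly this. The following is a minimal, compilable userspace sketch of that refactor; the types and names here are hypothetical stand-ins, not kernel code:

```c
#include <stdio.h>

/* Stand-in types; only the callback shapes matter for this sketch. */
struct net { int id; };
struct sock { int fd; };
struct sk_buff { const char *data; struct net *dst_net; };

/* New-style output callback: the netns is passed explicitly. */
static int push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	(void)sk;
	printf("xmit in netns %d: %s\n", net->id, skb->data);
	return 0;
}

/* Old-style wrapper: it had to rediscover the netns from the skb
 * (dev_net(skb_dst(skb)->dev) in the real br_nf_push_frag_xmit_sk). */
static int push_frag_xmit_sk(struct sock *sk, struct sk_buff *skb)
{
	return push_frag_xmit(skb->dst_net, sk, skb);
}

/* Fragmentation helper after the refactor: it takes the netns and a
 * net-aware callback, so per-callsite wrappers are no longer needed. */
static int ip_fragment_model(struct net *net, struct sock *sk,
			     struct sk_buff *skb,
			     int (*output)(struct net *, struct sock *,
					   struct sk_buff *))
{
	return output(net, sk, skb);
}

int main(void)
{
	struct net netns = { .id = 1 };
	struct sock sk = { .fd = 3 };
	struct sk_buff skb = { .data = "frag", .dst_net = &netns };

	(void)push_frag_xmit_sk;	/* the wrapper is now dead code */
	return ip_fragment_model(&netns, &sk, &skb, push_frag_xmit);
}
```

The design point is that callers already know the netns, so passing it down is both cheaper and safer than deriving it from the skb's destination inside a wrapper.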
Diffstat (limited to 'net')
178 files changed, 5237 insertions(+), 1983 deletions(-)
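One pattern worth noting in the hci_core.c hunk below: the new `vendor_diag_write()` debugfs handler bounds the user copy to a local buffer, NUL-terminates it, and parses a boolean with `strtobool()`. The sketch below is a hedged userspace model of that flow; `parse_bool` is a simplified stand-in for `strtobool()` and a plain `memcpy()` stands in for `copy_from_user()`:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the kernel's strtobool(): accepts "0"/"1" and
 * "y"/"n" (either case); returns 0 on success, -1 on unparseable input. */
static int parse_bool(const char *s, bool *res)
{
	switch (s[0]) {
	case '1': case 'y': case 'Y': *res = true;  return 0;
	case '0': case 'n': case 'N': *res = false; return 0;
	default: return -1;
	}
}

/* Model of the vendor_diag_write() flow: bound the copy to the local
 * buffer, NUL-terminate, then parse. */
static long diag_write_model(const char *user_buf, size_t count, bool *enable)
{
	char buf[32];
	size_t buf_size = count < sizeof(buf) - 1 ? count : sizeof(buf) - 1;

	memcpy(buf, user_buf, buf_size);	/* copy_from_user() in the kernel */
	buf[buf_size] = '\0';

	if (parse_bool(buf, enable))
		return -1;		/* -EINVAL in the kernel handler */
	return (long)count;		/* consumed the whole write */
}

int main(void)
{
	bool enable = false;

	if (diag_write_model("Y\n", 2, &enable) > 0)
		printf("enable=%d\n", enable);
	return 0;
}
```

Capping the copy at `sizeof(buf) - 1` and writing the terminator afterwards is what makes arbitrary-length writes from userspace safe to parse as a string.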
diff --git a/net/6lowpan/core.c b/net/6lowpan/core.c index ae1896fa45e2..83b19e072224 100644 --- a/net/6lowpan/core.c +++ b/net/6lowpan/core.c @@ -17,6 +17,11 @@ void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype) { + dev->addr_len = EUI64_ADDR_LEN; + dev->type = ARPHRD_6LOWPAN; + dev->mtu = IPV6_MIN_MTU; + dev->priv_flags |= IFF_NO_QUEUE; + lowpan_priv(dev)->lltype = lltype; } EXPORT_SYMBOL(lowpan_netdev_setup); diff --git a/net/6lowpan/nhc.h b/net/6lowpan/nhc.h index ed44938eb5de..c249f17fa37b 100644 --- a/net/6lowpan/nhc.h +++ b/net/6lowpan/nhc.h @@ -8,8 +8,6 @@ #include <net/6lowpan.h> #include <net/ipv6.h> -#define LOWPAN_NHC_MAX_ID_LEN 1 - /** * LOWPAN_NHC - helper macro to generate nh id fields and lowpan_nhc struct * diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 131e79cde350..db73b8a1433f 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -35,7 +35,6 @@ static struct dentry *lowpan_enable_debugfs; static struct dentry *lowpan_control_debugfs; #define IFACE_NAME_TEMPLATE "bt%d" -#define EUI64_ADDR_LEN 8 struct skb_cb { struct in6_addr addr; @@ -674,13 +673,8 @@ static struct header_ops header_ops = { static void netdev_setup(struct net_device *dev) { - dev->addr_len = EUI64_ADDR_LEN; - dev->type = ARPHRD_6LOWPAN; - dev->hard_header_len = 0; dev->needed_tailroom = 0; - dev->mtu = IPV6_MIN_MTU; - dev->tx_queue_len = 0; dev->flags = IFF_RUNNING | IFF_POINTOPOINT | IFF_MULTICAST; dev->watchdog_timeo = 0; @@ -775,24 +769,7 @@ static struct l2cap_chan *chan_create(void) chan->chan_type = L2CAP_CHAN_CONN_ORIENTED; chan->mode = L2CAP_MODE_LE_FLOWCTL; - chan->omtu = 65535; - chan->imtu = chan->omtu; - - return chan; -} - -static struct l2cap_chan *chan_open(struct l2cap_chan *pchan) -{ - struct l2cap_chan *chan; - - chan = chan_create(); - if (!chan) - return NULL; - - chan->remote_mps = chan->omtu; - chan->mps = chan->omtu; - - chan->state = BT_CONNECTED; + chan->imtu = 1280; return chan; } @@ -919,7 +896,10 @@ static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan) { struct l2cap_chan *chan; - chan = chan_open(pchan); + chan = chan_create(); + if (!chan) + return NULL; + chan->ops = pchan->ops; BT_DBG("chan %p pchan %p", chan, pchan); @@ -1065,34 +1045,23 @@ static inline __u8 bdaddr_type(__u8 type) return BDADDR_LE_RANDOM; } -static struct l2cap_chan *chan_get(void) -{ - struct l2cap_chan *pchan; - - pchan = chan_create(); - if (!pchan) - return NULL; - - pchan->ops = &bt_6lowpan_chan_ops; - - return pchan; -} - static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type) { - struct l2cap_chan *pchan; + struct l2cap_chan *chan; int err; - pchan = chan_get(); - if (!pchan) + chan = chan_create(); + if (!chan) return -EINVAL; - err = l2cap_chan_connect(pchan, cpu_to_le16(L2CAP_PSM_IPSP), 0, + chan->ops = &bt_6lowpan_chan_ops; + + err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0, addr, dst_type); - BT_DBG("chan %p err %d", pchan, err); + BT_DBG("chan %p err %d", chan, err); if (err < 0) - l2cap_chan_put(pchan); + l2cap_chan_put(chan); return err; } @@ -1117,31 +1086,32 @@ static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type) static struct l2cap_chan *bt_6lowpan_listen(void) { bdaddr_t *addr = BDADDR_ANY; - struct l2cap_chan *pchan; + struct l2cap_chan *chan; int err; if (!enable_6lowpan) return NULL; - pchan = chan_get(); - if (!pchan) + chan = chan_create(); + if (!chan) return NULL; - pchan->state = BT_LISTEN; - pchan->src_type = BDADDR_LE_PUBLIC; + chan->ops = 
&bt_6lowpan_chan_ops; + chan->state = BT_LISTEN; + chan->src_type = BDADDR_LE_PUBLIC; - atomic_set(&pchan->nesting, L2CAP_NESTING_PARENT); + atomic_set(&chan->nesting, L2CAP_NESTING_PARENT); - BT_DBG("chan %p src type %d", pchan, pchan->src_type); + BT_DBG("chan %p src type %d", chan, chan->src_type); - err = l2cap_add_psm(pchan, addr, cpu_to_le16(L2CAP_PSM_IPSP)); + err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP)); if (err) { - l2cap_chan_put(pchan); + l2cap_chan_put(chan); BT_ERR("psm cannot be added err %d", err); return NULL; } - return pchan; + return chan; } static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index a7cdd99ec3f1..d2b3dd32d6cf 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -134,6 +134,66 @@ static const struct file_operations dut_mode_fops = { .llseek = default_llseek, }; +static ssize_t vendor_diag_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + char buf[3]; + + buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N'; + buf[1] = '\n'; + buf[2] = '\0'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + char buf[32]; + size_t buf_size = min(count, (sizeof(buf)-1)); + bool enable; + int err; + + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + if (strtobool(buf, &enable)) + return -EINVAL; + + hci_req_lock(hdev); + err = hdev->set_diag(hdev, enable); + hci_req_unlock(hdev); + + if (err < 0) + return err; + + if (enable) + hci_dev_set_flag(hdev, HCI_VENDOR_DIAG); + else + hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG); + + return count; +} + +static const struct file_operations vendor_diag_fops = { + .open = simple_open, + .read = vendor_diag_read, + .write = vendor_diag_write, + .llseek = default_llseek, +}; + +static void hci_debugfs_create_basic(struct hci_dev *hdev) +{ + debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev, + &dut_mode_fops); + + if (hdev->set_diag) + debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev, + &vendor_diag_fops); +} + /* ---- HCI requests ---- */ static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, @@ -850,13 +910,8 @@ static int __hci_init(struct hci_dev *hdev) if (err < 0) return err; - /* The Device Under Test (DUT) mode is special and available for - * all controller types. So just create it early on. 
- */ - if (hci_dev_test_flag(hdev, HCI_SETUP)) { - debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev, - &dut_mode_fops); - } + if (hci_dev_test_flag(hdev, HCI_SETUP)) + hci_debugfs_create_basic(hdev); err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT); if (err < 0) @@ -933,6 +988,9 @@ static int __hci_unconf_init(struct hci_dev *hdev) if (err < 0) return err; + if (hci_dev_test_flag(hdev, HCI_SETUP)) + hci_debugfs_create_basic(hdev); + return 0; } @@ -1385,6 +1443,9 @@ static int hci_dev_do_open(struct hci_dev *hdev) goto done; } + set_bit(HCI_RUNNING, &hdev->flags); + hci_notify(hdev, HCI_DEV_OPEN); + atomic_set(&hdev->cmd_cnt, 1); set_bit(HCI_INIT, &hdev->flags); @@ -1466,6 +1527,9 @@ static int hci_dev_do_open(struct hci_dev *hdev) hdev->sent_cmd = NULL; } + clear_bit(HCI_RUNNING, &hdev->flags); + hci_notify(hdev, HCI_DEV_CLOSE); + hdev->close(hdev); hdev->flags &= BIT(HCI_RAW); } @@ -1551,6 +1615,8 @@ static void hci_pend_le_actions_clear(struct hci_dev *hdev) int hci_dev_do_close(struct hci_dev *hdev) { + bool auto_off; + BT_DBG("%s %p", hdev->name, hdev); if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && @@ -1606,10 +1672,10 @@ int hci_dev_do_close(struct hci_dev *hdev) hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { - if (hdev->dev_type == HCI_BREDR) - mgmt_powered(hdev, 0); - } + auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF); + + if (!auto_off && hdev->dev_type == HCI_BREDR) + mgmt_powered(hdev, 0); hci_inquiry_cache_flush(hdev); hci_pend_le_actions_clear(hdev); @@ -1626,9 +1692,8 @@ int hci_dev_do_close(struct hci_dev *hdev) /* Reset device */ skb_queue_purge(&hdev->cmd_q); atomic_set(&hdev->cmd_cnt, 1); - if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) && - !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && - test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { + if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) && + !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { set_bit(HCI_INIT, &hdev->flags); __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT); clear_bit(HCI_INIT, &hdev->flags); @@ -1649,6 +1714,9 @@ int hci_dev_do_close(struct hci_dev *hdev) hdev->sent_cmd = NULL; } + clear_bit(HCI_RUNNING, &hdev->flags); + hci_notify(hdev, HCI_DEV_CLOSE); + /* After this point our queues are empty * and no tasks are scheduled. */ hdev->close(hdev); @@ -3471,6 +3539,13 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) return -ENXIO; } + if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT && + bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT && + bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) { + kfree_skb(skb); + return -EINVAL; + } + /* Incoming skb */ bt_cb(skb)->incoming = 1; @@ -3484,6 +3559,21 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) } EXPORT_SYMBOL(hci_recv_frame); +/* Receive diagnostic message from HCI drivers */ +int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb) +{ + /* Time stamp */ + __net_timestamp(skb); + + /* Mark as diagnostic packet and send to monitor */ + bt_cb(skb)->pkt_type = HCI_DIAG_PKT; + hci_send_to_monitor(hdev, skb); + + kfree_skb(skb); + return 0; +} +EXPORT_SYMBOL(hci_recv_diag); + /* ---- Interface to upper protocols ---- */ int hci_register_cb(struct hci_cb *cb) @@ -3530,6 +3620,11 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) /* Get rid of skb owner, prior to sending to the driver. 
*/ skb_orphan(skb); + if (!test_bit(HCI_RUNNING, &hdev->flags)) { + kfree_skb(skb); + return; + } + err = hdev->send(hdev, skb); if (err < 0) { BT_ERR("%s sending frame failed (%d)", hdev->name, err); @@ -3580,6 +3675,25 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; } +/* Send HCI command and wait for command commplete event */ +struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u32 timeout) +{ + struct sk_buff *skb; + + if (!test_bit(HCI_UP, &hdev->flags)) + return ERR_PTR(-ENETDOWN); + + bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); + + hci_req_lock(hdev); + skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout); + hci_req_unlock(hdev); + + return skb; +} +EXPORT_SYMBOL(hci_cmd_sync); + /* Send ACL data */ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) { diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 150556345263..9a100c1fd7b5 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -279,6 +279,9 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) else opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT); break; + case HCI_DIAG_PKT: + opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG); + break; default: return; } @@ -303,6 +306,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event) { struct hci_mon_hdr *hdr; struct hci_mon_new_index *ni; + struct hci_mon_index_info *ii; struct sk_buff *skb; __le16 opcode; @@ -312,7 +316,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event) if (!skb) return NULL; - ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE); + ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE); ni->type = hdev->dev_type; ni->bus = hdev->bus; bacpy(&ni->bdaddr, &hdev->bdaddr); @@ -329,6 +333,34 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event) opcode = cpu_to_le16(HCI_MON_DEL_INDEX); break; + case HCI_DEV_UP: + skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC); + if (!skb) + return NULL; + + ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE); + bacpy(&ii->bdaddr, &hdev->bdaddr); + ii->manufacturer = cpu_to_le16(hdev->manufacturer); + + opcode = cpu_to_le16(HCI_MON_INDEX_INFO); + break; + + case HCI_DEV_OPEN: + skb = bt_skb_alloc(0, GFP_ATOMIC); + if (!skb) + return NULL; + + opcode = cpu_to_le16(HCI_MON_OPEN_INDEX); + break; + + case HCI_DEV_CLOSE: + skb = bt_skb_alloc(0, GFP_ATOMIC); + if (!skb) + return NULL; + + opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX); + break; + default: return NULL; } @@ -358,6 +390,26 @@ static void send_monitor_replay(struct sock *sk) if (sock_queue_rcv_skb(sk, skb)) kfree_skb(skb); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + continue; + + skb = create_monitor_event(hdev, HCI_DEV_OPEN); + if (!skb) + continue; + + if (sock_queue_rcv_skb(sk, skb)) + kfree_skb(skb); + + if (!test_bit(HCI_UP, &hdev->flags)) + continue; + + skb = create_monitor_event(hdev, HCI_DEV_UP); + if (!skb) + continue; + + if (sock_queue_rcv_skb(sk, skb)) + kfree_skb(skb); } read_unlock(&hci_dev_list_lock); @@ -392,14 +444,12 @@ static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data) void hci_sock_dev_event(struct hci_dev *hdev, int event) { - struct hci_ev_si_device ev; - BT_DBG("hdev %s event %d", hdev->name, event); - /* Send event to monitor */ if (atomic_read(&monitor_promisc)) { struct sk_buff *skb; + /* Send event to monitor */ skb = create_monitor_event(hdev, event); if (skb) 
{ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, @@ -408,10 +458,14 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event) } } - /* Send event to sockets */ - ev.event = event; - ev.dev_id = hdev->id; - hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev); + if (event <= HCI_DEV_DOWN) { + struct hci_ev_si_device ev; + + /* Send event to sockets */ + ev.event = event; + ev.dev_id = hdev->id; + hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev); + } if (event == HCI_DEV_UNREG) { struct sock *sk; diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c index 8b4cdce3f62e..aa4cf64e32a6 100644 --- a/net/bluetooth/lib.c +++ b/net/bluetooth/lib.c @@ -151,6 +151,22 @@ void bt_info(const char *format, ...) } EXPORT_SYMBOL(bt_info); +void bt_warn(const char *format, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, format); + + vaf.fmt = format; + vaf.va = &args; + + pr_warn("%pV", &vaf); + + va_end(args); +} +EXPORT_SYMBOL(bt_warn); + void bt_err(const char *format, ...) { struct va_format vaf; diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index bdfb9544ca03..5e88d3e17546 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -56,7 +56,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) skb_reset_mac_header(skb); skb_pull(skb, ETH_HLEN); - if (!br_allowed_ingress(br, br_vlan_group(br), skb, &vid)) + if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid)) goto out; if (is_broadcast_ether_addr(dest)) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 7f7d55132dd5..c88bd8e8937e 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -134,11 +134,14 @@ static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr) static void fdb_del_external_learn(struct net_bridge_fdb_entry *f) { struct switchdev_obj_port_fdb fdb = { - .obj.id = SWITCHDEV_OBJ_ID_PORT_FDB, - .addr = f->addr.addr, + .obj = { + .id = SWITCHDEV_OBJ_ID_PORT_FDB, + .flags = SWITCHDEV_F_DEFER, + }, .vid = f->vlan_id, }; + ether_addr_copy(fdb.addr, f->addr.addr); switchdev_port_obj_del(f->dst->dev, &fdb.obj); } @@ -608,13 +611,14 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, } } -static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb) +static int fdb_to_nud(const struct net_bridge *br, + const struct net_bridge_fdb_entry *fdb) { if (fdb->is_local) return NUD_PERMANENT; else if (fdb->is_static) return NUD_NOARP; - else if (has_expired(fdb->dst->br, fdb)) + else if (has_expired(br, fdb)) return NUD_STALE; else return NUD_REACHABLE; @@ -640,7 +644,7 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br, ndm->ndm_flags = fdb->added_by_external_learn ? NTF_EXT_LEARNED : 0; ndm->ndm_type = 0; ndm->ndm_ifindex = fdb->dst ? 
fdb->dst->dev->ifindex : br->dev->ifindex; - ndm->ndm_state = fdb_to_nud(fdb); + ndm->ndm_state = fdb_to_nud(br, fdb); if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr)) goto nla_put_failure; @@ -785,7 +789,7 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, } } - if (fdb_to_nud(fdb) != state) { + if (fdb_to_nud(br, fdb) != state) { if (state & NUD_PERMANENT) { fdb->is_local = 1; if (!fdb->is_static) { @@ -846,8 +850,9 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], const unsigned char *addr, u16 vid, u16 nlh_flags) { struct net_bridge_vlan_group *vg; - struct net_bridge_port *p; + struct net_bridge_port *p = NULL; struct net_bridge_vlan *v; + struct net_bridge *br = NULL; int err = 0; if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) { @@ -860,26 +865,36 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return -EINVAL; } - p = br_port_get_rtnl(dev); - if (p == NULL) { - pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n", - dev->name); - return -EINVAL; + if (dev->priv_flags & IFF_EBRIDGE) { + br = netdev_priv(dev); + vg = br_vlan_group(br); + } else { + p = br_port_get_rtnl(dev); + if (!p) { + pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n", + dev->name); + return -EINVAL; + } + vg = nbp_vlan_group(p); } - vg = nbp_vlan_group(p); if (vid) { v = br_vlan_find(vg, vid); - if (!v) { - pr_info("bridge: RTM_NEWNEIGH with unconfigured " - "vlan %d on port %s\n", vid, dev->name); + if (!v || !br_vlan_should_use(v)) { + pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name); return -EINVAL; } /* VID was specified, so use it. */ - err = __br_fdb_add(ndm, p, addr, nlh_flags, vid); + if (dev->priv_flags & IFF_EBRIDGE) + err = br_fdb_insert(br, NULL, addr, vid); + else + err = __br_fdb_add(ndm, p, addr, nlh_flags, vid); } else { - err = __br_fdb_add(ndm, p, addr, nlh_flags, 0); + if (dev->priv_flags & IFF_EBRIDGE) + err = br_fdb_insert(br, NULL, addr, 0); + else + err = __br_fdb_add(ndm, p, addr, nlh_flags, 0); if (err || !vg || !vg->num_vlans) goto out; @@ -888,7 +903,13 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], * vlan on this port. 
*/ list_for_each_entry(v, &vg->vlan_list, vlist) { - err = __br_fdb_add(ndm, p, addr, nlh_flags, v->vid); + if (!br_vlan_should_use(v)) + continue; + if (dev->priv_flags & IFF_EBRIDGE) + err = br_fdb_insert(br, NULL, addr, v->vid); + else + err = __br_fdb_add(ndm, p, addr, nlh_flags, + v->vid); if (err) goto out; } @@ -898,6 +919,32 @@ out: return err; } +static int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, + u16 vid) +{ + struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; + struct net_bridge_fdb_entry *fdb; + + fdb = fdb_find(head, addr, vid); + if (!fdb) + return -ENOENT; + + fdb_delete(br, fdb); + return 0; +} + +static int __br_fdb_delete_by_addr(struct net_bridge *br, + const unsigned char *addr, u16 vid) +{ + int err; + + spin_lock_bh(&br->hash_lock); + err = fdb_delete_by_addr(br, addr, vid); + spin_unlock_bh(&br->hash_lock); + + return err; +} + static int fdb_delete_by_addr_and_port(struct net_bridge_port *p, const u8 *addr, u16 vlan) { @@ -931,35 +978,53 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], const unsigned char *addr, u16 vid) { struct net_bridge_vlan_group *vg; - struct net_bridge_port *p; + struct net_bridge_port *p = NULL; struct net_bridge_vlan *v; + struct net_bridge *br = NULL; int err; - p = br_port_get_rtnl(dev); - if (p == NULL) { - pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n", - dev->name); - return -EINVAL; + if (dev->priv_flags & IFF_EBRIDGE) { + br = netdev_priv(dev); + vg = br_vlan_group(br); + } else { + p = br_port_get_rtnl(dev); + if (!p) { + pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n", + dev->name); + return -EINVAL; + } + vg = nbp_vlan_group(p); } - vg = nbp_vlan_group(p); if (vid) { v = br_vlan_find(vg, vid); if (!v) { - pr_info("bridge: RTM_DELNEIGH with unconfigured " - "vlan %d on port %s\n", vid, dev->name); + pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name); return -EINVAL; } - err = __br_fdb_delete(p, addr, vid); + if (dev->priv_flags & IFF_EBRIDGE) + err = __br_fdb_delete_by_addr(br, addr, vid); + else + err = __br_fdb_delete(p, addr, vid); } else { err = -ENOENT; - err &= __br_fdb_delete(p, addr, 0); + if (dev->priv_flags & IFF_EBRIDGE) + err = __br_fdb_delete_by_addr(br, addr, 0); + else + err &= __br_fdb_delete(p, addr, 0); + if (!vg || !vg->num_vlans) goto out; - list_for_each_entry(v, &vg->vlan_list, vlist) - err &= __br_fdb_delete(p, addr, v->vid); + list_for_each_entry(v, &vg->vlan_list, vlist) { + if (!br_vlan_should_use(v)) + continue; + if (dev->priv_flags & IFF_EBRIDGE) + err = __br_fdb_delete_by_addr(br, addr, v->vid); + else + err &= __br_fdb_delete(p, addr, v->vid); + } } out: return err; diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 6d5ed795c3e2..a9d424e20229 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -32,7 +32,7 @@ static inline int should_deliver(const struct net_bridge_port *p, { struct net_bridge_vlan_group *vg; - vg = nbp_vlan_group(p); + vg = nbp_vlan_group_rcu(p); return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) && br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING; } @@ -80,7 +80,7 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) { struct net_bridge_vlan_group *vg; - vg = nbp_vlan_group(to); + vg = nbp_vlan_group_rcu(to); skb = br_handle_vlan(to->br, vg, skb); if (!skb) return; @@ -112,7 +112,7 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb) return; } - vg = nbp_vlan_group(to); + vg = 
nbp_vlan_group_rcu(to); skb = br_handle_vlan(to->br, vg, skb); if (!skb) return; diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 934cae9fa317..ec02f5869a78 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -24,6 +24,7 @@ #include <linux/slab.h> #include <net/sock.h> #include <linux/if_vlan.h> +#include <net/switchdev.h> #include "br_private.h" @@ -248,7 +249,10 @@ static void del_nbp(struct net_bridge_port *p) list_del_rcu(&p->list); + nbp_vlan_flush(p); br_fdb_delete_by_port(br, p, 0, 1); + switchdev_deferred_process(); + nbp_update_port_count(br); netdev_upper_dev_unlink(dev, br->dev); @@ -256,8 +260,6 @@ static void del_nbp(struct net_bridge_port *p) dev->priv_flags &= ~IFF_BRIDGE_PORT; netdev_rx_handler_unregister(dev); - /* use the synchronize_rcu done by netdev_rx_handler_unregister */ - nbp_vlan_flush(p); br_multicast_del_port(p); diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index f5c5a4500e2f..f7fba74108a9 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -44,7 +44,7 @@ static int br_pass_frame_up(struct sk_buff *skb) brstats->rx_bytes += skb->len; u64_stats_update_end(&brstats->syncp); - vg = br_vlan_group(br); + vg = br_vlan_group_rcu(br); /* Bridge is just like any other port. Make sure the * packet is allowed except in promisc modue when someone * may be running packet capture. @@ -140,7 +140,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb if (!p || p->state == BR_STATE_DISABLED) goto drop; - if (!br_allowed_ingress(p->br, nbp_vlan_group(p), skb, &vid)) + if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid)) goto out; /* insert into forwarding database after filtering to avoid spoofing */ diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index 8d423bc649b9..263b4de4de57 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c @@ -200,8 +200,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) return -EPERM; - br->ageing_time = clock_t_to_jiffies(args[1]); - return 0; + return br_set_ageing_time(br, args[1]); case BRCTL_GET_PORT_INFO: { diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 9d3051916a64..7ddbe7ec81d6 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -688,15 +688,10 @@ static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff nf_bridge_info_free(skb); return br_dev_queue_push_xmit(net, sk, skb); } -static int br_nf_push_frag_xmit_sk(struct sock *sk, struct sk_buff *skb) -{ - struct net *net = dev_net(skb_dst(skb)->dev); - return br_nf_push_frag_xmit(net, sk, skb); -} static int br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, - int (*output)(struct sock *, struct sk_buff *)) + int (*output)(struct net *, struct sock *, struct sk_buff *)) { unsigned int mtu = ip_skb_dst_mtu(skb); struct iphdr *iph = ip_hdr(skb); @@ -709,7 +704,7 @@ br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, return -EMSGSIZE; } - return ip_do_fragment(sk, skb, output); + return ip_do_fragment(net, sk, skb, output); } static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) @@ -757,7 +752,7 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff skb_copy_from_linear_data_offset(skb, -data->size, data->mac, data->size); - return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit_sk); + return br_nf_ip_fragment(net, sk, 
skb, br_nf_push_frag_xmit); } if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) { @@ -779,7 +774,7 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff data->size); if (v6ops) - return v6ops->fragment(sk, skb, br_nf_push_frag_xmit_sk); + return v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit); kfree_skb(skb); return -EMSGSIZE; diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index c64dcad11662..94b4de8c4646 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -34,7 +34,7 @@ static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg, pvid = br_get_pvid(vg); /* Count number of vlan infos */ - list_for_each_entry(v, &vg->vlan_list, vlist) { + list_for_each_entry_rcu(v, &vg->vlan_list, vlist) { flags = 0; /* only a context, bridge vlan not activated */ if (!br_vlan_should_use(v)) @@ -76,13 +76,19 @@ initvars: static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg, u32 filter_mask) { + int num_vlans; + if (!vg) return 0; if (filter_mask & RTEXT_FILTER_BRVLAN) return vg->num_vlans; - return __get_num_vlan_infos(vg, filter_mask); + rcu_read_lock(); + num_vlans = __get_num_vlan_infos(vg, filter_mask); + rcu_read_unlock(); + + return num_vlans; } static size_t br_get_link_af_size_filtered(const struct net_device *dev, @@ -96,10 +102,10 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev, rcu_read_lock(); if (br_port_exists(dev)) { p = br_port_get_rcu(dev); - vg = nbp_vlan_group(p); + vg = nbp_vlan_group_rcu(p); } else if (dev->priv_flags & IFF_EBRIDGE) { br = netdev_priv(dev); - vg = br_vlan_group(br); + vg = br_vlan_group_rcu(br); } num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask); rcu_read_unlock(); @@ -121,6 +127,20 @@ static inline size_t br_port_info_size(void) + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */ + + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_ROOT_ID */ + + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_BRIDGE_ID */ + + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_PORT */ + + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_COST */ + + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_ID */ + + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_NO */ + + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */ + + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_CONFIG_PENDING */ + + nla_total_size(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */ + + nla_total_size(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */ + + nla_total_size(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */ +#ifdef CONFIG_BRIDGE_IGMP_SNOOPING + + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MULTICAST_ROUTER */ +#endif + 0; } @@ -142,6 +162,7 @@ static int br_port_fill_attrs(struct sk_buff *skb, const struct net_bridge_port *p) { u8 mode = !!(p->flags & BR_HAIRPIN_MODE); + u64 timerval; if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) || nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) || @@ -154,8 +175,35 @@ static int br_port_fill_attrs(struct sk_buff *skb, nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)) || nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) || nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI, - !!(p->flags & BR_PROXYARP_WIFI))) + !!(p->flags & BR_PROXYARP_WIFI)) || + nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id), + &p->designated_root) || + nla_put(skb, IFLA_BRPORT_BRIDGE_ID, 
sizeof(struct ifla_bridge_id), + &p->designated_bridge) || + nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) || + nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) || + nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) || + nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) || + nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK, + p->topology_change_ack) || + nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending)) + return -EMSGSIZE; + + timerval = br_timer_value(&p->message_age_timer); + if (nla_put_u64(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval)) return -EMSGSIZE; + timerval = br_timer_value(&p->forward_delay_timer); + if (nla_put_u64(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval)) + return -EMSGSIZE; + timerval = br_timer_value(&p->hold_timer); + if (nla_put_u64(skb, IFLA_BRPORT_HOLD_TIMER, timerval)) + return -EMSGSIZE; + +#ifdef CONFIG_BRIDGE_IGMP_SNOOPING + if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER, + p->multicast_router)) + return -EMSGSIZE; +#endif return 0; } @@ -205,7 +253,7 @@ static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb, * if vlaninfo represents a range */ pvid = br_get_pvid(vg); - list_for_each_entry(v, &vg->vlan_list, vlist) { + list_for_each_entry_rcu(v, &vg->vlan_list, vlist) { flags = 0; if (!br_vlan_should_use(v)) continue; @@ -255,7 +303,7 @@ static int br_fill_ifvlaninfo(struct sk_buff *skb, u16 pvid; pvid = br_get_pvid(vg); - list_for_each_entry(v, &vg->vlan_list, vlist) { + list_for_each_entry_rcu(v, &vg->vlan_list, vlist) { if (!br_vlan_should_use(v)) continue; @@ -338,22 +386,27 @@ static int br_fill_ifinfo(struct sk_buff *skb, struct nlattr *af; int err; + /* RCU needed because of the VLAN locking rules (rcu || rtnl) */ + rcu_read_lock(); if (port) - vg = nbp_vlan_group(port); + vg = nbp_vlan_group_rcu(port); else - vg = br_vlan_group(br); + vg = br_vlan_group_rcu(br); - if (!vg || !vg->num_vlans) + if (!vg || !vg->num_vlans) { + rcu_read_unlock(); goto done; - + } af = nla_nest_start(skb, IFLA_AF_SPEC); - if (!af) + if (!af) { + rcu_read_unlock(); goto nla_put_failure; - + } if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) err = br_fill_ifvlaninfo_compressed(skb, vg); else err = br_fill_ifvlaninfo(skb, vg); + rcu_read_unlock(); if (err) goto nla_put_failure; nla_nest_end(skb, af); @@ -476,6 +529,9 @@ static int br_afspec(struct net_bridge *br, if (vinfo_start) return -EINVAL; vinfo_start = vinfo; + /* don't allow range of pvids */ + if (vinfo_start->flags & BRIDGE_VLAN_INFO_PVID) + return -EINVAL; continue; } @@ -521,6 +577,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 }, [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 }, + [IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 }, }; /* Change the state of the port and notify spanning tree */ @@ -592,6 +649,18 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) return err; } + if (tb[IFLA_BRPORT_FLUSH]) + br_fdb_delete_by_port(p->br, p, 0, 0); + +#ifdef CONFIG_BRIDGE_IGMP_SNOOPING + if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) { + u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]); + + err = br_multicast_set_port_router(p, mcast_router); + if (err) + return err; + } +#endif br_port_flags_change(p, old_flags ^ p->flags); return 0; } @@ -758,6 +827,27 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = { [IFLA_BR_PRIORITY] = { .type = NLA_U16 }, [IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 }, 
[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 }, + [IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 }, + [IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY, + .len = ETH_ALEN }, + [IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 }, + [IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 }, + [IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 }, + [IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 }, + [IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 }, + [IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 }, + [IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 }, + [IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 }, + [IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 }, + [IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 }, + [IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 }, + [IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 }, + [IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 }, + [IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 }, + [IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 }, + [IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 }, + [IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 }, + [IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 }, }; static int br_changelink(struct net_device *brdev, struct nlattr *tb[], @@ -788,9 +878,9 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[], } if (data[IFLA_BR_AGEING_TIME]) { - u32 ageing_time = nla_get_u32(data[IFLA_BR_AGEING_TIME]); - - br->ageing_time = clock_t_to_jiffies(ageing_time); + err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME])); + if (err) + return err; } if (data[IFLA_BR_STP_STATE]) { @@ -821,6 +911,158 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[], if (err) return err; } + + if (data[IFLA_BR_VLAN_DEFAULT_PVID]) { + __u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]); + + err = __br_vlan_set_default_pvid(br, defpvid); + if (err) + return err; + } +#endif + + if (data[IFLA_BR_GROUP_FWD_MASK]) { + u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]); + + if (fwd_mask & BR_GROUPFWD_RESTRICTED) + return -EINVAL; + br->group_fwd_mask = fwd_mask; + } + + if (data[IFLA_BR_GROUP_ADDR]) { + u8 new_addr[ETH_ALEN]; + + if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN) + return -EINVAL; + memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN); + if (!is_link_local_ether_addr(new_addr)) + return -EINVAL; + if (new_addr[5] == 1 || /* 802.3x Pause address */ + new_addr[5] == 2 || /* 802.3ad Slow protocols */ + new_addr[5] == 3) /* 802.1X PAE address */ + return -EINVAL; + spin_lock_bh(&br->lock); + memcpy(br->group_addr, new_addr, sizeof(br->group_addr)); + spin_unlock_bh(&br->lock); + br->group_addr_set = true; + br_recalculate_fwd_mask(br); + } + + if (data[IFLA_BR_FDB_FLUSH]) + br_fdb_flush(br); + +#ifdef CONFIG_BRIDGE_IGMP_SNOOPING + if (data[IFLA_BR_MCAST_ROUTER]) { + u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]); + + err = br_multicast_set_router(br, multicast_router); + if (err) + return err; + } + + if (data[IFLA_BR_MCAST_SNOOPING]) { + u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]); + + err = br_multicast_toggle(br, mcast_snooping); + if (err) + return err; + } + + if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) { + u8 val; + + val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]); + br->multicast_query_use_ifaddr = !!val; + } + + if (data[IFLA_BR_MCAST_QUERIER]) { + u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]); + + err = br_multicast_set_querier(br, mcast_querier); + if (err) + return err; + } + + if 
(data[IFLA_BR_MCAST_HASH_ELASTICITY]) { + u32 val = nla_get_u32(data[IFLA_BR_MCAST_HASH_ELASTICITY]); + + br->hash_elasticity = val; + } + + if (data[IFLA_BR_MCAST_HASH_MAX]) { + u32 hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]); + + err = br_multicast_set_hash_max(br, hash_max); + if (err) + return err; + } + + if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) { + u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]); + + br->multicast_last_member_count = val; + } + + if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) { + u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]); + + br->multicast_startup_query_count = val; + } + + if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) { + u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]); + + br->multicast_last_member_interval = clock_t_to_jiffies(val); + } + + if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) { + u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]); + + br->multicast_membership_interval = clock_t_to_jiffies(val); + } + + if (data[IFLA_BR_MCAST_QUERIER_INTVL]) { + u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]); + + br->multicast_querier_interval = clock_t_to_jiffies(val); + } + + if (data[IFLA_BR_MCAST_QUERY_INTVL]) { + u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]); + + br->multicast_query_interval = clock_t_to_jiffies(val); + } + + if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) { + u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]); + + br->multicast_query_response_interval = clock_t_to_jiffies(val); + } + + if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) { + u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]); + + br->multicast_startup_query_interval = clock_t_to_jiffies(val); + } +#endif +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + if (data[IFLA_BR_NF_CALL_IPTABLES]) { + u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]); + + br->nf_call_iptables = val ? true : false; + } + + if (data[IFLA_BR_NF_CALL_IP6TABLES]) { + u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]); + + br->nf_call_ip6tables = val ? true : false; + } + + if (data[IFLA_BR_NF_CALL_ARPTABLES]) { + u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]); + + br->nf_call_arptables = val ? 
true : false; + } #endif return 0; @@ -837,6 +1079,40 @@ static size_t br_get_size(const struct net_device *brdev) nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_FILTERING */ #ifdef CONFIG_BRIDGE_VLAN_FILTERING nla_total_size(sizeof(__be16)) + /* IFLA_BR_VLAN_PROTOCOL */ + nla_total_size(sizeof(u16)) + /* IFLA_BR_VLAN_DEFAULT_PVID */ +#endif + nla_total_size(sizeof(u16)) + /* IFLA_BR_GROUP_FWD_MASK */ + nla_total_size(sizeof(struct ifla_bridge_id)) + /* IFLA_BR_ROOT_ID */ + nla_total_size(sizeof(struct ifla_bridge_id)) + /* IFLA_BR_BRIDGE_ID */ + nla_total_size(sizeof(u16)) + /* IFLA_BR_ROOT_PORT */ + nla_total_size(sizeof(u32)) + /* IFLA_BR_ROOT_PATH_COST */ + nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE */ + nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */ + nla_total_size(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */ + nla_total_size(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */ + nla_total_size(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */ + nla_total_size(sizeof(u64)) + /* IFLA_BR_GC_TIMER */ + nla_total_size(ETH_ALEN) + /* IFLA_BR_GROUP_ADDR */ +#ifdef CONFIG_BRIDGE_IGMP_SNOOPING + nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_ROUTER */ + nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_SNOOPING */ + nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_QUERY_USE_IFADDR */ + nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_QUERIER */ + nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_ELASTICITY */ + nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_MAX */ + nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_LAST_MEMBER_CNT */ + nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_STARTUP_QUERY_CNT */ + nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */ + nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */ + nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */ + nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */ + nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */ + nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */ +#endif +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_IPTABLES */ + nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_IP6TABLES */ + nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_ARPTABLES */ #endif 0; } @@ -851,6 +1127,20 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) u32 stp_enabled = br->stp_enabled; u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]; u8 vlan_enabled = br_vlan_enabled(br); + u64 clockval; + + clockval = br_timer_value(&br->hello_timer); + if (nla_put_u64(skb, IFLA_BR_HELLO_TIMER, clockval)) + return -EMSGSIZE; + clockval = br_timer_value(&br->tcn_timer); + if (nla_put_u64(skb, IFLA_BR_TCN_TIMER, clockval)) + return -EMSGSIZE; + clockval = br_timer_value(&br->topology_change_timer); + if (nla_put_u64(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval)) + return -EMSGSIZE; + clockval = br_timer_value(&br->gc_timer); + if (nla_put_u64(skb, IFLA_BR_GC_TIMER, clockval)) + return -EMSGSIZE; if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) || nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) || @@ -858,11 +1148,66 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) || nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) || nla_put_u16(skb, IFLA_BR_PRIORITY, priority) || - nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled)) + nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) || + 
nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) || + nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id), + &br->bridge_id) || + nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id), + &br->designated_root) || + nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) || + nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) || + nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) || + nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED, + br->topology_change_detected) || + nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr)) return -EMSGSIZE; #ifdef CONFIG_BRIDGE_VLAN_FILTERING - if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto)) + if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) || + nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid)) + return -EMSGSIZE; +#endif +#ifdef CONFIG_BRIDGE_IGMP_SNOOPING + if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) || + nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING, !br->multicast_disabled) || + nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR, + br->multicast_query_use_ifaddr) || + nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) || + nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY, + br->hash_elasticity) || + nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) || + nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT, + br->multicast_last_member_count) || + nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT, + br->multicast_startup_query_count)) + return -EMSGSIZE; + + clockval = jiffies_to_clock_t(br->multicast_last_member_interval); + if (nla_put_u64(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval)) + return -EMSGSIZE; + clockval = jiffies_to_clock_t(br->multicast_membership_interval); + if (nla_put_u64(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval)) + return -EMSGSIZE; + clockval = jiffies_to_clock_t(br->multicast_querier_interval); + if (nla_put_u64(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval)) + return -EMSGSIZE; + clockval = jiffies_to_clock_t(br->multicast_query_interval); + if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval)) + return -EMSGSIZE; + clockval = jiffies_to_clock_t(br->multicast_query_response_interval); + if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval)) + return -EMSGSIZE; + clockval = jiffies_to_clock_t(br->multicast_startup_query_interval); + if (nla_put_u64(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval)) + return -EMSGSIZE; +#endif +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES, + br->nf_call_iptables ? 1 : 0) || + nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES, + br->nf_call_ip6tables ? 1 : 0) || + nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES, + br->nf_call_arptables ? 
1 : 0)) return -EMSGSIZE; #endif diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 4ed8308db66e..216018c76018 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -229,7 +229,7 @@ struct net_bridge_port struct netpoll *np; #endif #ifdef CONFIG_BRIDGE_VLAN_FILTERING - struct net_bridge_vlan_group *vlgrp; + struct net_bridge_vlan_group __rcu *vlgrp; #endif }; @@ -337,7 +337,7 @@ struct net_bridge struct kobject *ifobj; u32 auto_cnt; #ifdef CONFIG_BRIDGE_VLAN_FILTERING - struct net_bridge_vlan_group *vlgrp; + struct net_bridge_vlan_group __rcu *vlgrp; u8 vlan_enabled; __be16 vlan_proto; u16 default_pvid; @@ -400,7 +400,7 @@ static inline bool br_vlan_is_brentry(const struct net_bridge_vlan *v) return v->flags & BRIDGE_VLAN_INFO_BRENTRY; } -/* check if we should use the vlan entry is usable */ +/* check if we should use the vlan entry, returns false if it's only context */ static inline bool br_vlan_should_use(const struct net_bridge_vlan *v) { if (br_vlan_is_master(v)) { @@ -690,6 +690,7 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto); int br_vlan_set_proto(struct net_bridge *br, unsigned long val); int br_vlan_init(struct net_bridge *br); int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val); +int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid); int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags); int nbp_vlan_delete(struct net_bridge_port *port, u16 vid); void nbp_vlan_flush(struct net_bridge_port *port); @@ -699,13 +700,25 @@ int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask); static inline struct net_bridge_vlan_group *br_vlan_group( const struct net_bridge *br) { - return br->vlgrp; + return rtnl_dereference(br->vlgrp); } static inline struct net_bridge_vlan_group *nbp_vlan_group( const struct net_bridge_port *p) { - return p->vlgrp; + return rtnl_dereference(p->vlgrp); +} + +static inline struct net_bridge_vlan_group *br_vlan_group_rcu( + const struct net_bridge *br) +{ + return rcu_dereference(br->vlgrp); +} + +static inline struct net_bridge_vlan_group *nbp_vlan_group_rcu( + const struct net_bridge_port *p) +{ + return rcu_dereference(p->vlgrp); } /* Since bridge now depends on 8021Q module, but the time bridge sees the @@ -852,6 +865,19 @@ static inline struct net_bridge_vlan_group *nbp_vlan_group( { return NULL; } + +static inline struct net_bridge_vlan_group *br_vlan_group_rcu( + const struct net_bridge *br) +{ + return NULL; +} + +static inline struct net_bridge_vlan_group *nbp_vlan_group_rcu( + const struct net_bridge_port *p) +{ + return NULL; +} + #endif struct nf_br_ops { @@ -881,6 +907,7 @@ void __br_set_forward_delay(struct net_bridge *br, unsigned long t); int br_set_forward_delay(struct net_bridge *br, unsigned long x); int br_set_hello_time(struct net_bridge *br, unsigned long x); int br_set_max_age(struct net_bridge *br, unsigned long x); +int br_set_ageing_time(struct net_bridge *br, u32 ageing_time); /* br_stp_if.c */ diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index 3a982c02599a..80c34d70218c 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c @@ -41,13 +41,14 @@ void br_set_state(struct net_bridge_port *p, unsigned int state) { struct switchdev_attr attr = { .id = SWITCHDEV_ATTR_ID_PORT_STP_STATE, + .flags = SWITCHDEV_F_DEFER, .u.stp_state = state, }; int err; p->state = state; err = switchdev_port_attr_set(p->dev, &attr); - if (err && err != -EOPNOTSUPP) + if (err) br_warn(p->br, "error setting offload STP state on port %u(%s)\n", 
(unsigned int) p->port_no, p->dev->name); } @@ -566,6 +567,29 @@ int br_set_max_age(struct net_bridge *br, unsigned long val) } +int br_set_ageing_time(struct net_bridge *br, u32 ageing_time) +{ + struct switchdev_attr attr = { + .id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME, + .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP, + .u.ageing_time = ageing_time, + }; + unsigned long t = clock_t_to_jiffies(ageing_time); + int err; + + if (t < BR_MIN_AGEING_TIME || t > BR_MAX_AGEING_TIME) + return -ERANGE; + + err = switchdev_port_attr_set(br->dev, &attr); + if (err) + return err; + + br->ageing_time = t; + mod_timer(&br->gc_timer, jiffies); + + return 0; +} + void __br_set_forward_delay(struct net_bridge *br, unsigned long t) { br->bridge_forward_delay = t; diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index 4c97fc50fb70..8365bd53c421 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -102,8 +102,15 @@ static ssize_t ageing_time_show(struct device *d, static int set_ageing_time(struct net_bridge *br, unsigned long val) { - br->ageing_time = clock_t_to_jiffies(val); - return 0; + int ret; + + if (!rtnl_trylock()) + return restart_syscall(); + + ret = br_set_ageing_time(br, val); + rtnl_unlock(); + + return ret; } static ssize_t ageing_time_store(struct device *d, diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 1a79e199ca3b..5f0d0cc4744f 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -54,9 +54,9 @@ static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags) struct net_bridge_vlan_group *vg; if (br_vlan_is_master(v)) - vg = v->br->vlgrp; + vg = br_vlan_group(v->br); else - vg = v->port->vlgrp; + vg = nbp_vlan_group(v->port); if (flags & BRIDGE_VLAN_INFO_PVID) __vlan_add_pvid(vg, v->vid); @@ -72,38 +72,35 @@ static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags) static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br, u16 vid, u16 flags) { - const struct net_device_ops *ops = dev->netdev_ops; + struct switchdev_obj_port_vlan v = { + .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, + .flags = flags, + .vid_begin = vid, + .vid_end = vid, + }; int err; - /* If driver uses VLAN ndo ops, use 8021q to install vid - * on device, otherwise try switchdev ops to install vid. + /* Try switchdev op first. In case it is not supported, fallback to + * 8021q add. */ - - if (ops->ndo_vlan_rx_add_vid) { - err = vlan_vid_add(dev, br->vlan_proto, vid); - } else { - struct switchdev_obj_port_vlan v = { - .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, - .flags = flags, - .vid_begin = vid, - .vid_end = vid, - }; - - err = switchdev_port_obj_add(dev, &v.obj); - if (err == -EOPNOTSUPP) - err = 0; - } - + err = switchdev_port_obj_add(dev, &v.obj); + if (err == -EOPNOTSUPP) + return vlan_vid_add(dev, br->vlan_proto, vid); return err; } static void __vlan_add_list(struct net_bridge_vlan *v) { + struct net_bridge_vlan_group *vg; struct list_head *headp, *hpos; struct net_bridge_vlan *vent; - headp = br_vlan_is_master(v) ? 
&v->br->vlgrp->vlan_list : - &v->port->vlgrp->vlan_list; + if (br_vlan_is_master(v)) + vg = br_vlan_group(v->br); + else + vg = nbp_vlan_group(v->port); + + headp = &vg->vlan_list; list_for_each_prev(hpos, headp) { vent = list_entry(hpos, struct net_bridge_vlan, vlist); if (v->vid < vent->vid) @@ -111,39 +108,72 @@ static void __vlan_add_list(struct net_bridge_vlan *v) else break; } - list_add(&v->vlist, hpos); + list_add_rcu(&v->vlist, hpos); } static void __vlan_del_list(struct net_bridge_vlan *v) { - list_del(&v->vlist); + list_del_rcu(&v->vlist); } static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br, u16 vid) { - const struct net_device_ops *ops = dev->netdev_ops; - int err = 0; + struct switchdev_obj_port_vlan v = { + .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, + .vid_begin = vid, + .vid_end = vid, + }; + int err; - /* If driver uses VLAN ndo ops, use 8021q to delete vid - * on device, otherwise try switchdev ops to delete vid. + /* Try switchdev op first. In case it is not supported, fallback to + * 8021q del. */ - - if (ops->ndo_vlan_rx_kill_vid) { + err = switchdev_port_obj_del(dev, &v.obj); + if (err == -EOPNOTSUPP) { vlan_vid_del(dev, br->vlan_proto, vid); - } else { - struct switchdev_obj_port_vlan v = { - .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, - .vid_begin = vid, - .vid_end = vid, - }; + return 0; + } + return err; +} + +/* Returns a master vlan, if it didn't exist it gets created. In all cases a + * a reference is taken to the master vlan before returning. + */ +static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid) +{ + struct net_bridge_vlan_group *vg; + struct net_bridge_vlan *masterv; - err = switchdev_port_obj_del(dev, &v.obj); - if (err == -EOPNOTSUPP) - err = 0; + vg = br_vlan_group(br); + masterv = br_vlan_find(vg, vid); + if (!masterv) { + /* missing global ctx, create it now */ + if (br_vlan_add(br, vid, 0)) + return NULL; + masterv = br_vlan_find(vg, vid); + if (WARN_ON(!masterv)) + return NULL; } + atomic_inc(&masterv->refcnt); - return err; + return masterv; +} + +static void br_vlan_put_master(struct net_bridge_vlan *masterv) +{ + struct net_bridge_vlan_group *vg; + + if (!br_vlan_is_master(masterv)) + return; + + vg = br_vlan_group(masterv->br); + if (atomic_dec_and_test(&masterv->refcnt)) { + rhashtable_remove_fast(&vg->vlan_hash, + &masterv->vnode, br_vlan_rht_params); + __vlan_del_list(masterv); + kfree_rcu(masterv, rcu); + } } /* This is the shared VLAN add function which works for both ports and bridge @@ -161,7 +191,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags) { struct net_bridge_vlan *masterv = NULL; struct net_bridge_port *p = NULL; - struct rhashtable *tbl; + struct net_bridge_vlan_group *vg; struct net_device *dev; struct net_bridge *br; int err; @@ -169,17 +199,15 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags) if (br_vlan_is_master(v)) { br = v->br; dev = br->dev; - tbl = &br->vlgrp->vlan_hash; + vg = br_vlan_group(br); } else { p = v->port; br = p->br; dev = p->dev; - tbl = &p->vlgrp->vlan_hash; + vg = nbp_vlan_group(p); } if (p) { - u16 master_flags = flags; - /* Add VLAN to the device filter if it is supported. * This ensures tagged traffic enters the bridge when * promiscuous mode is disabled by br_manage_promisc(). 
@@ -190,57 +218,49 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags) /* need to work on the master vlan too */ if (flags & BRIDGE_VLAN_INFO_MASTER) { - master_flags |= BRIDGE_VLAN_INFO_BRENTRY; - err = br_vlan_add(br, v->vid, master_flags); + err = br_vlan_add(br, v->vid, flags | + BRIDGE_VLAN_INFO_BRENTRY); if (err) goto out_filt; } - masterv = br_vlan_find(br->vlgrp, v->vid); - if (!masterv) { - /* missing global ctx, create it now */ - err = br_vlan_add(br, v->vid, 0); - if (err) - goto out_filt; - masterv = br_vlan_find(br->vlgrp, v->vid); - WARN_ON(!masterv); - } - atomic_inc(&masterv->refcnt); + masterv = br_vlan_get_master(br, v->vid); + if (!masterv) + goto out_filt; v->brvlan = masterv; } - /* Add the dev mac only if it's a usable vlan */ + /* Add the dev mac and count the vlan only if it's usable */ if (br_vlan_should_use(v)) { err = br_fdb_insert(br, p, dev->dev_addr, v->vid); if (err) { br_err(br, "failed insert local address into bridge forwarding table\n"); goto out_filt; } + vg->num_vlans++; } - err = rhashtable_lookup_insert_fast(tbl, &v->vnode, br_vlan_rht_params); + err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode, + br_vlan_rht_params); if (err) goto out_fdb_insert; __vlan_add_list(v); __vlan_add_flags(v, flags); - if (br_vlan_is_master(v)) { - if (br_vlan_is_brentry(v)) - br->vlgrp->num_vlans++; - } else { - p->vlgrp->num_vlans++; - } out: return err; out_fdb_insert: - br_fdb_find_delete_local(br, p, br->dev->dev_addr, v->vid); + if (br_vlan_should_use(v)) { + br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid); + vg->num_vlans--; + } out_filt: if (p) { __vlan_vid_del(dev, br, v->vid); if (masterv) { - atomic_dec(&masterv->refcnt); + br_vlan_put_master(masterv); v->brvlan = NULL; } } @@ -253,16 +273,13 @@ static int __vlan_del(struct net_bridge_vlan *v) struct net_bridge_vlan *masterv = v; struct net_bridge_vlan_group *vg; struct net_bridge_port *p = NULL; - struct net_bridge *br; int err = 0; if (br_vlan_is_master(v)) { - br = v->br; - vg = v->br->vlgrp; + vg = br_vlan_group(v->br); } else { p = v->port; - br = p->br; - vg = v->port->vlgrp; + vg = nbp_vlan_group(v->port); masterv = v->brvlan; } @@ -273,13 +290,9 @@ static int __vlan_del(struct net_bridge_vlan *v) goto out; } - if (br_vlan_is_master(v)) { - if (br_vlan_is_brentry(v)) { - v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY; - br->vlgrp->num_vlans--; - } - } else { - p->vlgrp->num_vlans--; + if (br_vlan_should_use(v)) { + v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY; + vg->num_vlans--; } if (masterv != v) { @@ -289,25 +302,25 @@ static int __vlan_del(struct net_bridge_vlan *v) kfree_rcu(v, rcu); } - if (atomic_dec_and_test(&masterv->refcnt)) { - rhashtable_remove_fast(&masterv->br->vlgrp->vlan_hash, - &masterv->vnode, br_vlan_rht_params); - __vlan_del_list(masterv); - kfree_rcu(masterv, rcu); - } + br_vlan_put_master(masterv); out: return err; } -static void __vlan_flush(struct net_bridge_vlan_group *vlgrp) +static void __vlan_group_free(struct net_bridge_vlan_group *vg) +{ + WARN_ON(!list_empty(&vg->vlan_list)); + rhashtable_destroy(&vg->vlan_hash); + kfree(vg); +} + +static void __vlan_flush(struct net_bridge_vlan_group *vg) { struct net_bridge_vlan *vlan, *tmp; - __vlan_delete_pvid(vlgrp, vlgrp->pvid); - list_for_each_entry_safe(vlan, tmp, &vlgrp->vlan_list, vlist) + __vlan_delete_pvid(vg, vg->pvid); + list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) __vlan_del(vlan); - rhashtable_destroy(&vlgrp->vlan_hash); - kfree(vlgrp); } struct sk_buff *br_handle_vlan(struct net_bridge *br, @@ 
-469,7 +482,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid) if (!br->vlan_enabled) return true; - vg = p->vlgrp; + vg = nbp_vlan_group(p); if (!vg || !vg->num_vlans) return false; @@ -495,12 +508,14 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid) */ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags) { + struct net_bridge_vlan_group *vg; struct net_bridge_vlan *vlan; int ret; ASSERT_RTNL(); - vlan = br_vlan_find(br->vlgrp, vid); + vg = br_vlan_group(br); + vlan = br_vlan_find(vg, vid); if (vlan) { if (!br_vlan_is_brentry(vlan)) { /* Trying to change flags of non-existent bridge vlan */ @@ -515,7 +530,7 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags) } atomic_inc(&vlan->refcnt); vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY; - br->vlgrp->num_vlans++; + vg->num_vlans++; } __vlan_add_flags(vlan, flags); return 0; @@ -543,24 +558,33 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags) */ int br_vlan_delete(struct net_bridge *br, u16 vid) { + struct net_bridge_vlan_group *vg; struct net_bridge_vlan *v; ASSERT_RTNL(); - v = br_vlan_find(br->vlgrp, vid); + vg = br_vlan_group(br); + v = br_vlan_find(vg, vid); if (!v || !br_vlan_is_brentry(v)) return -ENOENT; br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid); + br_fdb_delete_by_port(br, NULL, vid, 0); return __vlan_del(v); } void br_vlan_flush(struct net_bridge *br) { + struct net_bridge_vlan_group *vg; + ASSERT_RTNL(); - __vlan_flush(br_vlan_group(br)); + vg = br_vlan_group(br); + __vlan_flush(vg); + RCU_INIT_POINTER(br->vlgrp, NULL); + synchronize_rcu(); + __vlan_group_free(vg); } struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid) @@ -627,6 +651,7 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto) int err = 0; struct net_bridge_port *p; struct net_bridge_vlan *vlan; + struct net_bridge_vlan_group *vg; __be16 oldproto; if (br->vlan_proto == proto) @@ -634,7 +659,8 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto) /* Add VLANs for the new proto to the device filter. */ list_for_each_entry(p, &br->port_list, list) { - list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist) { + vg = nbp_vlan_group(p); + list_for_each_entry(vlan, &vg->vlan_list, vlist) { err = vlan_vid_add(p->dev, proto, vlan->vid); if (err) goto err_filt; @@ -648,19 +674,23 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto) br_recalculate_fwd_mask(br); /* Delete VLANs for the old proto from the device filter. */ - list_for_each_entry(p, &br->port_list, list) - list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist) + list_for_each_entry(p, &br->port_list, list) { + vg = nbp_vlan_group(p); + list_for_each_entry(vlan, &vg->vlan_list, vlist) vlan_vid_del(p->dev, oldproto, vlan->vid); + } return 0; err_filt: - list_for_each_entry_continue_reverse(vlan, &p->vlgrp->vlan_list, vlist) + list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist) vlan_vid_del(p->dev, proto, vlan->vid); - list_for_each_entry_continue_reverse(p, &br->port_list, list) - list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist) + list_for_each_entry_continue_reverse(p, &br->port_list, list) { + vg = nbp_vlan_group(p); + list_for_each_entry(vlan, &vg->vlan_list, vlist) vlan_vid_del(p->dev, proto, vlan->vid); + } return err; } @@ -704,25 +734,31 @@ static void br_vlan_disable_default_pvid(struct net_bridge *br) /* Disable default_pvid on all ports where it is still * configured. 
*/ - if (vlan_default_pvid(br->vlgrp, pvid)) + if (vlan_default_pvid(br_vlan_group(br), pvid)) br_vlan_delete(br, pvid); list_for_each_entry(p, &br->port_list, list) { - if (vlan_default_pvid(p->vlgrp, pvid)) + if (vlan_default_pvid(nbp_vlan_group(p), pvid)) nbp_vlan_delete(p, pvid); } br->default_pvid = 0; } -static int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid) +int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid) { const struct net_bridge_vlan *pvent; + struct net_bridge_vlan_group *vg; struct net_bridge_port *p; u16 old_pvid; int err = 0; unsigned long *changed; + if (!pvid) { + br_vlan_disable_default_pvid(br); + return 0; + } + changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long), GFP_KERNEL); if (!changed) @@ -733,8 +769,9 @@ static int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid) /* Update default_pvid config only if we do not conflict with * user configuration. */ - pvent = br_vlan_find(br->vlgrp, pvid); - if ((!old_pvid || vlan_default_pvid(br->vlgrp, old_pvid)) && + vg = br_vlan_group(br); + pvent = br_vlan_find(vg, pvid); + if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) && (!pvent || !br_vlan_should_use(pvent))) { err = br_vlan_add(br, pvid, BRIDGE_VLAN_INFO_PVID | @@ -750,9 +787,10 @@ static int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid) /* Update default_pvid config only if we do not conflict with * user configuration. */ + vg = nbp_vlan_group(p); if ((old_pvid && - !vlan_default_pvid(p->vlgrp, old_pvid)) || - br_vlan_find(p->vlgrp, pvid)) + !vlan_default_pvid(vg, old_pvid)) || + br_vlan_find(vg, pvid)) continue; err = nbp_vlan_add(p, pvid, @@ -813,12 +851,7 @@ int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val) err = -EPERM; goto unlock; } - - if (!pvid) - br_vlan_disable_default_pvid(br); - else - err = __br_vlan_set_default_pvid(br, pvid); - + err = __br_vlan_set_default_pvid(br, pvid); unlock: rtnl_unlock(); return err; @@ -826,17 +859,19 @@ unlock: int br_vlan_init(struct net_bridge *br) { + struct net_bridge_vlan_group *vg; int ret = -ENOMEM; - br->vlgrp = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL); - if (!br->vlgrp) + vg = kzalloc(sizeof(*vg), GFP_KERNEL); + if (!vg) goto out; - ret = rhashtable_init(&br->vlgrp->vlan_hash, &br_vlan_rht_params); + ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params); if (ret) goto err_rhtbl; - INIT_LIST_HEAD(&br->vlgrp->vlan_list); + INIT_LIST_HEAD(&vg->vlan_list); br->vlan_proto = htons(ETH_P_8021Q); br->default_pvid = 1; + rcu_assign_pointer(br->vlgrp, vg); ret = br_vlan_add(br, 1, BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_BRENTRY); @@ -847,9 +882,9 @@ out: return ret; err_vlan_add: - rhashtable_destroy(&br->vlgrp->vlan_hash); + rhashtable_destroy(&vg->vlan_hash); err_rhtbl: - kfree(br->vlgrp); + kfree(vg); goto out; } @@ -867,9 +902,7 @@ int nbp_vlan_init(struct net_bridge_port *p) if (ret) goto err_rhtbl; INIT_LIST_HEAD(&vg->vlan_list); - /* Make sure everything's committed before publishing vg */ - smp_wmb(); - p->vlgrp = vg; + rcu_assign_pointer(p->vlgrp, vg); if (p->br->default_pvid) { ret = nbp_vlan_add(p, p->br->default_pvid, BRIDGE_VLAN_INFO_PVID | @@ -898,7 +931,7 @@ int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags) ASSERT_RTNL(); - vlan = br_vlan_find(port->vlgrp, vid); + vlan = br_vlan_find(nbp_vlan_group(port), vid); if (vlan) { __vlan_add_flags(vlan, flags); return 0; @@ -926,7 +959,7 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid) ASSERT_RTNL(); 
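The *_vlan_init() and *_vlan_flush() hunks in this file replace the open-coded smp_wmb() publish of the vlan group with rcu_assign_pointer(), and tear groups down with an unpublish / synchronize_rcu() / free sequence. The following is a compilable userspace model of the publish half, using C11 release/acquire atomics in place of the RCU primitives; the names are illustrative and the grace-period wait is only marked by a comment, since plain userspace has no synchronize_rcu().

#include <stdatomic.h>
#include <stdlib.h>

struct vlan_group {
	int num_vlans;
};

static _Atomic(struct vlan_group *) vlgrp;

static int group_init(void)
{
	struct vlan_group *vg = calloc(1, sizeof(*vg));

	if (!vg)
		return -1;
	/* initialize fully *before* publishing: the release store plays the
	 * role of rcu_assign_pointer() (and of the smp_wmb() it replaces) */
	atomic_store_explicit(&vlgrp, vg, memory_order_release);
	return 0;
}

static void group_flush(void)
{
	struct vlan_group *vg =
		atomic_load_explicit(&vlgrp, memory_order_acquire);

	if (!vg)
		return;
	/* unpublish first so no new reader can find the group ... */
	atomic_store_explicit(&vlgrp, NULL, memory_order_release);
	/* ... the kernel then waits with synchronize_rcu() before freeing */
	free(vg);
}

int main(void)
{
	if (group_init())
		return 1;
	group_flush();
	return 0;
}

The ordering matters in both directions: initialize before the release store so a reader never sees a half-built group, and unpublish before freeing so no new reader can pick up a pointer to memory about to go away.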
- v = br_vlan_find(port->vlgrp, vid); + v = br_vlan_find(nbp_vlan_group(port), vid); if (!v) return -ENOENT; br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid); @@ -937,12 +970,13 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid) void nbp_vlan_flush(struct net_bridge_port *port) { - struct net_bridge_vlan *vlan; + struct net_bridge_vlan_group *vg; ASSERT_RTNL(); - list_for_each_entry(vlan, &port->vlgrp->vlan_list, vlist) - vlan_vid_del(port->dev, port->br->vlan_proto, vlan->vid); - - __vlan_flush(nbp_vlan_group(port)); + vg = nbp_vlan_group(port); + __vlan_flush(vg); + RCU_INIT_POINTER(port->vlgrp, NULL); + synchronize_rcu(); + __vlan_group_free(vg); } diff --git a/net/can/bcm.c b/net/can/bcm.c index a1ba6875c2a2..6863310d6973 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -96,7 +96,7 @@ struct bcm_op { canid_t can_id; u32 flags; unsigned long frames_abs, frames_filtered; - struct timeval ival1, ival2; + struct bcm_timeval ival1, ival2; struct hrtimer timer, thrtimer; struct tasklet_struct tsklet, thrtsklet; ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg; @@ -131,6 +131,11 @@ static inline struct bcm_sock *bcm_sk(const struct sock *sk) return (struct bcm_sock *)sk; } +static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv) +{ + return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); +} + #define CFSIZ sizeof(struct can_frame) #define OPSIZ sizeof(struct bcm_op) #define MHSIZ sizeof(struct bcm_msg_head) @@ -953,8 +958,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, op->count = msg_head->count; op->ival1 = msg_head->ival1; op->ival2 = msg_head->ival2; - op->kt_ival1 = timeval_to_ktime(msg_head->ival1); - op->kt_ival2 = timeval_to_ktime(msg_head->ival2); + op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1); + op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2); /* disable an active timer due to zero values? */ if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64) @@ -1134,8 +1139,8 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, /* set timer value */ op->ival1 = msg_head->ival1; op->ival2 = msg_head->ival2; - op->kt_ival1 = timeval_to_ktime(msg_head->ival1); - op->kt_ival2 = timeval_to_ktime(msg_head->ival2); + op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1); + op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2); /* disable an active timer due to zero value? 
*/ if (!op->kt_ival1.tv64) diff --git a/net/core/dev.c b/net/core/dev.c index 323c04edd779..1225b4be8ed6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2974,6 +2974,7 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) new_index = skb_tx_hash(dev, skb); if (queue_index != new_index && sk && + sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache)) sk_tx_queue_set(sk, new_index); @@ -5345,6 +5346,12 @@ static int __netdev_upper_dev_link(struct net_device *dev, changeupper_info.master = master; changeupper_info.linking = true; + ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev, + &changeupper_info.info); + ret = notifier_to_errno(ret); + if (ret) + return ret; + ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private, master); if (ret) @@ -5487,6 +5494,9 @@ void netdev_upper_dev_unlink(struct net_device *dev, changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev; changeupper_info.linking = false; + call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev, + &changeupper_info.info); + __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); /* Here is the tricky part. We must remove all dev's lower diff --git a/net/core/dst.c b/net/core/dst.c index 0771c8cb9307..2a1818065e12 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -144,12 +144,12 @@ loop: mutex_unlock(&dst_gc_mutex); } -int dst_discard_sk(struct sock *sk, struct sk_buff *skb) +int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb) { kfree_skb(skb); return 0; } -EXPORT_SYMBOL(dst_discard_sk); +EXPORT_SYMBOL(dst_discard_out); const u32 dst_default_metrics[RTAX_MAX + 1] = { /* This initializer is needed to force linker to place this variable @@ -177,7 +177,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops, dst->xfrm = NULL; #endif dst->input = dst_discard; - dst->output = dst_discard_sk; + dst->output = dst_discard_out; dst->error = 0; dst->obsolete = initial_obsolete; dst->header_len = 0; @@ -224,7 +224,7 @@ static void ___dst_free(struct dst_entry *dst) */ if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) { dst->input = dst_discard; - dst->output = dst_discard_sk; + dst->output = dst_discard_out; } dst->obsolete = DST_OBSOLETE_DEAD; } @@ -352,7 +352,7 @@ static struct dst_ops md_dst_ops = { .family = AF_UNSPEC, }; -static int dst_md_discard_sk(struct sock *sk, struct sk_buff *skb) +static int dst_md_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb) { WARN_ONCE(1, "Attempting to call output on metadata dst\n"); kfree_skb(skb); @@ -375,7 +375,7 @@ static void __metadata_dst_init(struct metadata_dst *md_dst, u8 optslen) DST_METADATA | DST_NOCACHE | DST_NOCOUNT); dst->input = dst_md_discard; - dst->output = dst_md_discard_sk; + dst->output = dst_md_discard_out; memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst)); } @@ -430,7 +430,7 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev, if (!unregister) { dst->input = dst_discard; - dst->output = dst_discard_sk; + dst->output = dst_discard_out; } else { dst->dev = dev_net(dst->dev)->loopback_dev; dev_hold(dst->dev); diff --git a/net/core/filter.c b/net/core/filter.c index 53a5036fb32d..0b00094932ab 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -56,10 +56,10 @@ * @sk: sock associated with &sk_buff * @skb: buffer to filter * - * Run the filter code and then cut skb->data to correct size returned by - * SK_RUN_FILTER. If pkt_len is 0 we toss packet. 
If skb->len is smaller + * Run the eBPF program and then cut skb->data to correct size returned by + * the program. If pkt_len is 0 we toss packet. If skb->len is smaller * than pkt_len we keep whole skb->data. This is the socket level - * wrapper to SK_RUN_FILTER. It returns 0 if the packet should + * wrapper to BPF_PROG_RUN. It returns 0 if the packet should * be accepted or -EPERM if the packet should be tossed. * */ @@ -83,7 +83,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb) rcu_read_lock(); filter = rcu_dereference(sk->sk_filter); if (filter) { - unsigned int pkt_len = SK_RUN_FILTER(filter, skb); + unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb); err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; } @@ -149,12 +149,6 @@ static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) return raw_smp_processor_id(); } -/* note that this only generates 32-bit random numbers */ -static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) -{ - return prandom_u32(); -} - static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg, struct bpf_insn *insn_buf) { @@ -313,7 +307,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp, *insn = BPF_EMIT_CALL(__get_raw_cpu_id); break; case SKF_AD_OFF + SKF_AD_RANDOM: - *insn = BPF_EMIT_CALL(__get_random_u32); + *insn = BPF_EMIT_CALL(bpf_user_rnd_u32); + bpf_user_rnd_init_once(); break; } break; @@ -1084,16 +1079,18 @@ EXPORT_SYMBOL_GPL(bpf_prog_create); * @pfp: the unattached filter that is created * @fprog: the filter program * @trans: post-classic verifier transformation handler + * @save_orig: save classic BPF program * * This function effectively does the same as bpf_prog_create(), only * that it builds up its insns buffer from user space provided buffer. * It also allows for passing a bpf_aux_classic_check_t handler. */ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, - bpf_aux_classic_check_t trans) + bpf_aux_classic_check_t trans, bool save_orig) { unsigned int fsize = bpf_classic_proglen(fprog); struct bpf_prog *fp; + int err; /* Make sure new filter is there and in the right amounts. */ if (fprog->filter == NULL) @@ -1109,12 +1106,16 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, } fp->len = fprog->len; - /* Since unattached filters are not copied back to user - * space through sk_get_filter(), we do not need to hold - * a copy here, and can spare us the work. - */ fp->orig_prog = NULL; + if (save_orig) { + err = bpf_prog_store_orig_filter(fp, fprog); + if (err) { + __bpf_prog_free(fp); + return -ENOMEM; + } + } + /* bpf_prepare_filter() already takes care of freeing * memory in case something goes wrong. 
*/ @@ -1456,6 +1457,7 @@ int skb_do_redirect(struct sk_buff *skb) return dev_forward_skb(dev, skb); skb->dev = dev; + skb_sender_cpu_clear(skb); return dev_queue_xmit(skb); } @@ -1638,7 +1640,8 @@ sk_filter_func_proto(enum bpf_func_id func_id) case BPF_FUNC_ktime_get_ns: return &bpf_ktime_get_ns_proto; case BPF_FUNC_trace_printk: - return bpf_get_trace_printk_proto(); + if (capable(CAP_SYS_ADMIN)) + return bpf_get_trace_printk_proto(); default: return NULL; } @@ -1734,7 +1737,8 @@ static bool tc_cls_act_is_valid_access(int off, int size, static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg, int src_reg, int ctx_off, - struct bpf_insn *insn_buf) + struct bpf_insn *insn_buf, + struct bpf_prog *prog) { struct bpf_insn *insn = insn_buf; @@ -1825,6 +1829,7 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg, offsetof(struct __sk_buff, cb[4]): BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20); + prog->cb_access = 1; ctx_off -= offsetof(struct __sk_buff, cb[0]); ctx_off += offsetof(struct sk_buff, cb); ctx_off += offsetof(struct qdisc_skb_cb, data); diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c index dfb1a9ca0835..299cfc24d888 100644 --- a/net/core/lwtunnel.c +++ b/net/core/lwtunnel.c @@ -180,7 +180,7 @@ int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b) } EXPORT_SYMBOL(lwtunnel_cmp_encap); -int lwtunnel_output(struct sock *sk, struct sk_buff *skb) +int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); const struct lwtunnel_encap_ops *ops; @@ -199,7 +199,7 @@ int lwtunnel_output(struct sock *sk, struct sk_buff *skb) rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[lwtstate->type]); if (likely(ops && ops->output)) - ret = ops->output(sk, skb); + ret = ops->output(net, sk, skb); rcu_read_unlock(); if (ret == -EOPNOTSUPP) diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 8c57fdf4d68e..1aa8437ed6c4 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -2249,6 +2249,14 @@ static bool neigh_master_filtered(struct net_device *dev, int master_idx) return false; } +static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx) +{ + if (filter_idx && dev->ifindex != filter_idx) + return true; + + return false; +} + static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, struct netlink_callback *cb) { @@ -2259,16 +2267,19 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, int rc, h, s_h = cb->args[1]; int idx, s_idx = idx = cb->args[2]; struct neigh_hash_table *nht; - int filter_master_idx = 0; + int filter_master_idx = 0, filter_idx = 0; unsigned int flags = NLM_F_MULTI; int err; err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL); if (!err) { + if (tb[NDA_IFINDEX]) + filter_idx = nla_get_u32(tb[NDA_IFINDEX]); + if (tb[NDA_MASTER]) filter_master_idx = nla_get_u32(tb[NDA_MASTER]); - if (filter_master_idx) + if (filter_idx || filter_master_idx) flags |= NLM_F_DUMP_FILTERED; } @@ -2283,6 +2294,8 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, n = rcu_dereference_bh(n->next)) { if (!net_eq(dev_net(n->dev), net)) continue; + if (neigh_ifindex_filtered(n->dev, filter_idx)) + continue; if (neigh_master_filtered(n->dev, filter_master_idx)) continue; if (idx < s_idx) diff --git a/net/core/request_sock.c b/net/core/request_sock.c index 15c853806518..5d26056b6d8f 100644 --- a/net/core/request_sock.c +++ b/net/core/request_sock.c @@ -45,7 +45,6 
@@ void reqsk_queue_alloc(struct request_sock_queue *queue) queue->fastopenq.rskq_rst_head = NULL; queue->fastopenq.rskq_rst_tail = NULL; queue->fastopenq.qlen = 0; - queue->fastopenq.max_qlen = 0; queue->rskq_accept_head = NULL; } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index b2258a36d894..24775953fa68 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -96,7 +96,7 @@ int rtnl_is_locked(void) EXPORT_SYMBOL(rtnl_is_locked); #ifdef CONFIG_PROVE_LOCKING -int lockdep_rtnl_is_held(void) +bool lockdep_rtnl_is_held(void) { return lockdep_is_held(&rtnl_mutex); } diff --git a/net/core/sock.c b/net/core/sock.c index 7dd1263e4c24..dcc7d62654d5 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -988,6 +988,10 @@ set_rcvbuf: sk->sk_max_pacing_rate); break; + case SO_INCOMING_CPU: + sk->sk_incoming_cpu = val; + break; + default: ret = -ENOPROTOOPT; break; @@ -1852,6 +1856,32 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, } EXPORT_SYMBOL(sock_alloc_send_skb); +int sock_cmsg_send(struct sock *sk, struct msghdr *msg, + struct sockcm_cookie *sockc) +{ + struct cmsghdr *cmsg; + + for_each_cmsghdr(cmsg, msg) { + if (!CMSG_OK(msg, cmsg)) + return -EINVAL; + if (cmsg->cmsg_level != SOL_SOCKET) + continue; + switch (cmsg->cmsg_type) { + case SO_MARK: + if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) + return -EPERM; + if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) + return -EINVAL; + sockc->mark = *(u32 *)CMSG_DATA(cmsg); + break; + default: + return -EINVAL; + } + } + return 0; +} +EXPORT_SYMBOL(sock_cmsg_send); + /* On 32bit arches, an skb frag is limited to 2^15 */ #define SKB_FRAG_PAGE_ORDER get_order(32768) @@ -2353,6 +2383,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) sk->sk_max_pacing_rate = ~0U; sk->sk_pacing_rate = ~0U; + sk->sk_incoming_cpu = -1; /* * Before updating sk_refcnt, we must commit prior changes to memory * (Documentation/RCU/rculist_nulls.txt for details) diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index 817622f3dbb7..0c1d58d43f67 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c @@ -1,3 +1,5 @@ +/* License: GPL */ + #include <linux/mutex.h> #include <linux/socket.h> #include <linux/skbuff.h> @@ -323,14 +325,4 @@ static int __init sock_diag_init(void) BUG_ON(!broadcast_wq); return register_pernet_subsys(&diag_net_ops); } - -static void __exit sock_diag_exit(void) -{ - unregister_pernet_subsys(&diag_net_ops); - destroy_workqueue(broadcast_wq); -} - -module_init(sock_diag_init); -module_exit(sock_diag_exit); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_SOCK_DIAG); +device_initcall(sock_diag_init); diff --git a/net/core/utils.c b/net/core/utils.c index 3dffce953c39..3d17ca8b4744 100644 --- a/net/core/utils.c +++ b/net/core/utils.c @@ -348,52 +348,3 @@ void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb, } } EXPORT_SYMBOL(inet_proto_csum_replace_by_diff); - -struct __net_random_once_work { - struct work_struct work; - struct static_key *key; -}; - -static void __net_random_once_deferred(struct work_struct *w) -{ - struct __net_random_once_work *work = - container_of(w, struct __net_random_once_work, work); - BUG_ON(!static_key_enabled(work->key)); - static_key_slow_dec(work->key); - kfree(work); -} - -static void __net_random_once_disable_jump(struct static_key *key) -{ - struct __net_random_once_work *w; - - w = kmalloc(sizeof(*w), GFP_ATOMIC); - if (!w) - return; - - INIT_WORK(&w->work, __net_random_once_deferred); - w->key = key; - 
schedule_work(&w->work); -} - -bool __net_get_random_once(void *buf, int nbytes, bool *done, - struct static_key *once_key) -{ - static DEFINE_SPINLOCK(lock); - unsigned long flags; - - spin_lock_irqsave(&lock, flags); - if (*done) { - spin_unlock_irqrestore(&lock, flags); - return false; - } - - get_random_bytes(buf, nbytes); - *done = true; - spin_unlock_irqrestore(&lock, flags); - - __net_random_once_disable_jump(once_key); - - return true; -} -EXPORT_SYMBOL(__net_get_random_once); diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 5b21f6f88e97..4f6c1862dfd2 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -13,6 +13,7 @@ * You should have received a copy of the GNU General Public License along with * this program; if not, see <http://www.gnu.org/licenses/>. * + * Description: Data Center Bridging netlink interface * Author: Lucy Liu <lucy.liu@intel.com> */ @@ -24,7 +25,7 @@ #include <linux/dcbnl.h> #include <net/dcbevent.h> #include <linux/rtnetlink.h> -#include <linux/module.h> +#include <linux/init.h> #include <net/sock.h> /* Data Center Bridging (DCB) is a collection of Ethernet enhancements @@ -48,10 +49,6 @@ * features for capable devices. */ -MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>"); -MODULE_DESCRIPTION("Data Center Bridging netlink interface"); -MODULE_LICENSE("GPL"); - /**************** DCB attribute policies *************************************/ /* DCB netlink attributes policy */ @@ -1935,19 +1932,6 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del) } EXPORT_SYMBOL(dcb_ieee_delapp); -static void dcb_flushapp(void) -{ - struct dcb_app_type *app; - struct dcb_app_type *tmp; - - spin_lock_bh(&dcb_lock); - list_for_each_entry_safe(app, tmp, &dcb_app_list, list) { - list_del(&app->list); - kfree(app); - } - spin_unlock_bh(&dcb_lock); -} - static int __init dcbnl_init(void) { INIT_LIST_HEAD(&dcb_app_list); @@ -1957,12 +1941,4 @@ static int __init dcbnl_init(void) return 0; } -module_init(dcbnl_init); - -static void __exit dcbnl_exit(void) -{ - rtnl_unregister(PF_UNSPEC, RTM_GETDCB); - rtnl_unregister(PF_UNSPEC, RTM_SETDCB); - dcb_flushapp(); -} -module_exit(dcbnl_exit); +device_initcall(dcbnl_init); diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index e1f823451565..923f5a180134 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -325,13 +325,13 @@ void dccp_send_close(struct sock *sk, const int active); int dccp_invalid_packet(struct sk_buff *skb); u32 dccp_sample_rtt(struct sock *sk, long delta); -static inline int dccp_bad_service_code(const struct sock *sk, +static inline bool dccp_bad_service_code(const struct sock *sk, const __be32 service) { const struct dccp_sock *dp = dccp_sk(sk); if (dp->dccps_service == service) - return 0; + return false; return !dccp_list_has_service(dp->dccps_service_list, service); } diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 8910c9567719..59bc180b02d8 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -208,7 +208,6 @@ void dccp_req_err(struct sock *sk, u64 seq) if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) { NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); - reqsk_put(req); } else { /* * Still in RESPOND, just remove it silently. 
@@ -218,6 +217,7 @@ void dccp_req_err(struct sock *sk, u64 seq) */ inet_csk_reqsk_queue_drop(req->rsk_listener, req); } + reqsk_put(req); } EXPORT_SYMBOL(dccp_req_err); @@ -595,7 +595,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) goto drop; - req = inet_reqsk_alloc(&dccp_request_sock_ops, sk); + req = inet_reqsk_alloc(&dccp_request_sock_ops, sk, true); if (req == NULL) goto drop; @@ -799,15 +799,10 @@ static int dccp_v4_rcv(struct sk_buff *skb) DCCP_SKB_CB(skb)->dccpd_ack_seq); } - /* Step 2: - * Look up flow ID in table and get corresponding socket */ +lookup: sk = __inet_lookup_skb(&dccp_hashinfo, skb, dh->dccph_sport, dh->dccph_dport); - /* - * Step 2: - * If no socket ... - */ - if (sk == NULL) { + if (!sk) { dccp_pr_debug("failed to look up flow ID in table and " "get corresponding socket\n"); goto no_dccp_socket; @@ -830,8 +825,12 @@ static int dccp_v4_rcv(struct sk_buff *skb) struct sock *nsk = NULL; sk = req->rsk_listener; - if (sk->sk_state == DCCP_LISTEN) + if (likely(sk->sk_state == DCCP_LISTEN)) { nsk = dccp_check_req(sk, skb, req); + } else { + inet_csk_reqsk_queue_drop_and_put(sk, req); + goto lookup; + } if (!nsk) { reqsk_put(req); goto discard_it; diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 1361a3f45df7..d9cc731f2619 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -319,7 +319,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) goto drop; - req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk); + req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk, true); if (req == NULL) goto drop; @@ -656,16 +656,11 @@ static int dccp_v6_rcv(struct sk_buff *skb) else DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb); - /* Step 2: - * Look up flow ID in table and get corresponding socket */ +lookup: sk = __inet6_lookup_skb(&dccp_hashinfo, skb, dh->dccph_sport, dh->dccph_dport, inet6_iif(skb)); - /* - * Step 2: - * If no socket ... - */ - if (sk == NULL) { + if (!sk) { dccp_pr_debug("failed to look up flow ID in table and " "get corresponding socket\n"); goto no_dccp_socket; @@ -688,8 +683,12 @@ static int dccp_v6_rcv(struct sk_buff *skb) struct sock *nsk = NULL; sk = req->rsk_listener; - if (sk->sk_state == DCCP_LISTEN) + if (likely(sk->sk_state == DCCP_LISTEN)) { nsk = dccp_check_req(sk, skb, req); + } else { + inet_csk_reqsk_queue_drop_and_put(sk, req); + goto lookup; + } if (!nsk) { reqsk_put(req); goto discard_it; diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c index 4b02dd300f50..849805e7af52 100644 --- a/net/decnet/dn_nsp_out.c +++ b/net/decnet/dn_nsp_out.c @@ -85,7 +85,7 @@ static void dn_nsp_send(struct sk_buff *skb) if (dst) { try_again: skb_dst_set(skb, dst); - dst_output(skb->sk, skb); + dst_output(&init_net, skb->sk, skb); return; } @@ -582,7 +582,7 @@ static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, * associations. 
*/ skb_dst_set(skb, dst_clone(dst)); - dst_output(skb->sk, skb); + dst_output(&init_net, skb->sk, skb); } diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 0c491fc0e254..607a14f20d88 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -744,7 +744,7 @@ out: return NET_RX_DROP; } -static int dn_output(struct sock *sk, struct sk_buff *skb) +static int dn_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct dn_route *rt = (struct dn_route *)dst; @@ -830,7 +830,7 @@ drop: * Used to catch bugs. This should never normally get * called. */ -static int dn_rt_bug_sk(struct sock *sk, struct sk_buff *skb) +static int dn_rt_bug_out(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dn_skb_cb *cb = DN_SKB_CB(skb); @@ -1467,7 +1467,7 @@ make_route: rt->n = neigh; rt->dst.lastuse = jiffies; - rt->dst.output = dn_rt_bug_sk; + rt->dst.output = dn_rt_bug_out; switch (res.type) { case RTN_UNICAST: rt->dst.input = dn_forward; diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index c59fa5d9c22c..aa398bcef9e3 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -326,8 +326,8 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) ret = dsa_slave_create(ds, parent, i, pd->port_names[i]); if (ret < 0) { - netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s)\n", - index, i, pd->port_names[i]); + netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s): %d\n", + index, i, pd->port_names[i], ret); ret = 0; } } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 5f65f929902e..b0b8da0f5af8 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -346,12 +346,15 @@ static int dsa_slave_port_fdb_add(struct net_device *dev, { struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->parent; - int ret = -EOPNOTSUPP; + int ret; + + if (!ds->drv->port_fdb_prepare || !ds->drv->port_fdb_add) + return -EOPNOTSUPP; if (switchdev_trans_ph_prepare(trans)) - ret = ds->drv->port_fdb_add ? 0 : -EOPNOTSUPP; + ret = ds->drv->port_fdb_prepare(ds, p->port, fdb, trans); else - ret = ds->drv->port_fdb_add(ds, p->port, fdb->addr, fdb->vid); + ret = ds->drv->port_fdb_add(ds, p->port, fdb, trans); return ret; } @@ -364,7 +367,7 @@ static int dsa_slave_port_fdb_del(struct net_device *dev, int ret = -EOPNOTSUPP; if (ds->drv->port_fdb_del) - ret = ds->drv->port_fdb_del(ds, p->port, fdb->addr, fdb->vid); + ret = ds->drv->port_fdb_del(ds, p->port, fdb); return ret; } @@ -390,7 +393,7 @@ static int dsa_slave_port_fdb_dump(struct net_device *dev, if (ret < 0) break; - fdb->addr = addr; + ether_addr_copy(fdb->addr, addr); fdb->vid = vid; fdb->ndm_state = is_static ? 
NUD_NOARP : NUD_REACHABLE; @@ -450,7 +453,7 @@ static int dsa_slave_stp_update(struct net_device *dev, u8 state) } static int dsa_slave_port_attr_set(struct net_device *dev, - struct switchdev_attr *attr, + const struct switchdev_attr *attr, struct switchdev_trans *trans) { struct dsa_slave_priv *p = netdev_priv(dev); @@ -1026,8 +1029,10 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p, struct dsa_switch *ds = p->parent; p->phy = ds->slave_mii_bus->phy_map[addr]; - if (!p->phy) + if (!p->phy) { + netdev_err(slave_dev, "no phy at %d\n", addr); return -ENODEV; + } /* Use already configured phy mode */ if (p->phy_interface == PHY_INTERFACE_MODE_NA) @@ -1061,7 +1066,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, */ ret = of_phy_register_fixed_link(port_dn); if (ret) { - netdev_err(slave_dev, "failed to register fixed PHY\n"); + netdev_err(slave_dev, "failed to register fixed PHY: %d\n", ret); return ret; } phy_is_fixed = true; @@ -1072,17 +1077,20 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, phy_flags = ds->drv->get_phy_flags(ds, p->port); if (phy_dn) { - ret = of_mdio_parse_addr(&slave_dev->dev, phy_dn); + int phy_id = of_mdio_parse_addr(&slave_dev->dev, phy_dn); + /* If this PHY address is part of phys_mii_mask, which means * that we need to divert reads and writes to/from it, then we * want to bind this device using the slave MII bus created by * DSA to make that happen. */ - if (!phy_is_fixed && ret >= 0 && - (ds->phys_mii_mask & (1 << ret))) { - ret = dsa_slave_phy_connect(p, slave_dev, ret); - if (ret) + if (!phy_is_fixed && phy_id >= 0 && + (ds->phys_mii_mask & (1 << phy_id))) { + ret = dsa_slave_phy_connect(p, slave_dev, phy_id); + if (ret) { + netdev_err(slave_dev, "failed to connect to phy%d: %d\n", phy_id, ret); return ret; + } } else { p->phy = of_phy_connect(slave_dev, phy_dn, dsa_slave_adjust_link, @@ -1099,8 +1107,10 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, */ if (!p->phy) { ret = dsa_slave_phy_connect(p, slave_dev, p->port); - if (ret) + if (ret) { + netdev_err(slave_dev, "failed to connect to port %d: %d\n", p->port, ret); return ret; + } } else { netdev_info(slave_dev, "attached PHY at address %d [%s]\n", p->phy->addr, p->phy->drv->name); @@ -1212,6 +1222,7 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, ret = dsa_slave_phy_setup(p, slave_dev); if (ret) { + netdev_err(master, "error %d setting up slave phy\n", ret); free_netdev(slave_dev); return ret; } @@ -1265,7 +1276,7 @@ int dsa_slave_netdevice_event(struct notifier_block *unused, goto out; err = dsa_slave_master_changed(dev); - if (err) + if (err && err != -EOPNOTSUPP) netdev_warn(dev, "failed to reflect master change\n"); break; diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c index 9f0cfa598e3a..20c49c724ba0 100644 --- a/net/ieee802154/6lowpan/core.c +++ b/net/ieee802154/6lowpan/core.c @@ -101,14 +101,9 @@ static const struct net_device_ops lowpan_netdev_ops = { static void lowpan_setup(struct net_device *ldev) { - ldev->addr_len = IEEE802154_ADDR_LEN; memset(ldev->broadcast, 0xff, IEEE802154_ADDR_LEN); - ldev->type = ARPHRD_6LOWPAN; - /* Frame Control + Sequence Number + Address fields + Security Header */ - ldev->hard_header_len = 2 + 1 + 20 + 14; - ldev->needed_tailroom = 2; /* FCS */ - ldev->mtu = IPV6_MIN_MTU; - ldev->priv_flags |= IFF_NO_QUEUE; + /* We need an ipv6hdr as minimum len when calling xmit */ + ldev->hard_header_len = sizeof(struct ipv6hdr); ldev->flags = IFF_BROADCAST | IFF_MULTICAST; 
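The dsa_slave_port_fdb_add() hunks above move DSA onto switchdev's two-phase transaction: a prepare phase that validates and reserves resources and may fail, followed by a commit phase that by contract must not. Here is a small self-contained model of that contract; the types and driver ops are hypothetical stand-ins, not the switchdev API.

#include <stdbool.h>
#include <stdio.h>

struct trans { bool ph_prepare; };
struct fdb_entry { unsigned char addr[6]; unsigned short vid; };

static int drv_fdb_prepare(const struct fdb_entry *fdb, struct trans *t)
{
	(void)fdb; (void)t;
	/* validate and reserve hardware resources; this phase may fail */
	return 0;
}

static void drv_fdb_commit(const struct fdb_entry *fdb, struct trans *t)
{
	(void)fdb; (void)t;
	/* program the hardware; by contract this phase cannot fail */
}

static int port_fdb_add(const struct fdb_entry *fdb, struct trans *t)
{
	if (t->ph_prepare)
		return drv_fdb_prepare(fdb, t);
	drv_fdb_commit(fdb, t);
	return 0;
}

int main(void)
{
	struct fdb_entry e = { { 0x02, 0, 0, 0, 0, 0x01 }, 1 };
	struct trans t = { .ph_prepare = true };

	if (port_fdb_add(&e, &t)) /* phase 1: may return an error */
		return 1;
	t.ph_prepare = false;
	port_fdb_add(&e, &t);     /* phase 2: must succeed */
	puts("fdb entry committed");
	return 0;
}

Splitting the operation this way is what lets a driver reject an entry cleanly (no -EOPNOTSUPP from halfway through a hardware write) while keeping the commit path error-free.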
ldev->netdev_ops = &lowpan_netdev_ops; @@ -156,6 +151,15 @@ static int lowpan_newlink(struct net *src_net, struct net_device *ldev, lowpan_dev_info(ldev)->wdev = wdev; /* Set the lowpan hardware address to the wpan hardware address. */ memcpy(ldev->dev_addr, wdev->dev_addr, IEEE802154_ADDR_LEN); + /* We need headroom for possible wpan_dev_hard_header call and tailroom + * for encryption/fcs handling. The lowpan interface will replace + * the IPv6 header with 6LoWPAN header. At worst case the 6LoWPAN + * header has LOWPAN_IPHC_MAX_HEADER_LEN more bytes than the IPv6 + * header. + */ + ldev->needed_headroom = LOWPAN_IPHC_MAX_HEADER_LEN + + wdev->needed_headroom; + ldev->needed_tailroom = wdev->needed_tailroom; lowpan_netdev_setup(ldev, LOWPAN_LLTYPE_IEEE802154); diff --git a/net/ieee802154/6lowpan/rx.c b/net/ieee802154/6lowpan/rx.c index b1fd47d2802b..65d55e05516c 100644 --- a/net/ieee802154/6lowpan/rx.c +++ b/net/ieee802154/6lowpan/rx.c @@ -29,6 +29,8 @@ static int lowpan_give_skb_to_device(struct sk_buff *skb) { skb->protocol = htons(ETH_P_IPV6); + skb->dev->stats.rx_packets++; + skb->dev->stats.rx_bytes += skb->len; return netif_rx(skb); } diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c index 54939d031ea5..62a21f6f021e 100644 --- a/net/ieee802154/6lowpan/tx.c +++ b/net/ieee802154/6lowpan/tx.c @@ -10,6 +10,7 @@ #include <net/6lowpan.h> #include <net/ieee802154_netdev.h> +#include <net/mac802154.h> #include "6lowpan_i.h" @@ -36,6 +37,13 @@ lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb) sizeof(struct lowpan_addr_info)); } +/* This callback will be called from AF_PACKET and IPv6 stack, the AF_PACKET + * sockets gives an 8 byte array for addresses only! + * + * TODO I think AF_PACKET DGRAM (sending/receiving) RAW (sending) makes no + * sense here. We should disable it, the right use-case would be AF_INET6 + * RAW/DGRAM sockets. 
+ */ int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev, unsigned short type, const void *_daddr, const void *_saddr, unsigned int len) @@ -71,27 +79,33 @@ int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev, static struct sk_buff* lowpan_alloc_frag(struct sk_buff *skb, int size, - const struct ieee802154_hdr *master_hdr) + const struct ieee802154_hdr *master_hdr, bool frag1) { struct net_device *wdev = lowpan_dev_info(skb->dev)->wdev; struct sk_buff *frag; int rc; - frag = alloc_skb(wdev->hard_header_len + wdev->needed_tailroom + size, + frag = alloc_skb(wdev->needed_headroom + wdev->needed_tailroom + size, GFP_ATOMIC); if (likely(frag)) { frag->dev = wdev; frag->priority = skb->priority; - skb_reserve(frag, wdev->hard_header_len); + skb_reserve(frag, wdev->needed_headroom); skb_reset_network_header(frag); *mac_cb(frag) = *mac_cb(skb); - rc = dev_hard_header(frag, wdev, 0, &master_hdr->dest, - &master_hdr->source, size); - if (rc < 0) { - kfree_skb(frag); - return ERR_PTR(rc); + if (frag1) { + memcpy(skb_put(frag, skb->mac_len), + skb_mac_header(skb), skb->mac_len); + } else { + rc = wpan_dev_hard_header(frag, wdev, + &master_hdr->dest, + &master_hdr->source, size); + if (rc < 0) { + kfree_skb(frag); + return ERR_PTR(rc); + } } } else { frag = ERR_PTR(-ENOMEM); @@ -103,13 +117,13 @@ lowpan_alloc_frag(struct sk_buff *skb, int size, static int lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr, u8 *frag_hdr, int frag_hdrlen, - int offset, int len) + int offset, int len, bool frag1) { struct sk_buff *frag; raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen); - frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr); + frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr, frag1); if (IS_ERR(frag)) return PTR_ERR(frag); @@ -148,7 +162,8 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev, rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr, LOWPAN_FRAG1_HEAD_SIZE, 0, - frag_len + skb_network_header_len(skb)); + frag_len + skb_network_header_len(skb), + true); if (rc) { pr_debug("%s unable to send FRAG1 packet (tag: %d)", __func__, ntohs(frag_tag)); @@ -169,7 +184,7 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev, rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr, LOWPAN_FRAGN_HEAD_SIZE, skb_offset, - frag_len); + frag_len, false); if (rc) { pr_debug("%s unable to send a FRAGN packet. 
(tag: %d, offset: %d)\n", __func__, ntohs(frag_tag), skb_offset); @@ -177,6 +192,8 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev, } } while (skb_unprocessed > frag_cap); + ldev->stats.tx_packets++; + ldev->stats.tx_bytes += dgram_size; consume_skb(skb); return NET_XMIT_SUCCESS; @@ -228,8 +245,8 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *ldev, cb->ackreq = wpan_dev->ackreq; } - return dev_hard_header(skb, lowpan_dev_info(ldev)->wdev, ETH_P_IPV6, - (void *)&da, (void *)&sa, 0); + return wpan_dev_hard_header(skb, lowpan_dev_info(ldev)->wdev, &da, &sa, + 0); } netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev) @@ -240,6 +257,8 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev) pr_debug("package xmit\n"); + WARN_ON_ONCE(skb->len > IPV6_MIN_MTU); + /* We must take a copy of the skb before we modify/replace the ipv6 * header as the header could be used elsewhere */ @@ -262,6 +281,8 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev) if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) { skb->dev = lowpan_dev_info(ldev)->wdev; + ldev->stats.tx_packets++; + ldev->stats.tx_bytes += dgram_size; return dev_queue_xmit(skb); } else { netdev_tx_t rc; diff --git a/net/ieee802154/Kconfig b/net/ieee802154/Kconfig index 1370d5b0041b..188135bcb803 100644 --- a/net/ieee802154/Kconfig +++ b/net/ieee802154/Kconfig @@ -12,6 +12,11 @@ menuconfig IEEE802154 if IEEE802154 +config IEEE802154_NL802154_EXPERIMENTAL + bool "IEEE 802.15.4 experimental netlink support" + ---help--- + Adds experimental netlink support for nl802154. + config IEEE802154_SOCKET tristate "IEEE 802.15.4 socket interface" default y diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c index b0248e934230..c35fdfa6d04e 100644 --- a/net/ieee802154/core.c +++ b/net/ieee802154/core.c @@ -95,6 +95,18 @@ cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx) return result; } +struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx) +{ + struct cfg802154_registered_device *rdev; + + ASSERT_RTNL(); + + rdev = cfg802154_rdev_by_wpan_phy_idx(wpan_phy_idx); + if (!rdev) + return NULL; + return &rdev->wpan_phy; +} + struct wpan_phy * wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size) { diff --git a/net/ieee802154/core.h b/net/ieee802154/core.h index f3e95580caee..231fade959f3 100644 --- a/net/ieee802154/core.h +++ b/net/ieee802154/core.h @@ -42,5 +42,6 @@ extern int cfg802154_rdev_list_generation; void cfg802154_dev_free(struct cfg802154_registered_device *rdev); struct cfg802154_registered_device * cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx); +struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx); #endif /* __IEEE802154_CORE_H */ diff --git a/net/ieee802154/header_ops.c b/net/ieee802154/header_ops.c index a051b6993177..c7439f0fbbdf 100644 --- a/net/ieee802154/header_ops.c +++ b/net/ieee802154/header_ops.c @@ -83,35 +83,35 @@ ieee802154_hdr_push_sechdr(u8 *buf, const struct ieee802154_sechdr *hdr) } int -ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr) +ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr) { - u8 buf[MAC802154_FRAME_HARD_HEADER_LEN]; + u8 buf[IEEE802154_MAX_HEADER_LEN]; int pos = 2; int rc; - struct ieee802154_hdr_fc fc = hdr->fc; + struct ieee802154_hdr_fc *fc = &hdr->fc; buf[pos++] = hdr->seq; - fc.dest_addr_mode = hdr->dest.mode; + fc->dest_addr_mode = hdr->dest.mode; rc = ieee802154_hdr_push_addr(buf + pos, &hdr->dest, false); if (rc < 0) return 
-EINVAL; pos += rc; - fc.source_addr_mode = hdr->source.mode; + fc->source_addr_mode = hdr->source.mode; if (hdr->source.pan_id == hdr->dest.pan_id && hdr->dest.mode != IEEE802154_ADDR_NONE) - fc.intra_pan = true; + fc->intra_pan = true; - rc = ieee802154_hdr_push_addr(buf + pos, &hdr->source, fc.intra_pan); + rc = ieee802154_hdr_push_addr(buf + pos, &hdr->source, fc->intra_pan); if (rc < 0) return -EINVAL; pos += rc; - if (fc.security_enabled) { - fc.version = 1; + if (fc->security_enabled) { + fc->version = 1; rc = ieee802154_hdr_push_sechdr(buf + pos, &hdr->sec); if (rc < 0) @@ -120,7 +120,7 @@ ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr) pos += rc; } - memcpy(buf, &fc, 2); + memcpy(buf, fc, 2); memcpy(skb_push(skb, pos), buf, pos); diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index 3f89c0abdab1..16ef0d9f566e 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -232,8 +232,86 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = { [NL802154_ATTR_SUPPORTED_COMMANDS] = { .type = NLA_NESTED }, [NL802154_ATTR_ACKREQ_DEFAULT] = { .type = NLA_U8 }, + +#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL + [NL802154_ATTR_SEC_ENABLED] = { .type = NLA_U8, }, + [NL802154_ATTR_SEC_OUT_LEVEL] = { .type = NLA_U32, }, + [NL802154_ATTR_SEC_OUT_KEY_ID] = { .type = NLA_NESTED, }, + [NL802154_ATTR_SEC_FRAME_COUNTER] = { .type = NLA_U32 }, + + [NL802154_ATTR_SEC_LEVEL] = { .type = NLA_NESTED }, + [NL802154_ATTR_SEC_DEVICE] = { .type = NLA_NESTED }, + [NL802154_ATTR_SEC_DEVKEY] = { .type = NLA_NESTED }, + [NL802154_ATTR_SEC_KEY] = { .type = NLA_NESTED }, +#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ }; +#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL +static int +nl802154_prepare_wpan_dev_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct cfg802154_registered_device **rdev, + struct wpan_dev **wpan_dev) +{ + int err; + + rtnl_lock(); + + if (!cb->args[0]) { + err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl802154_fam.hdrsize, + nl802154_fam.attrbuf, nl802154_fam.maxattr, + nl802154_policy); + if (err) + goto out_unlock; + + *wpan_dev = __cfg802154_wpan_dev_from_attrs(sock_net(skb->sk), + nl802154_fam.attrbuf); + if (IS_ERR(*wpan_dev)) { + err = PTR_ERR(*wpan_dev); + goto out_unlock; + } + *rdev = wpan_phy_to_rdev((*wpan_dev)->wpan_phy); + /* 0 is the first index - add 1 to parse only once */ + cb->args[0] = (*rdev)->wpan_phy_idx + 1; + cb->args[1] = (*wpan_dev)->identifier; + } else { + /* subtract the 1 again here */ + struct wpan_phy *wpan_phy = wpan_phy_idx_to_wpan_phy(cb->args[0] - 1); + struct wpan_dev *tmp; + + if (!wpan_phy) { + err = -ENODEV; + goto out_unlock; + } + *rdev = wpan_phy_to_rdev(wpan_phy); + *wpan_dev = NULL; + + list_for_each_entry(tmp, &(*rdev)->wpan_dev_list, list) { + if (tmp->identifier == cb->args[1]) { + *wpan_dev = tmp; + break; + } + } + + if (!*wpan_dev) { + err = -ENODEV; + goto out_unlock; + } + } + + return 0; + out_unlock: + rtnl_unlock(); + return err; +} + +static void +nl802154_finish_wpan_dev_dump(struct cfg802154_registered_device *rdev) +{ + rtnl_unlock(); +} +#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ + /* message building helper */ static inline void *nl802154hdr_put(struct sk_buff *skb, u32 portid, u32 seq, int flags, u8 cmd) @@ -612,6 +690,107 @@ static inline u64 wpan_dev_id(struct wpan_dev *wpan_dev) ((u64)wpan_phy_to_rdev(wpan_dev->wpan_phy)->wpan_phy_idx << 32); } +#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL +#include <net/ieee802154_netdev.h> 
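nl802154_prepare_wpan_dev_dump() above uses the standard netlink dump-resume trick: cb->args[] is the only state that survives between invocations of a dump callback, and since zero means "first call", stored indexes are shifted by one ("add 1 to parse only once" / "subtract the 1 again here"). A toy model of the same bookkeeping follows, with a plain cursor standing in for the wpan_phy index; the struct and driver loop are illustrative only.

#include <stdio.h>

struct cb_state { long args[3]; }; /* models netlink_callback::args */

static int dump_chunk(struct cb_state *cb, long nentries)
{
	long idx;

	if (!cb->args[0])
		idx = 0;               /* args[0] == 0 means "first call" */
	else
		idx = cb->args[0] - 1; /* subtract the 1 again on resume */

	if (idx >= nentries)
		return 0;              /* dump complete */

	printf("emitting entry %ld\n", idx);
	cb->args[0] = idx + 1 + 1; /* store next index + 1; 0 stays "unset" */
	return 1;                  /* more data: caller re-invokes us */
}

int main(void)
{
	struct cb_state cb = { { 0 } };

	while (dump_chunk(&cb, 3))
		;
	return 0;
}

Run it and it emits entries 0, 1 and 2 across successive calls, exactly the shape of a multi-part netlink dump that fills one skb per invocation.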
+ +static int +ieee802154_llsec_send_key_id(struct sk_buff *msg, + const struct ieee802154_llsec_key_id *desc) +{ + struct nlattr *nl_dev_addr; + + if (nla_put_u32(msg, NL802154_KEY_ID_ATTR_MODE, desc->mode)) + return -ENOBUFS; + + switch (desc->mode) { + case NL802154_KEY_ID_MODE_IMPLICIT: + nl_dev_addr = nla_nest_start(msg, NL802154_KEY_ID_ATTR_IMPLICIT); + if (!nl_dev_addr) + return -ENOBUFS; + + if (nla_put_le16(msg, NL802154_DEV_ADDR_ATTR_PAN_ID, + desc->device_addr.pan_id) || + nla_put_u32(msg, NL802154_DEV_ADDR_ATTR_MODE, + desc->device_addr.mode)) + return -ENOBUFS; + + switch (desc->device_addr.mode) { + case NL802154_DEV_ADDR_SHORT: + if (nla_put_le16(msg, NL802154_DEV_ADDR_ATTR_SHORT, + desc->device_addr.short_addr)) + return -ENOBUFS; + break; + case NL802154_DEV_ADDR_EXTENDED: + if (nla_put_le64(msg, NL802154_DEV_ADDR_ATTR_EXTENDED, + desc->device_addr.extended_addr)) + return -ENOBUFS; + break; + default: + /* userspace should handle unknown */ + break; + } + + nla_nest_end(msg, nl_dev_addr); + break; + case NL802154_KEY_ID_MODE_INDEX: + break; + case NL802154_KEY_ID_MODE_INDEX_SHORT: + /* TODO renmae short_source? */ + if (nla_put_le32(msg, NL802154_KEY_ID_ATTR_SOURCE_SHORT, + desc->short_source)) + return -ENOBUFS; + break; + case NL802154_KEY_ID_MODE_INDEX_EXTENDED: + if (nla_put_le64(msg, NL802154_KEY_ID_ATTR_SOURCE_EXTENDED, + desc->extended_source)) + return -ENOBUFS; + break; + default: + /* userspace should handle unknown */ + break; + } + + /* TODO key_id to key_idx ? Check naming */ + if (desc->mode != NL802154_KEY_ID_MODE_IMPLICIT) { + if (nla_put_u8(msg, NL802154_KEY_ID_ATTR_INDEX, desc->id)) + return -ENOBUFS; + } + + return 0; +} + +static int nl802154_get_llsec_params(struct sk_buff *msg, + struct cfg802154_registered_device *rdev, + struct wpan_dev *wpan_dev) +{ + struct nlattr *nl_key_id; + struct ieee802154_llsec_params params; + int ret; + + ret = rdev_get_llsec_params(rdev, wpan_dev, ¶ms); + if (ret < 0) + return ret; + + if (nla_put_u8(msg, NL802154_ATTR_SEC_ENABLED, params.enabled) || + nla_put_u32(msg, NL802154_ATTR_SEC_OUT_LEVEL, params.out_level) || + nla_put_be32(msg, NL802154_ATTR_SEC_FRAME_COUNTER, + params.frame_counter)) + return -ENOBUFS; + + nl_key_id = nla_nest_start(msg, NL802154_ATTR_SEC_OUT_KEY_ID); + if (!nl_key_id) + return -ENOBUFS; + + ret = ieee802154_llsec_send_key_id(msg, ¶ms.out_key); + if (ret < 0) + return ret; + + nla_nest_end(msg, nl_key_id); + + return 0; +} +#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ + static int nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, struct cfg802154_registered_device *rdev, @@ -663,6 +842,11 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, if (nla_put_u8(msg, NL802154_ATTR_ACKREQ_DEFAULT, wpan_dev->ackreq)) goto nla_put_failure; +#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL + if (nl802154_get_llsec_params(msg, rdev, wpan_dev) < 0) + goto nla_put_failure; +#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ + genlmsg_end(msg, hdr); return 0; @@ -753,10 +937,8 @@ static int nl802154_new_interface(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } - /* TODO add nla_get_le64 to netlink */ if (info->attrs[NL802154_ATTR_EXTENDED_ADDR]) - extended_addr = (__force __le64)nla_get_u64( - info->attrs[NL802154_ATTR_EXTENDED_ADDR]); + extended_addr = nla_get_le64(info->attrs[NL802154_ATTR_EXTENDED_ADDR]); if (!rdev->ops->add_virtual_intf) return -EOPNOTSUPP; @@ -1075,6 +1257,838 @@ nl802154_set_ackreq_default(struct sk_buff *skb, struct 
genl_info *info) return rdev_set_ackreq_default(rdev, wpan_dev, ackreq); } +#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL +static const struct nla_policy nl802154_dev_addr_policy[NL802154_DEV_ADDR_ATTR_MAX + 1] = { + [NL802154_DEV_ADDR_ATTR_PAN_ID] = { .type = NLA_U16 }, + [NL802154_DEV_ADDR_ATTR_MODE] = { .type = NLA_U32 }, + [NL802154_DEV_ADDR_ATTR_SHORT] = { .type = NLA_U16 }, + [NL802154_DEV_ADDR_ATTR_EXTENDED] = { .type = NLA_U64 }, +}; + +static int +ieee802154_llsec_parse_dev_addr(struct nlattr *nla, + struct ieee802154_addr *addr) +{ + struct nlattr *attrs[NL802154_DEV_ADDR_ATTR_MAX + 1]; + + if (!nla || nla_parse_nested(attrs, NL802154_DEV_ADDR_ATTR_MAX, nla, + nl802154_dev_addr_policy)) + return -EINVAL; + + if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] && + !attrs[NL802154_DEV_ADDR_ATTR_MODE] && + !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] || + attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])) + return -EINVAL; + + addr->pan_id = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_PAN_ID]); + addr->mode = nla_get_u32(attrs[NL802154_DEV_ADDR_ATTR_MODE]); + switch (addr->mode) { + case NL802154_DEV_ADDR_SHORT: + addr->short_addr = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_SHORT]); + break; + case NL802154_DEV_ADDR_EXTENDED: + addr->extended_addr = nla_get_le64(attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]); + break; + default: + return -EINVAL; + } + + return 0; +} + +static const struct nla_policy nl802154_key_id_policy[NL802154_KEY_ID_ATTR_MAX + 1] = { + [NL802154_KEY_ID_ATTR_MODE] = { .type = NLA_U32 }, + [NL802154_KEY_ID_ATTR_INDEX] = { .type = NLA_U8 }, + [NL802154_KEY_ID_ATTR_IMPLICIT] = { .type = NLA_NESTED }, + [NL802154_KEY_ID_ATTR_SOURCE_SHORT] = { .type = NLA_U32 }, + [NL802154_KEY_ID_ATTR_SOURCE_EXTENDED] = { .type = NLA_U64 }, +}; + +static int +ieee802154_llsec_parse_key_id(struct nlattr *nla, + struct ieee802154_llsec_key_id *desc) +{ + struct nlattr *attrs[NL802154_KEY_ID_ATTR_MAX + 1]; + + if (!nla || nla_parse_nested(attrs, NL802154_KEY_ID_ATTR_MAX, nla, + nl802154_key_id_policy)) + return -EINVAL; + + if (!attrs[NL802154_KEY_ID_ATTR_MODE]) + return -EINVAL; + + desc->mode = nla_get_u32(attrs[NL802154_KEY_ID_ATTR_MODE]); + switch (desc->mode) { + case NL802154_KEY_ID_MODE_IMPLICIT: + if (!attrs[NL802154_KEY_ID_ATTR_IMPLICIT]) + return -EINVAL; + + if (ieee802154_llsec_parse_dev_addr(attrs[NL802154_KEY_ID_ATTR_IMPLICIT], + &desc->device_addr) < 0) + return -EINVAL; + break; + case NL802154_KEY_ID_MODE_INDEX: + break; + case NL802154_KEY_ID_MODE_INDEX_SHORT: + if (!attrs[NL802154_KEY_ID_ATTR_SOURCE_SHORT]) + return -EINVAL; + + desc->short_source = nla_get_le32(attrs[NL802154_KEY_ID_ATTR_SOURCE_SHORT]); + break; + case NL802154_KEY_ID_MODE_INDEX_EXTENDED: + if (!attrs[NL802154_KEY_ID_ATTR_SOURCE_EXTENDED]) + return -EINVAL; + + desc->extended_source = nla_get_le64(attrs[NL802154_KEY_ID_ATTR_SOURCE_EXTENDED]); + break; + default: + return -EINVAL; + } + + if (desc->mode != NL802154_KEY_ID_MODE_IMPLICIT) { + if (!attrs[NL802154_KEY_ID_ATTR_INDEX]) + return -EINVAL; + + /* TODO change id to idx */ + desc->id = nla_get_u8(attrs[NL802154_KEY_ID_ATTR_INDEX]); + } + + return 0; +} + +static int nl802154_set_llsec_params(struct sk_buff *skb, + struct genl_info *info) +{ + struct cfg802154_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; + struct ieee802154_llsec_params params; + u32 changed = 0; + int ret; + + if (info->attrs[NL802154_ATTR_SEC_ENABLED]) { + u8 enabled; + + enabled = 
nla_get_u8(info->attrs[NL802154_ATTR_SEC_ENABLED]); + if (enabled != 0 && enabled != 1) + return -EINVAL; + + params.enabled = nla_get_u8(info->attrs[NL802154_ATTR_SEC_ENABLED]); + changed |= IEEE802154_LLSEC_PARAM_ENABLED; + } + + if (info->attrs[NL802154_ATTR_SEC_OUT_KEY_ID]) { + ret = ieee802154_llsec_parse_key_id(info->attrs[NL802154_ATTR_SEC_OUT_KEY_ID], + ¶ms.out_key); + if (ret < 0) + return ret; + + changed |= IEEE802154_LLSEC_PARAM_OUT_KEY; + } + + if (info->attrs[NL802154_ATTR_SEC_OUT_LEVEL]) { + params.out_level = nla_get_u32(info->attrs[NL802154_ATTR_SEC_OUT_LEVEL]); + if (params.out_level > NL802154_SECLEVEL_MAX) + return -EINVAL; + + changed |= IEEE802154_LLSEC_PARAM_OUT_LEVEL; + } + + if (info->attrs[NL802154_ATTR_SEC_FRAME_COUNTER]) { + params.frame_counter = nla_get_be32(info->attrs[NL802154_ATTR_SEC_FRAME_COUNTER]); + changed |= IEEE802154_LLSEC_PARAM_FRAME_COUNTER; + } + + return rdev_set_llsec_params(rdev, wpan_dev, ¶ms, changed); +} + +static int nl802154_send_key(struct sk_buff *msg, u32 cmd, u32 portid, + u32 seq, int flags, + struct cfg802154_registered_device *rdev, + struct net_device *dev, + const struct ieee802154_llsec_key_entry *key) +{ + void *hdr; + u32 commands[NL802154_CMD_FRAME_NR_IDS / 32]; + struct nlattr *nl_key, *nl_key_id; + + hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); + if (!hdr) + return -1; + + if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) + goto nla_put_failure; + + nl_key = nla_nest_start(msg, NL802154_ATTR_SEC_KEY); + if (!nl_key) + goto nla_put_failure; + + nl_key_id = nla_nest_start(msg, NL802154_KEY_ATTR_ID); + if (!nl_key_id) + goto nla_put_failure; + + if (ieee802154_llsec_send_key_id(msg, &key->id) < 0) + goto nla_put_failure; + + nla_nest_end(msg, nl_key_id); + + if (nla_put_u8(msg, NL802154_KEY_ATTR_USAGE_FRAMES, + key->key->frame_types)) + goto nla_put_failure; + + if (key->key->frame_types & BIT(NL802154_FRAME_CMD)) { + /* TODO for each nested */ + memset(commands, 0, sizeof(commands)); + commands[7] = key->key->cmd_frame_ids; + if (nla_put(msg, NL802154_KEY_ATTR_USAGE_CMDS, + sizeof(commands), commands)) + goto nla_put_failure; + } + + if (nla_put(msg, NL802154_KEY_ATTR_BYTES, NL802154_KEY_SIZE, + key->key->key)) + goto nla_put_failure; + + nla_nest_end(msg, nl_key); + genlmsg_end(msg, hdr); + + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static int +nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct cfg802154_registered_device *rdev = NULL; + struct ieee802154_llsec_key_entry *key; + struct ieee802154_llsec_table *table; + struct wpan_dev *wpan_dev; + int err; + + err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev); + if (err) + return err; + + if (!wpan_dev->netdev) { + err = -EINVAL; + goto out_err; + } + + rdev_lock_llsec_table(rdev, wpan_dev); + rdev_get_llsec_table(rdev, wpan_dev, &table); + + /* TODO make it like station dump */ + if (cb->args[2]) + goto out; + + list_for_each_entry(key, &table->keys, list) { + if (nl802154_send_key(skb, NL802154_CMD_NEW_SEC_KEY, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, + rdev, wpan_dev->netdev, key) < 0) { + /* TODO */ + err = -EIO; + rdev_unlock_llsec_table(rdev, wpan_dev); + goto out_err; + } + } + + cb->args[2] = 1; + +out: + rdev_unlock_llsec_table(rdev, wpan_dev); + err = skb->len; +out_err: + nl802154_finish_wpan_dev_dump(rdev); + + return err; +} + +static const struct nla_policy nl802154_key_policy[NL802154_KEY_ATTR_MAX + 1] = { + [NL802154_KEY_ATTR_ID] 
+static const struct nla_policy nl802154_key_policy[NL802154_KEY_ATTR_MAX + 1] = {
+	[NL802154_KEY_ATTR_ID] = { NLA_NESTED },
+	/* TODO handle it as for_each_nested and NLA_FLAG? */
+	[NL802154_KEY_ATTR_USAGE_FRAMES] = { NLA_U8 },
+	/* TODO handle it as for_each_nested, not static array? */
+	[NL802154_KEY_ATTR_USAGE_CMDS] = { .len = NL802154_CMD_FRAME_NR_IDS / 8 },
+	[NL802154_KEY_ATTR_BYTES] = { .len = NL802154_KEY_SIZE },
+};
+
+static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
+	struct ieee802154_llsec_key key = { };
+	struct ieee802154_llsec_key_id id = { };
+	u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { };
+
+	if (nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX,
+			     info->attrs[NL802154_ATTR_SEC_KEY],
+			     nl802154_key_policy))
+		return -EINVAL;
+
+	if (!attrs[NL802154_KEY_ATTR_USAGE_FRAMES] ||
+	    !attrs[NL802154_KEY_ATTR_BYTES])
+		return -EINVAL;
+
+	if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0)
+		return -ENOBUFS;
+
+	key.frame_types = nla_get_u8(attrs[NL802154_KEY_ATTR_USAGE_FRAMES]);
+	if (key.frame_types > BIT(NL802154_FRAME_MAX) ||
+	    ((key.frame_types & BIT(NL802154_FRAME_CMD)) &&
+	     !attrs[NL802154_KEY_ATTR_USAGE_CMDS]))
+		return -EINVAL;
+
+	if (attrs[NL802154_KEY_ATTR_USAGE_CMDS]) {
+		/* TODO for each nested */
+		nla_memcpy(commands, attrs[NL802154_KEY_ATTR_USAGE_CMDS],
+			   NL802154_CMD_FRAME_NR_IDS / 8);
+
+		/* TODO understand the -EINVAL logic here? last condition */
+		if (commands[0] || commands[1] || commands[2] || commands[3] ||
+		    commands[4] || commands[5] || commands[6] ||
+		    commands[7] > BIT(NL802154_CMD_FRAME_MAX))
+			return -EINVAL;
+
+		key.cmd_frame_ids = commands[7];
+	} else {
+		key.cmd_frame_ids = 0;
+	}
+
+	nla_memcpy(key.key, attrs[NL802154_KEY_ATTR_BYTES], NL802154_KEY_SIZE);
+
+	if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0)
+		return -ENOBUFS;
+
+	return rdev_add_llsec_key(rdev, wpan_dev, &id, &key);
+}
+
+static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
+	struct ieee802154_llsec_key_id id;
+
+	if (nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX,
+			     info->attrs[NL802154_ATTR_SEC_KEY],
+			     nl802154_key_policy))
+		return -EINVAL;
+
+	if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0)
+		return -ENOBUFS;
+
+	return rdev_del_llsec_key(rdev, wpan_dev, &id);
+}
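NL802154_KEY_ATTR_USAGE_CMDS is declared with .len = NL802154_CMD_FRAME_NR_IDS / 8, i.e. a 256-bit bitmap of MAC command-frame IDs, but only the final 32-bit word is actually supported: the send path stores key->key->cmd_frame_ids in commands[7], and the add path rejects any bit in words 0..6. Note that the last condition, commands[7] > BIT(NL802154_CMD_FRAME_MAX), is a numeric comparison rather than a mask test, so it also rejects masks that combine BIT(NL802154_CMD_FRAME_MAX) with lower bits; that appears to be what the "TODO understand the -EINVAL logic here?" comment is pointing at. A standalone restatement of the check (sketch only, same layout assumed):

	/* Sketch only: validate a 256-bit command bitmap where just the
	 * last u32 word may carry bits, mirroring the hunk above.
	 */
	static bool demo_cmds_valid(const u32 cmds[8], u32 max_bit)
	{
		int i;

		for (i = 0; i < 7; i++)
			if (cmds[i])
				return false;	/* only the last word is used */

		return cmds[7] <= max_bit;	/* numeric, not a mask test */
	}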
+static int nl802154_send_device(struct sk_buff *msg, u32 cmd, u32 portid,
+				u32 seq, int flags,
+				struct cfg802154_registered_device *rdev,
+				struct net_device *dev,
+				const struct ieee802154_llsec_device *dev_desc)
+{
+	void *hdr;
+	struct nlattr *nl_device;
+
+	hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
+	if (!hdr)
+		return -1;
+
+	if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
+		goto nla_put_failure;
+
+	nl_device = nla_nest_start(msg, NL802154_ATTR_SEC_DEVICE);
+	if (!nl_device)
+		goto nla_put_failure;
+
+	if (nla_put_u32(msg, NL802154_DEV_ATTR_FRAME_COUNTER,
+			dev_desc->frame_counter) ||
+	    nla_put_le16(msg, NL802154_DEV_ATTR_PAN_ID, dev_desc->pan_id) ||
+	    nla_put_le16(msg, NL802154_DEV_ATTR_SHORT_ADDR,
+			 dev_desc->short_addr) ||
+	    nla_put_le64(msg, NL802154_DEV_ATTR_EXTENDED_ADDR,
+			 dev_desc->hwaddr) ||
+	    nla_put_u8(msg, NL802154_DEV_ATTR_SECLEVEL_EXEMPT,
+		       dev_desc->seclevel_exempt) ||
+	    nla_put_u32(msg, NL802154_DEV_ATTR_KEY_MODE, dev_desc->key_mode))
+		goto nla_put_failure;
+
+	nla_nest_end(msg, nl_device);
+	genlmsg_end(msg, hdr);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int
+nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct cfg802154_registered_device *rdev = NULL;
+	struct ieee802154_llsec_device *dev;
+	struct ieee802154_llsec_table *table;
+	struct wpan_dev *wpan_dev;
+	int err;
+
+	err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
+	if (err)
+		return err;
+
+	if (!wpan_dev->netdev) {
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	rdev_lock_llsec_table(rdev, wpan_dev);
+	rdev_get_llsec_table(rdev, wpan_dev, &table);
+
+	/* TODO make it like station dump */
+	if (cb->args[2])
+		goto out;
+
+	list_for_each_entry(dev, &table->devices, list) {
+		if (nl802154_send_device(skb, NL802154_CMD_NEW_SEC_LEVEL,
+					 NETLINK_CB(cb->skb).portid,
+					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
+					 rdev, wpan_dev->netdev, dev) < 0) {
+			/* TODO */
+			err = -EIO;
+			rdev_unlock_llsec_table(rdev, wpan_dev);
+			goto out_err;
+		}
+	}
+
+	cb->args[2] = 1;
+
+out:
+	rdev_unlock_llsec_table(rdev, wpan_dev);
+	err = skb->len;
+out_err:
+	nl802154_finish_wpan_dev_dump(rdev);
+
+	return err;
+}
+
+static const struct nla_policy nl802154_dev_policy[NL802154_DEV_ATTR_MAX + 1] = {
+	[NL802154_DEV_ATTR_FRAME_COUNTER] = { NLA_U32 },
+	[NL802154_DEV_ATTR_PAN_ID] = { .type = NLA_U16 },
+	[NL802154_DEV_ATTR_SHORT_ADDR] = { .type = NLA_U16 },
+	[NL802154_DEV_ATTR_EXTENDED_ADDR] = { .type = NLA_U64 },
+	[NL802154_DEV_ATTR_SECLEVEL_EXEMPT] = { NLA_U8 },
+	[NL802154_DEV_ATTR_KEY_MODE] = { NLA_U32 },
+};
+
+static int
+ieee802154_llsec_parse_device(struct nlattr *nla,
+			      struct ieee802154_llsec_device *dev)
+{
+	struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
+
+	if (!nla || nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX, nla,
+				     nl802154_dev_policy))
+		return -EINVAL;
+
+	memset(dev, 0, sizeof(*dev));
+
+	if (!attrs[NL802154_DEV_ATTR_FRAME_COUNTER] ||
+	    !attrs[NL802154_DEV_ATTR_PAN_ID] ||
+	    !attrs[NL802154_DEV_ATTR_SHORT_ADDR] ||
+	    !attrs[NL802154_DEV_ATTR_EXTENDED_ADDR] ||
+	    !attrs[NL802154_DEV_ATTR_SECLEVEL_EXEMPT] ||
+	    !attrs[NL802154_DEV_ATTR_KEY_MODE])
+		return -EINVAL;
+
+	/* TODO be32 */
+	dev->frame_counter = nla_get_u32(attrs[NL802154_DEV_ATTR_FRAME_COUNTER]);
+	dev->pan_id = nla_get_le16(attrs[NL802154_DEV_ATTR_PAN_ID]);
+	dev->short_addr = nla_get_le16(attrs[NL802154_DEV_ATTR_SHORT_ADDR]);
+	/* TODO rename hwaddr to extended_addr */
+	dev->hwaddr = nla_get_le64(attrs[NL802154_DEV_ATTR_EXTENDED_ADDR]);
+	dev->seclevel_exempt = nla_get_u8(attrs[NL802154_DEV_ATTR_SECLEVEL_EXEMPT]);
+	dev->key_mode = nla_get_u32(attrs[NL802154_DEV_ATTR_KEY_MODE]);
+
+	if (dev->key_mode > NL802154_DEVKEY_MAX ||
+	    (dev->seclevel_exempt != 0 && dev->seclevel_exempt != 1))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct ieee802154_llsec_device dev_desc;
+
+	if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE],
+					  &dev_desc) < 0)
+		return -EINVAL;
+
+	return rdev_add_device(rdev, wpan_dev, &dev_desc);
+}
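ieee802154_llsec_parse_device() above layers three checks: nla_parse_nested() against nl802154_dev_policy, explicit presence tests for all six attributes (a classic nla_policy cannot express "required"), and range checks on key_mode plus the boolean seclevel_exempt. The presence test can also be written table-driven; a sketch with hypothetical demo_* names:

	/* Sketch only: table-driven required-attribute test over the
	 * nl802154_dev_policy attribute set.
	 */
	static const int demo_dev_required[] = {
		NL802154_DEV_ATTR_FRAME_COUNTER,
		NL802154_DEV_ATTR_PAN_ID,
		NL802154_DEV_ATTR_SHORT_ADDR,
		NL802154_DEV_ATTR_EXTENDED_ADDR,
		NL802154_DEV_ATTR_SECLEVEL_EXEMPT,
		NL802154_DEV_ATTR_KEY_MODE,
	};

	static bool demo_all_present(struct nlattr **attrs)
	{
		size_t i;

		for (i = 0; i < ARRAY_SIZE(demo_dev_required); i++)
			if (!attrs[demo_dev_required[i]])
				return false;
		return true;
	}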
+static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
+	__le64 extended_addr;
+
+	if (nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX,
+			     info->attrs[NL802154_ATTR_SEC_DEVICE],
+			     nl802154_dev_policy))
+		return -EINVAL;
+
+	if (!attrs[NL802154_DEV_ATTR_EXTENDED_ADDR])
+		return -EINVAL;
+
+	extended_addr = nla_get_le64(attrs[NL802154_DEV_ATTR_EXTENDED_ADDR]);
+	return rdev_del_device(rdev, wpan_dev, extended_addr);
+}
+
+static int nl802154_send_devkey(struct sk_buff *msg, u32 cmd, u32 portid,
+				u32 seq, int flags,
+				struct cfg802154_registered_device *rdev,
+				struct net_device *dev, __le64 extended_addr,
+				const struct ieee802154_llsec_device_key *devkey)
+{
+	void *hdr;
+	struct nlattr *nl_devkey, *nl_key_id;
+
+	hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
+	if (!hdr)
+		return -1;
+
+	if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
+		goto nla_put_failure;
+
+	nl_devkey = nla_nest_start(msg, NL802154_ATTR_SEC_DEVKEY);
+	if (!nl_devkey)
+		goto nla_put_failure;
+
+	if (nla_put_le64(msg, NL802154_DEVKEY_ATTR_EXTENDED_ADDR,
+			 extended_addr) ||
+	    nla_put_u32(msg, NL802154_DEVKEY_ATTR_FRAME_COUNTER,
+			devkey->frame_counter))
+		goto nla_put_failure;
+
+	nl_key_id = nla_nest_start(msg, NL802154_DEVKEY_ATTR_ID);
+	if (!nl_key_id)
+		goto nla_put_failure;
+
+	if (ieee802154_llsec_send_key_id(msg, &devkey->key_id) < 0)
+		goto nla_put_failure;
+
+	nla_nest_end(msg, nl_key_id);
+	nla_nest_end(msg, nl_devkey);
+	genlmsg_end(msg, hdr);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int
+nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct cfg802154_registered_device *rdev = NULL;
+	struct ieee802154_llsec_device_key *kpos;
+	struct ieee802154_llsec_device *dpos;
+	struct ieee802154_llsec_table *table;
+	struct wpan_dev *wpan_dev;
+	int err;
+
+	err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
+	if (err)
+		return err;
+
+	if (!wpan_dev->netdev) {
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	rdev_lock_llsec_table(rdev, wpan_dev);
+	rdev_get_llsec_table(rdev, wpan_dev, &table);
+
+	/* TODO make it like station dump */
+	if (cb->args[2])
+		goto out;
+
+	/* TODO look if remove devkey and do some nested attribute */
+	list_for_each_entry(dpos, &table->devices, list) {
+		list_for_each_entry(kpos, &dpos->keys, list) {
+			if (nl802154_send_devkey(skb,
+						 NL802154_CMD_NEW_SEC_LEVEL,
+						 NETLINK_CB(cb->skb).portid,
+						 cb->nlh->nlmsg_seq,
+						 NLM_F_MULTI, rdev,
+						 wpan_dev->netdev,
+						 dpos->hwaddr,
+						 kpos) < 0) {
+				/* TODO */
+				err = -EIO;
+				rdev_unlock_llsec_table(rdev, wpan_dev);
+				goto out_err;
+			}
+		}
+	}
+
+	cb->args[2] = 1;
+
+out:
+	rdev_unlock_llsec_table(rdev, wpan_dev);
+	err = skb->len;
+out_err:
+	nl802154_finish_wpan_dev_dump(rdev);
+
+	return err;
+}
+
+static const struct nla_policy nl802154_devkey_policy[NL802154_DEVKEY_ATTR_MAX + 1] = {
+	[NL802154_DEVKEY_ATTR_FRAME_COUNTER] = { NLA_U32 },
+	[NL802154_DEVKEY_ATTR_EXTENDED_ADDR] = { NLA_U64 },
+	[NL802154_DEVKEY_ATTR_ID] = { NLA_NESTED },
+};
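nl802154_dump_llsec_devkey() above walks a two-level structure, every key of every device, so the all-or-nothing cb->args[2] idiom is even more limiting here: a table that overflows one skb can never be dumped to completion. The station-dump style the TODO references would keep one resume index per level in cb->args. A sketch of that shape, with hypothetical demo_* types:

	/* Sketch only: two-index resumption for a nested dump;
	 * cb->args[3]/args[4] persist device and key positions.
	 */
	static int demo_dump_devkeys(struct sk_buff *skb,
				     struct netlink_callback *cb)
	{
		unsigned long dev_idx = 0, key_idx;
		struct demo_dev *d;
		struct demo_key *k;

		list_for_each_entry(d, &demo_devs, list) {
			if (dev_idx++ < cb->args[3])
				continue;		/* device finished earlier */
			key_idx = 0;
			list_for_each_entry(k, &d->keys, list) {
				if (key_idx++ < cb->args[4])
					continue;	/* key sent earlier */
				if (demo_send_devkey(skb, cb, d, k) < 0)
					goto out;	/* resume here next call */
				cb->args[4] = key_idx;
			}
			cb->args[3] = dev_idx;
			cb->args[4] = 0;
		}
	out:
		return skb->len;
	}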
+static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct nlattr *attrs[NL802154_DEVKEY_ATTR_MAX + 1];
+	struct ieee802154_llsec_device_key key;
+	__le64 extended_addr;
+
+	if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
+	    nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX,
+			     info->attrs[NL802154_ATTR_SEC_DEVKEY],
+			     nl802154_devkey_policy) < 0)
+		return -EINVAL;
+
+	if (!attrs[NL802154_DEVKEY_ATTR_FRAME_COUNTER] ||
+	    !attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR])
+		return -EINVAL;
+
+	/* TODO change key.id ? */
+	if (ieee802154_llsec_parse_key_id(attrs[NL802154_DEVKEY_ATTR_ID],
+					  &key.key_id) < 0)
+		return -ENOBUFS;
+
+	/* TODO be32 */
+	key.frame_counter = nla_get_u32(attrs[NL802154_DEVKEY_ATTR_FRAME_COUNTER]);
+	/* TODO change naming hwaddr -> extended_addr
+	 * check unique identifier short+pan OR extended_addr
+	 */
+	extended_addr = nla_get_le64(attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR]);
+	return rdev_add_devkey(rdev, wpan_dev, extended_addr, &key);
+}
+
+static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct nlattr *attrs[NL802154_DEVKEY_ATTR_MAX + 1];
+	struct ieee802154_llsec_device_key key;
+	__le64 extended_addr;
+
+	if (nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX,
+			     info->attrs[NL802154_ATTR_SEC_DEVKEY],
+			     nl802154_devkey_policy))
+		return -EINVAL;
+
+	if (!attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR])
+		return -EINVAL;
+
+	/* TODO change key.id ? */
+	if (ieee802154_llsec_parse_key_id(attrs[NL802154_DEVKEY_ATTR_ID],
+					  &key.key_id) < 0)
+		return -ENOBUFS;
+
+	/* TODO change naming hwaddr -> extended_addr
+	 * check unique identifier short+pan OR extended_addr
+	 */
+	extended_addr = nla_get_le64(attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR]);
+	return rdev_del_devkey(rdev, wpan_dev, extended_addr, &key);
+}
+
+static int nl802154_send_seclevel(struct sk_buff *msg, u32 cmd, u32 portid,
+				  u32 seq, int flags,
+				  struct cfg802154_registered_device *rdev,
+				  struct net_device *dev,
+				  const struct ieee802154_llsec_seclevel *sl)
+{
+	void *hdr;
+	struct nlattr *nl_seclevel;
+
+	hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
+	if (!hdr)
+		return -1;
+
+	if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
+		goto nla_put_failure;
+
+	nl_seclevel = nla_nest_start(msg, NL802154_ATTR_SEC_LEVEL);
+	if (!nl_seclevel)
+		goto nla_put_failure;
+
+	if (nla_put_u32(msg, NL802154_SECLEVEL_ATTR_FRAME, sl->frame_type) ||
+	    nla_put_u32(msg, NL802154_SECLEVEL_ATTR_LEVELS, sl->sec_levels) ||
+	    nla_put_u8(msg, NL802154_SECLEVEL_ATTR_DEV_OVERRIDE,
+		       sl->device_override))
+		goto nla_put_failure;
+
+	if (sl->frame_type == NL802154_FRAME_CMD) {
+		if (nla_put_u32(msg, NL802154_SECLEVEL_ATTR_CMD_FRAME,
+				sl->cmd_frame_id))
+			goto nla_put_failure;
+	}
+
+	nla_nest_end(msg, nl_seclevel);
+	genlmsg_end(msg, hdr);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
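Note the asymmetry the "TODO be32" comments flag: SET_SEC_PARAMS earlier reads its frame counter with nla_get_be32(), while the devkey and device paths above read theirs as host-order u32. The 802.15.4 frame counter is carried big-endian on the air, so the consistent handling the TODO points at would convert once at the netlink boundary. A sketch of that variant (an assumption about intended direction, not the committed behaviour):

	/* Sketch only: read the counter as big-endian and convert once,
	 * assuming the attribute were defined in network byte order.
	 */
	static u32 demo_get_frame_counter(const struct nlattr *nla)
	{
		__be32 raw = nla_get_be32(nla);	/* attribute/wire order */

		return be32_to_cpu(raw);	/* host order for bookkeeping */
	}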
+static int
+nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct cfg802154_registered_device *rdev = NULL;
+	struct ieee802154_llsec_seclevel *sl;
+	struct ieee802154_llsec_table *table;
+	struct wpan_dev *wpan_dev;
+	int err;
+
+	err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
+	if (err)
+		return err;
+
+	if (!wpan_dev->netdev) {
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	rdev_lock_llsec_table(rdev, wpan_dev);
+	rdev_get_llsec_table(rdev, wpan_dev, &table);
+
+	/* TODO make it like station dump */
+	if (cb->args[2])
+		goto out;
+
+	list_for_each_entry(sl, &table->security_levels, list) {
+		if (nl802154_send_seclevel(skb, NL802154_CMD_NEW_SEC_LEVEL,
+					   NETLINK_CB(cb->skb).portid,
+					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
+					   rdev, wpan_dev->netdev, sl) < 0) {
+			/* TODO */
+			err = -EIO;
+			rdev_unlock_llsec_table(rdev, wpan_dev);
+			goto out_err;
+		}
+	}
+
+	cb->args[2] = 1;
+
+out:
+	rdev_unlock_llsec_table(rdev, wpan_dev);
+	err = skb->len;
+out_err:
+	nl802154_finish_wpan_dev_dump(rdev);
+
+	return err;
+}
+
+static const struct nla_policy nl802154_seclevel_policy[NL802154_SECLEVEL_ATTR_MAX + 1] = {
+	[NL802154_SECLEVEL_ATTR_LEVELS] = { .type = NLA_U8 },
+	[NL802154_SECLEVEL_ATTR_FRAME] = { .type = NLA_U32 },
+	[NL802154_SECLEVEL_ATTR_CMD_FRAME] = { .type = NLA_U32 },
+	[NL802154_SECLEVEL_ATTR_DEV_OVERRIDE] = { .type = NLA_U8 },
+};
+
+static int
+llsec_parse_seclevel(struct nlattr *nla, struct ieee802154_llsec_seclevel *sl)
+{
+	struct nlattr *attrs[NL802154_SECLEVEL_ATTR_MAX + 1];
+
+	if (!nla || nla_parse_nested(attrs, NL802154_SECLEVEL_ATTR_MAX, nla,
+				     nl802154_seclevel_policy))
+		return -EINVAL;
+
+	memset(sl, 0, sizeof(*sl));
+
+	if (!attrs[NL802154_SECLEVEL_ATTR_LEVELS] ||
+	    !attrs[NL802154_SECLEVEL_ATTR_FRAME] ||
+	    !attrs[NL802154_SECLEVEL_ATTR_DEV_OVERRIDE])
+		return -EINVAL;
+
+	sl->sec_levels = nla_get_u8(attrs[NL802154_SECLEVEL_ATTR_LEVELS]);
+	sl->frame_type = nla_get_u32(attrs[NL802154_SECLEVEL_ATTR_FRAME]);
+	sl->device_override = nla_get_u8(attrs[NL802154_SECLEVEL_ATTR_DEV_OVERRIDE]);
+	if (sl->frame_type > NL802154_FRAME_MAX ||
+	    (sl->device_override != 0 && sl->device_override != 1))
+		return -EINVAL;
+
+	if (sl->frame_type == NL802154_FRAME_CMD) {
+		if (!attrs[NL802154_SECLEVEL_ATTR_CMD_FRAME])
+			return -EINVAL;
+
+		sl->cmd_frame_id = nla_get_u32(attrs[NL802154_SECLEVEL_ATTR_CMD_FRAME]);
+		if (sl->cmd_frame_id > NL802154_CMD_FRAME_MAX)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int nl802154_add_llsec_seclevel(struct sk_buff *skb,
+				       struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct ieee802154_llsec_seclevel sl;
+
+	if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
+				 &sl) < 0)
+		return -EINVAL;
+
+	return rdev_add_seclevel(rdev, wpan_dev, &sl);
+}
+
+static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
+				       struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct ieee802154_llsec_seclevel sl;
+
+	if (!info->attrs[NL802154_ATTR_SEC_LEVEL] ||
+	    llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
+				 &sl) < 0)
+		return -EINVAL;
+
+	return rdev_del_seclevel(rdev, wpan_dev, &sl);
+}
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 #define NL802154_FLAG_NEED_WPAN_PHY	0x01
 #define NL802154_FLAG_NEED_NETDEV	0x02
 #define NL802154_FLAG_NEED_RTNL		0x04
@@ -1289,6 +2303,119 @@ static const struct genl_ops nl802154_ops[] = {
 		.internal_flags = NL802154_FLAG_NEED_NETDEV |
 				  NL802154_FLAG_NEED_RTNL,
 	},
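All of the security ops registered below carry .internal_flags = NL802154_FLAG_NEED_NETDEV | NL802154_FLAG_NEED_RTNL. These bits are not interpreted by generic netlink itself but by the family's pre_doit hook, which resolves the rdev and net_device into info->user_ptr[0]/user_ptr[1] and takes the RTNL before the .doit runs; that is why the handlers above can dereference info->user_ptr[] with no lookups of their own. A compressed sketch of such a hook, with a hypothetical demo_resolve_netdev() helper (this is the pattern, not the code from this commit):

	/* Sketch only: how NEED_* internal_flags are typically honoured. */
	static int demo_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
				 struct genl_info *info)
	{
		bool rtnl = ops->internal_flags & NL802154_FLAG_NEED_RTNL;

		if (rtnl)
			rtnl_lock();

		if (ops->internal_flags & NL802154_FLAG_NEED_NETDEV) {
			struct net_device *dev = demo_resolve_netdev(info);

			if (!dev) {
				if (rtnl)
					rtnl_unlock();
				return -ENODEV;
			}
			info->user_ptr[1] = dev;
		}
		return 0;
	}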
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+	{
+		.cmd = NL802154_CMD_SET_SEC_PARAMS,
+		.doit = nl802154_set_llsec_params,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_GET_SEC_KEY,
+		/* TODO .doit by matching key id? */
+		.dumpit = nl802154_dump_llsec_key,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_NEW_SEC_KEY,
+		.doit = nl802154_add_llsec_key,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_DEL_SEC_KEY,
+		.doit = nl802154_del_llsec_key,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	/* TODO unique identifier must short+pan OR extended_addr */
+	{
+		.cmd = NL802154_CMD_GET_SEC_DEV,
+		/* TODO .doit by matching extended_addr? */
+		.dumpit = nl802154_dump_llsec_dev,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_NEW_SEC_DEV,
+		.doit = nl802154_add_llsec_dev,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_DEL_SEC_DEV,
+		.doit = nl802154_del_llsec_dev,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	/* TODO remove complete devkey, put it as nested? */
+	{
+		.cmd = NL802154_CMD_GET_SEC_DEVKEY,
+		/* TODO doit by matching ??? */
+		.dumpit = nl802154_dump_llsec_devkey,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_NEW_SEC_DEVKEY,
+		.doit = nl802154_add_llsec_devkey,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_DEL_SEC_DEVKEY,
+		.doit = nl802154_del_llsec_devkey,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_GET_SEC_LEVEL,
+		/* TODO .doit by matching frame_type? */
+		.dumpit = nl802154_dump_llsec_seclevel,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_NEW_SEC_LEVEL,
+		.doit = nl802154_add_llsec_seclevel,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
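Several GET_* entries above carry TODOs asking whether a .doit that returns a single matching entry should complement the table-wide .dumpit. Such a handler would reuse the same nested parsing already in this file; purely as a sketch of that unimplemented idea (the lookup itself is left out):

	/* Sketch only: a single-key .doit the TODO contemplates;
	 * not part of this commit.
	 */
	static int demo_get_llsec_key(struct sk_buff *skb, struct genl_info *info)
	{
		struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
		struct ieee802154_llsec_key_id id;

		if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
		    nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX,
				     info->attrs[NL802154_ATTR_SEC_KEY],
				     nl802154_key_policy))
			return -EINVAL;

		if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID],
						  &id) < 0)
			return -ENOBUFS;

		/* ...walk the llsec table for a matching id and answer with
		 * one nl802154_send_key() message...
		 */
		return -EOPNOTSUPP;	/* demo: lookup left out */
	}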
+	{
+		.cmd = NL802154_CMD_DEL_SEC_LEVEL,
+		/* TODO match frame_type only? */
+		.doit = nl802154_del_llsec_seclevel,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
 };
 
 /* initialisation/exit functions */
diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h
index 03b357501cc5..4441c63b3ea6 100644
--- a/net/ieee802154/rdev-ops.h
+++ b/net/ieee802154/rdev-ops.h
@@ -208,4 +208,113 @@ rdev_set_ackreq_default(struct cfg802154_registered_device *rdev,
 	return ret;
 }
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+/* TODO this is already a nl802154, so move into ieee802154 */
+static inline void
+rdev_get_llsec_table(struct cfg802154_registered_device *rdev,
+		     struct wpan_dev *wpan_dev,
+		     struct ieee802154_llsec_table **table)
+{
+	rdev->ops->get_llsec_table(&rdev->wpan_phy, wpan_dev, table);
+}
+
+static inline void
+rdev_lock_llsec_table(struct cfg802154_registered_device *rdev,
+		      struct wpan_dev *wpan_dev)
+{
+	rdev->ops->lock_llsec_table(&rdev->wpan_phy, wpan_dev);
+}
+
+static inline void
+rdev_unlock_llsec_table(struct cfg802154_registered_device *rdev,
+			struct wpan_dev *wpan_dev)
+{
+	rdev->ops->unlock_llsec_table(&rdev->wpan_phy, wpan_dev);
+}
+
+static inline int
+rdev_get_llsec_params(struct cfg802154_registered_device *rdev,
+		      struct wpan_dev *wpan_dev,
+		      struct ieee802154_llsec_params *params)
+{
+	return rdev->ops->get_llsec_params(&rdev->wpan_phy, wpan_dev, params);
+}
+
+static inline int
+rdev_set_llsec_params(struct cfg802154_registered_device *rdev,
+		      struct wpan_dev *wpan_dev,
+		      const struct ieee802154_llsec_params *params,
+		      u32 changed)
+{
+	return rdev->ops->set_llsec_params(&rdev->wpan_phy, wpan_dev, params,
+					   changed);
+}
+
+static inline int
+rdev_add_llsec_key(struct cfg802154_registered_device *rdev,
+		   struct wpan_dev *wpan_dev,
+		   const struct ieee802154_llsec_key_id *id,
+		   const struct ieee802154_llsec_key *key)
+{
+	return rdev->ops->add_llsec_key(&rdev->wpan_phy, wpan_dev, id, key);
+}
+
+static inline int
+rdev_del_llsec_key(struct cfg802154_registered_device *rdev,
+		   struct wpan_dev *wpan_dev,
+		   const struct ieee802154_llsec_key_id *id)
+{
+	return rdev->ops->del_llsec_key(&rdev->wpan_phy, wpan_dev, id);
+}
+
+static inline int
+rdev_add_seclevel(struct cfg802154_registered_device *rdev,
+		  struct wpan_dev *wpan_dev,
+		  const struct ieee802154_llsec_seclevel *sl)
+{
+	return rdev->ops->add_seclevel(&rdev->wpan_phy, wpan_dev, sl);
+}
+
+static inline int
+rdev_del_seclevel(struct cfg802154_registered_device *rdev,
+		  struct wpan_dev *wpan_dev,
+		  const struct ieee802154_llsec_seclevel *sl)
+{
+	return rdev->ops->del_seclevel(&rdev->wpan_phy, wpan_dev, sl);
+}
+
+static inline int
+rdev_add_device(struct cfg802154_registered_device *rdev,
+		struct wpan_dev *wpan_dev,
+		const struct ieee802154_llsec_device *dev_desc)
+{
+	return rdev->ops->add_device(&rdev->wpan_phy, wpan_dev, dev_desc);
+}
+
+static inline int
+rdev_del_device(struct cfg802154_registered_device *rdev,
+		struct wpan_dev *wpan_dev, __le64 extended_addr)
+{
+	return rdev->ops->del_device(&rdev->wpan_phy, wpan_dev, extended_addr);
+}
+
+static inline int
+rdev_add_devkey(struct cfg802154_registered_device *rdev,
+		struct wpan_dev *wpan_dev, __le64 extended_addr,
+		const struct ieee802154_llsec_device_key *devkey)
+{
+	return rdev->ops->add_devkey(&rdev->wpan_phy, wpan_dev, extended_addr,
+				     devkey);
+}
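These rdev_*() helpers are deliberately thin: they forward straight into the driver's cfg802154_ops vtable, which keeps nl802154.c free of direct driver calls and leaves a single choke point for tracing later. The driver side then only has to populate the corresponding function pointers. A sketch of that wiring for a hypothetical driver, with field names taken from the wrappers above and all demo_* implementations assumed:

	/* Sketch only: hypothetical driver filling in the llsec callbacks
	 * invoked by the rdev_*() wrappers.
	 */
	static const struct cfg802154_ops demo_cfg802154_ops = {
		/* ...existing mandatory ops... */
		.get_llsec_table	= demo_get_llsec_table,
		.lock_llsec_table	= demo_lock_llsec_table,
		.unlock_llsec_table	= demo_unlock_llsec_table,
		.set_llsec_params	= demo_set_llsec_params,
		.add_llsec_key		= demo_add_llsec_key,
		.del_llsec_key		= demo_del_llsec_key,
		.add_seclevel		= demo_add_seclevel,
		.del_seclevel		= demo_del_seclevel,
		.add_device		= demo_add_device,
		.del_device		= demo_del_device,
		.add_devkey		= demo_add_devkey,
		.del_devkey		= demo_del_devkey,
	};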
+static inline int
+rdev_del_devkey(struct cfg802154_registered_device *rdev,
+		struct wpan_dev *wpan_dev, __le64 extended_addr,
+		const struct ieee802154_llsec_device_key *devkey)
+{
+	return rdev->ops->del_devkey(&rdev->wpan_phy, wpan_dev, extended_addr,
+				     devkey);
+}
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 #endif /* __CFG802154_RDEV_OPS */
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index b6eacf30ee7a..a548be247e15 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -273,7 +273,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 		goto out;
 	}
 
-	mtu = dev->mtu;
+	mtu = IEEE802154_MTU;
 	pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
 
 	if (size > mtu) {
@@ -637,7 +637,7 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 		err = -ENXIO;
 		goto out;
 	}
-	mtu = dev->mtu;
+	mtu = IEEE802154_MTU;
 	pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
 
 	if (size > mtu) {
@@ -676,8 +676,8 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	cb->seclevel = ro->seclevel;
 	cb->seclevel_override = ro->seclevel_override;
 
-	err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &dst_addr,
-			      ro->bound ? &ro->src_addr : NULL, size);
+	err = wpan_dev_hard_header(skb, dev, &dst_addr,
+				   ro->bound ? &ro->src_addr : NULL, size);
 	if (err < 0)
 		goto out_skb;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 064bd3caaa4f..42778d9d71e5 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -57,8 +57,7 @@ static unsigned int fib_info_cnt;
 static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-
-static DEFINE_SPINLOCK(fib_multipath_lock);
+u32 fib_multipath_secret __read_mostly;
 
 #define for_nexthops(fi) {						\
 	int nhsel; const struct fib_nh *nh;				\
@@ -532,7 +531,67 @@ errout:
 	return ret;
 }
 
-#endif
+static void fib_rebalance(struct fib_info *fi)
+{
+	int total;
+	int w;
+	struct in_device *in_dev;
+
+	if (fi->fib_nhs < 2)
+		return;
+
+	total = 0;
+	for_nexthops(fi) {
+		if (nh->nh_flags & RTNH_F_DEAD)
+			continue;
+
+		in_dev = __in_dev_get_rtnl(nh->nh_dev);
+
+		if (in_dev &&
+		    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
+		    nh->nh_flags & RTNH_F_LINKDOWN)
+			continue;
+
+		total += nh->nh_weight;
+	} endfor_nexthops(fi);
+
+	w = 0;
+	change_nexthops(fi) {
+		int upper_bound;
+
+		in_dev = __in_dev_get_rtnl(nexthop_nh->nh_dev);
+
+		if (nexthop_nh->nh_flags & RTNH_F_DEAD) {
+			upper_bound = -1;
+		} else if (in_dev &&
+			   IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
+			   nexthop_nh->nh_flags & RTNH_F_LINKDOWN) {
+			upper_bound = -1;
+		} else {
+			w += nexthop_nh->nh_weight;
+			upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31,
+							    total) - 1;
+		}
+
+		atomic_set(&nexthop_nh->nh_upper_bound, upper_bound);
+	} endfor_nexthops(fi);
+
+	net_get_random_once(&fib_multipath_secret,
+			    sizeof(fib_multipath_secret));
+}
+
+static inline void fib_add_weight(struct fib_info *fi,
+				  const struct fib_nh *nh)
+{
+	fi->fib_weight += nh->nh_weight;
+}
+
+#else /* CONFIG_IP_ROUTE_MULTIPATH */
+
+#define fib_rebalance(fi) do { } while (0)
+#define fib_add_weight(fi, nh) do { } while (0)
+
+#endif /* CONFIG_IP_ROUTE_MULTIPATH */
 
 static int fib_encap_match(struct net *net, u16 encap_type,
 			   struct nlattr *encap,
@@ -1094,8 +1153,11 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 
 	change_nexthops(fi) {
 		fib_info_update_nh_saddr(net, nexthop_nh);
+		fib_add_weight(fi, nexthop_nh);
 	} endfor_nexthops(fi)
+
+	fib_rebalance(fi);
+
 link_it:
 	ofi = fib_find_info(fi);
 	if (ofi) {
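fib_rebalance() above replaces the old fib_power bookkeeping with precomputed per-nexthop upper bounds over a 31-bit hash space: for cumulative weight w out of total, the bound is DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1, and fib_select_multipath() below simply picks the first nexthop whose bound covers the flow hash, so the weighted split needs no per-packet state or locking. A tiny worked check of the arithmetic for weights {3, 1} (plain userland C):

	/* Sketch only: reproduce the upper-bound arithmetic for weights {3, 1}. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t weights[] = { 3, 1 };
		uint64_t total = 4, w = 0;
		int i;

		for (i = 0; i < 2; i++) {
			w += weights[i];
			/* DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1 */
			uint64_t bound = ((w << 31) + total / 2) / total - 1;

			printf("nh%d upper_bound = %llu\n", i,
			       (unsigned long long)bound);
		}
		return 0;	/* prints 1610612735 and 2147483647: a 75/25 split */
	}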
@@ -1317,12 +1379,6 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event)
 					nexthop_nh->nh_flags |= RTNH_F_LINKDOWN;
 					break;
 				}
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
-				spin_lock_bh(&fib_multipath_lock);
-				fi->fib_power -= nexthop_nh->nh_power;
-				nexthop_nh->nh_power = 0;
-				spin_unlock_bh(&fib_multipath_lock);
-#endif
 				dead++;
 			}
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -1345,6 +1401,8 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event)
 			}
 			ret++;
 		}
+
+		fib_rebalance(fi);
 	}
 
 	return ret;
@@ -1467,20 +1525,15 @@ int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
 			    !__in_dev_get_rtnl(dev))
 				continue;
 			alive++;
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
-			spin_lock_bh(&fib_multipath_lock);
-			nexthop_nh->nh_power = 0;
 			nexthop_nh->nh_flags &= ~nh_flags;
-			spin_unlock_bh(&fib_multipath_lock);
-#else
-			nexthop_nh->nh_flags &= ~nh_flags;
-#endif
 		} endfor_nexthops(fi)
 
 		if (alive > 0) {
 			fi->fib_flags &= ~nh_flags;
 			ret++;
 		}
+
+		fib_rebalance(fi);
 	}
 
 	return ret;
@@ -1488,62 +1541,40 @@ int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 
-/*
- * The algorithm is suboptimal, but it provides really
- * fair weighted route distribution.
- */
-void fib_select_multipath(struct fib_result *res)
+void fib_select_multipath(struct fib_result *res, int hash)
 {
 	struct fib_info *fi = res->fi;
-	struct in_device *in_dev;
-	int w;
-
-	spin_lock_bh(&fib_multipath_lock);
-	if (fi->fib_power <= 0) {
-		int power = 0;
-
-		change_nexthops(fi) {
-			in_dev = __in_dev_get_rcu(nexthop_nh->nh_dev);
-			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
-				continue;
-			if (in_dev &&
-			    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
-			    nexthop_nh->nh_flags & RTNH_F_LINKDOWN)
-				continue;
-			power += nexthop_nh->nh_weight;
-			nexthop_nh->nh_power = nexthop_nh->nh_weight;
-		} endfor_nexthops(fi);
-		fi->fib_power = power;
-		if (power <= 0) {
-			spin_unlock_bh(&fib_multipath_lock);
-			/* Race condition: route has just become dead. */
-			res->nh_sel = 0;
-			return;
-		}
-	}
-
-	/* w should be random number [0..fi->fib_power-1],
-	 * it is pretty bad approximation.
-	 */
-
-	w = jiffies % fi->fib_power;
+	for_nexthops(fi) {
+		if (hash > atomic_read(&nh->nh_upper_bound))
+			continue;
 
-	change_nexthops(fi) {
-		if (!(nexthop_nh->nh_flags & RTNH_F_DEAD) &&
-		    nexthop_nh->nh_power) {
-			w -= nexthop_nh->nh_power;
-			if (w <= 0) {
-				nexthop_nh->nh_power--;
-				fi->fib_power--;
-				res->nh_sel = nhsel;
-				spin_unlock_bh(&fib_multipath_lock);
-				return;
-			}
-		}
+		res->nh_sel = nhsel;
+		return;
 	} endfor_nexthops(fi);
 
 	/* Race condition: route has just become dead. */
 	res->nh_sel = 0;
-	spin_unlock_bh(&fib_multipath_lock);
 }
 #endif
+
+void fib_select_path(struct net *net, struct fib_result *res,
+		     struct flowi4 *fl4, int mp_hash)
+{
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+	if (res->fi->fib_nhs > 1 && fl4->flowi4_oif == 0) {
+		if (mp_hash < 0)
+			mp_hash = fib_multipath_hash(fl4->saddr, fl4->daddr);
+		fib_select_multipath(res, mp_hash);
+	}
+	else
+#endif
+	if (!res->prefixlen &&
+	    res->table->tb_num_default > 1 &&
+	    res->type == RTN_UNICAST && !fl4->flowi4_oif)
+		fib_select_default(fl4, res);
+
+	if (!fl4->saddr)
+		fl4->saddr = FIB_RES_PREFSRC(net, *res);
+}
+EXPORT_SYMBOL_GPL(fib_select_path);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 6b96dee2800b..36e26977c908 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -440,6 +440,22 @@ out_unlock:
 	icmp_xmit_unlock(sk);
 }
 
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+
+/* Source and destination is swapped.
See ip_multipath_icmp_hash */ +static int icmp_multipath_hash_skb(const struct sk_buff *skb) +{ + const struct iphdr *iph = ip_hdr(skb); + + return fib_multipath_hash(iph->daddr, iph->saddr); +} + +#else + +#define icmp_multipath_hash_skb(skb) (-1) + +#endif + static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4, struct sk_buff *skb_in, @@ -464,7 +480,8 @@ static struct rtable *icmp_route_lookup(struct net *net, fl4->flowi4_oif = l3mdev_master_ifindex(skb_in->dev); security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); - rt = __ip_route_output_key(net, fl4); + rt = __ip_route_output_key_hash(net, fl4, + icmp_multipath_hash_skb(skb_in)); if (IS_ERR(rt)) return rt; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index de6d4c8ba600..64aaf3522a59 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -397,7 +397,7 @@ static int igmpv3_sendpack(struct sk_buff *skb) pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen); - return ip_local_out(skb); + return ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); } static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel) @@ -739,7 +739,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, ih->group = group; ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr)); - return ip_local_out(skb); + return ip_local_out(net, skb->sk, skb); } static void igmp_gq_timer_expire(unsigned long data) diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 89eedfbd4ad5..8430bc8ccd58 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -546,6 +546,13 @@ void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req) } EXPORT_SYMBOL(inet_csk_reqsk_queue_drop); +void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req) +{ + inet_csk_reqsk_queue_drop(sk, req); + reqsk_put(req); +} +EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put); + static void reqsk_timer_handler(unsigned long data) { struct request_sock *req = (struct request_sock *)data; @@ -579,7 +586,7 @@ static void reqsk_timer_handler(unsigned long data) * ones are about to clog our table. 
*/ qlen = reqsk_queue_len(queue); - if ((qlen << 1) > sk_listener->sk_max_ack_backlog) { + if ((qlen << 1) > max(8U, sk_listener->sk_max_ack_backlog)) { int young = reqsk_queue_len_young(queue) << 1; while (thresh > 2) { @@ -608,8 +615,7 @@ static void reqsk_timer_handler(unsigned long data) return; } drop: - inet_csk_reqsk_queue_drop(sk_listener, req); - reqsk_put(req); + inet_csk_reqsk_queue_drop_and_put(sk_listener, req); } static void reqsk_queue_hash_req(struct request_sock *req, @@ -727,14 +733,14 @@ void inet_csk_prepare_forced_close(struct sock *sk) } EXPORT_SYMBOL(inet_csk_prepare_forced_close); -int inet_csk_listen_start(struct sock *sk, const int nr_table_entries) +int inet_csk_listen_start(struct sock *sk, int backlog) { struct inet_connection_sock *icsk = inet_csk(sk); struct inet_sock *inet = inet_sk(sk); reqsk_queue_alloc(&icsk->icsk_accept_queue); - sk->sk_max_ack_backlog = 0; + sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; inet_csk_delack_init(sk); @@ -758,6 +764,53 @@ int inet_csk_listen_start(struct sock *sk, const int nr_table_entries) } EXPORT_SYMBOL_GPL(inet_csk_listen_start); +static void inet_child_forget(struct sock *sk, struct request_sock *req, + struct sock *child) +{ + sk->sk_prot->disconnect(child, O_NONBLOCK); + + sock_orphan(child); + + percpu_counter_inc(sk->sk_prot->orphan_count); + + if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) { + BUG_ON(tcp_sk(child)->fastopen_rsk != req); + BUG_ON(sk != req->rsk_listener); + + /* Paranoid, to prevent race condition if + * an inbound pkt destined for child is + * blocked by sock lock in tcp_v4_rcv(). + * Also to satisfy an assertion in + * tcp_v4_destroy_sock(). + */ + tcp_sk(child)->fastopen_rsk = NULL; + } + inet_csk_destroy_sock(child); + reqsk_put(req); +} + +void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req, + struct sock *child) +{ + struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; + + spin_lock(&queue->rskq_lock); + if (unlikely(sk->sk_state != TCP_LISTEN)) { + inet_child_forget(sk, req, child); + } else { + req->sk = child; + req->dl_next = NULL; + if (queue->rskq_accept_head == NULL) + queue->rskq_accept_head = req; + else + queue->rskq_accept_tail->dl_next = req; + queue->rskq_accept_tail = req; + sk_acceptq_added(sk); + } + spin_unlock(&queue->rskq_lock); +} +EXPORT_SYMBOL(inet_csk_reqsk_queue_add); + /* * This routine closes sockets which have been at least partially * opened, but not yet accepted. @@ -784,31 +837,11 @@ void inet_csk_listen_stop(struct sock *sk) WARN_ON(sock_owned_by_user(child)); sock_hold(child); - sk->sk_prot->disconnect(child, O_NONBLOCK); - - sock_orphan(child); - - percpu_counter_inc(sk->sk_prot->orphan_count); - - if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) { - BUG_ON(tcp_sk(child)->fastopen_rsk != req); - BUG_ON(sk != req->rsk_listener); - - /* Paranoid, to prevent race condition if - * an inbound pkt destined for child is - * blocked by sock lock in tcp_v4_rcv(). - * Also to satisfy an assertion in - * tcp_v4_destroy_sock(). 
- */ - tcp_sk(child)->fastopen_rsk = NULL; - } - inet_csk_destroy_sock(child); - + inet_child_forget(sk, req, child); bh_unlock_sock(child); local_bh_enable(); sock_put(child); - reqsk_put(req); cond_resched(); } if (queue->fastopenq.rskq_rst_head) { @@ -823,7 +856,7 @@ void inet_csk_listen_stop(struct sock *sk) req = next; } } - WARN_ON(sk->sk_ack_backlog); + WARN_ON_ONCE(sk->sk_ack_backlog); } EXPORT_SYMBOL_GPL(inet_csk_listen_stop); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index bed8886a4b6c..958728a22001 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -137,6 +137,10 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child) spin_lock(&head->lock); tb = inet_csk(sk)->icsk_bind_hash; + if (unlikely(!tb)) { + spin_unlock(&head->lock); + return -ENOENT; + } if (tb->port != port) { /* NOTE: using tproxy and redirecting skbs to a proxy * on a different listener port breaks the assumption @@ -185,6 +189,8 @@ static inline int compute_score(struct sock *sk, struct net *net, return -1; score += 4; } + if (sk->sk_incoming_cpu == raw_smp_processor_id()) + score++; } return score; } diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index d66cfb35ba74..da0d7ce85844 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -72,7 +72,7 @@ static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *s ip_forward_options(skb); skb_sender_cpu_clear(skb); - return dst_output(sk, skb); + return dst_output(net, sk, skb); } int ip_forward(struct sk_buff *skb) diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 9772b789adf3..5482745d5d68 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -654,11 +654,10 @@ out_fail: } /* Process an incoming IP datagram fragment. */ -int ip_defrag(struct sk_buff *skb, u32 user) +int ip_defrag(struct net *net, struct sk_buff *skb, u32 user) { struct net_device *dev = skb->dev ? 
: skb_dst(skb)->dev; int vif = l3mdev_master_ifindex_rcu(dev); - struct net *net = dev_net(dev); struct ipq *qp; IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); @@ -683,7 +682,7 @@ int ip_defrag(struct sk_buff *skb, u32 user) } EXPORT_SYMBOL(ip_defrag); -struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) +struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user) { struct iphdr iph; int netoff; @@ -712,7 +711,7 @@ struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) if (pskb_trim_rcsum(skb, netoff + len)) return skb; memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); - if (ip_defrag(skb, user)) + if (ip_defrag(net, skb, user)) return NULL; skb_clear_hash(skb); } diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 7cc9f7bb7fb7..b1209b63381f 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -157,6 +157,7 @@ bool ip_call_ra_chain(struct sk_buff *skb) u8 protocol = ip_hdr(skb)->protocol; struct sock *last = NULL; struct net_device *dev = skb->dev; + struct net *net = dev_net(dev); for (ra = rcu_dereference(ip_ra_chain); ra; ra = rcu_dereference(ra->next)) { struct sock *sk = ra->sk; @@ -167,9 +168,9 @@ bool ip_call_ra_chain(struct sk_buff *skb) if (sk && inet_sk(sk)->inet_num == protocol && (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dev->ifindex) && - net_eq(sock_net(sk), dev_net(dev))) { + net_eq(sock_net(sk), net)) { if (ip_is_fragment(ip_hdr(skb))) { - if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) + if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN)) return true; } if (last) { @@ -246,14 +247,15 @@ int ip_local_deliver(struct sk_buff *skb) /* * Reassemble IP fragments. */ + struct net *net = dev_net(skb->dev); if (ip_is_fragment(ip_hdr(skb))) { - if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER)) + if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER)) return 0; } return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, - dev_net(skb->dev), NULL, skb, skb->dev, NULL, + net, NULL, skb, skb->dev, NULL, ip_local_deliver_finish); } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index aff6766922e8..67404e1fe7d4 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -83,9 +83,10 @@ int sysctl_ip_default_ttl __read_mostly = IPDEFTTL; EXPORT_SYMBOL(sysctl_ip_default_ttl); -static int ip_fragment(struct sock *sk, struct sk_buff *skb, - unsigned int mtu, - int (*output)(struct sock *, struct sk_buff *)); +static int +ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + unsigned int mtu, + int (*output)(struct net *, struct sock *, struct sk_buff *)); /* Generate a checksum for an outgoing IP datagram. 
*/ void ip_send_check(struct iphdr *iph) @@ -95,34 +96,28 @@ void ip_send_check(struct iphdr *iph) } EXPORT_SYMBOL(ip_send_check); -static int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb) +int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) { - struct net *net = dev_net(skb_dst(skb)->dev); struct iphdr *iph = ip_hdr(skb); iph->tot_len = htons(skb->len); ip_send_check(iph); return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb, NULL, skb_dst(skb)->dev, - dst_output_okfn); -} - -int __ip_local_out(struct sk_buff *skb) -{ - return __ip_local_out_sk(skb->sk, skb); + dst_output); } -int ip_local_out_sk(struct sock *sk, struct sk_buff *skb) +int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) { int err; - err = __ip_local_out(skb); + err = __ip_local_out(net, sk, skb); if (likely(err == 1)) - err = dst_output(sk, skb); + err = dst_output(net, sk, skb); return err; } -EXPORT_SYMBOL_GPL(ip_local_out_sk); +EXPORT_SYMBOL_GPL(ip_local_out); static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst) { @@ -142,6 +137,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk, { struct inet_sock *inet = inet_sk(sk); struct rtable *rt = skb_rtable(skb); + struct net *net = sock_net(sk); struct iphdr *iph; /* Build the IP header. */ @@ -160,7 +156,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk, iph->id = 0; } else { iph->frag_off = 0; - __ip_select_ident(sock_net(sk), iph, 1); + __ip_select_ident(net, iph, 1); } if (opt && opt->opt.optlen) { @@ -172,16 +168,15 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk, skb->mark = sk->sk_mark; /* Send it out. */ - return ip_local_out(skb); + return ip_local_out(net, skb->sk, skb); } EXPORT_SYMBOL_GPL(ip_build_and_send_pkt); -static int ip_finish_output2(struct sock *sk, struct sk_buff *skb) +static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct rtable *rt = (struct rtable *)dst; struct net_device *dev = dst->dev; - struct net *net = dev_net(dev); unsigned int hh_len = LL_RESERVED_SPACE(dev); struct neighbour *neigh; u32 nexthop; @@ -225,8 +220,8 @@ static int ip_finish_output2(struct sock *sk, struct sk_buff *skb) return -EINVAL; } -static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb, - unsigned int mtu) +static int ip_finish_output_gso(struct net *net, struct sock *sk, + struct sk_buff *skb, unsigned int mtu) { netdev_features_t features; struct sk_buff *segs; @@ -235,7 +230,7 @@ static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb, /* common case: locally created skb or seglen is <= mtu */ if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) || skb_gso_network_seglen(skb) <= mtu) - return ip_finish_output2(sk, skb); + return ip_finish_output2(net, sk, skb); /* Slowpath - GSO segment length is exceeding the dst MTU. 
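A recurring theme in the ip_output.c hunks here: instead of re-deriving struct net from dev_net(skb_dst(skb)->dev) at every level, the namespace is now threaded down explicitly, so __ip_local_out(), ip_local_out(), dst_output() and the fragmentation output callbacks all gained a struct net * first argument, and the old ip_local_out_sk() variant disappears. A sketch of a converted call site, the shape mirroring the igmp/raw/nf_dup call sites in this merge (demo_xmit itself is hypothetical):

	/* Sketch only: caller shape before and after the net threading. */
	static int demo_xmit(struct sk_buff *skb)
	{
		struct net *net = dev_net(skb_dst(skb)->dev);

		/* was: return ip_local_out(skb); */
		return ip_local_out(net, skb->sk, skb);
	}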
* @@ -258,7 +253,7 @@ static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb, int err; segs->next = NULL; - err = ip_fragment(sk, segs, mtu, ip_finish_output2); + err = ip_fragment(net, sk, segs, mtu, ip_finish_output2); if (err && ret == 0) ret = err; @@ -276,24 +271,23 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk /* Policy lookup after SNAT yielded a new policy */ if (skb_dst(skb)->xfrm) { IPCB(skb)->flags |= IPSKB_REROUTED; - return dst_output(sk, skb); + return dst_output(net, sk, skb); } #endif mtu = ip_skb_dst_mtu(skb); if (skb_is_gso(skb)) - return ip_finish_output_gso(sk, skb, mtu); + return ip_finish_output_gso(net, sk, skb, mtu); if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU)) - return ip_fragment(sk, skb, mtu, ip_finish_output2); + return ip_fragment(net, sk, skb, mtu, ip_finish_output2); - return ip_finish_output2(sk, skb); + return ip_finish_output2(net, sk, skb); } -int ip_mc_output(struct sock *sk, struct sk_buff *skb) +int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct rtable *rt = skb_rtable(skb); struct net_device *dev = rt->dst.dev; - struct net *net = dev_net(dev); /* * If the indicated interface is up and running, send the packet. @@ -352,10 +346,9 @@ int ip_mc_output(struct sock *sk, struct sk_buff *skb) !(IPCB(skb)->flags & IPSKB_REROUTED)); } -int ip_output(struct sock *sk, struct sk_buff *skb) +int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct net_device *dev = skb_dst(skb)->dev; - struct net *net = dev_net(dev); IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len); @@ -386,6 +379,7 @@ static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4) int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl) { struct inet_sock *inet = inet_sk(sk); + struct net *net = sock_net(sk); struct ip_options_rcu *inet_opt; struct flowi4 *fl4; struct rtable *rt; @@ -416,7 +410,7 @@ int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl) * keep trying until route appears or the connection times * itself out. */ - rt = ip_route_output_ports(sock_net(sk), fl4, sk, + rt = ip_route_output_ports(net, fl4, sk, daddr, inet->inet_saddr, inet->inet_dport, inet->inet_sport, @@ -453,20 +447,20 @@ packet_routed: ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0); } - ip_select_ident_segs(sock_net(sk), skb, sk, + ip_select_ident_segs(net, skb, sk, skb_shinfo(skb)->gso_segs ?: 1); /* TODO : should we use skb->sk here instead of sk ? 
*/ skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; - res = ip_local_out(skb); + res = ip_local_out(net, sk, skb); rcu_read_unlock(); return res; no_route: rcu_read_unlock(); - IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); + IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); kfree_skb(skb); return -EHOSTUNREACH; } @@ -495,20 +489,18 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) skb_copy_secmark(to, from); } -static int ip_fragment(struct sock *sk, struct sk_buff *skb, +static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned int mtu, - int (*output)(struct sock *, struct sk_buff *)) + int (*output)(struct net *, struct sock *, struct sk_buff *)) { struct iphdr *iph = ip_hdr(skb); if ((iph->frag_off & htons(IP_DF)) == 0) - return ip_do_fragment(sk, skb, output); + return ip_do_fragment(net, sk, skb, output); if (unlikely(!skb->ignore_df || (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size > mtu))) { - struct net *net = dev_net(skb_rtable(skb)->dst.dev); - IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); @@ -516,7 +508,7 @@ static int ip_fragment(struct sock *sk, struct sk_buff *skb, return -EMSGSIZE; } - return ip_do_fragment(sk, skb, output); + return ip_do_fragment(net, sk, skb, output); } /* @@ -526,8 +518,8 @@ static int ip_fragment(struct sock *sk, struct sk_buff *skb, * single device frame, and queue such a frame for sending. */ -int ip_do_fragment(struct sock *sk, struct sk_buff *skb, - int (*output)(struct sock *, struct sk_buff *)) +int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + int (*output)(struct net *, struct sock *, struct sk_buff *)) { struct iphdr *iph; int ptr; @@ -537,11 +529,9 @@ int ip_do_fragment(struct sock *sk, struct sk_buff *skb, int offset; __be16 not_last_frag; struct rtable *rt = skb_rtable(skb); - struct net *net; int err = 0; dev = rt->dst.dev; - net = dev_net(dev); /* * Point into the IP datagram header. 
@@ -631,7 +621,7 @@ int ip_do_fragment(struct sock *sk, struct sk_buff *skb, ip_send_check(iph); } - err = output(sk, skb); + err = output(net, sk, skb); if (!err) IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES); @@ -771,7 +761,7 @@ slow_path: ip_send_check(iph); - err = output(sk, skb2); + err = output(net, sk, skb2); if (err) goto fail; @@ -1444,7 +1434,7 @@ int ip_send_skb(struct net *net, struct sk_buff *skb) { int err; - err = ip_local_out(skb); + err = ip_local_out(net, skb->sk, skb); if (err) { if (err > 0) err = net_xmit_errno(err); diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 84dce6a92f93..6cb9009c3d96 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -53,6 +53,7 @@ int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, __u8 tos, __u8 ttl, __be16 df, bool xnet) { int pkt_len = skb->len - skb_inner_network_offset(skb); + struct net *net = dev_net(rt->dst.dev); struct iphdr *iph; int err; @@ -76,10 +77,9 @@ int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, iph->daddr = dst; iph->saddr = src; iph->ttl = ttl; - __ip_select_ident(dev_net(rt->dst.dev), iph, - skb_shinfo(skb)->gso_segs ?: 1); + __ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1); - err = ip_local_out_sk(sk, skb); + err = ip_local_out(net, sk, skb); if (unlikely(net_xmit_eval(err))) pkt_len = 0; return pkt_len; diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 3b87ec5178f9..4d8f0b698777 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -197,7 +197,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, skb_dst_set(skb, dst); skb->dev = skb_dst(skb)->dev; - err = dst_output(skb->sk, skb); + err = dst_output(tunnel->net, skb->sk, skb); if (net_xmit_eval(err) == 0) err = skb->len; iptunnel_xmit_stats(err, &dev->stats, dev->tstats); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index cfcb996ec51b..fc42525d8694 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1689,7 +1689,7 @@ static inline int ipmr_forward_finish(struct net *net, struct sock *sk, if (unlikely(opt->optlen)) ip_forward_options(skb); - return dst_output(sk, skb); + return dst_output(net, sk, skb); } /* diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index a1058363d2e9..5fdc556514ba 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c @@ -63,7 +63,7 @@ synproxy_send_tcp(const struct synproxy_net *snet, nf_conntrack_get(nfct); } - ip_local_out(nskb); + ip_local_out(net, nskb->sk, nskb); return; free_nskb: diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c index 9df3f93269d3..0e5591c2ee9f 100644 --- a/net/ipv4/netfilter/nf_defrag_ipv4.c +++ b/net/ipv4/netfilter/nf_defrag_ipv4.c @@ -22,14 +22,15 @@ #endif #include <net/netfilter/nf_conntrack_zones.h> -static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) +static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb, + u_int32_t user) { int err; skb_orphan(skb); local_bh_disable(); - err = ip_defrag(skb, user); + err = ip_defrag(net, skb, user); local_bh_enable(); if (!err) { @@ -85,7 +86,7 @@ static unsigned int ipv4_conntrack_defrag(void *priv, enum ip_defrag_users user = nf_ct_defrag_user(state->hook, skb); - if (nf_ct_ipv4_gather_frags(skb, user)) + if (nf_ct_ipv4_gather_frags(state->net, skb, user)) return NF_STOLEN; } return NF_ACCEPT; diff --git a/net/ipv4/netfilter/nf_dup_ipv4.c b/net/ipv4/netfilter/nf_dup_ipv4.c index 
ce2a59e5c665..ceb187308120 100644 --- a/net/ipv4/netfilter/nf_dup_ipv4.c +++ b/net/ipv4/netfilter/nf_dup_ipv4.c @@ -92,7 +92,7 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum, if (nf_dup_ipv4_route(net, skb, gw, oif)) { __this_cpu_write(nf_skb_duplicated, true); - ip_local_out(skb); + ip_local_out(net, skb->sk, skb); __this_cpu_write(nf_skb_duplicated, false); } else { kfree_skb(skb); diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c index 2f5e925d3264..c747b2d9eb77 100644 --- a/net/ipv4/netfilter/nf_reject_ipv4.c +++ b/net/ipv4/netfilter/nf_reject_ipv4.c @@ -157,7 +157,7 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook) dev_queue_xmit(nskb); } else #endif - ip_local_out(nskb); + ip_local_out(net, nskb->sk, nskb); return; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 28ef8a913130..8c0d0bdc2a7c 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -413,7 +413,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb, NULL, rt->dst.dev, - dst_output_okfn); + dst_output); if (err > 0) err = net_xmit_errno(err); if (err) @@ -484,6 +484,7 @@ static int raw_getfrag(void *from, char *to, int offset, int len, int odd, static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct inet_sock *inet = inet_sk(sk); + struct net *net = sock_net(sk); struct ipcm_cookie ipc; struct rtable *rt = NULL; struct flowi4 fl4; @@ -543,7 +544,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) ipc.oif = sk->sk_bound_dev_if; if (msg->msg_controllen) { - err = ip_cmsg_send(sock_net(sk), msg, &ipc, false); + err = ip_cmsg_send(net, msg, &ipc, false); if (err) goto out; if (ipc.opt) @@ -598,6 +599,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), daddr, saddr, 0, 0); + if (!saddr && ipc.oif) + l3mdev_get_saddr(net, ipc.oif, &fl4); + if (!inet->hdrincl) { rfv.msg = msg; rfv.hlen = 0; @@ -608,7 +612,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); - rt = ip_route_output_flow(sock_net(sk), &fl4, sk); + rt = ip_route_output_flow(net, &fl4, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 76ca4e75f785..85f184e429c6 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1152,7 +1152,7 @@ static void ipv4_link_failure(struct sk_buff *skb) dst_set_expires(&rt->dst, 0); } -static int ip_rt_bug(struct sock *sk, struct sk_buff *skb) +static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb) { pr_debug("%s: %pI4 -> %pI4, %s\n", __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, @@ -1651,6 +1651,48 @@ out: return err; } +#ifdef CONFIG_IP_ROUTE_MULTIPATH + +/* To make ICMP packets follow the right flow, the multipath hash is + * calculated from the inner IP addresses in reverse order. 
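Concretely: an ICMP error quotes the offending packet, so for an original A to B flow the embedded header still reads saddr = A, daddr = B, while the error itself is routed back toward A. Hashing the inner addresses swapped, fib_multipath_hash(inner_iph->daddr, inner_iph->saddr), therefore produces the same key as ordinary B to A traffic of that conversation, and the error follows the same nexthop as the replies. A minimal illustration with a toy stand-in hash (only the argument order matters; fib_multipath_hash itself is keyed by fib_multipath_secret):

	/* Sketch only: why the inner header is hashed in reverse order. */
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t demo_hash(uint32_t a, uint32_t b)	/* asymmetric toy */
	{
		return a * 2654435761u ^ (b + 0x9e3779b9u);
	}

	int main(void)
	{
		uint32_t A = 0x0a000001, B = 0xc0a80001;	/* 10.0.0.1, 192.168.0.1 */
		uint32_t reply   = demo_hash(B, A);	/* ordinary B->A packet */
		uint32_t swapped = demo_hash(B, A);	/* hash(inner daddr, inner saddr) */
		uint32_t naive   = demo_hash(A, B);	/* unswapped order would differ */

		printf("reply=%u swapped=%u naive=%u\n", reply, swapped, naive);
		return 0;
	}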
+ */ +static int ip_multipath_icmp_hash(struct sk_buff *skb) +{ + const struct iphdr *outer_iph = ip_hdr(skb); + struct icmphdr _icmph; + const struct icmphdr *icmph; + struct iphdr _inner_iph; + const struct iphdr *inner_iph; + + if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0)) + goto standard_hash; + + icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph), + &_icmph); + if (!icmph) + goto standard_hash; + + if (icmph->type != ICMP_DEST_UNREACH && + icmph->type != ICMP_REDIRECT && + icmph->type != ICMP_TIME_EXCEEDED && + icmph->type != ICMP_PARAMETERPROB) { + goto standard_hash; + } + + inner_iph = skb_header_pointer(skb, + outer_iph->ihl * 4 + sizeof(_icmph), + sizeof(_inner_iph), &_inner_iph); + if (!inner_iph) + goto standard_hash; + + return fib_multipath_hash(inner_iph->daddr, inner_iph->saddr); + +standard_hash: + return fib_multipath_hash(outer_iph->saddr, outer_iph->daddr); +} + +#endif /* CONFIG_IP_ROUTE_MULTIPATH */ + static int ip_mkroute_input(struct sk_buff *skb, struct fib_result *res, const struct flowi4 *fl4, @@ -1658,8 +1700,15 @@ static int ip_mkroute_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, u32 tos) { #ifdef CONFIG_IP_ROUTE_MULTIPATH - if (res->fi && res->fi->fib_nhs > 1) - fib_select_multipath(res); + if (res->fi && res->fi->fib_nhs > 1) { + int h; + + if (unlikely(ip_hdr(skb)->protocol == IPPROTO_ICMP)) + h = ip_multipath_icmp_hash(skb); + else + h = fib_multipath_hash(saddr, daddr); + fib_select_multipath(res, h); + } #endif /* create a routing cache entry */ @@ -2026,7 +2075,8 @@ add: * Major route resolver routine. */ -struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) +struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, + int mp_hash) { struct net_device *dev_out = NULL; __u8 tos = RT_FL_TOS(fl4); @@ -2146,7 +2196,8 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) if (err) { res.fi = NULL; res.table = NULL; - if (fl4->flowi4_oif) { + if (fl4->flowi4_oif && + !netif_index_is_l3_master(net, fl4->flowi4_oif)) { /* Apparently, routing tables are wrong. Assume, that the destination is on link. @@ -2188,18 +2239,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) goto make_route; } -#ifdef CONFIG_IP_ROUTE_MULTIPATH - if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0) - fib_select_multipath(&res); - else -#endif - if (!res.prefixlen && - res.table->tb_num_default > 1 && - res.type == RTN_UNICAST && !fl4->flowi4_oif) - fib_select_default(fl4, &res); - - if (!fl4->saddr) - fl4->saddr = FIB_RES_PREFSRC(net, res); + fib_select_path(net, &res, fl4, mp_hash); dev_out = FIB_RES_DEV(res); fl4->flowi4_oif = dev_out->ifindex; @@ -2212,7 +2252,7 @@ out: rcu_read_unlock(); return rth; } -EXPORT_SYMBOL_GPL(__ip_route_output_key); +EXPORT_SYMBOL_GPL(__ip_route_output_key_hash); static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie) { @@ -2264,7 +2304,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or new->__use = 1; new->input = dst_discard; - new->output = dst_discard_sk; + new->output = dst_discard_out; new->dev = ort->dst.dev; if (new->dev) @@ -2468,6 +2508,9 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) fl4.flowi4_oif = tb[RTA_OIF] ? 
nla_get_u32(tb[RTA_OIF]) : 0; fl4.flowi4_mark = mark; + if (netif_index_is_l3_master(net, fl4.flowi4_oif)) + fl4.flowi4_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF; + if (iif) { struct net_device *dev; diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 729ceb5f63c6..4c0892badb8b 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -225,6 +225,7 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst); if (child) { atomic_set(&req->rsk_refcnt, 1); + sock_rps_save_rxhash(child, skb); inet_csk_reqsk_queue_add(sk, req, child); } else { reqsk_free(req); @@ -326,7 +327,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) goto out; ret = NULL; - req = inet_reqsk_alloc(&tcp_request_sock_ops, sk); /* for safety */ + req = inet_reqsk_alloc(&tcp_request_sock_ops, sk, false); /* for safety */ if (!req) goto out; @@ -381,10 +382,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) } /* Try to redo what tcp_v4_send_synack did. */ - req->window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW); + req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW); tcp_select_initial_window(tcp_full_space(sk), req->mss, - &req->rcv_wnd, &req->window_clamp, + &req->rsk_rcv_wnd, &req->rsk_window_clamp, ireq->wscale_ok, &rcv_wscale, dst_metric(&rt->dst, RTAX_INITRWND)); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 3c96fa87ff9e..ac1bdbb50352 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -900,7 +900,8 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, */ if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && !tcp_passive_fastopen(sk)) { - if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) + err = sk_stream_wait_connect(sk, &timeo); + if (err != 0) goto out_err; } @@ -967,7 +968,8 @@ new_segment: copied += copy; offset += copy; - if (!(size -= copy)) { + size -= copy; + if (!size) { tcp_tx_timestamp(sk, skb); goto out; } @@ -988,7 +990,8 @@ wait_for_memory: tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH, size_goal); - if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) + err = sk_stream_wait_memory(sk, &timeo); + if (err != 0) goto do_error; mss_now = tcp_send_mss(sk, &size_goal, flags); @@ -1111,7 +1114,8 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) */ if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && !tcp_passive_fastopen(sk)) { - if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) + err = sk_stream_wait_connect(sk, &timeo); + if (err != 0) goto do_error; } @@ -1267,7 +1271,8 @@ wait_for_memory: tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH, size_goal); - if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) + err = sk_stream_wait_memory(sk, &timeo); + if (err != 0) goto do_error; mss_now = tcp_send_mss(sk, &size_goal, flags); @@ -1767,7 +1772,8 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, /* __ Restore normal policy in scheduler __ */ - if ((chunk = len - tp->ucopy.len) != 0) { + chunk = len - tp->ucopy.len; + if (chunk != 0) { NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); len -= chunk; copied += chunk; @@ -1778,7 +1784,8 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, do_prequeue: tcp_prequeue_process(sk); - if ((chunk = len - tp->ucopy.len) != 0) { + chunk = len - tp->ucopy.len; + if (chunk != 0) { 
NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); len -= chunk; copied += chunk; @@ -2230,7 +2237,8 @@ int tcp_disconnect(struct sock *sk, int flags) sk->sk_shutdown = 0; sock_reset_flag(sk, SOCK_DONE); tp->srtt_us = 0; - if ((tp->write_seq += tp->max_window + 2) == 0) + tp->write_seq += tp->max_window + 2; + if (tp->write_seq == 0) tp->write_seq = 1; icsk->icsk_backoff = 0; tp->snd_cwnd = 2; diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 410ac481fda0..93396bf7b475 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -168,8 +168,6 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk, TCP_TIMEOUT_INIT, TCP_RTO_MAX); atomic_set(&req->rsk_refcnt, 2); - /* Add the child socket directly into the accept queue */ - inet_csk_reqsk_queue_add(sk, req, child); /* Now finish processing the fastopen child socket. */ inet_csk(child)->icsk_af_ops->rebuild_header(child); @@ -178,12 +176,10 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk, tcp_init_metrics(child); tcp_init_buffer_space(child); - /* Queue the data carried in the SYN packet. We need to first - * bump skb's refcnt because the caller will attempt to free it. - * Note that IPv6 might also have used skb_get() trick - * in tcp_v6_conn_request() to keep this SYN around (treq->pktopts) - * So we need to eventually get a clone of the packet, - * before inserting it in sk_receive_queue. + /* Queue the data carried in the SYN packet. + * We used to play tricky games with skb_get(). + * With lockless listener, it is a dead end. + * Do not think about it. * * XXX (TFO) - we honor a zero-payload TFO request for now, * (any reason not to?) but no need to queue the skb since @@ -191,12 +187,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk, */ end_seq = TCP_SKB_CB(skb)->end_seq; if (end_seq != TCP_SKB_CB(skb)->seq + 1) { - struct sk_buff *skb2; - - if (unlikely(skb_shared(skb))) - skb2 = skb_clone(skb, GFP_ATOMIC); - else - skb2 = skb_get(skb); + struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (likely(skb2)) { skb_dst_drop(skb2); @@ -214,12 +205,9 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk, } } tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq; - sk->sk_data_ready(sk); - bh_unlock_sock(child); - /* Note: sock_put(child) will be done by tcp_conn_request() - * after SYNACK packet is sent. + /* tcp_conn_request() is sending the SYNACK, + * and queues the child into listener accept queue. */ - WARN_ON(!req->sk); return child; } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 27108757c310..3b35c3f4d268 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6022,7 +6022,7 @@ static void tcp_openreq_init(struct request_sock *req, { struct inet_request_sock *ireq = inet_rsk(req); - req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */ + req->rsk_rcv_wnd = 0; /* So that tcp_send_synack() knows! 
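
The zero written here appears to act as a sentinel ("window not chosen yet") rather than a real window, which is what the comment hints at. A minimal sketch of that pattern with hypothetical toy types, not the kernel's:

struct toy_req {
	unsigned int rcv_wnd;		/* 0 means "not chosen yet" */
};

static unsigned int toy_synack_window(struct toy_req *req)
{
	if (req->rcv_wnd == 0)		/* first SYNACK for this request */
		req->rcv_wnd = 65535;	/* compute/clamp the real value here */
	return req->rcv_wnd;		/* retransmits reuse the same window */
}
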
*/ req->cookie_ts = 0; tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; @@ -6042,9 +6042,11 @@ static void tcp_openreq_init(struct request_sock *req, } struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops, - struct sock *sk_listener) + struct sock *sk_listener, + bool attach_listener) { - struct request_sock *req = reqsk_alloc(ops, sk_listener); + struct request_sock *req = reqsk_alloc(ops, sk_listener, + attach_listener); if (req) { struct inet_request_sock *ireq = inet_rsk(req); @@ -6143,7 +6145,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, goto drop; } - req = inet_reqsk_alloc(rsk_ops, sk); + req = inet_reqsk_alloc(rsk_ops, sk, !want_cookie); if (!req) goto drop; @@ -6229,12 +6231,16 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, tcp_rsk(req)->txhash = net_tx_rndhash(); tcp_openreq_init_rwin(req, sk, dst); if (!want_cookie) { - fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst); tcp_reqsk_record_syn(sk, req, skb); + fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst); } if (fastopen_sk) { af_ops->send_synack(fastopen_sk, dst, &fl, req, skb_get_queue_mapping(skb), &foc, false); + /* Add the child socket directly into the accept queue */ + inet_csk_reqsk_queue_add(sk, req, fastopen_sk); + sk->sk_data_ready(sk); + bh_unlock_sock(fastopen_sk); sock_put(fastopen_sk); } else { tcp_rsk(req)->tfo_listener = false; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 34310748a365..9c68cf3762c4 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -324,7 +324,6 @@ void tcp_req_err(struct sock *sk, u32 seq) if (seq != tcp_rsk(req)->snt_isn) { NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); - reqsk_put(req); } else { /* * Still in SYN_RECV, just remove it silently. @@ -332,9 +331,10 @@ void tcp_req_err(struct sock *sk, u32 seq) * created socket, and POSIX does not want network * errors returned from accept(). */ - NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS); inet_csk_reqsk_queue_drop(req->rsk_listener, req); + NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS); } + reqsk_put(req); } EXPORT_SYMBOL(tcp_req_err); @@ -803,7 +803,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, */ tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, - tcp_rsk(req)->rcv_nxt, req->rcv_wnd, + tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd, tcp_time_stamp, req->ts_recent, 0, @@ -1572,6 +1572,7 @@ int tcp_v4_rcv(struct sk_buff *skb) TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph); TCP_SKB_CB(skb)->sacked = 0; +lookup: sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); if (!sk) goto no_tcp_socket; @@ -1587,8 +1588,12 @@ process: sk = req->rsk_listener; if (tcp_v4_inbound_md5_hash(sk, skb)) goto discard_and_relse; - if (sk->sk_state == TCP_LISTEN) + if (likely(sk->sk_state == TCP_LISTEN)) { nsk = tcp_check_req(sk, skb, req, false); + } else { + inet_csk_reqsk_queue_drop_and_put(sk, req); + goto lookup; + } if (!nsk) { reqsk_put(req); goto discard_it; diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 9adf1e2c3170..41828bdc5d32 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -381,18 +381,18 @@ void tcp_openreq_init_rwin(struct request_sock *req, window_clamp = READ_ONCE(tp->window_clamp); /* Set this up on the first call only */ - req->window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW); + req->rsk_window_clamp = window_clamp ? 
: dst_metric(dst, RTAX_WINDOW); /* limit the window selection if the user enforces a smaller rx buffer */ if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK && (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0)) req->rsk_window_clamp = full_space; /* tcp_full_space because it is guaranteed to be the first packet */ tcp_select_initial_window(full_space, mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), - &req->rcv_wnd, - &req->window_clamp, + &req->rsk_rcv_wnd, + &req->rsk_window_clamp, ireq->wscale_ok, &rcv_wscale, dst_metric(dst, RTAX_INITRWND)); @@ -512,9 +512,9 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, if (sysctl_tcp_fack) tcp_enable_fack(newtp); } - newtp->window_clamp = req->window_clamp; - newtp->rcv_ssthresh = req->rcv_wnd; - newtp->rcv_wnd = req->rcv_wnd; + newtp->window_clamp = req->rsk_window_clamp; + newtp->rcv_ssthresh = req->rsk_rcv_wnd; + newtp->rcv_wnd = req->rsk_rcv_wnd; newtp->rx_opt.wscale_ok = ireq->wscale_ok; if (newtp->rx_opt.wscale_ok) { newtp->rx_opt.snd_wscale = ireq->snd_wscale; @@ -707,7 +707,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, /* RFC793: "first check sequence number". */ if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, - tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rcv_wnd)) { + tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) { /* Out of window: send ACK and drop. */ if (!(flg & TCP_FLAG_RST)) req->rsk_ops->send_ack(sk, skb, req); @@ -768,6 +768,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, if (!child) goto listen_overflow; + sock_rps_save_rxhash(child, skb); tcp_synack_rtt_meas(child, req); inet_csk_reqsk_queue_drop(sk, req); inet_csk_reqsk_queue_add(sk, req, child); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 55ed3266b05f..6e79fcb0addb 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3023,7 +3023,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ - th->window = htons(min(req->rcv_wnd, 65535U)); + th->window = htons(min(req->rsk_rcv_wnd, 65535U)); tcp_options_write((__be32 *)(th + 1), NULL, &opts); th->doff = (tcp_header_size >> 2); TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 7149ebc820c7..c9c716a483e4 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -83,7 +83,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset) } /* Calculate maximal number of retries on an orphaned socket. */ -static int tcp_orphan_retries(struct sock *sk, int alive) +static int tcp_orphan_retries(struct sock *sk, bool alive) { int retries = sysctl_tcp_orphan_retries; /* May be zero.
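
The int-to-bool conversion here reflects that 'alive' only ever records whether the retransmission timeout can still back off; the hunks that follow convert its producers the same way. A compact sketch of the surrounding logic with toy names and constants (the retry floor of 8 mirrors the kernel's choice), not the kernel implementation:

#include <stdbool.h>

#define TOY_RTO_MAX 120			/* stand-in for TCP_RTO_MAX */

static bool toy_orphan_alive(int icsk_rto)
{
	return icsk_rto < TOY_RTO_MAX;	/* the whole meaning of "alive" */
}

static int toy_orphan_retries(bool alive, int sysctl_retries)
{
	int retries = sysctl_retries;	/* may be zero */

	/* as long as the RTO can still grow, insist on a few probes */
	if (alive && retries == 0)
		retries = 8;
	return retries;
}
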
*/ @@ -184,7 +184,7 @@ static int tcp_write_timeout(struct sock *sk) retry_until = sysctl_tcp_retries2; if (sock_flag(sk, SOCK_DEAD)) { - const int alive = icsk->icsk_rto < TCP_RTO_MAX; + const bool alive = icsk->icsk_rto < TCP_RTO_MAX; retry_until = tcp_orphan_retries(sk, alive); do_reset = alive || @@ -298,7 +298,7 @@ static void tcp_probe_timer(struct sock *sk) max_probes = sysctl_tcp_retries2; if (sock_flag(sk, SOCK_DEAD)) { - const int alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX; + const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX; max_probes = tcp_orphan_retries(sk, alive); if (!alive && icsk->icsk_backoff >= max_probes) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 156ba75b6000..24ec14f9825c 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -375,7 +375,8 @@ static inline int compute_score(struct sock *sk, struct net *net, return -1; score += 4; } - + if (sk->sk_incoming_cpu == raw_smp_processor_id()) + score++; return score; } @@ -419,6 +420,9 @@ static inline int compute_score2(struct sock *sk, struct net *net, score += 4; } + if (sk->sk_incoming_cpu == raw_smp_processor_id()) + score++; + return score; } @@ -1017,30 +1021,14 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl4 = &fl4_stack; - /* unconnected socket. If output device is enslaved to a VRF - * device lookup source address from VRF table. This mimics - * behavior of ip_route_connect{_init}. - */ - if (netif_index_is_l3_master(net, ipc.oif)) { - flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, - RT_SCOPE_UNIVERSE, sk->sk_protocol, - (flow_flags | FLOWI_FLAG_VRFSRC | - FLOWI_FLAG_SKIP_NH_OIF), - faddr, saddr, dport, - inet->inet_sport); - - rt = ip_route_output_flow(net, fl4, sk); - if (!IS_ERR(rt)) { - saddr = fl4->saddr; - ip_rt_put(rt); - } - } - flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, sk->sk_protocol, flow_flags, faddr, saddr, dport, inet->inet_sport); + if (!saddr && ipc.oif) + l3mdev_get_saddr(net, ipc.oif, fl4); + security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); rt = ip_route_output_flow(net, fl4, sk); if (IS_ERR(rt)) { diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index cd6be736e19f..9f298d0dc9a1 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@ -87,17 +87,15 @@ static int __xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb) #ifdef CONFIG_NETFILTER if (!x) { IPCB(skb)->flags |= IPSKB_REROUTED; - return dst_output(sk, skb); + return dst_output(net, sk, skb); } #endif return x->outer_mode->afinfo->output_finish(sk, skb); } -int xfrm4_output(struct sock *sk, struct sk_buff *skb) +int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb) { - struct net *net = dev_net(skb_dst(skb)->dev); - return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb, NULL, skb_dst(skb)->dev, __xfrm4_output, diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index c8380f1876f1..f0326aae7a02 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -81,6 +81,7 @@ #include <net/ip.h> #include <net/netlink.h> #include <net/pkt_sched.h> +#include <net/l3mdev.h> #include <linux/if_tunnel.h> #include <linux/rtnetlink.h> #include <linux/netconf.h> @@ -2146,7 +2147,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev, unsigned long expires, u32 flags) { struct fib6_config cfg = { - .fc_table = RT6_TABLE_PREFIX, + .fc_table = l3mdev_fib_table(dev) ? 
: RT6_TABLE_PREFIX, .fc_metric = IP6_RT_PRIO_ADDRCONF, .fc_ifindex = dev->ifindex, .fc_expires = expires, @@ -2179,8 +2180,9 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, struct fib6_node *fn; struct rt6_info *rt = NULL; struct fib6_table *table; + u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX; - table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX); + table = fib6_get_table(dev_net(dev), tb_id); if (!table) return NULL; @@ -2211,7 +2213,7 @@ out: static void addrconf_add_mroute(struct net_device *dev) { struct fib6_config cfg = { - .fc_table = RT6_TABLE_LOCAL, + .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL, .fc_metric = IP6_RT_PRIO_ADDRCONF, .fc_ifindex = dev->ifindex, .fc_dst_len = 8, @@ -3029,6 +3031,10 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route) { struct in6_addr addr; + /* no link local addresses on L3 master devices */ + if (netif_is_l3_master(idev->dev)) + return; + ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); if (idev->addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY) { diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 6c2b2132c8d3..efb1c00f2270 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -68,6 +68,7 @@ #include <net/xfrm.h> #include <net/inet_common.h> #include <net/dsfield.h> +#include <net/l3mdev.h> #include <asm/uaccess.h> @@ -496,6 +497,9 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) else if (!fl6.flowi6_oif) fl6.flowi6_oif = np->ucast_oif; + if (!fl6.flowi6_oif) + fl6.flowi6_oif = l3mdev_master_ifindex(skb->dev); + dst = icmpv6_route_lookup(net, skb, sk, &fl6); if (IS_ERR(dst)) goto out; @@ -575,7 +579,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) fl6.daddr = ipv6_hdr(skb)->saddr; if (saddr) fl6.saddr = *saddr; - fl6.flowi6_oif = skb->dev->ifindex; + fl6.flowi6_oif = l3mdev_fib_oif(skb->dev); fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY; fl6.flowi6_mark = mark; security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); diff --git a/net/ipv6/ila.c b/net/ipv6/ila.c index 678d2df4b8d9..1a6852e1ac69 100644 --- a/net/ipv6/ila.c +++ b/net/ipv6/ila.c @@ -91,7 +91,7 @@ static void update_ipv6_locator(struct sk_buff *skb, struct ila_params *p) *(__be64 *)&ip6h->daddr = p->locator; } -static int ila_output(struct sock *sk, struct sk_buff *skb) +static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); @@ -100,7 +100,7 @@ static int ila_output(struct sock *sk, struct sk_buff *skb) update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate)); - return dst->lwtstate->orig_output(sk, skb); + return dst->lwtstate->orig_output(net, sk, skb); drop: kfree_skb(skb); diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 6ac8dad0138a..21ace5a2bf7c 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -114,6 +114,8 @@ static inline int compute_score(struct sock *sk, struct net *net, return -1; score++; } + if (sk->sk_incoming_cpu == raw_smp_processor_id()) + score++; } return score; } diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 7d2e0023c72d..09fddf70cca4 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -264,6 +264,7 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id) return NULL; } +EXPORT_SYMBOL_GPL(fib6_get_table); static void __net_init fib6_tables_init(struct net *net) { diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index a598fe2c0849..23f97c4783bb 100644 --- a/net/ipv6/ip6_output.c +++ 
b/net/ipv6/ip6_output.c @@ -55,12 +55,12 @@ #include <net/xfrm.h> #include <net/checksum.h> #include <linux/mroute6.h> +#include <net/l3mdev.h> -static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb) +static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct net_device *dev = dst->dev; - struct net *net = dev_net(dev); struct neighbour *neigh; struct in6_addr *nexthop; int ret; @@ -126,16 +126,15 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || dst_allfrag(skb_dst(skb)) || (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size)) - return ip6_fragment(sk, skb, ip6_finish_output2); + return ip6_fragment(net, sk, skb, ip6_finish_output2); else - return ip6_finish_output2(sk, skb); + return ip6_finish_output2(net, sk, skb); } -int ip6_output(struct sock *sk, struct sk_buff *skb) +int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct net_device *dev = skb_dst(skb)->dev; struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); - struct net *net = dev_net(dev); if (unlikely(idev->cnf.disable_ipv6)) { IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); @@ -234,7 +233,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, */ return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, (struct sock *)sk, skb, NULL, dst->dev, - dst_output_okfn); + dst_output); } skb->dev = dst->dev; @@ -334,7 +333,7 @@ static inline int ip6_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { skb_sender_cpu_clear(skb); - return dst_output(sk, skb); + return dst_output(net, sk, skb); } static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst) @@ -554,8 +553,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) skb_copy_secmark(to, from); } -int ip6_fragment(struct sock *sk, struct sk_buff *skb, - int (*output)(struct sock *, struct sk_buff *)) +int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + int (*output)(struct net *, struct sock *, struct sk_buff *)) { struct sk_buff *frag; struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); @@ -568,7 +567,6 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb, __be32 frag_id; int ptr, offset = 0, err = 0; u8 *prevhdr, nexthdr = 0; - struct net *net = dev_net(skb_dst(skb)->dev); hlen = ip6_find_1stfragopt(skb, &prevhdr); nexthdr = *prevhdr; @@ -688,7 +686,7 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb, ip6_copy_metadata(frag, skb); } - err = output(sk, skb); + err = output(net, sk, skb); if (!err) IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGCREATES); @@ -816,7 +814,7 @@ slow_path: /* * Put this fragment into the sending queue. 
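
The slow path below receives the same treatment as the fast path above: the struct net pointer is threaded explicitly through every output callback instead of being re-derived from skb_dst(skb)->dev at each hop. A minimal sketch of the new callback shape, with opaque placeholder types:

struct net;
struct sock;
struct sk_buff;

/* new callback shape: the netns travels with the call chain */
typedef int (*output_fn)(struct net *net, struct sock *sk,
			 struct sk_buff *skb);

static int toy_fragment(struct net *net, struct sock *sk,
			struct sk_buff *skb, output_fn output)
{
	/* ... build the fragment queue ... */
	return output(net, sk, skb);	/* no dev_net() lookup needed */
}
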
*/ - err = output(sk, frag); + err = output(net, sk, frag); if (err) goto fail; @@ -888,7 +886,8 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk, #ifdef CONFIG_IPV6_SUBTREES ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) || #endif - (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) { + (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) && + (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) { dst_release(dst); dst = NULL; } @@ -1040,7 +1039,7 @@ struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, if (final_dst) fl6->daddr = *final_dst; if (!fl6->flowi6_oif) - fl6->flowi6_oif = dst->dev->ifindex; + fl6->flowi6_oif = l3mdev_fib_oif(dst->dev); return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); } @@ -1694,7 +1693,7 @@ int ip6_send_skb(struct sk_buff *skb) struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); int err; - err = ip6_local_out(skb); + err = ip6_local_out(net, skb->sk, skb); if (err) { if (err > 0) err = net_xmit_errno(err); diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index f96f1c19b4a8..0a8610b33d79 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -482,7 +482,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) return -EMSGSIZE; } - err = dst_output(skb->sk, skb); + err = dst_output(t->net, skb->sk, skb); if (net_xmit_eval(err) == 0) { struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 5e5d16e7ce85..ad19136086dd 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1991,7 +1991,7 @@ static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct IPSTATS_MIB_OUTFORWDATAGRAMS); IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTOCTETS, skb->len); - return dst_output(sk, skb); + return dst_output(net, sk, skb); } /* diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index a8bf57ca74d3..124338a39e29 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1646,7 +1646,7 @@ static void mld_sendpack(struct sk_buff *skb) err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, net->ipv6.igmp_sk, skb, NULL, skb->dev, - dst_output_okfn); + dst_output); out: if (!err) { ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT); @@ -2010,7 +2010,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) skb_dst_set(skb, dst); err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb, NULL, skb->dev, - dst_output_okfn); + dst_output); out: if (!err) { ICMP6MSGOUT_INC_STATS(net, idev, type); diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index b9779d441b12..60c79a08e14a 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c @@ -118,7 +118,7 @@ static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb) struct mip6_report_rate_limiter { spinlock_t lock; - struct timeval stamp; + ktime_t stamp; int iif; struct in6_addr src; struct in6_addr dst; @@ -184,20 +184,18 @@ static int mip6_destopt_output(struct xfrm_state *x, struct sk_buff *skb) return 0; } -static inline int mip6_report_rl_allow(struct timeval *stamp, +static inline int mip6_report_rl_allow(ktime_t stamp, const struct in6_addr *dst, const struct in6_addr *src, int iif) { int allow = 0; spin_lock_bh(&mip6_report_rl.lock); - if (mip6_report_rl.stamp.tv_sec != stamp->tv_sec || - mip6_report_rl.stamp.tv_usec != stamp->tv_usec || + if (!ktime_equal(mip6_report_rl.stamp, stamp) || mip6_report_rl.iif != iif || !ipv6_addr_equal(&mip6_report_rl.src, src) || !ipv6_addr_equal(&mip6_report_rl.dst, 
dst)) { - mip6_report_rl.stamp.tv_sec = stamp->tv_sec; - mip6_report_rl.stamp.tv_usec = stamp->tv_usec; + mip6_report_rl.stamp = stamp; mip6_report_rl.iif = iif; mip6_report_rl.src = *src; mip6_report_rl.dst = *dst; @@ -216,7 +214,7 @@ static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb, struct ipv6_destopt_hao *hao = NULL; struct xfrm_selector sel; int offset; - struct timeval stamp; + ktime_t stamp; int err = 0; if (unlikely(fl6->flowi6_proto == IPPROTO_MH && @@ -230,9 +228,9 @@ static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb, (skb_network_header(skb) + offset); } - skb_get_timestamp(skb, &stamp); + stamp = skb_get_ktime(skb); - if (!mip6_report_rl_allow(&stamp, &ipv6_hdr(skb)->daddr, + if (!mip6_report_rl_allow(stamp, &ipv6_hdr(skb)->daddr, hao ? &hao->addr : &ipv6_hdr(skb)->saddr, opt->iif)) goto out; diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 7089c305245c..3e0f855e1bea 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -67,6 +67,7 @@ #include <net/flow.h> #include <net/ip6_checksum.h> #include <net/inet_common.h> +#include <net/l3mdev.h> #include <linux/proc_fs.h> #include <linux/netfilter.h> @@ -147,6 +148,7 @@ struct neigh_table nd_tbl = { .gc_thresh2 = 512, .gc_thresh3 = 1024, }; +EXPORT_SYMBOL_GPL(nd_tbl); static void ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data) { @@ -441,8 +443,11 @@ static void ndisc_send_skb(struct sk_buff *skb, if (!dst) { struct flowi6 fl6; + int oif = l3mdev_fib_oif(skb->dev); - icmpv6_flow_init(sk, &fl6, type, saddr, daddr, skb->dev->ifindex); + icmpv6_flow_init(sk, &fl6, type, saddr, daddr, oif); + if (oif != skb->dev->ifindex) + fl6.flowi6_flags |= FLOWI_FLAG_L3MDEV_SRC; dst = icmp6_dst_alloc(skb->dev, &fl6); if (IS_ERR(dst)) { kfree_skb(skb); @@ -465,7 +470,7 @@ static void ndisc_send_skb(struct sk_buff *skb, err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb, NULL, dst->dev, - dst_output_okfn); + dst_output); if (!err) { ICMP6MSGOUT_INC_STATS(net, idev, type); ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); @@ -766,7 +771,7 @@ static void ndisc_recv_ns(struct sk_buff *skb) ifp = ipv6_get_ifaddr(dev_net(dev), &msg->target, dev, 1); if (ifp) { - +have_ifp: if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) { if (dad) { /* @@ -792,6 +797,18 @@ static void ndisc_recv_ns(struct sk_buff *skb) } else { struct net *net = dev_net(dev); + /* perhaps an address on the master device */ + if (netif_is_l3_slave(dev)) { + struct net_device *mdev; + + mdev = netdev_master_upper_dev_get_rcu(dev); + if (mdev) { + ifp = ipv6_get_ifaddr(net, &msg->target, mdev, 1); + if (ifp) + goto have_ifp; + } + } + idev = in6_dev_get(dev); if (!idev) { /* XXX: count this drop? 
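
A toy model of the fallback added just above: a neighbour solicitation arriving on an L3 slave (e.g. a VRF port) may target an address configured on the master device, so the address lookup is retried there before the solicitation is ignored. All types and names here are illustrative, not kernel APIs:

#include <stdbool.h>
#include <stddef.h>

struct toy_dev {
	struct toy_dev *master;		/* NULL unless an L3 slave */
	unsigned int addr;		/* single address, for the model */
};

static bool toy_has_addr(const struct toy_dev *dev, unsigned int addr)
{
	return dev->addr == addr;
}

static bool toy_ns_target_is_ours(const struct toy_dev *dev,
				  unsigned int addr)
{
	if (toy_has_addr(dev, addr))
		return true;
	/* perhaps an address on the master device */
	return dev->master && toy_has_addr(dev->master, addr);
}
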
*/ @@ -1483,6 +1500,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) struct flowi6 fl6; int rd_len; u8 ha_buf[MAX_ADDR_LEN], *ha = NULL; + int oif = l3mdev_fib_oif(dev); bool ret; if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) { @@ -1499,7 +1517,10 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) } icmpv6_flow_init(sk, &fl6, NDISC_REDIRECT, - &saddr_buf, &ipv6_hdr(skb)->saddr, dev->ifindex); + &saddr_buf, &ipv6_hdr(skb)->saddr, oif); + + if (oif != skb->dev->ifindex) + fl6.flowi6_flags |= FLOWI_FLAG_L3MDEV_SRC; dst = ip6_route_output(net, NULL, &fl6); if (dst->error) { diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index 3426d9df1be7..3deed5860a42 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c @@ -76,7 +76,7 @@ synproxy_send_tcp(const struct synproxy_net *snet, nf_conntrack_get(nfct); } - ip6_local_out(nskb); + ip6_local_out(net, nskb->sk, nskb); return; free_nskb: diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index deab0a1608d6..056f5d4a852a 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -563,12 +563,10 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff) return 0; } -struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user) +struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) { struct sk_buff *clone; struct net_device *dev = skb->dev; - struct net *net = skb_dst(skb) ? dev_net(skb_dst(skb)->dev) - : dev_net(skb->dev); struct frag_hdr *fhdr; struct frag_queue *fq; struct ipv6hdr *hdr; diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c index b6ddca746109..4fdbed5ebfb6 100644 --- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c +++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c @@ -63,7 +63,8 @@ static unsigned int ipv6_defrag(void *priv, return NF_ACCEPT; #endif - reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(state->hook, skb)); + reasm = nf_ct_frag6_gather(state->net, skb, + nf_ct6_defrag_user(state->hook, skb)); /* queued */ if (reasm == NULL) return NF_STOLEN; diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c index ee0d9a5b16c3..6989c70ae29f 100644 --- a/net/ipv6/netfilter/nf_dup_ipv6.c +++ b/net/ipv6/netfilter/nf_dup_ipv6.c @@ -68,7 +68,7 @@ void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum, } if (nf_dup_ipv6_route(net, skb, gw, oif)) { __this_cpu_write(nf_skb_duplicated, true); - ip6_local_out(skb); + ip6_local_out(net, skb->sk, skb); __this_cpu_write(nf_skb_duplicated, false); } else { kfree_skb(skb); diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c index 5629db84c047..e0f922b777e3 100644 --- a/net/ipv6/netfilter/nf_reject_ipv6.c +++ b/net/ipv6/netfilter/nf_reject_ipv6.c @@ -206,7 +206,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) dev_queue_xmit(nskb); } else #endif - ip6_local_out(nskb); + ip6_local_out(net, nskb->sk, nskb); } EXPORT_SYMBOL_GPL(nf_send_reset6); diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index e77102c4f804..462f2a76b5c2 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -138,9 +138,8 @@ int ip6_dst_hoplimit(struct dst_entry *dst) EXPORT_SYMBOL(ip6_dst_hoplimit); #endif -static int __ip6_local_out_sk(struct sock *sk, struct sk_buff *skb) +int 
__ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) { - struct net *net = dev_net(skb_dst(skb)->dev); int len; len = skb->len - sizeof(struct ipv6hdr); @@ -151,29 +150,18 @@ static int __ip6_local_out_sk(struct sock *sk, struct sk_buff *skb) return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb, NULL, skb_dst(skb)->dev, - dst_output_okfn); -} - -int __ip6_local_out(struct sk_buff *skb) -{ - return __ip6_local_out_sk(skb->sk, skb); + dst_output); } EXPORT_SYMBOL_GPL(__ip6_local_out); -int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb) +int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) { int err; - err = __ip6_local_out_sk(sk, skb); + err = __ip6_local_out(net, sk, skb); if (likely(err == 1)) - err = dst_output(sk, skb); + err = dst_output(net, sk, skb); return err; } -EXPORT_SYMBOL_GPL(ip6_local_out_sk); - -int ip6_local_out(struct sk_buff *skb) -{ - return ip6_local_out_sk(skb->sk, skb); -} EXPORT_SYMBOL_GPL(ip6_local_out); diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index fec0151522a2..dc65ec198f7c 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -655,7 +655,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length, IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb, - NULL, rt->dst.dev, dst_output_okfn); + NULL, rt->dst.dev, dst_output); if (err > 0) err = net_xmit_errno(err); if (err) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index d3d946773a3e..5fc1149fe91d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -61,6 +61,7 @@ #include <net/nexthop.h> #include <net/lwtunnel.h> #include <net/ip_tunnels.h> +#include <net/l3mdev.h> #include <asm/uaccess.h> @@ -86,9 +87,9 @@ static void ip6_dst_ifdown(struct dst_entry *, static int ip6_dst_gc(struct dst_ops *ops); static int ip6_pkt_discard(struct sk_buff *skb); -static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb); +static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb); static int ip6_pkt_prohibit(struct sk_buff *skb); -static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb); +static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb); static void ip6_link_failure(struct sk_buff *skb); static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu); @@ -308,7 +309,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = { .obsolete = DST_OBSOLETE_FORCE_CHK, .error = -EINVAL, .input = dst_discard, - .output = dst_discard_sk, + .output = dst_discard_out, }, .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), .rt6i_protocol = RTPROT_KERNEL, @@ -1044,6 +1045,9 @@ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); saved_fn = fn; + if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) + oif = 0; + redo_rt6_select: rt = rt6_select(fn, oif, strict); if (rt->rt6i_nsiblings) @@ -1141,7 +1145,7 @@ void ip6_route_input(struct sk_buff *skb) int flags = RT6_LOOKUP_F_HAS_SADDR; struct ip_tunnel_info *tun_info; struct flowi6 fl6 = { - .flowi6_iif = skb->dev->ifindex, + .flowi6_iif = l3mdev_fib_oif(skb->dev), .daddr = iph->daddr, .saddr = iph->saddr, .flowlabel = ip6_flowinfo(iph), @@ -1165,8 +1169,13 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, struct flowi6 *fl6) { + 
struct dst_entry *dst; int flags = 0; + dst = l3mdev_rt6_dst_by_oif(net, fl6); + if (dst) + return dst; + fl6->flowi6_iif = LOOPBACK_IFINDEX; if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) || @@ -1195,7 +1204,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori new->__use = 1; new->input = dst_discard; - new->output = dst_discard_sk; + new->output = dst_discard_out; if (dst_metrics_read_only(&ort->dst)) new->_metrics = ort->dst._metrics; @@ -1724,21 +1733,21 @@ static int ip6_convert_metrics(struct mx6_config *mxc, return -EINVAL; } -int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret) +static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg) { - int err; struct net *net = cfg->fc_nlinfo.nl_net; struct rt6_info *rt = NULL; struct net_device *dev = NULL; struct inet6_dev *idev = NULL; struct fib6_table *table; int addr_type; + int err = -EINVAL; if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128) - return -EINVAL; + goto out; #ifndef CONFIG_IPV6_SUBTREES if (cfg->fc_src_len) - return -EINVAL; + goto out; #endif if (cfg->fc_ifindex) { err = -ENODEV; @@ -1853,7 +1862,7 @@ int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret) switch (cfg->fc_type) { case RTN_BLACKHOLE: rt->dst.error = -EINVAL; - rt->dst.output = dst_discard_sk; + rt->dst.output = dst_discard_out; rt->dst.input = dst_discard; break; case RTN_PROHIBIT: @@ -1958,9 +1967,7 @@ install_route: cfg->fc_nlinfo.nl_net = dev_net(dev); - *rt_ret = rt; - - return 0; + return rt; out: if (dev) dev_put(dev); @@ -1969,20 +1976,21 @@ out: if (rt) dst_free(&rt->dst); - *rt_ret = NULL; - - return err; + return ERR_PTR(err); } int ip6_route_add(struct fib6_config *cfg) { struct mx6_config mxc = { .mx = NULL, }; - struct rt6_info *rt = NULL; + struct rt6_info *rt; int err; - err = ip6_route_info_create(cfg, &rt); - if (err) + rt = ip6_route_info_create(cfg); + if (IS_ERR(rt)) { + err = PTR_ERR(rt); + rt = NULL; goto out; + } err = ip6_convert_metrics(&mxc, cfg); if (err) @@ -2264,7 +2272,6 @@ static struct rt6_info *rt6_add_route_info(struct net *net, unsigned int pref) { struct fib6_config cfg = { - .fc_table = RT6_TABLE_INFO, .fc_metric = IP6_RT_PRIO_USER, .fc_ifindex = ifindex, .fc_dst_len = prefixlen, @@ -2275,6 +2282,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net, .fc_nlinfo.nl_net = net, }; + cfg.fc_table = l3mdev_fib_table_by_index(net, ifindex) ? : RT6_TABLE_INFO; cfg.fc_dst = *prefix; cfg.fc_gateway = *gwaddr; @@ -2315,7 +2323,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr, unsigned int pref) { struct fib6_config cfg = { - .fc_table = RT6_TABLE_DFLT, + .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT, .fc_metric = IP6_RT_PRIO_USER, .fc_ifindex = dev->ifindex, .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | @@ -2362,7 +2370,8 @@ static void rtmsg_to_fib6_config(struct net *net, { memset(cfg, 0, sizeof(*cfg)); - cfg->fc_table = RT6_TABLE_MAIN; + cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ? 
+ : RT6_TABLE_MAIN; cfg->fc_ifindex = rtmsg->rtmsg_ifindex; cfg->fc_metric = rtmsg->rtmsg_metric; cfg->fc_expires = rtmsg->rtmsg_info; @@ -2446,7 +2455,7 @@ static int ip6_pkt_discard(struct sk_buff *skb) return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES); } -static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb) +static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb) { skb->dev = skb_dst(skb)->dev; return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES); @@ -2457,7 +2466,7 @@ static int ip6_pkt_prohibit(struct sk_buff *skb) return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES); } -static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb) +static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb) { skb->dev = skb_dst(skb)->dev; return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); @@ -2471,6 +2480,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, const struct in6_addr *addr, bool anycast) { + u32 tb_id; struct net *net = dev_net(idev->dev); struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, DST_NOCOUNT); @@ -2493,7 +2503,8 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, rt->rt6i_gateway = *addr; rt->rt6i_dst.addr = *addr; rt->rt6i_dst.plen = 128; - rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); + tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL; + rt->rt6i_table = fib6_get_table(net, tb_id); rt->dst.flags |= DST_NOCACHE; atomic_set(&rt->dst.__refcnt, 1); @@ -2871,9 +2882,12 @@ static int ip6_route_multipath_add(struct fib6_config *cfg) r_cfg.fc_encap_type = nla_get_u16(nla); } - err = ip6_route_info_create(&r_cfg, &rt); - if (err) + rt = ip6_route_info_create(&r_cfg); + if (IS_ERR(rt)) { + err = PTR_ERR(rt); + rt = NULL; goto cleanup; + } err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg); if (err) { @@ -3252,6 +3266,11 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) } else { fl6.flowi6_oif = oif; + if (netif_index_is_l3_master(net, oif)) { + fl6.flowi6_flags = FLOWI_FLAG_L3MDEV_SRC | + FLOWI_FLAG_SKIP_NH_OIF; + } + rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6); } diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 7606eba83e7b..bb8f2fa1c7fb 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -170,7 +170,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) goto out; ret = NULL; - req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk); + req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk, false); if (!req) goto out; @@ -235,9 +235,9 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) goto out_free; } - req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); + req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); tcp_select_initial_window(tcp_full_space(sk), req->mss, - &req->rcv_wnd, &req->window_clamp, + &req->rsk_rcv_wnd, &req->rsk_window_clamp, ireq->wscale_ok, &rcv_wscale, dst_metric(dst, RTAX_INITRWND)); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 33334f0c217d..acb06f86f372 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -931,7 +931,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, */ tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ? 
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, - tcp_rsk(req)->rcv_nxt, req->rcv_wnd, + tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if, tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0, 0); @@ -1363,6 +1363,7 @@ static int tcp_v6_rcv(struct sk_buff *skb) th = tcp_hdr(skb); hdr = ipv6_hdr(skb); +lookup: sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest, inet6_iif(skb)); if (!sk) @@ -1382,8 +1383,12 @@ process: reqsk_put(req); goto discard_it; } - if (sk->sk_state == TCP_LISTEN) + if (likely(sk->sk_state == TCP_LISTEN)) { nsk = tcp_check_req(sk, skb, req, false); + } else { + inet_csk_reqsk_queue_drop_and_put(sk, req); + goto lookup; + } if (!nsk) { reqsk_put(req); goto discard_it; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 0aba654f5b91..01bcb49619ee 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -182,10 +182,12 @@ static inline int compute_score(struct sock *sk, struct net *net, score++; } + if (sk->sk_incoming_cpu == raw_smp_processor_id()) + score++; + return score; } -#define SCORE2_MAX (1 + 1 + 1) static inline int compute_score2(struct sock *sk, struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, @@ -223,6 +225,9 @@ static inline int compute_score2(struct sock *sk, struct net *net, score++; } + if (sk->sk_incoming_cpu == raw_smp_processor_id()) + score++; + return score; } @@ -251,8 +256,7 @@ begin: hash = udp6_ehashfn(net, daddr, hnum, saddr, sport); matches = 1; - } else if (score == SCORE2_MAX) - goto exact_match; + } } else if (score == badness && reuseport) { matches++; if (reciprocal_scale(hash, matches) == 0) @@ -269,7 +273,6 @@ begin: goto begin; if (result) { -exact_match: if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) result = NULL; else if (unlikely(compute_score2(result, net, saddr, sport, diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 0c3e9ffcf231..9db067a11b52 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -131,6 +131,13 @@ int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb) return xfrm_output(sk, skb); } +static int __xfrm6_output_finish(struct net *net, struct sock *sk, struct sk_buff *skb) +{ + struct xfrm_state *x = skb_dst(skb)->xfrm; + + return x->outer_mode->afinfo->output_finish(sk, skb); +} + static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); @@ -140,7 +147,7 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb) #ifdef CONFIG_NETFILTER if (!x) { IP6CB(skb)->flags |= IP6SKB_REROUTED; - return dst_output(sk, skb); + return dst_output(net, sk, skb); } #endif @@ -160,16 +167,14 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb) if (x->props.mode == XFRM_MODE_TUNNEL && ((skb->len > mtu && !skb_is_gso(skb)) || dst_allfrag(skb_dst(skb)))) { - return ip6_fragment(sk, skb, - x->outer_mode->afinfo->output_finish); + return ip6_fragment(net, sk, skb, + __xfrm6_output_finish); } return x->outer_mode->afinfo->output_finish(sk, skb); } -int xfrm6_output(struct sock *sk, struct sk_buff *skb) +int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb) { - struct net *net = dev_net(skb_dst(skb)->dev); - return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb, NULL, skb_dst(skb)->dev, __xfrm6_output, diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 69cee4e0d728..08c9c93f3527 100644 --- a/net/ipv6/xfrm6_policy.c +++ 
b/net/ipv6/xfrm6_policy.c @@ -37,6 +37,7 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif, memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = oif; + fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF; memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr)); if (saddr) memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr)); diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c index ddf75ad41713..8e5ead366e7f 100644 --- a/net/l3mdev/l3mdev.c +++ b/net/l3mdev/l3mdev.c @@ -26,11 +26,11 @@ int l3mdev_master_ifindex_rcu(struct net_device *dev) if (netif_is_l3_master(dev)) { ifindex = dev->ifindex; - } else if (dev->flags & IFF_SLAVE) { + } else if (netif_is_l3_slave(dev)) { struct net_device *master; master = netdev_master_upper_dev_get_rcu(dev); - if (master && netif_is_l3_master(master)) + if (master) ifindex = master->ifindex; } @@ -54,7 +54,7 @@ u32 l3mdev_fib_table_rcu(const struct net_device *dev) if (netif_is_l3_master(dev)) { if (dev->l3mdev_ops->l3mdev_fib_table) tb_id = dev->l3mdev_ops->l3mdev_fib_table(dev); - } else if (dev->flags & IFF_SLAVE) { + } else if (netif_is_l3_slave(dev)) { /* Users of netdev_master_upper_dev_get_rcu need non-const, * but current inet_*type functions take a const */ @@ -62,7 +62,7 @@ u32 l3mdev_fib_table_rcu(const struct net_device *dev) const struct net_device *master; master = netdev_master_upper_dev_get_rcu(_dev); - if (master && netif_is_l3_master(master) && + if (master && master->l3mdev_ops->l3mdev_fib_table) tb_id = master->l3mdev_ops->l3mdev_fib_table(master); } diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 5c564a68fb50..10ad4ac1fa0b 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -79,7 +79,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, (int)reason); if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP, - &sta->sta, tid, NULL, 0)) + &sta->sta, tid, NULL, 0, false)) sdata_info(sta->sdata, "HW problem - can not stop rx aggregation for %pM tid %d\n", sta->sta.addr, tid); @@ -189,6 +189,7 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; + bool amsdu = ieee80211_hw_check(&local->hw, SUPPORTS_AMSDU_IN_AMPDU); u16 capab; skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); @@ -217,7 +218,8 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP; mgmt->u.action.u.addba_resp.dialog_token = dialog_token; - capab = (u16)(policy << 1); /* bit 1 aggregation policy */ + capab = (u16)(amsdu << 0); /* bit 0 A-MSDU support */ + capab |= (u16)(policy << 1); /* bit 1 aggregation policy */ capab |= (u16)(tid << 2); /* bit 5:2 TID number */ capab |= (u16)(buf_size << 6); /* bit 15:6 max size of aggregation */ @@ -321,7 +323,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta, __skb_queue_head_init(&tid_agg_rx->reorder_buf[i]); ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START, - &sta->sta, tid, &start_seq_num, 0); + &sta->sta, tid, &start_seq_num, 0, false); ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n", sta->sta.addr, tid, ret); if (ret) { diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index c8ba2e77737c..a758eb84e8f0 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -97,7 +97,8 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, 
mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ; mgmt->u.action.u.addba_req.dialog_token = dialog_token; - capab = (u16)(1 << 1); /* bit 1 aggregation policy */ + capab = (u16)(1 << 0); /* bit 0 A-MSDU support */ + capab |= (u16)(1 << 1); /* bit 1 aggregation policy */ capab |= (u16)(tid << 2); /* bit 5:2 TID number */ capab |= (u16)(agg_size << 6); /* bit 15:6 max size of aggregation */ @@ -331,7 +332,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, return -EALREADY; ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_TX_STOP_FLUSH_CONT, - &sta->sta, tid, NULL, 0); + &sta->sta, tid, NULL, 0, false); WARN_ON_ONCE(ret); return 0; } @@ -381,7 +382,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST; ret = drv_ampdu_action(local, sta->sdata, action, - &sta->sta, tid, NULL, 0); + &sta->sta, tid, NULL, 0, false); /* HW shall not deny going back to legacy */ if (WARN_ON(ret)) { @@ -469,7 +470,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) start_seq_num = sta->tid_seq[tid] >> 4; ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START, - &sta->sta, tid, &start_seq_num, 0); + &sta->sta, tid, &start_seq_num, 0, false); if (ret) { ht_dbg(sdata, "BA request denied - HW unavailable for %pM tid %d\n", @@ -693,7 +694,8 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local, drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_TX_OPERATIONAL, - &sta->sta, tid, NULL, tid_tx->buf_size); + &sta->sta, tid, NULL, tid_tx->buf_size, + tid_tx->amsdu); /* * synchronize with TX path, while splicing the TX path @@ -918,8 +920,10 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, struct tid_ampdu_tx *tid_tx; u16 capab, tid; u8 buf_size; + bool amsdu; capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); + amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK; tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; @@ -968,6 +972,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, } tid_tx->buf_size = buf_size; + tid_tx->amsdu = amsdu; if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)) ieee80211_agg_tx_operational(local, sta, tid); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 7a77a1470f25..68e551e263c6 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -981,7 +981,7 @@ static int sta_apply_auth_flags(struct ieee80211_local *local, * well. Some drivers require rate control initialized * before drv_sta_state() is called. */
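
Earlier in this hunk the ADDBA capability word gains an A-MSDU bit next to the existing policy/TID/buffer-size fields, and ieee80211_process_addba_resp() unpacks it with the IEEE80211_ADDBA_PARAM_* masks. A self-contained sketch of that packing; the local ADDBA_* macros mirror those kernel masks but are defined here purely for illustration:

#include <stdint.h>
#include <stdbool.h>

#define ADDBA_AMSDU_MASK	0x0001	/* bit 0: A-MSDU in A-MPDU */
#define ADDBA_POLICY_MASK	0x0002	/* bit 1: immediate BA policy */
#define ADDBA_TID_MASK		0x003c	/* bits 5:2 */
#define ADDBA_BUF_SIZE_MASK	0xffc0	/* bits 15:6 */

static uint16_t addba_pack(bool amsdu, bool policy,
			   unsigned int tid, unsigned int buf_size)
{
	return (uint16_t)((amsdu ? 1 : 0) |
			  ((policy ? 1 : 0) << 1) |
			  ((tid & 0xf) << 2) |
			  ((buf_size & 0x3ff) << 6));
}

static bool addba_amsdu(uint16_t capab)
{
	return capab & ADDBA_AMSDU_MASK;
}

static unsigned int addba_buf_size(uint16_t capab)
{
	return (capab & ADDBA_BUF_SIZE_MASK) >> 6;
}
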
- if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) + if (!test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) rate_control_rate_init(sta); ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC); @@ -1120,8 +1120,11 @@ static int sta_apply_parameters(struct ieee80211_local *local, local->hw.queues >= IEEE80211_NUM_ACS) sta->sta.wme = set & BIT(NL80211_STA_FLAG_WME); - /* auth flags will be set later for TDLS stations */ - if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { + /* auth flags will be set later for TDLS, + * and for unassociated stations that move to associated */ + if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER) && + !((mask & BIT(NL80211_STA_FLAG_ASSOCIATED)) && + (set & BIT(NL80211_STA_FLAG_ASSOCIATED)))) { ret = sta_apply_auth_flags(local, sta, mask, set); if (ret) return ret; @@ -1156,6 +1159,7 @@ static int sta_apply_parameters(struct ieee80211_local *local, set_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH); if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && + !sdata->u.mgd.tdls_wider_bw_prohibited && ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) && params->ext_capab_len >= 8 && params->ext_capab[7] & WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) @@ -1212,7 +1216,8 @@ static int sta_apply_parameters(struct ieee80211_local *local, sta_apply_mesh_params(local, sta, params); /* set the STA state after all sta info from usermode has been set */ - if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) || + set & BIT(NL80211_STA_FLAG_ASSOCIATED)) { ret = sta_apply_auth_flags(local, sta, mask, set); if (ret) return ret; @@ -1254,12 +1259,14 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, * defaults -- if userspace wants something else we'll * change it accordingly in sta_apply_parameters() */ - if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) { + if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) && + !(params->sta_flags_set & (BIT(NL80211_STA_FLAG_AUTHENTICATED) | + BIT(NL80211_STA_FLAG_ASSOCIATED)))) { sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); - } else { - sta->sta.tdls = true; } + if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) + sta->sta.tdls = true; err = sta_apply_parameters(local, sta, params); if (err) { @@ -1268,10 +1275,12 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, } /* - * for TDLS, rate control should be initialized only when - * rates are known and station is marked authorized + * for TDLS and for unassociated stations, rate control should be + * initialized only when rates are known and station is marked + * authorized/associated */ - if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) + if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER) && + test_sta_flag(sta, WLAN_STA_ASSOC)) rate_control_rate_init(sta); layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN || @@ -1346,7 +1355,10 @@ static int ieee80211_change_station(struct wiphy *wiphy, break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: - statype = CFG80211_STA_AP_CLIENT; + if (test_sta_flag(sta, WLAN_STA_ASSOC)) + statype = CFG80211_STA_AP_CLIENT; + else + statype = CFG80211_STA_AP_CLIENT_UNASSOC; break; default: err = -EOPNOTSUPP; @@ -3522,18 +3534,32 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy, u16 frame_type, bool reg) { struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); switch (frame_type) { case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ: - if (reg)
+ if (reg) { local->probe_req_reg++; - else - local->probe_req_reg--; + sdata->vif.probe_req_reg++; + } else { + if (local->probe_req_reg) + local->probe_req_reg--; + + if (sdata->vif.probe_req_reg) + sdata->vif.probe_req_reg--; + } if (!local->open_count) break; - ieee80211_queue_work(&local->hw, &local->reconfig_filter); + if (sdata->vif.probe_req_reg == 1) + drv_config_iface_filter(local, sdata, FIF_PROBE_REQ, + FIF_PROBE_REQ); + else if (sdata->vif.probe_req_reg == 0) + drv_config_iface_filter(local, sdata, 0, + FIF_PROBE_REQ); + + ieee80211_configure_filter(local); break; default: break; diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index ced6bf3be8d6..3636b45440ab 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -123,6 +123,8 @@ static const char *hw_flag_names[NUM_IEEE80211_HW_FLAGS + 1] = { FLAG(SUPPORTS_CLONED_SKBS), FLAG(SINGLE_SCAN_ON_ALL_BANDS), FLAG(TDLS_WIDER_BW), + FLAG(SUPPORTS_AMSDU_IN_AMPDU), + FLAG(BEACON_TX_STATUS), /* keep last for the build bug below */ (void *)0x1 diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index 702ca122c498..7961e7d0b61e 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c @@ -2,6 +2,7 @@ * Copyright 2003-2005 Devicescape Software, Inc. * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> + * Copyright (C) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -34,6 +35,14 @@ static const struct file_operations key_ ##name## _ops = { \ .llseek = generic_file_llseek, \ } +#define KEY_OPS_W(name) \ +static const struct file_operations key_ ##name## _ops = { \ + .read = key_##name##_read, \ + .write = key_##name##_write, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +} + #define KEY_FILE(name, format) \ KEY_READ_##format(name) \ KEY_OPS(name) @@ -74,6 +83,41 @@ static ssize_t key_algorithm_read(struct file *file, } KEY_OPS(algorithm); +static ssize_t key_tx_spec_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct ieee80211_key *key = file->private_data; + u64 pn; + int ret; + + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + return -EINVAL; + case WLAN_CIPHER_SUITE_TKIP: + /* not supported yet */ + return -EOPNOTSUPP; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + ret = kstrtou64_from_user(userbuf, count, 16, &pn); + if (ret) + return ret; + /* PN is a 48-bit counter */ + if (pn >= (1ULL << 48)) + return -ERANGE; + atomic64_set(&key->conf.tx_pn, pn); + return count; + default: + return 0; + } +} + static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { @@ -110,7 +154,7 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf, } return simple_read_from_buffer(userbuf, count, ppos, buf, len); } -KEY_OPS(tx_spec); +KEY_OPS_W(tx_spec); static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) @@ -278,6 +322,9 @@ KEY_OPS(key); #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, 0400, key->debugfs.dir, \ key, &key_##name##_ops); +#define DEBUGFS_ADD_W(name) \ + 
debugfs_create_file(#name, 0600, key->debugfs.dir, \ + key, &key_##name##_ops); void ieee80211_debugfs_key_add(struct ieee80211_key *key) { @@ -310,7 +357,7 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key) DEBUGFS_ADD(keyidx); DEBUGFS_ADD(hw_key_idx); DEBUGFS_ADD(algorithm); - DEBUGFS_ADD(tx_spec); + DEBUGFS_ADD_W(tx_spec); DEBUGFS_ADD(rx_spec); DEBUGFS_ADD(replays); DEBUGFS_ADD(icverrors); diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 1021e87c051f..37ea30e0754c 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -114,14 +114,6 @@ static ssize_t ieee80211_if_fmt_##name( \ return scnprintf(buf, buflen, "%pM\n", sdata->field); \ } -#define IEEE80211_IF_FMT_DEC_DIV_16(name, field) \ -static ssize_t ieee80211_if_fmt_##name( \ - const struct ieee80211_sub_if_data *sdata, \ - char *buf, int buflen) \ -{ \ - return scnprintf(buf, buflen, "%d\n", sdata->field / 16); \ -} - #define IEEE80211_IF_FMT_JIFFIES_TO_MS(name, field) \ static ssize_t ieee80211_if_fmt_##name( \ const struct ieee80211_sub_if_data *sdata, \ @@ -247,8 +239,6 @@ IEEE80211_IF_FILE_R(hw_queues); /* STA attributes */ IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); IEEE80211_IF_FILE(aid, u.mgd.aid, DEC); -IEEE80211_IF_FILE(last_beacon, u.mgd.last_beacon_signal, DEC); -IEEE80211_IF_FILE(ave_beacon, u.mgd.ave_beacon_signal, DEC_DIV_16); IEEE80211_IF_FILE(beacon_timeout, u.mgd.beacon_timeout, JIFFIES_TO_MS); static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata, @@ -455,6 +445,34 @@ static ssize_t ieee80211_if_parse_uapsd_max_sp_len( } IEEE80211_IF_FILE_RW(uapsd_max_sp_len); +static ssize_t ieee80211_if_fmt_tdls_wider_bw( + const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) +{ + const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + bool tdls_wider_bw; + + tdls_wider_bw = ieee80211_hw_check(&sdata->local->hw, TDLS_WIDER_BW) && + !ifmgd->tdls_wider_bw_prohibited; + + return snprintf(buf, buflen, "%d\n", tdls_wider_bw); +} + +static ssize_t ieee80211_if_parse_tdls_wider_bw( + struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + u8 val; + int ret; + + ret = kstrtou8(buf, 0, &val); + if (ret) + return ret; + + ifmgd->tdls_wider_bw_prohibited = !val; + return buflen; +} +IEEE80211_IF_FILE_RW(tdls_wider_bw); + /* AP attributes */ IEEE80211_IF_FILE(num_mcast_sta, u.ap.num_mcast_sta, ATOMIC); IEEE80211_IF_FILE(num_sta_ps, u.ap.ps.num_sta_ps, ATOMIC); @@ -606,14 +624,13 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD(bssid); DEBUGFS_ADD(aid); - DEBUGFS_ADD(last_beacon); - DEBUGFS_ADD(ave_beacon); DEBUGFS_ADD(beacon_timeout); DEBUGFS_ADD_MODE(smps, 0600); DEBUGFS_ADD_MODE(tkip_mic_test, 0200); DEBUGFS_ADD_MODE(beacon_loss, 0200); DEBUGFS_ADD_MODE(uapsd_queues, 0600); DEBUGFS_ADD_MODE(uapsd_max_sp_len, 0600); + DEBUGFS_ADD_MODE(tdls_wider_bw, 0600); } static void add_ap_files(struct ieee80211_sub_if_data *sdata) diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c index 267c3b1ca047..a1d54318f16c 100644 --- a/net/mac80211/driver-ops.c +++ b/net/mac80211/driver-ops.c @@ -8,6 +8,60 @@ #include "trace.h" #include "driver-ops.h" +int drv_add_interface(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + int ret; + + might_sleep(); + + if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + (sdata->vif.type == NL80211_IFTYPE_MONITOR && + !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) && + 
!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE)))) + return -EINVAL; + + trace_drv_add_interface(local, sdata); + ret = local->ops->add_interface(&local->hw, &sdata->vif); + trace_drv_return_int(local, ret); + + if (ret == 0) + sdata->flags |= IEEE80211_SDATA_IN_DRIVER; + + return ret; +} + +int drv_change_interface(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum nl80211_iftype type, bool p2p) +{ + int ret; + + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_change_interface(local, sdata, type, p2p); + ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p); + trace_drv_return_int(local, ret); + return ret; +} + +void drv_remove_interface(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_remove_interface(local, sdata); + local->ops->remove_interface(&local->hw, &sdata->vif); + sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER; + trace_drv_return_void(local); +} + __must_check int drv_sta_state(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, @@ -39,3 +93,171 @@ int drv_sta_state(struct ieee80211_local *local, trace_drv_return_int(local, ret); return ret; } + +void drv_sta_rc_update(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, u32 changed) +{ + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return; + + WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED && + (sdata->vif.type != NL80211_IFTYPE_ADHOC && + sdata->vif.type != NL80211_IFTYPE_MESH_POINT)); + + trace_drv_sta_rc_update(local, sdata, sta, changed); + if (local->ops->sta_rc_update) + local->ops->sta_rc_update(&local->hw, &sdata->vif, + sta, changed); + + trace_drv_return_void(local); +} + +int drv_conf_tx(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, u16 ac, + const struct ieee80211_tx_queue_params *params) +{ + int ret = -EOPNOTSUPP; + + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return -EIO; + + if (WARN_ONCE(params->cw_min == 0 || + params->cw_min > params->cw_max, + "%s: invalid CW_min/CW_max: %d/%d\n", + sdata->name, params->cw_min, params->cw_max)) + return -EINVAL; + + trace_drv_conf_tx(local, sdata, ac, params); + if (local->ops->conf_tx) + ret = local->ops->conf_tx(&local->hw, &sdata->vif, + ac, params); + trace_drv_return_int(local, ret); + return ret; +} + +u64 drv_get_tsf(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + u64 ret = -1ULL; + + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return ret; + + trace_drv_get_tsf(local, sdata); + if (local->ops->get_tsf) + ret = local->ops->get_tsf(&local->hw, &sdata->vif); + trace_drv_return_u64(local, ret); + return ret; +} + +void drv_set_tsf(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u64 tsf) +{ + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_set_tsf(local, sdata, tsf); + if (local->ops->set_tsf) + local->ops->set_tsf(&local->hw, &sdata->vif, tsf); + trace_drv_return_void(local); +} + +void drv_reset_tsf(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_reset_tsf(local, sdata); + if (local->ops->reset_tsf) + local->ops->reset_tsf(&local->hw, &sdata->vif); + trace_drv_return_void(local); +} + +int drv_switch_vif_chanctx(struct ieee80211_local *local, + struct ieee80211_vif_chanctx_switch 
*vifs, + int n_vifs, enum ieee80211_chanctx_switch_mode mode) +{ + int ret = 0; + int i; + + if (!local->ops->switch_vif_chanctx) + return -EOPNOTSUPP; + + for (i = 0; i < n_vifs; i++) { + struct ieee80211_chanctx *new_ctx = + container_of(vifs[i].new_ctx, + struct ieee80211_chanctx, + conf); + struct ieee80211_chanctx *old_ctx = + container_of(vifs[i].old_ctx, + struct ieee80211_chanctx, + conf); + + WARN_ON_ONCE(!old_ctx->driver_present); + WARN_ON_ONCE((mode == CHANCTX_SWMODE_SWAP_CONTEXTS && + new_ctx->driver_present) || + (mode == CHANCTX_SWMODE_REASSIGN_VIF && + !new_ctx->driver_present)); + } + + trace_drv_switch_vif_chanctx(local, vifs, n_vifs, mode); + ret = local->ops->switch_vif_chanctx(&local->hw, + vifs, n_vifs, mode); + trace_drv_return_int(local, ret); + + if (!ret && mode == CHANCTX_SWMODE_SWAP_CONTEXTS) { + for (i = 0; i < n_vifs; i++) { + struct ieee80211_chanctx *new_ctx = + container_of(vifs[i].new_ctx, + struct ieee80211_chanctx, + conf); + struct ieee80211_chanctx *old_ctx = + container_of(vifs[i].old_ctx, + struct ieee80211_chanctx, + conf); + + new_ctx->driver_present = true; + old_ctx->driver_present = false; + } + } + + return ret; +} + +int drv_ampdu_action(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, + u16 *ssn, u8 buf_size, bool amsdu) +{ + int ret = -EOPNOTSUPP; + + might_sleep(); + + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_ampdu_action(local, sdata, action, sta, tid, + ssn, buf_size, amsdu); + + if (local->ops->ampdu_action) + ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action, + sta, tid, ssn, buf_size, amsdu); + + trace_drv_return_int(local, ret); + + return ret; +} diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 02d91332d7dd..30987099eb8f 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -137,59 +137,15 @@ static inline void drv_set_wakeup(struct ieee80211_local *local, } #endif -static inline int drv_add_interface(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata) -{ - int ret; - - might_sleep(); - - if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN || - (sdata->vif.type == NL80211_IFTYPE_MONITOR && - !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) && - !(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE)))) - return -EINVAL; - - trace_drv_add_interface(local, sdata); - ret = local->ops->add_interface(&local->hw, &sdata->vif); - trace_drv_return_int(local, ret); - - if (ret == 0) - sdata->flags |= IEEE80211_SDATA_IN_DRIVER; - - return ret; -} - -static inline int drv_change_interface(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata, - enum nl80211_iftype type, bool p2p) -{ - int ret; +int drv_add_interface(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata); - might_sleep(); - - if (!check_sdata_in_driver(sdata)) - return -EIO; - - trace_drv_change_interface(local, sdata, type, p2p); - ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p); - trace_drv_return_int(local, ret); - return ret; -} - -static inline void drv_remove_interface(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata) -{ - might_sleep(); - - if (!check_sdata_in_driver(sdata)) - return; +int drv_change_interface(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum nl80211_iftype type, bool p2p); - trace_drv_remove_interface(local, sdata); - 
local->ops->remove_interface(&local->hw, &sdata->vif); - sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER; - trace_drv_return_void(local); -} +void drv_remove_interface(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata); static inline int drv_config(struct ieee80211_local *local, u32 changed) { @@ -260,6 +216,22 @@ static inline void drv_configure_filter(struct ieee80211_local *local, trace_drv_return_void(local); } +static inline void drv_config_iface_filter(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + unsigned int filter_flags, + unsigned int changed_flags) +{ + might_sleep(); + + trace_drv_config_iface_filter(local, sdata, filter_flags, + changed_flags); + if (local->ops->config_iface_filter) + local->ops->config_iface_filter(&local->hw, &sdata->vif, + filter_flags, + changed_flags); + trace_drv_return_void(local); +} + static inline int drv_set_tim(struct ieee80211_local *local, struct ieee80211_sta *sta, bool set) { @@ -580,25 +552,9 @@ int drv_sta_state(struct ieee80211_local *local, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state); -static inline void drv_sta_rc_update(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata, - struct ieee80211_sta *sta, u32 changed) -{ - sdata = get_bss_sdata(sdata); - if (!check_sdata_in_driver(sdata)) - return; - - WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED && - (sdata->vif.type != NL80211_IFTYPE_ADHOC && - sdata->vif.type != NL80211_IFTYPE_MESH_POINT)); - - trace_drv_sta_rc_update(local, sdata, sta, changed); - if (local->ops->sta_rc_update) - local->ops->sta_rc_update(&local->hw, &sdata->vif, - sta, changed); - - trace_drv_return_void(local); -} +void drv_sta_rc_update(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, u32 changed); static inline void drv_sta_rate_tbl_update(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, @@ -630,76 +586,17 @@ static inline void drv_sta_statistics(struct ieee80211_local *local, trace_drv_return_void(local); } -static inline int drv_conf_tx(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata, u16 ac, - const struct ieee80211_tx_queue_params *params) -{ - int ret = -EOPNOTSUPP; - - might_sleep(); - - if (!check_sdata_in_driver(sdata)) - return -EIO; - - if (WARN_ONCE(params->cw_min == 0 || - params->cw_min > params->cw_max, - "%s: invalid CW_min/CW_max: %d/%d\n", - sdata->name, params->cw_min, params->cw_max)) - return -EINVAL; - - trace_drv_conf_tx(local, sdata, ac, params); - if (local->ops->conf_tx) - ret = local->ops->conf_tx(&local->hw, &sdata->vif, - ac, params); - trace_drv_return_int(local, ret); - return ret; -} - -static inline u64 drv_get_tsf(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata) -{ - u64 ret = -1ULL; - - might_sleep(); - - if (!check_sdata_in_driver(sdata)) - return ret; - - trace_drv_get_tsf(local, sdata); - if (local->ops->get_tsf) - ret = local->ops->get_tsf(&local->hw, &sdata->vif); - trace_drv_return_u64(local, ret); - return ret; -} - -static inline void drv_set_tsf(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata, - u64 tsf) -{ - might_sleep(); - - if (!check_sdata_in_driver(sdata)) - return; - - trace_drv_set_tsf(local, sdata, tsf); - if (local->ops->set_tsf) - local->ops->set_tsf(&local->hw, &sdata->vif, tsf); - trace_drv_return_void(local); -} - -static inline void drv_reset_tsf(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata) -{ - might_sleep(); +int 
drv_conf_tx(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, u16 ac, + const struct ieee80211_tx_queue_params *params); - if (!check_sdata_in_driver(sdata)) - return; - - trace_drv_reset_tsf(local, sdata); - if (local->ops->reset_tsf) - local->ops->reset_tsf(&local->hw, &sdata->vif); - trace_drv_return_void(local); -} +u64 drv_get_tsf(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata); +void drv_set_tsf(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u64 tsf); +void drv_reset_tsf(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata); static inline int drv_tx_last_beacon(struct ieee80211_local *local) { @@ -714,30 +611,11 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local) return ret; } -static inline int drv_ampdu_action(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, - u16 *ssn, u8 buf_size) -{ - int ret = -EOPNOTSUPP; - - might_sleep(); - - sdata = get_bss_sdata(sdata); - if (!check_sdata_in_driver(sdata)) - return -EIO; - - trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size); - - if (local->ops->ampdu_action) - ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action, - sta, tid, ssn, buf_size); - - trace_drv_return_int(local, ret); - - return ret; -} +int drv_ampdu_action(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, + u16 *ssn, u8 buf_size, bool amsdu); static inline int drv_get_survey(struct ieee80211_local *local, int idx, struct survey_info *survey) @@ -1066,58 +944,9 @@ static inline void drv_unassign_vif_chanctx(struct ieee80211_local *local, trace_drv_return_void(local); } -static inline int -drv_switch_vif_chanctx(struct ieee80211_local *local, - struct ieee80211_vif_chanctx_switch *vifs, - int n_vifs, - enum ieee80211_chanctx_switch_mode mode) -{ - int ret = 0; - int i; - - if (!local->ops->switch_vif_chanctx) - return -EOPNOTSUPP; - - for (i = 0; i < n_vifs; i++) { - struct ieee80211_chanctx *new_ctx = - container_of(vifs[i].new_ctx, - struct ieee80211_chanctx, - conf); - struct ieee80211_chanctx *old_ctx = - container_of(vifs[i].old_ctx, - struct ieee80211_chanctx, - conf); - - WARN_ON_ONCE(!old_ctx->driver_present); - WARN_ON_ONCE((mode == CHANCTX_SWMODE_SWAP_CONTEXTS && - new_ctx->driver_present) || - (mode == CHANCTX_SWMODE_REASSIGN_VIF && - !new_ctx->driver_present)); - } - - trace_drv_switch_vif_chanctx(local, vifs, n_vifs, mode); - ret = local->ops->switch_vif_chanctx(&local->hw, - vifs, n_vifs, mode); - trace_drv_return_int(local, ret); - - if (!ret && mode == CHANCTX_SWMODE_SWAP_CONTEXTS) { - for (i = 0; i < n_vifs; i++) { - struct ieee80211_chanctx *new_ctx = - container_of(vifs[i].new_ctx, - struct ieee80211_chanctx, - conf); - struct ieee80211_chanctx *old_ctx = - container_of(vifs[i].old_ctx, - struct ieee80211_chanctx, - conf); - - new_ctx->driver_present = true; - old_ctx->driver_present = false; - } - } - - return ret; -} +int drv_switch_vif_chanctx(struct ieee80211_local *local, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, enum ieee80211_chanctx_switch_mode mode); static inline int drv_start_ap(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 6e52659f923f..f9605f13def9 100644 --- a/net/mac80211/ieee80211_i.h +++ 
b/net/mac80211/ieee80211_i.h @@ -419,6 +419,8 @@ struct ieee80211_sta_tx_tspec { bool downgraded; }; +DECLARE_EWMA(beacon_signal, 16, 4) + struct ieee80211_if_managed { struct timer_list timer; struct timer_list conn_mon_timer; @@ -490,16 +492,7 @@ struct ieee80211_if_managed { s16 p2p_noa_index; - /* Signal strength from the last Beacon frame in the current BSS. */ - int last_beacon_signal; - - /* - * Weighted average of the signal strength from Beacon frames in the - * current BSS. This is in units of 1/16 of the signal unit to maintain - * accuracy and to speed up calculations, i.e., the value need to be - * divided by 16 to get the actual value. - */ - int ave_beacon_signal; + struct ewma_beacon_signal ave_beacon_signal; /* * Number of Beacon frames used in ave_beacon_signal. This can be used @@ -535,6 +528,7 @@ struct ieee80211_if_managed { struct sk_buff *teardown_skb; /* A copy to send through the AP */ spinlock_t teardown_lock; /* To lock changing teardown_skb */ bool tdls_chan_switch_prohibited; + bool tdls_wider_bw_prohibited; /* WMM-AC TSPEC support */ struct ieee80211_sta_tx_tspec tx_tspec[IEEE80211_NUM_ACS]; @@ -1641,6 +1635,9 @@ void ieee80211_purge_tx_queue(struct ieee80211_hw *hw, struct sk_buff * ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, u32 info_flags); +void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb, + struct ieee80211_supported_band *sband, + int retry_count, int shift, bool send_to_cooked); void ieee80211_check_fast_xmit(struct sta_info *sta); void ieee80211_check_fast_xmit_all(struct ieee80211_local *local); @@ -1853,7 +1850,7 @@ void ieee80211_dynamic_ps_disable_work(struct work_struct *work); void ieee80211_dynamic_ps_timer(unsigned long data); void ieee80211_send_nullfunc(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, - int powersave); + bool powersave); void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, struct ieee80211_hdr *hdr); void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 6964fc6a8ea2..42d7f0f65bd6 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1204,7 +1204,7 @@ static void ieee80211_iface_work(struct work_struct *work) if (!ieee80211_sdata_running(sdata)) return; - if (local->scanning) + if (test_bit(SCAN_SW_SCANNING, &local->scanning)) return; if (!ieee80211_can_run_worker(local)) diff --git a/net/mac80211/main.c b/net/mac80211/main.c index ff79a13d231d..9b813a2f3a75 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -543,7 +543,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, NL80211_FEATURE_HT_IBSS | NL80211_FEATURE_VIF_TXPOWER | NL80211_FEATURE_MAC_ON_CREATE | - NL80211_FEATURE_USERSPACE_MPM; + NL80211_FEATURE_USERSPACE_MPM | + NL80211_FEATURE_FULL_AP_CLIENT_STATE; if (!ops->hw_scan) wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN | diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index e06a5ca7c9a9..626e8de70842 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -94,6 +94,9 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, ieee80211_ht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan, ie->ht_operation, &sta_chan_def); + ieee80211_vht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan, + ie->vht_operation, &sta_chan_def); + if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef, &sta_chan_def)) return false; @@ -436,8 +439,6 @@ int mesh_add_ht_oper_ie(struct 
ieee80211_sub_if_data *sdata, struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *channel; - enum nl80211_channel_type channel_type = - cfg80211_get_chandef_type(&sdata->vif.bss_conf.chandef); struct ieee80211_supported_band *sband; struct ieee80211_sta_ht_cap *ht_cap; u8 *pos; @@ -454,7 +455,10 @@ int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata, sband = local->hw.wiphy->bands[channel->band]; ht_cap = &sband->ht_cap; - if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT) + if (!ht_cap->ht_supported || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) return 0; if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_operation)) @@ -467,6 +471,68 @@ int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata, return 0; } +int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + enum ieee80211_band band = ieee80211_get_sdata_band(sdata); + struct ieee80211_supported_band *sband; + u8 *pos; + + sband = local->hw.wiphy->bands[band]; + if (!sband->vht_cap.vht_supported || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) + return 0; + + if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_cap)) + return -ENOMEM; + + pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_cap)); + ieee80211_ie_build_vht_cap(pos, &sband->vht_cap, sband->vht_cap.cap); + + return 0; +} + +int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_channel *channel; + struct ieee80211_supported_band *sband; + struct ieee80211_sta_vht_cap *vht_cap; + u8 *pos; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); + if (WARN_ON(!chanctx_conf)) { + rcu_read_unlock(); + return -EINVAL; + } + channel = chanctx_conf->def.chan; + rcu_read_unlock(); + + sband = local->hw.wiphy->bands[channel->band]; + vht_cap = &sband->vht_cap; + + if (!vht_cap->vht_supported || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) + return 0; + + if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_operation)) + return -ENOMEM; + + pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation)); + ieee80211_ie_build_vht_oper(pos, vht_cap, + &sdata->vif.bss_conf.chandef); + + return 0; +} + static void ieee80211_mesh_path_timer(unsigned long data) { struct ieee80211_sub_if_data *sdata = @@ -540,9 +606,9 @@ int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, * * Return the header length. 
*/ -int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata, - struct ieee80211s_hdr *meshhdr, - const char *addr4or5, const char *addr6) +unsigned int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata, + struct ieee80211s_hdr *meshhdr, + const char *addr4or5, const char *addr6) { if (WARN_ON(!addr4or5 && addr6)) return 0; @@ -637,6 +703,8 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh) 2 + ifmsh->mesh_id_len + 2 + sizeof(struct ieee80211_meshconf_ie) + 2 + sizeof(__le16) + /* awake window */ + 2 + sizeof(struct ieee80211_vht_cap) + + 2 + sizeof(struct ieee80211_vht_operation) + ifmsh->ie_len; bcn = kzalloc(sizeof(*bcn) + head_len + tail_len, GFP_KERNEL); @@ -718,6 +786,8 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh) mesh_add_meshid_ie(sdata, skb) || mesh_add_meshconf_ie(sdata, skb) || mesh_add_awake_window_ie(sdata, skb) || + mesh_add_vht_cap_ie(sdata, skb) || + mesh_add_vht_oper_ie(sdata, skb) || mesh_add_vendor_ies(sdata, skb)) goto out_free; diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 50c8473cf9dc..a1596344c3ba 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -207,9 +207,9 @@ struct mesh_rmc { /* Various */ int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, const u8 *da, const u8 *sa); -int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata, - struct ieee80211s_hdr *meshhdr, - const char *addr4or5, const char *addr6); +unsigned int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata, + struct ieee80211s_hdr *meshhdr, + const char *addr4or5, const char *addr6); int mesh_rmc_check(struct ieee80211_sub_if_data *sdata, const u8 *addr, struct ieee80211s_hdr *mesh_hdr); bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, @@ -227,6 +227,10 @@ int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); +int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); void ieee80211s_init(void); diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 58384642e03c..a360b24b7df8 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -226,6 +226,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, 2 + sizeof(struct ieee80211_meshconf_ie) + 2 + sizeof(struct ieee80211_ht_cap) + 2 + sizeof(struct ieee80211_ht_operation) + + 2 + sizeof(struct ieee80211_vht_cap) + + 2 + sizeof(struct ieee80211_vht_operation) + 2 + 8 + /* peering IE */ sdata->u.mesh.ie_len); if (!skb) @@ -306,7 +308,9 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, if (action != WLAN_SP_MESH_PEERING_CLOSE) { if (mesh_add_ht_cap_ie(sdata, skb) || - mesh_add_ht_oper_ie(sdata, skb)) + mesh_add_ht_oper_ie(sdata, skb) || + mesh_add_vht_cap_ie(sdata, skb) || + mesh_add_vht_oper_ie(sdata, skb)) goto free; } @@ -402,6 +406,9 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, elems->ht_cap_elem, sta)) changed |= IEEE80211_RC_BW_CHANGED; + ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, + elems->vht_cap_elem, sta); + if (bw != sta->sta.bandwidth) changed |= IEEE80211_RC_BW_CHANGED; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index cd7e55e08a23..56ef9a8e151c 100644 --- 
a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -82,13 +82,6 @@ MODULE_PARM_DESC(probe_wait_ms, " before disconnecting (reason 4)."); /* - * Weight given to the latest Beacon frame when calculating average signal - * strength for Beacon frames received in the current BSS. This must be - * between 1 and 15. - */ -#define IEEE80211_SIGNAL_AVE_WEIGHT 3 - -/* * How many Beacon frames need to have been used in average signal strength * before starting to indicate signal change events. */ @@ -943,7 +936,7 @@ void ieee80211_send_pspoll(struct ieee80211_local *local, void ieee80211_send_nullfunc(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, - int powersave) + bool powersave) { struct sk_buff *skb; struct ieee80211_hdr_3addr *nullfunc; @@ -1427,7 +1420,7 @@ static void ieee80211_enable_ps(struct ieee80211_local *local, msecs_to_jiffies(conf->dynamic_ps_timeout)); } else { if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) - ieee80211_send_nullfunc(local, sdata, 1); + ieee80211_send_nullfunc(local, sdata, true); if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) && ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) @@ -1642,7 +1635,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work) msecs_to_jiffies( local->hw.conf.dynamic_ps_timeout)); } else { - ieee80211_send_nullfunc(local, sdata, 1); + ieee80211_send_nullfunc(local, sdata, true); /* Flush to get the tx status of nullfunc frame */ ieee80211_flush_queues(local, sdata, false); } @@ -2275,7 +2268,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) if (ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) { ifmgd->nullfunc_failed = false; - ieee80211_send_nullfunc(sdata->local, sdata, 0); + ieee80211_send_nullfunc(sdata->local, sdata, false); } else { int ssid_len; @@ -3262,16 +3255,6 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, if (ifmgd->associated && ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) ieee80211_reset_ap_probe(sdata); - - if (ifmgd->auth_data && !ifmgd->auth_data->bss->proberesp_ies && - ether_addr_equal(mgmt->bssid, ifmgd->auth_data->bss->bssid)) { - /* got probe response, continue with auth */ - sdata_info(sdata, "direct probe responded\n"); - ifmgd->auth_data->tries = 0; - ifmgd->auth_data->timeout = jiffies; - ifmgd->auth_data->timeout_started = true; - run_again(sdata, ifmgd->auth_data->timeout); - } } /* @@ -3374,24 +3357,21 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, bssid = ifmgd->associated->bssid; /* Track average RSSI from the Beacon frames of the current AP */ - ifmgd->last_beacon_signal = rx_status->signal; if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) { ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE; - ifmgd->ave_beacon_signal = rx_status->signal * 16; + ewma_beacon_signal_init(&ifmgd->ave_beacon_signal); ifmgd->last_cqm_event_signal = 0; ifmgd->count_beacon_signal = 1; ifmgd->last_ave_beacon_signal = 0; } else { - ifmgd->ave_beacon_signal = - (IEEE80211_SIGNAL_AVE_WEIGHT * rx_status->signal * 16 + - (16 - IEEE80211_SIGNAL_AVE_WEIGHT) * - ifmgd->ave_beacon_signal) / 16; ifmgd->count_beacon_signal++; } + ewma_beacon_signal_add(&ifmgd->ave_beacon_signal, -rx_status->signal); + if (ifmgd->rssi_min_thold != ifmgd->rssi_max_thold && ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) { - int sig = ifmgd->ave_beacon_signal; + int sig = -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal); int last_sig = ifmgd->last_ave_beacon_signal; struct 
ieee80211_event event = { .type = RSSI_EVENT, @@ -3418,10 +3398,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, if (bss_conf->cqm_rssi_thold && ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT && !(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) { - int sig = ifmgd->ave_beacon_signal / 16; + int sig = -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal); int last_event = ifmgd->last_cqm_event_signal; int thold = bss_conf->cqm_rssi_thold; int hyst = bss_conf->cqm_rssi_hyst; + if (sig < thold && (last_event == 0 || sig < last_event - hyst)) { ifmgd->last_cqm_event_signal = sig; @@ -3456,31 +3437,27 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, len - baselen, false, &elems, care_about_ies, ncrc); - if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) { - bool directed_tim = ieee80211_check_tim(elems.tim, - elems.tim_len, - ifmgd->aid); - if (directed_tim) { - if (local->hw.conf.dynamic_ps_timeout > 0) { - if (local->hw.conf.flags & IEEE80211_CONF_PS) { - local->hw.conf.flags &= ~IEEE80211_CONF_PS; - ieee80211_hw_config(local, - IEEE80211_CONF_CHANGE_PS); - } - ieee80211_send_nullfunc(local, sdata, 0); - } else if (!local->pspolling && sdata->u.mgd.powersave) { - local->pspolling = true; - - /* - * Here is assumed that the driver will be - * able to send ps-poll frame and receive a - * response even though power save mode is - * enabled, but some drivers might require - * to disable power save here. This needs - * to be investigated. - */ - ieee80211_send_pspoll(local, sdata); + if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) && + ieee80211_check_tim(elems.tim, elems.tim_len, ifmgd->aid)) { + if (local->hw.conf.dynamic_ps_timeout > 0) { + if (local->hw.conf.flags & IEEE80211_CONF_PS) { + local->hw.conf.flags &= ~IEEE80211_CONF_PS; + ieee80211_hw_config(local, + IEEE80211_CONF_CHANGE_PS); } + ieee80211_send_nullfunc(local, sdata, false); + } else if (!local->pspolling && sdata->u.mgd.powersave) { + local->pspolling = true; + + /* + * Here is assumed that the driver will be + * able to send ps-poll frame and receive a + * response even though power save mode is + * enabled, but some drivers might require + * to disable power save here. This needs + * to be investigated. 
+ */ + ieee80211_send_pspoll(local, sdata); } } @@ -3717,12 +3694,14 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, reason); } -static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) +static int ieee80211_auth(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data; u32 tx_flags = 0; + u16 trans = 1; + u16 status = 0; sdata_assert_lock(sdata); @@ -3746,54 +3725,27 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) drv_mgd_prepare_tx(local, sdata); - if (auth_data->bss->proberesp_ies) { - u16 trans = 1; - u16 status = 0; - - sdata_info(sdata, "send auth to %pM (try %d/%d)\n", - auth_data->bss->bssid, auth_data->tries, - IEEE80211_AUTH_MAX_TRIES); + sdata_info(sdata, "send auth to %pM (try %d/%d)\n", + auth_data->bss->bssid, auth_data->tries, + IEEE80211_AUTH_MAX_TRIES); - auth_data->expected_transaction = 2; + auth_data->expected_transaction = 2; - if (auth_data->algorithm == WLAN_AUTH_SAE) { - trans = auth_data->sae_trans; - status = auth_data->sae_status; - auth_data->expected_transaction = trans; - } - - if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) - tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS | - IEEE80211_TX_INTFL_MLME_CONN_TX; - - ieee80211_send_auth(sdata, trans, auth_data->algorithm, status, - auth_data->data, auth_data->data_len, - auth_data->bss->bssid, - auth_data->bss->bssid, NULL, 0, 0, - tx_flags); - } else { - const u8 *ssidie; + if (auth_data->algorithm == WLAN_AUTH_SAE) { + trans = auth_data->sae_trans; + status = auth_data->sae_status; + auth_data->expected_transaction = trans; + } - sdata_info(sdata, "direct probe to %pM (try %d/%i)\n", - auth_data->bss->bssid, auth_data->tries, - IEEE80211_AUTH_MAX_TRIES); + if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) + tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS | + IEEE80211_TX_INTFL_MLME_CONN_TX; - rcu_read_lock(); - ssidie = ieee80211_bss_get_ie(auth_data->bss, WLAN_EID_SSID); - if (!ssidie) { - rcu_read_unlock(); - return -EINVAL; - } - /* - * Direct probe is sent to broadcast address as some APs - * will not answer to direct packet in unassociated state. 
- */ - ieee80211_send_probe_req(sdata, sdata->vif.addr, NULL, - ssidie + 2, ssidie[1], - NULL, 0, (u32) -1, true, 0, - auth_data->bss->channel, false); - rcu_read_unlock(); - } + ieee80211_send_auth(sdata, trans, auth_data->algorithm, status, + auth_data->data, auth_data->data_len, + auth_data->bss->bssid, + auth_data->bss->bssid, NULL, 0, 0, + tx_flags); if (tx_flags == 0) { auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; @@ -3874,8 +3826,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) bool status_acked = ifmgd->status_acked; ifmgd->status_received = false; - if (ifmgd->auth_data && - (ieee80211_is_probe_req(fc) || ieee80211_is_auth(fc))) { + if (ifmgd->auth_data && ieee80211_is_auth(fc)) { if (status_acked) { ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT_SHORT; @@ -3906,7 +3857,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) * so let's just kill the auth data */ ieee80211_destroy_auth_data(sdata, false); - } else if (ieee80211_probe_auth(sdata)) { + } else if (ieee80211_auth(sdata)) { u8 bssid[ETH_ALEN]; struct ieee80211_event event = { .type = MLME_EVENT, @@ -4613,7 +4564,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, if (err) goto err_clear; - err = ieee80211_probe_auth(sdata); + err = ieee80211_auth(sdata); if (err) { sta_info_destroy_addr(sdata, req->bss->bssid); goto err_clear; diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index f2c75cf491fc..04401037140e 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -57,7 +57,7 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata) * to send a new nullfunc frame to inform the AP that we * are again sleeping. */ - ieee80211_send_nullfunc(local, sdata, 1); + ieee80211_send_nullfunc(local, sdata, true); } /* inform AP that we are awake again, unless power save is enabled */ @@ -66,7 +66,7 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata) struct ieee80211_local *local = sdata->local; if (!local->ps_sdata) - ieee80211_send_nullfunc(local, sdata, 0); + ieee80211_send_nullfunc(local, sdata, false); else if (local->offchannel_ps_enabled) { /* * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware @@ -93,7 +93,7 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata) * restart the timer now and send a nullfunc frame to inform * the AP that we are awake. 
*/ - ieee80211_send_nullfunc(local, sdata, 0); + ieee80211_send_nullfunc(local, sdata, false); mod_timer(&local->dynamic_ps_timer, jiffies + msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); } diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index b676b9fa707b..ad88ad4e8eb1 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -23,7 +23,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) ieee80211_del_virtual_monitor(local); - if (ieee80211_hw_check(hw, AMPDU_AGGREGATION)) { + if (ieee80211_hw_check(hw, AMPDU_AGGREGATION) && + !(wowlan && wowlan->any)) { mutex_lock(&local->sta_mtx); list_for_each_entry(sta, &local->sta_list, list) { set_sta_flag(sta, WLAN_STA_BLOCK_BA); diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 9ce8883d5f44..b07e2f748f93 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -305,7 +305,10 @@ static void __rate_control_send_low(struct ieee80211_hw *hw, info->control.rates[0].idx = i; break; } - WARN_ON_ONCE(i == sband->n_bitrates); + WARN_ONCE(i == sband->n_bitrates, + "no supported rates (0x%x) in rate_mask 0x%x with flags 0x%x\n", + sta ? sta->supp_rates[sband->band] : 0, + rate_mask, rate_flags); info->control.rates[0].count = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c index 1db5f7c3318a..820b0abc9c0d 100644 --- a/net/mac80211/rc80211_minstrel_debugfs.c +++ b/net/mac80211/rc80211_minstrel_debugfs.c @@ -85,12 +85,10 @@ minstrel_stats_open(struct inode *inode, struct file *file) file->private_data = ms; p = ms->buf; p += sprintf(p, "\n"); - p += sprintf(p, "best __________rate_________ ______" "statistics______ ________last_______ " "______sum-of________\n"); - p += sprintf(p, "rate [name idx airtime max_tp] [ ø(tp) ø(prob) " "sd(prob)] [prob.|retry|suc|att] " "[#success | #attempts]\n"); + p += sprintf(p, + "best __________rate_________ ________statistics________ ________last_______ ______sum-of________\n"); + p += sprintf(p, + "rate [name idx airtime max_tp] [avg(tp) avg(prob) sd(prob)] [prob.|retry|suc|att] [#success | #attempts]\n"); for (i = 0; i < mi->n_rates; i++) { struct minstrel_rate *mr = &mi->r[i]; @@ -112,7 +110,7 @@ minstrel_stats_open(struct inode *inode, struct file *file) prob = MINSTREL_TRUNC(mrs->cur_prob * 1000); eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000); - p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u" + p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u" " %3u.%1u %3u %3u %-3u " "%9llu %-9llu\n", tp_max / 10, tp_max % 10, diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c index 6822ce0f95e5..5320e35ed3d0 100644 --- a/net/mac80211/rc80211_minstrel_ht_debugfs.c +++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c @@ -86,7 +86,7 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) prob = MINSTREL_TRUNC(mrs->cur_prob * 1000); eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000); - p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u" + p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u" " %3u.%1u %3u %3u %-3u " "%9llu %-9llu\n", tp_max / 10, tp_max % 10, @@ -129,12 +129,10 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file) p = ms->buf; p += sprintf(p, "\n"); - p += sprintf(p, " best ____________rate__________ " "______statistics______ ________last_______ " "______sum-of________\n"); - p += sprintf(p, "mode guard # rate [name idx airtime max_tp] " "[ ø(tp) ø(prob) sd(prob)] [prob.|retry|suc|att] [#success | " -
"#attempts]\n"); + p += sprintf(p, + " best ____________rate__________ ________statistics________ ________last_______ ______sum-of________\n"); + p += sprintf(p, + "mode guard # rate [name idx airtime max_tp] [avg(tp) avg(prob) sd(prob)] [prob.|retry|suc|att] [#success | #attempts]\n"); p = minstrel_ht_stats_dump(mi, MINSTREL_CCK_GROUP, p); for (i = 0; i < MINSTREL_CCK_GROUP; i++) diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 64f1936350c6..c3644458e2ee 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -303,7 +303,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, struct ieee80211_local *local = sdata->local; struct ieee80211_hw *hw = &local->hw; struct sta_info *sta; - struct timespec uptime; int i; sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp); @@ -339,8 +338,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, /* Mark TID as unreserved */ sta->reserved_tid = IEEE80211_TID_UNRESERVED; - ktime_get_ts(&uptime); - sta->last_connected = uptime.tv_sec; + sta->last_connected = ktime_get_seconds(); ewma_signal_init(&sta->avg_signal); for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++) ewma_signal_init(&sta->chain_signal_avg[i]); @@ -1813,7 +1811,6 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; struct rate_control_ref *ref = NULL; - struct timespec uptime; u32 thr = 0; int i, ac; @@ -1838,8 +1835,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) BIT(NL80211_STA_INFO_RX_DROP_MISC) | BIT(NL80211_STA_INFO_BEACON_LOSS); - ktime_get_ts(&uptime); - sinfo->connected_time = uptime.tv_sec - sta->last_connected; + sinfo->connected_time = ktime_get_seconds() - sta->last_connected; sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx); if (!(sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES64) | diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index b087c71ff7fe..d5ded8749ac4 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -133,6 +133,7 @@ enum ieee80211_agg_stop_reason { * @buf_size: reorder buffer size at receiver * @failed_bar_ssn: ssn of the last failed BAR tx attempt * @bar_pending: BAR needs to be re-sent + * @amsdu: support A-MSDU within A-MPDU * * This structure's lifetime is managed by RCU, assignments to * the array holding it must hold the aggregation mutex.
@@ -158,6 +159,7 @@ struct tid_ampdu_tx { u16 failed_bar_ssn; bool bar_pending; + bool amsdu; }; /** diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 8ba583243509..98fd04c4b2a0 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -668,16 +668,70 @@ void ieee80211_tx_status_noskb(struct ieee80211_hw *hw, } EXPORT_SYMBOL(ieee80211_tx_status_noskb); -void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) +void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb, + struct ieee80211_supported_band *sband, + int retry_count, int shift, bool send_to_cooked) { struct sk_buff *skb2; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_sub_if_data *sdata; + struct net_device *prev_dev = NULL; + int rtap_len; + + /* send frame to monitor interfaces now */ + rtap_len = ieee80211_tx_radiotap_len(info); + if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) { + pr_err("ieee80211_tx_status: headroom too small\n"); + dev_kfree_skb(skb); + return; + } + ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count, + rtap_len, shift); + + /* XXX: is this sufficient for BPF? */ + skb_set_mac_header(skb, 0); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_802_2); + memset(skb->cb, 0, sizeof(skb->cb)); + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { + if (!ieee80211_sdata_running(sdata)) + continue; + + if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) && + !send_to_cooked) + continue; + + if (prev_dev) { + skb2 = skb_clone(skb, GFP_ATOMIC); + if (skb2) { + skb2->dev = prev_dev; + netif_rx(skb2); + } + } + + prev_dev = sdata->dev; + } + } + if (prev_dev) { + skb->dev = prev_dev; + netif_rx(skb); + skb = NULL; + } + rcu_read_unlock(); + dev_kfree_skb(skb); +} + +void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) +{ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); __le16 fc; struct ieee80211_supported_band *sband; - struct ieee80211_sub_if_data *sdata; - struct net_device *prev_dev = NULL; struct sta_info *sta; struct rhash_head *tmp; int retry_count; @@ -685,7 +739,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) bool send_to_cooked; bool acked; struct ieee80211_bar *bar; - int rtap_len; int shift = 0; int tid = IEEE80211_NUM_TIDS; const struct bucket_table *tbl; @@ -878,51 +931,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) return; } - /* send frame to monitor interfaces now */ - rtap_len = ieee80211_tx_radiotap_len(info); - if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) { - pr_err("ieee80211_tx_status: headroom too small\n"); - dev_kfree_skb(skb); - return; - } - ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count, - rtap_len, shift); - - /* XXX: is this sufficient for BPF? 
*/ - skb_set_mac_header(skb, 0); - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->pkt_type = PACKET_OTHERHOST; - skb->protocol = htons(ETH_P_802_2); - memset(skb->cb, 0, sizeof(skb->cb)); - - rcu_read_lock(); - list_for_each_entry_rcu(sdata, &local->interfaces, list) { - if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { - if (!ieee80211_sdata_running(sdata)) - continue; - - if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) && - !send_to_cooked) - continue; - - if (prev_dev) { - skb2 = skb_clone(skb, GFP_ATOMIC); - if (skb2) { - skb2->dev = prev_dev; - netif_rx(skb2); - } - } - - prev_dev = sdata->dev; - } - } - if (prev_dev) { - skb->dev = prev_dev; - netif_rx(skb); - skb = NULL; - } - rcu_read_unlock(); - dev_kfree_skb(skb); + /* send to monitor interfaces */ + ieee80211_tx_monitor(local, skb, sband, retry_count, shift, send_to_cooked); } EXPORT_SYMBOL(ieee80211_tx_status); diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index 4e202d0679b2..ecc5e2a8f80b 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -41,9 +41,11 @@ static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; bool chan_switch = local->hw.wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH; - bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW); + bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) && + !ifmgd->tdls_wider_bw_prohibited; enum ieee80211_band band = ieee80211_get_sdata_band(sdata); struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; bool vht = sband && sband->vht_cap.vht_supported; @@ -331,8 +333,8 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata, /* proceed to downgrade the chandef until usable or the same */ while (uc.width > max_width && - !cfg80211_reg_can_beacon(sdata->local->hw.wiphy, - &uc, sdata->wdev.iftype)) + !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc, + sdata->wdev.iftype)) ieee80211_chandef_downgrade(&uc); if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) { diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 6f14591d8ca9..314e3bd7fbdb 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -497,6 +497,36 @@ TRACE_EVENT(drv_configure_filter, ) ); +TRACE_EVENT(drv_config_iface_filter, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + unsigned int filter_flags, + unsigned int changed_flags), + + TP_ARGS(local, sdata, filter_flags, changed_flags), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(unsigned int, filter_flags) + __field(unsigned int, changed_flags) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->filter_flags = filter_flags; + __entry->changed_flags = changed_flags; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT + " filter_flags: %#x changed_flags: %#x", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->filter_flags, + __entry->changed_flags + ) +); + TRACE_EVENT(drv_set_tim, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sta *sta, bool set), @@ -944,9 +974,9 @@ TRACE_EVENT(drv_ampdu_action, struct ieee80211_sub_if_data *sdata, enum ieee80211_ampdu_mlme_action action, struct ieee80211_sta *sta, u16 tid, - u16 *ssn, u8 buf_size), + u16 *ssn, u8 buf_size, bool amsdu), - TP_ARGS(local, sdata, action, sta, tid, ssn, buf_size), + TP_ARGS(local, sdata, action, sta, tid, ssn, buf_size, amsdu), TP_STRUCT__entry( LOCAL_ENTRY @@ -955,6 +985,7 @@ TRACE_EVENT(drv_ampdu_action, 
__field(u16, tid) __field(u16, ssn) __field(u8, buf_size) + __field(bool, amsdu) VIF_ENTRY ), @@ -966,12 +997,13 @@ TRACE_EVENT(drv_ampdu_action, __entry->tid = tid; __entry->ssn = ssn ? *ssn : 0; __entry->buf_size = buf_size; + __entry->amsdu = amsdu; ), TP_printk( - LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d buf:%d", + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d buf:%d amsdu:%d", LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, - __entry->tid, __entry->buf_size + __entry->tid, __entry->buf_size, __entry->amsdu ) ); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 84e0e8c7fb23..464ba1a625bd 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2767,7 +2767,8 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { *ieee80211_get_qos_ctl(hdr) = tid; - hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid); + if (!sta->sta.txq[0]) + hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid); } else { info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number); @@ -3512,6 +3513,12 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, { struct ieee80211_mutable_offsets offs = {}; struct sk_buff *bcn = __ieee80211_beacon_get(hw, vif, &offs, false); + struct sk_buff *copy; + struct ieee80211_supported_band *sband; + int shift; + + if (!bcn) + return bcn; if (tim_offset) *tim_offset = offs.tim_offset; @@ -3519,6 +3526,19 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, if (tim_length) *tim_length = offs.tim_length; + if (ieee80211_hw_check(hw, BEACON_TX_STATUS) || + !hw_to_local(hw)->monitors) + return bcn; + + /* send a copy to monitor interfaces */ + copy = skb_copy(bcn, GFP_ATOMIC); + if (!copy) + return bcn; + + shift = ieee80211_vif_get_shift(vif); + sband = hw->wiphy->bands[ieee80211_get_sdata_band(vif_to_sdata(vif))]; + ieee80211_tx_monitor(hw_to_local(hw), copy, sband, 1, shift, false); + return bcn; } EXPORT_SYMBOL(ieee80211_beacon_get_tim); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 1104421bc525..60c4dbf92625 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1966,7 +1966,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) if (!sdata->u.mgd.associated) continue; - ieee80211_send_nullfunc(local, sdata, 0); + ieee80211_send_nullfunc(local, sdata, false); } } @@ -2017,8 +2017,9 @@ int ieee80211_reconfig(struct ieee80211_local *local) mutex_lock(&local->sta_mtx); list_for_each_entry(sta, &local->sta_list, list) { - ieee80211_sta_tear_down_BA_sessions( - sta, AGG_STOP_LOCAL_REQUEST); + if (!local->resuming) + ieee80211_sta_tear_down_BA_sessions( + sta, AGG_STOP_LOCAL_REQUEST); clear_sta_flag(sta, WLAN_STA_BLOCK_BA); } @@ -2324,6 +2325,8 @@ u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, if (chandef->center_freq2) vht_oper->center_freq_seg2_idx = ieee80211_frequency_to_channel(chandef->center_freq2); + else + vht_oper->center_freq_seg2_idx = 0x00; switch (chandef->width) { case NL80211_CHAN_WIDTH_160: @@ -2541,7 +2544,7 @@ int ieee80211_ave_rssi(struct ieee80211_vif *vif) /* non-managed type interfaces */ return 0; } - return ifmgd->ave_beacon_signal / 16; + return -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal); } EXPORT_SYMBOL_GPL(ieee80211_ave_rssi); diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c index c865ebb2ace2..57b5e94471af 100644 --- a/net/mac802154/cfg.c +++ b/net/mac802154/cfg.c @@ -266,6 +266,195 @@
ieee802154_set_ackreq_default(struct wpan_phy *wpan_phy, return 0; } +#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL +static void +ieee802154_get_llsec_table(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + struct ieee802154_llsec_table **table) +{ + struct net_device *dev = wpan_dev->netdev; + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + + *table = &sdata->sec.table; +} + +static void +ieee802154_lock_llsec_table(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev) +{ + struct net_device *dev = wpan_dev->netdev; + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + + mutex_lock(&sdata->sec_mtx); +} + +static void +ieee802154_unlock_llsec_table(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev) +{ + struct net_device *dev = wpan_dev->netdev; + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + + mutex_unlock(&sdata->sec_mtx); +} + +static int +ieee802154_set_llsec_params(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + const struct ieee802154_llsec_params *params, + int changed) +{ + struct net_device *dev = wpan_dev->netdev; + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + int res; + + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_set_params(&sdata->sec, params, changed); + mutex_unlock(&sdata->sec_mtx); + + return res; +} + +static int +ieee802154_get_llsec_params(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + struct ieee802154_llsec_params *params) +{ + struct net_device *dev = wpan_dev->netdev; + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + int res; + + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_get_params(&sdata->sec, params); + mutex_unlock(&sdata->sec_mtx); + + return res; +} + +static int +ieee802154_add_llsec_key(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + const struct ieee802154_llsec_key_id *id, + const struct ieee802154_llsec_key *key) +{ + struct net_device *dev = wpan_dev->netdev; + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + int res; + + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_key_add(&sdata->sec, id, key); + mutex_unlock(&sdata->sec_mtx); + + return res; +} + +static int +ieee802154_del_llsec_key(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + const struct ieee802154_llsec_key_id *id) +{ + struct net_device *dev = wpan_dev->netdev; + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + int res; + + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_key_del(&sdata->sec, id); + mutex_unlock(&sdata->sec_mtx); + + return res; +} + +static int +ieee802154_add_seclevel(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + const struct ieee802154_llsec_seclevel *sl) +{ + struct net_device *dev = wpan_dev->netdev; + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + int res; + + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_seclevel_add(&sdata->sec, sl); + mutex_unlock(&sdata->sec_mtx); + + return res; +} + +static int +ieee802154_del_seclevel(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + const struct ieee802154_llsec_seclevel *sl) +{ + struct net_device *dev = wpan_dev->netdev; + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + int res; + + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_seclevel_del(&sdata->sec, sl); + mutex_unlock(&sdata->sec_mtx); + + return res; +} + +static int +ieee802154_add_device(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + 
const struct ieee802154_llsec_device *dev_desc) +{ + struct net_device *dev = wpan_dev->netdev; + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + int res; + + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_dev_add(&sdata->sec, dev_desc); + mutex_unlock(&sdata->sec_mtx); + + return res; +} + +static int +ieee802154_del_device(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + __le64 extended_addr) +{ + struct net_device *dev = wpan_dev->netdev; + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + int res; + + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_dev_del(&sdata->sec, extended_addr); + mutex_unlock(&sdata->sec_mtx); + + return res; +} + +static int +ieee802154_add_devkey(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + __le64 extended_addr, + const struct ieee802154_llsec_device_key *key) +{ + struct net_device *dev = wpan_dev->netdev; + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + int res; + + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_devkey_add(&sdata->sec, extended_addr, key); + mutex_unlock(&sdata->sec_mtx); + + return res; +} + +static int +ieee802154_del_devkey(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + __le64 extended_addr, + const struct ieee802154_llsec_device_key *key) +{ + struct net_device *dev = wpan_dev->netdev; + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + int res; + + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_devkey_del(&sdata->sec, extended_addr, key); + mutex_unlock(&sdata->sec_mtx); + + return res; +} +#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ + const struct cfg802154_ops mac802154_config_ops = { .add_virtual_intf_deprecated = ieee802154_add_iface_deprecated, .del_virtual_intf_deprecated = ieee802154_del_iface_deprecated, @@ -284,4 +473,20 @@ const struct cfg802154_ops mac802154_config_ops = { .set_max_frame_retries = ieee802154_set_max_frame_retries, .set_lbt_mode = ieee802154_set_lbt_mode, .set_ackreq_default = ieee802154_set_ackreq_default, +#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL + .get_llsec_table = ieee802154_get_llsec_table, + .lock_llsec_table = ieee802154_lock_llsec_table, + .unlock_llsec_table = ieee802154_unlock_llsec_table, + /* TODO above */ + .set_llsec_params = ieee802154_set_llsec_params, + .get_llsec_params = ieee802154_get_llsec_params, + .add_llsec_key = ieee802154_add_llsec_key, + .del_llsec_key = ieee802154_del_llsec_key, + .add_seclevel = ieee802154_add_seclevel, + .del_seclevel = ieee802154_del_seclevel, + .add_device = ieee802154_add_device, + .del_device = ieee802154_del_device, + .add_devkey = ieee802154_add_devkey, + .del_devkey = ieee802154_del_devkey, +#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ }; diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c index ed26952f9e14..7079cd32a7ad 100644 --- a/net/mac802154/iface.c +++ b/net/mac802154/iface.c @@ -367,12 +367,11 @@ static int mac802154_set_header_security(struct ieee802154_sub_if_data *sdata, return 0; } -static int mac802154_header_create(struct sk_buff *skb, - struct net_device *dev, - unsigned short type, - const void *daddr, - const void *saddr, - unsigned len) +static int ieee802154_header_create(struct sk_buff *skb, + struct net_device *dev, + const struct ieee802154_addr *daddr, + const struct ieee802154_addr *saddr, + unsigned len) { struct ieee802154_hdr hdr; struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); @@ -423,24 +422,89 @@ static int 
mac802154_header_create(struct sk_buff *skb, return hlen; } +static const struct wpan_dev_header_ops ieee802154_header_ops = { + .create = ieee802154_header_create, +}; + +/* This header create functionality assumes an 8 byte array for + * the source and destination pointers at maximum. To adapt this to + * the 802.15.4 data frame header we use extended addressing and + * intra-PAN communication only here. The fc fields are mostly fallback + * handling. This provides dev_hard_header for dgram sockets. + */ +static int mac802154_header_create(struct sk_buff *skb, + struct net_device *dev, + unsigned short type, + const void *daddr, + const void *saddr, + unsigned len) +{ + struct ieee802154_hdr hdr; + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + struct wpan_dev *wpan_dev = &sdata->wpan_dev; + struct ieee802154_mac_cb cb = { }; + int hlen; + + if (!daddr) + return -EINVAL; + + memset(&hdr.fc, 0, sizeof(hdr.fc)); + hdr.fc.type = IEEE802154_FC_TYPE_DATA; + hdr.fc.ack_request = wpan_dev->ackreq; + hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF; + + /* TODO currently a workaround to pass a zeroed cb block to set + * security parameter defaults according to the MIB. + */ + if (mac802154_set_header_security(sdata, &hdr, &cb) < 0) + return -EINVAL; + + hdr.dest.pan_id = wpan_dev->pan_id; + hdr.dest.mode = IEEE802154_ADDR_LONG; + ieee802154_be64_to_le64(&hdr.dest.extended_addr, daddr); + + hdr.source.pan_id = hdr.dest.pan_id; + hdr.source.mode = IEEE802154_ADDR_LONG; + + if (!saddr) + hdr.source.extended_addr = wpan_dev->extended_addr; + else + ieee802154_be64_to_le64(&hdr.source.extended_addr, saddr); + + hlen = ieee802154_hdr_push(skb, &hdr); + if (hlen < 0) + return -EINVAL; + + skb_reset_mac_header(skb); + skb->mac_len = hlen; + + if (len > ieee802154_max_payload(&hdr)) + return -EMSGSIZE; + + return hlen; +} + static int mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr) { struct ieee802154_hdr hdr; - struct ieee802154_addr *addr = (struct ieee802154_addr *)haddr; if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) { pr_debug("malformed packet\n"); return 0; } - *addr = hdr.source; - return sizeof(*addr); + if (hdr.source.mode == IEEE802154_ADDR_LONG) { + ieee802154_le64_to_be64(haddr, &hdr.source.extended_addr); + return IEEE802154_EXTENDED_ADDR_LEN; + } + + return 0; } -static struct header_ops mac802154_header_ops = { - .create = mac802154_header_create, - .parse = mac802154_header_parse, +static const struct header_ops mac802154_header_ops = { + .create = mac802154_header_create, + .parse = mac802154_header_parse, }; static const struct net_device_ops mac802154_wpan_ops = { @@ -471,9 +535,29 @@ static void ieee802154_if_setup(struct net_device *dev) dev->addr_len = IEEE802154_EXTENDED_ADDR_LEN; memset(dev->broadcast, 0xff, IEEE802154_EXTENDED_ADDR_LEN); - dev->hard_header_len = MAC802154_FRAME_HARD_HEADER_LEN; - dev->needed_tailroom = 2 + 16; /* FCS + MIC */ - dev->mtu = IEEE802154_MTU; + /* Set hard_header_len to IEEE802154_MIN_HEADER_LEN - 1. AF_PACKET + * will not send frames without any payload, but ack frames + * have no payload, so subtract one so that we can send a 3 byte + * frame. The xmit callback assumes at least a hard header where the + * two byte fc and the sequence field are set. + */ + dev->hard_header_len = IEEE802154_MIN_HEADER_LEN - 1; + /* The auth_tag header is for security and is placed in the private + * payload room of the mac frame, which sits between the payload and + * the FCS field. + */ + dev->needed_tailroom = IEEE802154_MAX_AUTH_TAG_LEN + + IEEE802154_FCS_LEN; + /* The mtu size is the payload without the mac header in this case. + * We have a dynamic length header with a minimum header length + * which is hard_header_len. Here we set mtu to the maximum payload + * size, which is IEEE802154_MTU - IEEE802154_FCS_LEN - + * hard_header_len: the FCS is set by hardware or in ndo_start_xmit, + * and the minimum mac header can be evaluated inside the driver + * layer. The rest of the mac header will be part of the payload if + * it is greater than hard_header_len. + */ + dev->mtu = IEEE802154_MTU - IEEE802154_FCS_LEN - + dev->hard_header_len; dev->tx_queue_len = 300; dev->flags = IFF_NOARP | IFF_BROADCAST; } @@ -513,6 +597,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata, sdata->dev->netdev_ops = &mac802154_wpan_ops; sdata->dev->ml_priv = &mac802154_mlme_wpan; wpan_dev->promiscuous_mode = false; + wpan_dev->header_ops = &ieee802154_header_ops; mutex_init(&sdata->sec_mtx); @@ -550,7 +635,8 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name, if (!ndev) return ERR_PTR(-ENOMEM); - ndev->needed_headroom = local->hw.extra_tx_headroom; + ndev->needed_headroom = local->hw.extra_tx_headroom + + IEEE802154_MAX_HEADER_LEN; ret = dev_alloc_name(ndev, ndev->name); if (ret < 0) diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c index 985e9394e2af..7799d3c41fe2 100644 --- a/net/mac802154/llsec.c +++ b/net/mac802154/llsec.c @@ -401,6 +401,7 @@ int mac802154_llsec_dev_del(struct mac802154_llsec *sec, __le64 device_addr) hash_del_rcu(&pos->bucket_s); hash_del_rcu(&pos->bucket_hw); + list_del_rcu(&pos->dev.list); call_rcu(&pos->rcu, llsec_dev_free_rcu); return 0; diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c index d1c33c1d6b9b..42e96729dae6 100644 --- a/net/mac802154/rx.c +++ b/net/mac802154/rx.c @@ -87,6 +87,10 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata, skb->dev = sdata->dev; + /* TODO this should be moved after the netif_receive_skb call, + * otherwise wireshark will show a mac header with security fields + * although the payload is already decrypted. + */ rc = mac802154_llsec_decrypt(&sdata->sec, skb); if (rc) { pr_debug("decryption failed: %i\n", rc); diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c index 7ed439172f30..3827f359b336 100644 --- a/net/mac802154/tx.c +++ b/net/mac802154/tx.c @@ -77,9 +77,6 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) put_unaligned_le16(crc, skb_put(skb, 2)); } - if (skb_cow_head(skb, local->hw.extra_tx_headroom)) - goto err_tx; - /* Stop the netif queue on each sub_if_data object. */ ieee802154_stop_queue(&local->hw); @@ -121,6 +118,10 @@ ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev) struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int rc; + /* TODO we should move this to the wpan_dev_hard_header and + * dev_hard_header functions. Otherwise wireshark will show a mac + * header with security fields although the payload is not yet + * encrypted.
+ */ rc = mac802154_llsec_encrypt(&sdata->sec, skb); if (rc) { netdev_warn(dev, "encryption failed: %i\n", rc); diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c index 21e70bc9af98..67591aef9cae 100644 --- a/net/mpls/mpls_iptunnel.c +++ b/net/mpls/mpls_iptunnel.c @@ -37,7 +37,7 @@ static unsigned int mpls_encap_size(struct mpls_iptunnel_encap *en) return en->labels * sizeof(struct mpls_shim_hdr); } -int mpls_output(struct sock *sk, struct sk_buff *skb) +int mpls_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct mpls_iptunnel_encap *tun_encap_info; struct mpls_shim_hdr *hdr; diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index ce37d204fcf1..1e24fff53e4b 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -692,7 +692,7 @@ static inline int ip_vs_gather_frags(struct netns_ipvs *ipvs, int err; local_bh_disable(); - err = ip_defrag(skb, user); + err = ip_defrag(ipvs->net, skb, user); local_bh_enable(); if (!err) ip_send_check(ip_hdr(skb)); diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 77182b9750cd..3264cb49b333 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -576,7 +576,7 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb, if (!skb->sk) skb_sender_cpu_clear(skb); NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb, - NULL, skb_dst(skb)->dev, dst_output_okfn); + NULL, skb_dst(skb)->dev, dst_output); } else ret = NF_ACCEPT; @@ -598,7 +598,7 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb, if (!skb->sk) skb_sender_cpu_clear(skb); NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb, - NULL, skb_dst(skb)->dev, dst_output_okfn); + NULL, skb_dst(skb)->dev, dst_output); } else ret = NF_ACCEPT; return ret; @@ -1049,7 +1049,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ret = ip_vs_tunnel_xmit_prepare(skb, cp); if (ret == NF_ACCEPT) - ip_local_out(skb); + ip_local_out(net, skb->sk, skb); else if (ret == NF_DROP) kfree_skb(skb); rcu_read_unlock(); @@ -1141,7 +1141,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, ret = ip_vs_tunnel_xmit_prepare(skb, cp); if (ret == NF_ACCEPT) - ip6_local_out(skb); + ip6_local_out(cp->ipvs->net, skb->sk, skb); else if (ret == NF_DROP) kfree_skb(skb); rcu_read_unlock(); diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 70277b11f742..f1d9e887f5b1 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -64,7 +64,7 @@ void nfnl_unlock(__u8 subsys_id) EXPORT_SYMBOL_GPL(nfnl_unlock); #ifdef CONFIG_PROVE_LOCKING -int lockdep_nfnl_is_held(u8 subsys_id) +bool lockdep_nfnl_is_held(u8 subsys_id) { return lockdep_is_held(&table[subsys_id].mutex); } diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index a5b9680a1821..06eb48fceb42 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -541,9 +541,9 @@ __build_packet_message(struct nfnl_log_net *log, if (skb->tstamp.tv64) { struct nfulnl_msg_packet_timestamp ts; - struct timeval tv = ktime_to_timeval(skb->tstamp); - ts.sec = cpu_to_be64(tv.tv_sec); - ts.usec = cpu_to_be64(tv.tv_usec); + struct timespec64 kts = ktime_to_timespec64(skb->tstamp); + ts.sec = cpu_to_be64(kts.tv_sec); + ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC); if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts)) goto nla_put_failure; diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 
75724a96aef2..bc0e504f33a6 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -39,7 +39,7 @@ void genl_unlock(void) EXPORT_SYMBOL(genl_unlock); #ifdef CONFIG_LOCKDEP -int lockdep_genl_is_held(void) +bool lockdep_genl_is_held(void) { return lockdep_is_held(&genl_mutex); } diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 315f5330b6e5..1d21ab9d2b5c 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -620,7 +620,7 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key, return 0; } -static int ovs_vport_output(struct sock *sock, struct sk_buff *skb) +static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage); struct vport *vport = data->vport; @@ -679,8 +679,8 @@ static void prepare_frag(struct vport *vport, struct sk_buff *skb) skb_pull(skb, hlen); } -static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru, - __be16 ethertype) +static void ovs_fragment(struct net *net, struct vport *vport, + struct sk_buff *skb, u16 mru, __be16 ethertype) { if (skb_network_offset(skb) > MAX_L2_LEN) { OVS_NLERR(1, "L2 header too long to fragment"); @@ -700,7 +700,7 @@ static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru, skb_dst_set_noref(skb, &ovs_dst); IPCB(skb)->frag_max_size = mru; - ip_do_fragment(skb->sk, skb, ovs_vport_output); + ip_do_fragment(net, skb->sk, skb, ovs_vport_output); refdst_drop(orig_dst); } else if (ethertype == htons(ETH_P_IPV6)) { const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops(); @@ -722,7 +722,7 @@ static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru, skb_dst_set_noref(skb, &ovs_rt.dst); IP6CB(skb)->frag_max_size = mru; - v6ops->fragment(skb->sk, skb, ovs_vport_output); + v6ops->fragment(net, skb->sk, skb, ovs_vport_output); refdst_drop(orig_dst); } else { WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.", @@ -743,6 +743,7 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port, if (likely(!mru || (skb->len <= mru + ETH_HLEN))) { ovs_vport_send(vport, skb); } else if (mru <= vport->dev->mtu) { + struct net *net = read_pnet(&dp->net); __be16 ethertype = key->eth.type; if (!is_flow_key_valid(key)) { @@ -752,7 +753,7 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port, ethertype = vlan_get_protocol(skb); } - ovs_fragment(vport, skb, mru, ethertype); + ovs_fragment(net, vport, skb, mru, ethertype); } else { kfree_skb(skb); } diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index eb759e3a88ca..ad614267cc2a 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -304,7 +304,7 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key, int err; memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); - err = ip_defrag(skb, user); + err = ip_defrag(net, skb, user); if (err) return err; @@ -315,7 +315,7 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key, struct sk_buff *reasm; memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); - reasm = nf_ct_frag6_gather(skb, user); + reasm = nf_ct_frag6_gather(net, skb, user); if (!reasm) return -EINPROGRESS; diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index c8db44ab2ee7..0ea128eeeab2 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -698,8 +698,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info, { /* Extract metadata from packet. 
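The ovs_vport_output() and ovs_fragment() hunks above follow the same convention change seen in mpls_output() and the IPVS xmit paths: output callbacks now receive the network namespace explicitly, as int (*okfn)(struct net *, struct sock *, struct sk_buff *), instead of deriving it from skb or socket state. A hedged sketch of the shape such a callback takes (my_output and my_fragment_and_send are illustrative names, not from the patch):

/* An output callback in the explicit-netns style used above. */
static int my_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/* ... transmit skb within 'net' ... */
	return 0;
}

static int my_fragment_and_send(struct net *net, struct sock *sk,
				struct sk_buff *skb)
{
	/* ip_do_fragment() calls my_output() once per fragment and
	 * threads the same net/sk pair through unchanged.
	 */
	return ip_do_fragment(net, sk, skb, my_output);
}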
*/ if (tun_info) { - if (ip_tunnel_info_af(tun_info) != AF_INET) - return -EINVAL; + key->tun_proto = ip_tunnel_info_af(tun_info); memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key)); if (tun_info->options_len) { @@ -714,6 +713,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info, key->tun_opts_len = 0; } } else { + key->tun_proto = 0; key->tun_opts_len = 0; memset(&key->tun_key, 0, sizeof(key->tun_key)); } diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index fe527d2dd4b7..5688e33e2de6 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -63,6 +63,7 @@ struct sw_flow_key { u32 skb_mark; /* SKB mark. */ u16 in_port; /* Input switch port (or DP_MAX_PORTS). */ } __packed phy; /* Safe when right after 'tun_key'. */ + u8 tun_proto; /* Protocol of encapsulating tunnel. */ u32 ovs_flow_hash; /* Datapath computed hash value. */ u32 recirc_id; /* Recirculation ID. */ struct { diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 5c030a4d7338..77850f177a47 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -262,8 +262,8 @@ size_t ovs_tun_key_attr_size(void) * updating this function. */ return nla_total_size(8) /* OVS_TUNNEL_KEY_ATTR_ID */ - + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */ - + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */ + + nla_total_size(16) /* OVS_TUNNEL_KEY_ATTR_IPV[46]_SRC */ + + nla_total_size(16) /* OVS_TUNNEL_KEY_ATTR_IPV[46]_DST */ + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TOS */ + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */ + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */ @@ -323,6 +323,8 @@ static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = OVS_ATTR_VARIABLE }, [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = OVS_ATTR_NESTED, .next = ovs_vxlan_ext_key_lens }, + [OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = sizeof(struct in6_addr) }, + [OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = sizeof(struct in6_addr) }, }; /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. 
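The flow.c hunk above stops rejecting non-IPv4 tunnel metadata and instead records the address family in key->tun_proto. ip_tunnel_info_af() derives that family from the tunnel mode bits, roughly as follows (paraphrasing the include/net/ip_tunnels.h inline):

static inline unsigned short
ip_tunnel_info_af(const struct ip_tunnel_info *tun_info)
{
	return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}

This is the same IP_TUNNEL_INFO_IPV6 flag that validate_and_copy_set_tun() sets further down when key.tun_proto == AF_INET6.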
*/ @@ -542,14 +544,14 @@ static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr, return 0; } -static int ipv4_tun_from_nlattr(const struct nlattr *attr, - struct sw_flow_match *match, bool is_mask, - bool log) +static int ip_tun_from_nlattr(const struct nlattr *attr, + struct sw_flow_match *match, bool is_mask, + bool log) { struct nlattr *a; int rem; bool ttl = false; - __be16 tun_flags = 0; + __be16 tun_flags = 0; + bool ipv4 = false, ipv6 = false; int opts_type = 0; nla_for_each_nested(a, attr, rem) { @@ -578,10 +580,22 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr, case OVS_TUNNEL_KEY_ATTR_IPV4_SRC: SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src, nla_get_in_addr(a), is_mask); + ipv4 = true; break; case OVS_TUNNEL_KEY_ATTR_IPV4_DST: SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst, nla_get_in_addr(a), is_mask); + ipv4 = true; + break; + case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: + SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src, + nla_get_in6_addr(a), is_mask); + ipv6 = true; + break; + case OVS_TUNNEL_KEY_ATTR_IPV6_DST: + SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst, + nla_get_in6_addr(a), is_mask); + ipv6 = true; break; case OVS_TUNNEL_KEY_ATTR_TOS: SW_FLOW_KEY_PUT(match, tun_key.tos, @@ -636,28 +650,46 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr, opts_type = type; break; default: - OVS_NLERR(log, "Unknown IPv4 tunnel attribute %d", + OVS_NLERR(log, "Unknown IP tunnel attribute %d", type); return -EINVAL; } } SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask); + if (is_mask) + SW_FLOW_KEY_MEMSET_FIELD(match, tun_proto, 0xff, true); + else + SW_FLOW_KEY_PUT(match, tun_proto, ipv6 ? AF_INET6 : AF_INET, + false); if (rem > 0) { - OVS_NLERR(log, "IPv4 tunnel attribute has %d unknown bytes.", + OVS_NLERR(log, "IP tunnel attribute has %d unknown bytes.", rem); return -EINVAL; } + if (ipv4 && ipv6) { + OVS_NLERR(log, "Mixed IPv4 and IPv6 tunnel attributes"); + return -EINVAL; + } + if (!is_mask) { - if (!match->key->tun_key.u.ipv4.dst) { + if (!ipv4 && !ipv6) { + OVS_NLERR(log, "IP tunnel dst address not specified"); + return -EINVAL; + } + if (ipv4 && !match->key->tun_key.u.ipv4.dst) { OVS_NLERR(log, "IPv4 tunnel dst address is zero"); return -EINVAL; } + if (ipv6 && ipv6_addr_any(&match->key->tun_key.u.ipv6.dst)) { + OVS_NLERR(log, "IPv6 tunnel dst address is zero"); + return -EINVAL; + } if (!ttl) { - OVS_NLERR(log, "IPv4 tunnel TTL not specified."); + OVS_NLERR(log, "IP tunnel TTL not specified."); return -EINVAL; } } @@ -682,21 +714,36 @@ static int vxlan_opt_to_nlattr(struct sk_buff *skb, return 0; } -static int __ipv4_tun_to_nlattr(struct sk_buff *skb, - const struct ip_tunnel_key *output, - const void *tun_opts, int swkey_tun_opts_len) +static int __ip_tun_to_nlattr(struct sk_buff *skb, + const struct ip_tunnel_key *output, + const void *tun_opts, int swkey_tun_opts_len, + unsigned short tun_proto) { if (output->tun_flags & TUNNEL_KEY && nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id)) return -EMSGSIZE; - if (output->u.ipv4.src && - nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, - output->u.ipv4.src)) - return -EMSGSIZE; - if (output->u.ipv4.dst && - nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, - output->u.ipv4.dst)) - return -EMSGSIZE; + switch (tun_proto) { + case AF_INET: + if (output->u.ipv4.src && + nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, + output->u.ipv4.src)) + return -EMSGSIZE; + if (output->u.ipv4.dst && + nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, + output->u.ipv4.dst)) + return -EMSGSIZE; + break; + case AF_INET6: + if 
(!ipv6_addr_any(&output->u.ipv6.src) && + nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_SRC, + &output->u.ipv6.src)) + return -EMSGSIZE; + if (!ipv6_addr_any(&output->u.ipv6.dst) && + nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_DST, + &output->u.ipv6.dst)) + return -EMSGSIZE; + break; + } if (output->tos && nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos)) return -EMSGSIZE; @@ -730,9 +777,10 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb, return 0; } -static int ipv4_tun_to_nlattr(struct sk_buff *skb, - const struct ip_tunnel_key *output, - const void *tun_opts, int swkey_tun_opts_len) +static int ip_tun_to_nlattr(struct sk_buff *skb, + const struct ip_tunnel_key *output, + const void *tun_opts, int swkey_tun_opts_len, + unsigned short tun_proto) { struct nlattr *nla; int err; @@ -741,7 +789,8 @@ static int ipv4_tun_to_nlattr(struct sk_buff *skb, if (!nla) return -EMSGSIZE; - err = __ipv4_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len); + err = __ip_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len, + tun_proto); if (err) return err; @@ -753,9 +802,10 @@ int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb, const struct ip_tunnel_info *egress_tun_info, const void *egress_tun_opts) { - return __ipv4_tun_to_nlattr(skb, &egress_tun_info->key, - egress_tun_opts, - egress_tun_info->options_len); + return __ip_tun_to_nlattr(skb, &egress_tun_info->key, + egress_tun_opts, + egress_tun_info->options_len, + ip_tunnel_info_af(egress_tun_info)); } static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match, @@ -806,8 +856,8 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match, *attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK); } if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) { - if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match, - is_mask, log) < 0) + if (ip_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match, + is_mask, log) < 0) return -EINVAL; *attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL); } @@ -1194,7 +1244,7 @@ int ovs_nla_get_match(struct net *net, struct sw_flow_match *match, /* The userspace does not send tunnel attributes that * are 0, but we should not wildcard them nonetheless. 
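Because tun_proto is non-zero exactly when any tunnel attribute was parsed, it replaces u.ipv4.dst as the "tunnel key present" test both here and in flow_table.c's flow_key_start(). A hedged sketch of what the SW_FLOW_KEY_MEMSET_FIELD() helper used for that wildcarding does (the in-tree macro additionally records the touched byte range via update_range()):

#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask)		\
	do {								\
		if (is_mask)						\
			memset(&(match)->mask->key.field, value,	\
			       sizeof((match)->mask->key.field));	\
		else							\
			memset(&(match)->key->field, value,		\
			       sizeof((match)->key->field));		\
	} while (0)

With is_mask true and value 0xff this exact-matches the whole tun_key once a tunnel is present, which is why userspace may omit zero-valued attributes without having them wildcarded.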
*/ - if (match->key->tun_key.u.ipv4.dst) + if (match->key->tun_proto) SW_FLOW_KEY_MEMSET_FIELD(match, tun_key, 0xff, true); @@ -1367,14 +1417,14 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey, if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority)) goto nla_put_failure; - if ((swkey->tun_key.u.ipv4.dst || is_mask)) { + if ((swkey->tun_proto || is_mask)) { const void *opts = NULL; if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT) opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len); - if (ipv4_tun_to_nlattr(skb, &output->tun_key, opts, - swkey->tun_opts_len)) + if (ip_tun_to_nlattr(skb, &output->tun_key, opts, + swkey->tun_opts_len, swkey->tun_proto)) goto nla_put_failure; } @@ -1877,7 +1927,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, int err = 0, start, opts_type; ovs_match_init(&match, &key, NULL); - opts_type = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log); + opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log); if (opts_type < 0) return opts_type; @@ -1913,6 +1963,8 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, tun_info = &tun_dst->u.tun_info; tun_info->mode = IP_TUNNEL_INFO_TX; + if (key.tun_proto == AF_INET6) + tun_info->mode |= IP_TUNNEL_INFO_IPV6; tun_info->key = key.tun_key; /* We need to store the options in the action itself since @@ -2374,10 +2426,11 @@ static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb) if (!start) return -EMSGSIZE; - err = ipv4_tun_to_nlattr(skb, &tun_info->key, - tun_info->options_len ? + err = ip_tun_to_nlattr(skb, &tun_info->key, + tun_info->options_len ? ip_tunnel_info_opts(tun_info) : NULL, - tun_info->options_len); + tun_info->options_len, + ip_tunnel_info_af(tun_info)); if (err) return err; nla_nest_end(skb, start); diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index f2ea83ba4763..95dbcedf0bd4 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c @@ -427,7 +427,7 @@ static u32 flow_hash(const struct sw_flow_key *key, static int flow_key_start(const struct sw_flow_key *key) { - if (key->tun_key.u.ipv4.dst) + if (key->tun_proto) return 0; else return rounddown(offsetof(struct sw_flow_key, phy), diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index aa4b15c35884..691660b9b7ef 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1423,7 +1423,7 @@ static unsigned int fanout_demux_bpf(struct packet_fanout *f, rcu_read_lock(); prog = rcu_dereference(f->bpf_prog); if (prog) - ret = BPF_PROG_RUN(prog, skb) % num; + ret = bpf_prog_run_clear_cb(prog, skb) % num; rcu_read_unlock(); return ret; @@ -1439,17 +1439,17 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, { struct packet_fanout *f = pt->af_packet_priv; unsigned int num = READ_ONCE(f->num_members); + struct net *net = read_pnet(&f->net); struct packet_sock *po; unsigned int idx; - if (!net_eq(dev_net(dev), read_pnet(&f->net)) || - !num) { + if (!net_eq(dev_net(dev), net) || !num) { kfree_skb(skb); return 0; } if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) { - skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET); + skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET); if (!skb) return 0; } @@ -1519,10 +1519,10 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po) static bool match_fanout_group(struct packet_type *ptype, struct sock *sk) { - if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout) - return true; + if (sk->sk_family != PF_PACKET) + 
return false; - return false; + return ptype->af_packet_priv == pkt_sk(sk)->fanout; } static void fanout_init_data(struct packet_fanout *f) @@ -1567,7 +1567,7 @@ static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data, if (copy_from_user(&fprog, data, len)) return -EFAULT; - ret = bpf_prog_create_from_user(&new, &fprog, NULL); + ret = bpf_prog_create_from_user(&new, &fprog, NULL, false); if (ret) return ret; @@ -1939,16 +1939,16 @@ out_free: return err; } -static unsigned int run_filter(const struct sk_buff *skb, - const struct sock *sk, - unsigned int res) +static unsigned int run_filter(struct sk_buff *skb, + const struct sock *sk, + unsigned int res) { struct sk_filter *filter; rcu_read_lock(); filter = rcu_dereference(sk->sk_filter); if (filter != NULL) - res = SK_RUN_FILTER(filter, skb); + res = bpf_prog_run_clear_cb(filter->prog, skb); rcu_read_unlock(); return res; @@ -2630,6 +2630,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) __be16 proto; unsigned char *addr; int err, reserve = 0; + struct sockcm_cookie sockc; struct virtio_net_hdr vnet_hdr = { 0 }; int offset = 0; int vnet_hdr_len; @@ -2665,6 +2666,13 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) if (unlikely(!(dev->flags & IFF_UP))) goto out_unlock; + sockc.mark = sk->sk_mark; + if (msg->msg_controllen) { + err = sock_cmsg_send(sk, msg, &sockc); + if (unlikely(err)) + goto out_unlock; + } + if (sock->type == SOCK_RAW) reserve = dev->hard_header_len; if (po->has_vnet_hdr) { @@ -2774,7 +2782,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) skb->protocol = proto; skb->dev = dev; skb->priority = sk->sk_priority; - skb->mark = sk->sk_mark; + skb->mark = sockc.mark; packet_pick_tx_queue(dev, skb); diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index a2f28a6d4dc5..384ea1e3cd69 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -72,13 +72,7 @@ static int rds_release(struct socket *sock) rds_clear_recv_queue(rs); rds_cong_remove_socket(rs); - /* - * the binding lookup hash uses rcu, we need to - * make sure we synchronize_rcu before we free our - * entry - */ rds_remove_bound(rs); - synchronize_rcu(); rds_send_drop_to(rs, NULL); rds_rdma_drop_keys(rs); @@ -588,6 +582,8 @@ static int rds_init(void) { int ret; + rds_bind_lock_init(); + ret = rds_conn_init(); if (ret) goto out; diff --git a/net/rds/bind.c b/net/rds/bind.c index dd666fb9b4e1..61925667b7a4 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@ -38,48 +38,50 @@ #include <linux/ratelimit.h> #include "rds.h" +struct bind_bucket { + rwlock_t lock; + struct hlist_head head; +}; + #define BIND_HASH_SIZE 1024 -static struct hlist_head bind_hash_table[BIND_HASH_SIZE]; -static DEFINE_SPINLOCK(rds_bind_lock); +static struct bind_bucket bind_hash_table[BIND_HASH_SIZE]; -static struct hlist_head *hash_to_bucket(__be32 addr, __be16 port) +static struct bind_bucket *hash_to_bucket(__be32 addr, __be16 port) { return bind_hash_table + (jhash_2words((u32)addr, (u32)port, 0) & (BIND_HASH_SIZE - 1)); } -static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port, +/* must hold either read or write lock (write lock for insert != NULL) */ +static struct rds_sock *rds_bind_lookup(struct bind_bucket *bucket, + __be32 addr, __be16 port, struct rds_sock *insert) { struct rds_sock *rs; - struct hlist_head *head = hash_to_bucket(addr, port); + struct hlist_head *head = &bucket->head; u64 cmp; u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port); - rcu_read_lock(); - 
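The packet_snd() change above (a sockcm_cookie filled in by sock_cmsg_send()) makes the skb mark a per-packet property: it defaults to sk->sk_mark but can be overridden through ancillary data. A hedged userspace sketch of sending one datagram with a per-packet mark, assuming the SOL_SOCKET/SO_MARK cmsg that sock_cmsg_send() parses (requires CAP_NET_ADMIN):

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t send_marked(int fd, const void *buf, size_t len,
			   const struct sockaddr *dst, socklen_t dlen)
{
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	char cbuf[CMSG_SPACE(sizeof(uint32_t))];
	struct msghdr msg = {
		.msg_name	= (void *)dst,
		.msg_namelen	= dlen,
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
	};
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
	uint32_t mark = 42;	/* per-packet fwmark, no SO_MARK setsockopt */

	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type = SO_MARK;
	cm->cmsg_len = CMSG_LEN(sizeof(mark));
	memcpy(CMSG_DATA(cm), &mark, sizeof(mark));

	return sendmsg(fd, &msg, 0);
}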
hlist_for_each_entry_rcu(rs, head, rs_bound_node) { + hlist_for_each_entry(rs, head, rs_bound_node) { cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) | be16_to_cpu(rs->rs_bound_port); if (cmp == needle) { - rcu_read_unlock(); + rds_sock_addref(rs); return rs; } } - rcu_read_unlock(); if (insert) { /* * make sure our addr and port are set before - * we are added to the list, other people - * in rcu will find us as soon as the - * hlist_add_head_rcu is done + * we are added to the list. */ insert->rs_bound_addr = addr; insert->rs_bound_port = port; rds_sock_addref(insert); - hlist_add_head_rcu(&insert->rs_bound_node, head); + hlist_add_head(&insert->rs_bound_node, head); } return NULL; } @@ -93,16 +95,21 @@ static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port, struct rds_sock *rds_find_bound(__be32 addr, __be16 port) { struct rds_sock *rs; + unsigned long flags; + struct bind_bucket *bucket = hash_to_bucket(addr, port); - rs = rds_bind_lookup(addr, port, NULL); + read_lock_irqsave(&bucket->lock, flags); + rs = rds_bind_lookup(bucket, addr, port, NULL); + read_unlock_irqrestore(&bucket->lock, flags); - if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) - rds_sock_addref(rs); - else + if (rs && sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) { + rds_sock_put(rs); rs = NULL; + } rdsdebug("returning rs %p for %pI4:%u\n", rs, &addr, ntohs(port)); + return rs; } @@ -112,6 +119,7 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port) unsigned long flags; int ret = -EADDRINUSE; u16 rover, last; + struct bind_bucket *bucket; if (*port != 0) { rover = be16_to_cpu(*port); @@ -121,42 +129,48 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port) last = rover - 1; } - spin_lock_irqsave(&rds_bind_lock, flags); - do { + struct rds_sock *rrs; if (rover == 0) rover++; - if (!rds_bind_lookup(addr, cpu_to_be16(rover), rs)) { + + bucket = hash_to_bucket(addr, cpu_to_be16(rover)); + write_lock_irqsave(&bucket->lock, flags); + rrs = rds_bind_lookup(bucket, addr, cpu_to_be16(rover), rs); + write_unlock_irqrestore(&bucket->lock, flags); + if (!rrs) { *port = rs->rs_bound_port; ret = 0; rdsdebug("rs %p binding to %pI4:%d\n", rs, &addr, (int)ntohs(*port)); break; + } else { + rds_sock_put(rrs); } } while (rover++ != last); - spin_unlock_irqrestore(&rds_bind_lock, flags); - return ret; } void rds_remove_bound(struct rds_sock *rs) { unsigned long flags; + struct bind_bucket *bucket = + hash_to_bucket(rs->rs_bound_addr, rs->rs_bound_port); - spin_lock_irqsave(&rds_bind_lock, flags); + write_lock_irqsave(&bucket->lock, flags); if (rs->rs_bound_addr) { rdsdebug("rs %p unbinding from %pI4:%d\n", rs, &rs->rs_bound_addr, ntohs(rs->rs_bound_port)); - hlist_del_init_rcu(&rs->rs_bound_node); + hlist_del_init(&rs->rs_bound_node); rds_sock_put(rs); rs->rs_bound_addr = 0; } - spin_unlock_irqrestore(&rds_bind_lock, flags); + write_unlock_irqrestore(&bucket->lock, flags); } int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) @@ -182,7 +196,14 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) goto out; if (rs->rs_transport) { /* previously bound */ - ret = 0; + trans = rs->rs_transport; + if (trans->laddr_check(sock_net(sock->sk), + sin->sin_addr.s_addr) != 0) { + ret = -ENOPROTOOPT; + rds_remove_bound(rs); + } else { + ret = 0; + } goto out; } trans = rds_trans_get_preferred(sock_net(sock->sk), @@ -200,9 +221,13 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) out: release_sock(sk); - - /* we might have called 
rds_remove_bound on error */ - if (ret) - synchronize_rcu(); return ret; } + +void rds_bind_lock_init(void) +{ + int i; + + for (i = 0; i < BIND_HASH_SIZE; i++) + rwlock_init(&bind_hash_table[i].lock); +} diff --git a/net/rds/connection.c b/net/rds/connection.c index 49adeef8090c..d4564036a339 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -128,10 +128,7 @@ static struct rds_connection *__rds_conn_create(struct net *net, struct rds_transport *loop_trans; unsigned long flags; int ret; - struct rds_transport *otrans = trans; - if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP) - goto new_conn; rcu_read_lock(); conn = rds_conn_lookup(net, head, laddr, faddr, trans); if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport && @@ -147,7 +144,6 @@ static struct rds_connection *__rds_conn_create(struct net *net, if (conn) goto out; -new_conn: conn = kmem_cache_zalloc(rds_conn_slab, gfp); if (!conn) { conn = ERR_PTR(-ENOMEM); @@ -207,6 +203,7 @@ new_conn: atomic_set(&conn->c_state, RDS_CONN_DOWN); conn->c_send_gen = 0; + conn->c_outgoing = (is_outgoing ? 1 : 0); conn->c_reconnect_jiffies = 0; INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker); INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker); @@ -243,22 +240,13 @@ new_conn: /* Creating normal conn */ struct rds_connection *found; - if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP) - found = NULL; - else - found = rds_conn_lookup(net, head, laddr, faddr, trans); + found = rds_conn_lookup(net, head, laddr, faddr, trans); if (found) { trans->conn_free(conn->c_transport_data); kmem_cache_free(rds_conn_slab, conn); conn = found; } else { - if ((is_outgoing && otrans->t_type == RDS_TRANS_TCP) || - (otrans->t_type != RDS_TRANS_TCP)) { - /* Only the active side should be added to - * reconnect list for TCP. 
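With the global rds_bind_lock and the RCU hash walk gone, the bind table's correctness now rests on two rules visible above: rds_bind_lookup() runs under the per-bucket rwlock and returns any match with a reference held (rds_sock_addref()), and insertion happens in the same write-locked section as the collision check. A hedged sketch of the caller-side contract (rds_port_in_use is a hypothetical helper, not from the patch):

/* Every successful lookup must be balanced with a put. */
static bool rds_port_in_use(__be32 addr, __be16 port)
{
	struct rds_sock *rs = rds_find_bound(addr, port);

	if (!rs)
		return false;

	/* rds_find_bound() already dropped SOCK_DEAD sockets and
	 * handed the rest back with a reference held.
	 */
	rds_sock_put(rs);
	return true;
}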
- */ - hlist_add_head_rcu(&conn->c_hash_node, head); - } + hlist_add_head_rcu(&conn->c_hash_node, head); rds_cong_add_conn(conn); rds_conn_count++; } @@ -337,7 +325,9 @@ void rds_conn_shutdown(struct rds_connection *conn) rcu_read_lock(); if (!hlist_unhashed(&conn->c_hash_node)) { rcu_read_unlock(); - rds_queue_reconnect(conn); + if (conn->c_trans->t_type != RDS_TRANS_TCP || + conn->c_outgoing == 1) + rds_queue_reconnect(conn); } else { rcu_read_unlock(); } diff --git a/net/rds/ib.c b/net/rds/ib.c index 2d3f2ab475df..a833ab7898fe 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c @@ -43,14 +43,14 @@ #include "rds.h" #include "ib.h" -static unsigned int fmr_pool_size = RDS_FMR_POOL_SIZE; -unsigned int fmr_message_size = RDS_FMR_SIZE + 1; /* +1 allows for unaligned MRs */ +unsigned int rds_ib_fmr_1m_pool_size = RDS_FMR_1M_POOL_SIZE; +unsigned int rds_ib_fmr_8k_pool_size = RDS_FMR_8K_POOL_SIZE; unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT; -module_param(fmr_pool_size, int, 0444); -MODULE_PARM_DESC(fmr_pool_size, " Max number of fmr per HCA"); -module_param(fmr_message_size, int, 0444); -MODULE_PARM_DESC(fmr_message_size, " Max size of a RDMA transfer"); +module_param(rds_ib_fmr_1m_pool_size, int, 0444); +MODULE_PARM_DESC(rds_ib_fmr_1m_pool_size, " Max number of 1M fmr per HCA"); +module_param(rds_ib_fmr_8k_pool_size, int, 0444); +MODULE_PARM_DESC(rds_ib_fmr_8k_pool_size, " Max number of 8K fmr per HCA"); module_param(rds_ib_retry_count, int, 0444); MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error"); @@ -97,8 +97,10 @@ static void rds_ib_dev_free(struct work_struct *work) struct rds_ib_device *rds_ibdev = container_of(work, struct rds_ib_device, free_work); - if (rds_ibdev->mr_pool) - rds_ib_destroy_mr_pool(rds_ibdev->mr_pool); + if (rds_ibdev->mr_8k_pool) + rds_ib_destroy_mr_pool(rds_ibdev->mr_8k_pool); + if (rds_ibdev->mr_1m_pool) + rds_ib_destroy_mr_pool(rds_ibdev->mr_1m_pool); if (rds_ibdev->pd) ib_dealloc_pd(rds_ibdev->pd); @@ -148,9 +150,13 @@ static void rds_ib_add_one(struct ib_device *device) rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE); rds_ibdev->fmr_max_remaps = dev_attr->max_map_per_fmr?: 32; - rds_ibdev->max_fmrs = dev_attr->max_fmr ? - min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) : - fmr_pool_size; + rds_ibdev->max_1m_fmrs = dev_attr->max_mr ? + min_t(unsigned int, (dev_attr->max_mr / 2), + rds_ib_fmr_1m_pool_size) : rds_ib_fmr_1m_pool_size; + + rds_ibdev->max_8k_fmrs = dev_attr->max_mr ? 
+ min_t(unsigned int, ((dev_attr->max_mr / 2) * RDS_MR_8K_SCALE), + rds_ib_fmr_8k_pool_size) : rds_ib_fmr_8k_pool_size; rds_ibdev->max_initiator_depth = dev_attr->max_qp_init_rd_atom; rds_ibdev->max_responder_resources = dev_attr->max_qp_rd_atom; @@ -162,12 +168,25 @@ static void rds_ib_add_one(struct ib_device *device) goto put_dev; } - rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev); - if (IS_ERR(rds_ibdev->mr_pool)) { - rds_ibdev->mr_pool = NULL; + rds_ibdev->mr_1m_pool = + rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_1M_POOL); + if (IS_ERR(rds_ibdev->mr_1m_pool)) { + rds_ibdev->mr_1m_pool = NULL; goto put_dev; } + rds_ibdev->mr_8k_pool = + rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_8K_POOL); + if (IS_ERR(rds_ibdev->mr_8k_pool)) { + rds_ibdev->mr_8k_pool = NULL; + goto put_dev; + } + + rdsdebug("RDS/IB: max_mr = %d, max_wrs = %d, max_sge = %d, fmr_max_remaps = %d, max_1m_fmrs = %d, max_8k_fmrs = %d\n", + dev_attr->max_fmr, rds_ibdev->max_wrs, rds_ibdev->max_sge, + rds_ibdev->fmr_max_remaps, rds_ibdev->max_1m_fmrs, + rds_ibdev->max_8k_fmrs); + INIT_LIST_HEAD(&rds_ibdev->ipaddr_list); INIT_LIST_HEAD(&rds_ibdev->conn_list); diff --git a/net/rds/ib.h b/net/rds/ib.h index aae60fda77f6..f17d09567890 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h @@ -9,8 +9,11 @@ #include "rds.h" #include "rdma_transport.h" -#define RDS_FMR_SIZE 256 -#define RDS_FMR_POOL_SIZE 8192 +#define RDS_FMR_1M_POOL_SIZE (8192 / 2) +#define RDS_FMR_1M_MSG_SIZE 256 +#define RDS_FMR_8K_MSG_SIZE 2 +#define RDS_MR_8K_SCALE (256 / (RDS_FMR_8K_MSG_SIZE + 1)) +#define RDS_FMR_8K_POOL_SIZE (RDS_MR_8K_SCALE * (8192 / 2)) #define RDS_IB_MAX_SGE 8 #define RDS_IB_RECV_SGE 2 @@ -24,6 +27,9 @@ #define RDS_IB_RECYCLE_BATCH_COUNT 32 +#define RDS_IB_WC_MAX 32 +#define RDS_IB_SEND_OP BIT_ULL(63) + extern struct rw_semaphore rds_ib_devices_lock; extern struct list_head rds_ib_devices; @@ -89,6 +95,20 @@ struct rds_ib_work_ring { atomic_t w_free_ctr; }; +/* Rings are posted with all the allocations they'll need to queue the + * incoming message to the receiving socket so this can't fail. + * All fragments start with a header, so we can make sure we're not receiving + * garbage, and we can tell a small 8 byte fragment from an ACK frame. 
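The new constants split the old single FMR pool into a 1M pool and an 8K pool sized so that both pin comparable resources; RDS_MR_8K_SCALE compensates for how many more small MRs fit per large one. Working the arithmetic through (a sanity check, not from the patch):

#include <assert.h>

#define RDS_FMR_1M_POOL_SIZE	(8192 / 2)
#define RDS_FMR_8K_MSG_SIZE	2
#define RDS_MR_8K_SCALE		(256 / (RDS_FMR_8K_MSG_SIZE + 1))
#define RDS_FMR_8K_POOL_SIZE	(RDS_MR_8K_SCALE * (8192 / 2))

static_assert(RDS_FMR_1M_POOL_SIZE == 4096, "half the old 8192 budget");
static_assert(RDS_MR_8K_SCALE == 85, "256 / 3 truncates to 85");
static_assert(RDS_FMR_8K_POOL_SIZE == 348160, "85 * 4096 entries");

Each pool's fmr_attr.max_pages later adds one page to the message size to allow for unaligned MRs, matching the old "+1" on fmr_message_size.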
+ */ +struct rds_ib_ack_state { + u64 ack_next; + u64 ack_recv; + unsigned int ack_required:1; + unsigned int ack_next_valid:1; + unsigned int ack_recv_valid:1; +}; + + struct rds_ib_device; struct rds_ib_connection { @@ -102,6 +122,12 @@ struct rds_ib_connection { struct ib_pd *i_pd; struct ib_cq *i_send_cq; struct ib_cq *i_recv_cq; + struct ib_wc i_send_wc[RDS_IB_WC_MAX]; + struct ib_wc i_recv_wc[RDS_IB_WC_MAX]; + + /* interrupt handling */ + struct tasklet_struct i_send_tasklet; + struct tasklet_struct i_recv_tasklet; /* tx */ struct rds_ib_work_ring i_send_ring; @@ -112,7 +138,6 @@ struct rds_ib_connection { atomic_t i_signaled_sends; /* rx */ - struct tasklet_struct i_recv_tasklet; struct mutex i_recv_mutex; struct rds_ib_work_ring i_recv_ring; struct rds_ib_incoming *i_ibinc; @@ -164,6 +189,12 @@ struct rds_ib_connection { struct rds_ib_ipaddr { struct list_head list; __be32 ipaddr; + struct rcu_head rcu; +}; + +enum { + RDS_IB_MR_8K_POOL, + RDS_IB_MR_1M_POOL, }; struct rds_ib_device { @@ -172,9 +203,12 @@ struct rds_ib_device { struct list_head conn_list; struct ib_device *dev; struct ib_pd *pd; - struct rds_ib_mr_pool *mr_pool; - unsigned int fmr_max_remaps; unsigned int max_fmrs; + struct rds_ib_mr_pool *mr_1m_pool; + struct rds_ib_mr_pool *mr_8k_pool; + unsigned int fmr_max_remaps; + unsigned int max_8k_fmrs; + unsigned int max_1m_fmrs; int max_sge; unsigned int max_wrs; unsigned int max_initiator_depth; @@ -197,14 +231,14 @@ struct rds_ib_device { struct rds_ib_statistics { uint64_t s_ib_connect_raced; uint64_t s_ib_listen_closed_stale; - uint64_t s_ib_tx_cq_call; + uint64_t s_ib_evt_handler_call; + uint64_t s_ib_tasklet_call; uint64_t s_ib_tx_cq_event; uint64_t s_ib_tx_ring_full; uint64_t s_ib_tx_throttle; uint64_t s_ib_tx_sg_mapping_failure; uint64_t s_ib_tx_stalled; uint64_t s_ib_tx_credit_updates; - uint64_t s_ib_rx_cq_call; uint64_t s_ib_rx_cq_event; uint64_t s_ib_rx_ring_empty; uint64_t s_ib_rx_refill_from_cq; @@ -216,12 +250,18 @@ struct rds_ib_statistics { uint64_t s_ib_ack_send_delayed; uint64_t s_ib_ack_send_piggybacked; uint64_t s_ib_ack_received; - uint64_t s_ib_rdma_mr_alloc; - uint64_t s_ib_rdma_mr_free; - uint64_t s_ib_rdma_mr_used; - uint64_t s_ib_rdma_mr_pool_flush; - uint64_t s_ib_rdma_mr_pool_wait; - uint64_t s_ib_rdma_mr_pool_depleted; + uint64_t s_ib_rdma_mr_8k_alloc; + uint64_t s_ib_rdma_mr_8k_free; + uint64_t s_ib_rdma_mr_8k_used; + uint64_t s_ib_rdma_mr_8k_pool_flush; + uint64_t s_ib_rdma_mr_8k_pool_wait; + uint64_t s_ib_rdma_mr_8k_pool_depleted; + uint64_t s_ib_rdma_mr_1m_alloc; + uint64_t s_ib_rdma_mr_1m_free; + uint64_t s_ib_rdma_mr_1m_used; + uint64_t s_ib_rdma_mr_1m_pool_flush; + uint64_t s_ib_rdma_mr_1m_pool_wait; + uint64_t s_ib_rdma_mr_1m_pool_depleted; uint64_t s_ib_atomic_cswp; uint64_t s_ib_atomic_fadd; }; @@ -273,7 +313,8 @@ struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device); void rds_ib_dev_put(struct rds_ib_device *rds_ibdev); extern struct ib_client rds_ib_client; -extern unsigned int fmr_message_size; +extern unsigned int rds_ib_fmr_1m_pool_size; +extern unsigned int rds_ib_fmr_8k_pool_size; extern unsigned int rds_ib_retry_count; extern spinlock_t ib_nodev_conns_lock; @@ -303,7 +344,8 @@ int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr); void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn); void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn); void rds_ib_destroy_nodev_conns(void); -struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct 
rds_ib_device *); +struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_dev, + int npages); void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo); void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *); void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, @@ -323,7 +365,8 @@ void rds_ib_recv_free_caches(struct rds_ib_connection *ic); void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp); void rds_ib_inc_free(struct rds_incoming *inc); int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to); -void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context); +void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc, + struct rds_ib_ack_state *state); void rds_ib_recv_tasklet_fn(unsigned long data); void rds_ib_recv_init_ring(struct rds_ib_connection *ic); void rds_ib_recv_clear_ring(struct rds_ib_connection *ic); @@ -331,6 +374,7 @@ void rds_ib_recv_init_ack(struct rds_ib_connection *ic); void rds_ib_attempt_ack(struct rds_ib_connection *ic); void rds_ib_ack_send_complete(struct rds_ib_connection *ic); u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic); +void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required); /* ib_ring.c */ void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr); @@ -348,7 +392,7 @@ extern wait_queue_head_t rds_ib_ring_empty_wait; void rds_ib_xmit_complete(struct rds_connection *conn); int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, unsigned int hdr_off, unsigned int sg, unsigned int off); -void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context); +void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc); void rds_ib_send_init_ring(struct rds_ib_connection *ic); void rds_ib_send_clear_ring(struct rds_ib_connection *ic); int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op); diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 9043f5c04787..2b2370e7f356 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -216,6 +216,96 @@ static void rds_ib_cq_event_handler(struct ib_event *event, void *data) event->event, ib_event_msg(event->event), data); } +/* Plucking the oldest entry from the ring can be done concurrently with + * the thread refilling the ring. Each ring operation is protected by + * spinlocks and the transient state of refilling doesn't change the + * recording of which entry is oldest. + * + * This relies on IB only calling one cq comp_handler for each cq so that + * there will only be one caller of rds_recv_incoming() per RDS connection. 
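Since send and receive completions now drain through one shared poll loop, they must be distinguishable from the wr_id alone; that is what the RDS_IB_SEND_OP marker (bit 63, defined in ib.h above) provides. A sketch of both halves of the convention; the posting side lives in ib_send.c, which is outside this excerpt, so its exact form is assumed here:

/* Posting: fold the marker into the completion cookie. */
send->s_wr.wr_id = pos | RDS_IB_SEND_OP;

/* Polling: route the completion by testing the marker. */
if (wc->wr_id & RDS_IB_SEND_OP)
	rds_ib_send_cqe_handler(ic, wc);
else
	rds_ib_recv_cqe_handler(ic, wc, &ack_state);

Ring indices stay far below 2^63, so the tag can never collide with a receive cookie.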
+ */ +static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context) +{ + struct rds_connection *conn = context; + struct rds_ib_connection *ic = conn->c_transport_data; + + rdsdebug("conn %p cq %p\n", conn, cq); + + rds_ib_stats_inc(s_ib_evt_handler_call); + + tasklet_schedule(&ic->i_recv_tasklet); +} + +static void poll_cq(struct rds_ib_connection *ic, struct ib_cq *cq, + struct ib_wc *wcs, + struct rds_ib_ack_state *ack_state) +{ + int nr; + int i; + struct ib_wc *wc; + + while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) { + for (i = 0; i < nr; i++) { + wc = wcs + i; + rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n", + (unsigned long long)wc->wr_id, wc->status, + wc->byte_len, be32_to_cpu(wc->ex.imm_data)); + + if (wc->wr_id & RDS_IB_SEND_OP) + rds_ib_send_cqe_handler(ic, wc); + else + rds_ib_recv_cqe_handler(ic, wc, ack_state); + } + } +} + +static void rds_ib_tasklet_fn_send(unsigned long data) +{ + struct rds_ib_connection *ic = (struct rds_ib_connection *)data; + struct rds_connection *conn = ic->conn; + struct rds_ib_ack_state state; + + rds_ib_stats_inc(s_ib_tasklet_call); + + memset(&state, 0, sizeof(state)); + poll_cq(ic, ic->i_send_cq, ic->i_send_wc, &state); + ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP); + poll_cq(ic, ic->i_send_cq, ic->i_send_wc, &state); + + if (rds_conn_up(conn) && + (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) || + test_bit(0, &conn->c_map_queued))) + rds_send_xmit(ic->conn); +} + +static void rds_ib_tasklet_fn_recv(unsigned long data) +{ + struct rds_ib_connection *ic = (struct rds_ib_connection *)data; + struct rds_connection *conn = ic->conn; + struct rds_ib_device *rds_ibdev = ic->rds_ibdev; + struct rds_ib_ack_state state; + + if (!rds_ibdev) + rds_conn_drop(conn); + + rds_ib_stats_inc(s_ib_tasklet_call); + + memset(&state, 0, sizeof(state)); + poll_cq(ic, ic->i_recv_cq, ic->i_recv_wc, &state); + ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED); + poll_cq(ic, ic->i_recv_cq, ic->i_recv_wc, &state); + + if (state.ack_next_valid) + rds_ib_set_ack(ic, state.ack_next, state.ack_required); + if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) { + rds_send_drop_acked(conn, state.ack_recv, NULL); + ic->i_ack_recv = state.ack_recv; + } + + if (rds_conn_up(conn)) + rds_ib_attempt_ack(ic); +} + static void rds_ib_qp_event_handler(struct ib_event *event, void *data) { struct rds_connection *conn = data; @@ -238,6 +328,18 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data) } } +static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context) +{ + struct rds_connection *conn = context; + struct rds_ib_connection *ic = conn->c_transport_data; + + rdsdebug("conn %p cq %p\n", conn, cq); + + rds_ib_stats_inc(s_ib_evt_handler_call); + + tasklet_schedule(&ic->i_send_tasklet); +} + /* * This needs to be very careful to not leave IS_ERR pointers around for * cleanup to trip over. 
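Note the poll ordering in the tasklets above: drain, re-arm, then drain again. The second poll closes the window in which a completion could land after the queue looked empty but before ib_req_notify_cq() re-armed the interrupt, in which case no further event would fire. Sketched as a helper (drain_cq is a hypothetical name; the tasklets open-code it):

static void drain_cq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs, struct rds_ib_ack_state *state)
{
	poll_cq(ic, cq, wcs, state);		/* 1: drain pending CQEs */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);	/* 2: re-arm the CQ      */
	poll_cq(ic, cq, wcs, state);		/* 3: catch CQEs that    */
						/*    raced with step 2  */
}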
@@ -271,7 +373,8 @@ static int rds_ib_setup_qp(struct rds_connection *conn) ic->i_pd = rds_ibdev->pd; cq_attr.cqe = ic->i_send_ring.w_nr + 1; - ic->i_send_cq = ib_create_cq(dev, rds_ib_send_cq_comp_handler, + + ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send, rds_ib_cq_event_handler, conn, &cq_attr); if (IS_ERR(ic->i_send_cq)) { @@ -282,7 +385,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) } cq_attr.cqe = ic->i_recv_ring.w_nr; - ic->i_recv_cq = ib_create_cq(dev, rds_ib_recv_cq_comp_handler, + ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv, rds_ib_cq_event_handler, conn, &cq_attr); if (IS_ERR(ic->i_recv_cq)) { @@ -637,6 +740,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn) wait_event(rds_ib_ring_empty_wait, rds_ib_ring_empty(&ic->i_recv_ring) && (atomic_read(&ic->i_signaled_sends) == 0)); + tasklet_kill(&ic->i_send_tasklet); tasklet_kill(&ic->i_recv_tasklet); /* first destroy the ib state that generates callbacks */ @@ -743,8 +847,10 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp) } INIT_LIST_HEAD(&ic->ib_node); - tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn, - (unsigned long) ic); + tasklet_init(&ic->i_send_tasklet, rds_ib_tasklet_fn_send, + (unsigned long)ic); + tasklet_init(&ic->i_recv_tasklet, rds_ib_tasklet_fn_recv, + (unsigned long)ic); mutex_init(&ic->i_recv_mutex); #ifndef KERNEL_HAS_ATOMIC64 spin_lock_init(&ic->i_ack_lock); diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index 251d1ce0b7c7..a2340748ec86 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c @@ -65,6 +65,7 @@ struct rds_ib_mr { * Our own little FMR pool */ struct rds_ib_mr_pool { + unsigned int pool_type; struct mutex flush_lock; /* serialize fmr invalidate */ struct delayed_work flush_worker; /* flush worker */ @@ -83,7 +84,7 @@ struct rds_ib_mr_pool { struct ib_fmr_attr fmr_attr; }; -struct workqueue_struct *rds_ib_fmr_wq; +static struct workqueue_struct *rds_ib_fmr_wq; int rds_ib_fmr_init(void) { @@ -159,10 +160,8 @@ static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr) } spin_unlock_irq(&rds_ibdev->spinlock); - if (to_free) { - synchronize_rcu(); - kfree(to_free); - } + if (to_free) + kfree_rcu(to_free, rcu); } int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr) @@ -236,7 +235,8 @@ void rds_ib_destroy_nodev_conns(void) rds_conn_destroy(ic->conn); } -struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev) +struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev, + int pool_type) { struct rds_ib_mr_pool *pool; @@ -244,6 +244,7 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev) if (!pool) return ERR_PTR(-ENOMEM); + pool->pool_type = pool_type; init_llist_head(&pool->free_list); init_llist_head(&pool->drop_list); init_llist_head(&pool->clean_list); @@ -251,28 +252,30 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev) init_waitqueue_head(&pool->flush_wait); INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker); - pool->fmr_attr.max_pages = fmr_message_size; + if (pool_type == RDS_IB_MR_1M_POOL) { + /* +1 allows for unaligned MRs */ + pool->fmr_attr.max_pages = RDS_FMR_1M_MSG_SIZE + 1; + pool->max_items = RDS_FMR_1M_POOL_SIZE; + } else { + /* pool_type == RDS_IB_MR_8K_POOL */ + pool->fmr_attr.max_pages = RDS_FMR_8K_MSG_SIZE + 1; + pool->max_items = RDS_FMR_8K_POOL_SIZE; + } + + pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4; pool->fmr_attr.max_maps 
= rds_ibdev->fmr_max_remaps; pool->fmr_attr.page_shift = PAGE_SHIFT; - pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4; - - /* We never allow more than max_items MRs to be allocated. - * When we exceed more than max_items_soft, we start freeing - * items more aggressively. - * Make sure that max_items > max_items_soft > max_items / 2 - */ pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4; - pool->max_items = rds_ibdev->max_fmrs; return pool; } void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo) { - struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; + struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool; - iinfo->rdma_mr_max = pool->max_items; - iinfo->rdma_mr_size = pool->fmr_attr.max_pages; + iinfo->rdma_mr_max = pool_1m->max_items; + iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages; } void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool) @@ -314,14 +317,28 @@ static inline void wait_clean_list_grace(void) } } -static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev) +static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, + int npages) { - struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; + struct rds_ib_mr_pool *pool; struct rds_ib_mr *ibmr = NULL; int err = 0, iter = 0; + if (npages <= RDS_FMR_8K_MSG_SIZE) + pool = rds_ibdev->mr_8k_pool; + else + pool = rds_ibdev->mr_1m_pool; + if (atomic_read(&pool->dirty_count) >= pool->max_items / 10) - schedule_delayed_work(&pool->flush_worker, 10); + queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10); + + /* Switch pools if one of the pool is reaching upper limit */ + if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) { + if (pool->pool_type == RDS_IB_MR_8K_POOL) + pool = rds_ibdev->mr_1m_pool; + else + pool = rds_ibdev->mr_8k_pool; + } while (1) { ibmr = rds_ib_reuse_fmr(pool); @@ -343,12 +360,18 @@ static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev) atomic_dec(&pool->item_count); if (++iter > 2) { - rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted); + if (pool->pool_type == RDS_IB_MR_8K_POOL) + rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted); + else + rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted); return ERR_PTR(-EAGAIN); } /* We do have some empty MRs. Flush them out. 
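rds_ib_alloc_fmr() above has two pressure thresholds. Plugging in the 8K pool's max_items of 348160 as a worked example (the numbers are illustrative):

/* dirty_count >= max_items / 10 (34816): kick the flush worker. */
if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
	queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);

/* dirty_count >= max_items * 9 / 10 (313344): allocate from the
 * sibling pool instead until the flush catches up.
 */
if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10)
	pool = (pool->pool_type == RDS_IB_MR_8K_POOL) ?
		rds_ibdev->mr_1m_pool : rds_ibdev->mr_8k_pool;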
*/ - rds_ib_stats_inc(s_ib_rdma_mr_pool_wait); + if (pool->pool_type == RDS_IB_MR_8K_POOL) + rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait); + else + rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait); rds_ib_flush_mr_pool(pool, 0, &ibmr); if (ibmr) return ibmr; @@ -373,7 +396,12 @@ static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev) goto out_no_cigar; } - rds_ib_stats_inc(s_ib_rdma_mr_alloc); + ibmr->pool = pool; + if (pool->pool_type == RDS_IB_MR_8K_POOL) + rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc); + else + rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc); + return ibmr; out_no_cigar: @@ -429,7 +457,7 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm } page_cnt += len >> PAGE_SHIFT; - if (page_cnt > fmr_message_size) + if (page_cnt > ibmr->pool->fmr_attr.max_pages) return -EINVAL; dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC, @@ -461,7 +489,10 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm ibmr->sg_dma_len = sg_dma_len; ibmr->remap_count++; - rds_ib_stats_inc(s_ib_rdma_mr_used); + if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL) + rds_ib_stats_inc(s_ib_rdma_mr_8k_used); + else + rds_ib_stats_inc(s_ib_rdma_mr_1m_used); ret = 0; out: @@ -524,8 +555,7 @@ static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr) __rds_ib_teardown_mr(ibmr); if (pinned) { - struct rds_ib_device *rds_ibdev = ibmr->device; - struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; + struct rds_ib_mr_pool *pool = ibmr->pool; atomic_sub(pinned, &pool->free_pinned); } @@ -594,7 +624,7 @@ static void list_to_llist_nodes(struct rds_ib_mr_pool *pool, * to free as many MRs as needed to get back to this limit. */ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, - int free_all, struct rds_ib_mr **ibmr_ret) + int free_all, struct rds_ib_mr **ibmr_ret) { struct rds_ib_mr *ibmr, *next; struct llist_node *clean_nodes; @@ -605,11 +635,14 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, unsigned int nfreed = 0, dirty_to_clean = 0, free_goal; int ret = 0; - rds_ib_stats_inc(s_ib_rdma_mr_pool_flush); + if (pool->pool_type == RDS_IB_MR_8K_POOL) + rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush); + else + rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush); if (ibmr_ret) { DEFINE_WAIT(wait); - while(!mutex_trylock(&pool->flush_lock)) { + while (!mutex_trylock(&pool->flush_lock)) { ibmr = rds_ib_reuse_fmr(pool); if (ibmr) { *ibmr_ret = ibmr; @@ -666,8 +699,12 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) { unpinned += ibmr->sg_len; __rds_ib_teardown_mr(ibmr); - if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) { - rds_ib_stats_inc(s_ib_rdma_mr_free); + if (nfreed < free_goal || + ibmr->remap_count >= pool->fmr_attr.max_maps) { + if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL) + rds_ib_stats_inc(s_ib_rdma_mr_8k_free); + else + rds_ib_stats_inc(s_ib_rdma_mr_1m_free); list_del(&ibmr->unmap_list); ib_dealloc_fmr(ibmr->fmr); kfree(ibmr); @@ -719,8 +756,8 @@ static void rds_ib_mr_pool_flush_worker(struct work_struct *work) void rds_ib_free_mr(void *trans_private, int invalidate) { struct rds_ib_mr *ibmr = trans_private; + struct rds_ib_mr_pool *pool = ibmr->pool; struct rds_ib_device *rds_ibdev = ibmr->device; - struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len); @@ -759,10 +796,11 @@ void rds_ib_flush_mrs(void) down_read(&rds_ib_devices_lock); list_for_each_entry(rds_ibdev, &rds_ib_devices, 
list) { - struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; + if (rds_ibdev->mr_8k_pool) + rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL); - if (pool) - rds_ib_flush_mr_pool(pool, 0, NULL); + if (rds_ibdev->mr_1m_pool) + rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL); } up_read(&rds_ib_devices_lock); } @@ -780,12 +818,12 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, goto out; } - if (!rds_ibdev->mr_pool) { + if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) { ret = -ENODEV; goto out; } - ibmr = rds_ib_alloc_fmr(rds_ibdev); + ibmr = rds_ib_alloc_fmr(rds_ibdev, nents); if (IS_ERR(ibmr)) { rds_ib_dev_put(rds_ibdev); return ibmr; diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index f43831e4186a..96744b75db93 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c @@ -596,8 +596,7 @@ void rds_ib_recv_init_ack(struct rds_ib_connection *ic) * wr_id and avoids working with the ring in that case. */ #ifndef KERNEL_HAS_ATOMIC64 -static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, - int ack_required) +void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required) { unsigned long flags; @@ -622,8 +621,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic) return seq; } #else -static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, - int ack_required) +void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required) { atomic64_set(&ic->i_ack_next, seq); if (ack_required) { @@ -830,20 +828,6 @@ static void rds_ib_cong_recv(struct rds_connection *conn, rds_cong_map_updated(map, uncongested); } -/* - * Rings are posted with all the allocations they'll need to queue the - * incoming message to the receiving socket so this can't fail. - * All fragments start with a header, so we can make sure we're not receiving - * garbage, and we can tell a small 8 byte fragment from an ACK frame. - */ -struct rds_ib_ack_state { - u64 ack_next; - u64 ack_recv; - unsigned int ack_required:1; - unsigned int ack_next_valid:1; - unsigned int ack_recv_valid:1; -}; - static void rds_ib_process_recv(struct rds_connection *conn, struct rds_ib_recv_work *recv, u32 data_len, struct rds_ib_ack_state *state) @@ -969,96 +953,50 @@ static void rds_ib_process_recv(struct rds_connection *conn, } } -/* - * Plucking the oldest entry from the ring can be done concurrently with - * the thread refilling the ring. Each ring operation is protected by - * spinlocks and the transient state of refilling doesn't change the - * recording of which entry is oldest. - * - * This relies on IB only calling one cq comp_handler for each cq so that - * there will only be one caller of rds_recv_incoming() per RDS connection. 
- */ -void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context) -{ - struct rds_connection *conn = context; - struct rds_ib_connection *ic = conn->c_transport_data; - - rdsdebug("conn %p cq %p\n", conn, cq); - - rds_ib_stats_inc(s_ib_rx_cq_call); - - tasklet_schedule(&ic->i_recv_tasklet); -} - -static inline void rds_poll_cq(struct rds_ib_connection *ic, - struct rds_ib_ack_state *state) +void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, + struct ib_wc *wc, + struct rds_ib_ack_state *state) { struct rds_connection *conn = ic->conn; - struct ib_wc wc; struct rds_ib_recv_work *recv; - while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) { - rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n", - (unsigned long long)wc.wr_id, wc.status, - ib_wc_status_msg(wc.status), wc.byte_len, - be32_to_cpu(wc.ex.imm_data)); - rds_ib_stats_inc(s_ib_rx_cq_event); + rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n", + (unsigned long long)wc->wr_id, wc->status, + ib_wc_status_msg(wc->status), wc->byte_len, + be32_to_cpu(wc->ex.imm_data)); - recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)]; - - ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE); - - /* - * Also process recvs in connecting state because it is possible - * to get a recv completion _before_ the rdmacm ESTABLISHED - * event is processed. - */ - if (wc.status == IB_WC_SUCCESS) { - rds_ib_process_recv(conn, recv, wc.byte_len, state); - } else { - /* We expect errors as the qp is drained during shutdown */ - if (rds_conn_up(conn) || rds_conn_connecting(conn)) - rds_ib_conn_error(conn, "recv completion on %pI4 had " - "status %u (%s), disconnecting and " - "reconnecting\n", &conn->c_faddr, - wc.status, - ib_wc_status_msg(wc.status)); - } + rds_ib_stats_inc(s_ib_rx_cq_event); + recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)]; + ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, + DMA_FROM_DEVICE); - /* - * rds_ib_process_recv() doesn't always consume the frag, and - * we might not have called it at all if the wc didn't indicate - * success. We already unmapped the frag's pages, though, and - * the following rds_ib_ring_free() call tells the refill path - * that it will not find an allocated frag here. Make sure we - * keep that promise by freeing a frag that's still on the ring. - */ - if (recv->r_frag) { - rds_ib_frag_free(ic, recv->r_frag); - recv->r_frag = NULL; - } - rds_ib_ring_free(&ic->i_recv_ring, 1); + /* Also process recvs in connecting state because it is possible + * to get a recv completion _before_ the rdmacm ESTABLISHED + * event is processed. 
+ */ + if (wc->status == IB_WC_SUCCESS) { + rds_ib_process_recv(conn, recv, wc->byte_len, state); + } else { + /* We expect errors as the qp is drained during shutdown */ + if (rds_conn_up(conn) || rds_conn_connecting(conn)) + rds_ib_conn_error(conn, "recv completion on %pI4 had status %u (%s), disconnecting and reconnecting\n", + &conn->c_faddr, + wc->status, + ib_wc_status_msg(wc->status)); } -} -void rds_ib_recv_tasklet_fn(unsigned long data) -{ - struct rds_ib_connection *ic = (struct rds_ib_connection *) data; - struct rds_connection *conn = ic->conn; - struct rds_ib_ack_state state = { 0, }; - - rds_poll_cq(ic, &state); - ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED); - rds_poll_cq(ic, &state); - - if (state.ack_next_valid) - rds_ib_set_ack(ic, state.ack_next, state.ack_required); - if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) { - rds_send_drop_acked(conn, state.ack_recv, NULL); - ic->i_ack_recv = state.ack_recv; + /* rds_ib_process_recv() doesn't always consume the frag, and + * we might not have called it at all if the wc didn't indicate + * success. We already unmapped the frag's pages, though, and + * the following rds_ib_ring_free() call tells the refill path + * that it will not find an allocated frag here. Make sure we + * keep that promise by freeing a frag that's still on the ring. + */ + if (recv->r_frag) { + rds_ib_frag_free(ic, recv->r_frag); + recv->r_frag = NULL; } - if (rds_conn_up(conn)) - rds_ib_attempt_ack(ic); + rds_ib_ring_free(&ic->i_recv_ring, 1); /* If we ever end up with a really empty receive ring, we're * in deep trouble, as the sender will definitely see RNR diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index 4e88047086b6..670882c752e9 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c @@ -195,7 +195,7 @@ void rds_ib_send_init_ring(struct rds_ib_connection *ic) send->s_op = NULL; - send->s_wr.wr_id = i; + send->s_wr.wr_id = i | RDS_IB_SEND_OP; send->s_wr.sg_list = send->s_sge; send->s_wr.ex.imm_data = 0; @@ -237,81 +237,73 @@ static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr) * unallocs the next free entry in the ring it doesn't alter which is * the next to be freed, which is what this is concerned with. 
*/ -void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context) +void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc) { - struct rds_connection *conn = context; - struct rds_ib_connection *ic = conn->c_transport_data; struct rds_message *rm = NULL; - struct ib_wc wc; + struct rds_connection *conn = ic->conn; struct rds_ib_send_work *send; u32 completed; u32 oldest; u32 i = 0; - int ret; int nr_sig = 0; - rdsdebug("cq %p conn %p\n", cq, conn); - rds_ib_stats_inc(s_ib_tx_cq_call); - ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); - if (ret) - rdsdebug("ib_req_notify_cq send failed: %d\n", ret); - - while (ib_poll_cq(cq, 1, &wc) > 0) { - rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n", - (unsigned long long)wc.wr_id, wc.status, - ib_wc_status_msg(wc.status), wc.byte_len, - be32_to_cpu(wc.ex.imm_data)); - rds_ib_stats_inc(s_ib_tx_cq_event); - - if (wc.wr_id == RDS_IB_ACK_WR_ID) { - if (time_after(jiffies, ic->i_ack_queued + HZ/2)) - rds_ib_stats_inc(s_ib_tx_stalled); - rds_ib_ack_send_complete(ic); - continue; - } - oldest = rds_ib_ring_oldest(&ic->i_send_ring); + rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n", + (unsigned long long)wc->wr_id, wc->status, + ib_wc_status_msg(wc->status), wc->byte_len, + be32_to_cpu(wc->ex.imm_data)); + rds_ib_stats_inc(s_ib_tx_cq_event); - completed = rds_ib_ring_completed(&ic->i_send_ring, wc.wr_id, oldest); + if (wc->wr_id == RDS_IB_ACK_WR_ID) { + if (time_after(jiffies, ic->i_ack_queued + HZ / 2)) + rds_ib_stats_inc(s_ib_tx_stalled); + rds_ib_ack_send_complete(ic); + return; + } - for (i = 0; i < completed; i++) { - send = &ic->i_sends[oldest]; - if (send->s_wr.send_flags & IB_SEND_SIGNALED) - nr_sig++; + oldest = rds_ib_ring_oldest(&ic->i_send_ring); - rm = rds_ib_send_unmap_op(ic, send, wc.status); + completed = rds_ib_ring_completed(&ic->i_send_ring, + (wc->wr_id & ~RDS_IB_SEND_OP), + oldest); - if (time_after(jiffies, send->s_queued + HZ/2)) - rds_ib_stats_inc(s_ib_tx_stalled); + for (i = 0; i < completed; i++) { + send = &ic->i_sends[oldest]; + if (send->s_wr.send_flags & IB_SEND_SIGNALED) + nr_sig++; - if (send->s_op) { - if (send->s_op == rm->m_final_op) { - /* If anyone waited for this message to get flushed out, wake - * them up now */ - rds_message_unmapped(rm); - } - rds_message_put(rm); - send->s_op = NULL; - } + rm = rds_ib_send_unmap_op(ic, send, wc->status); - oldest = (oldest + 1) % ic->i_send_ring.w_nr; - } + if (time_after(jiffies, send->s_queued + HZ / 2)) + rds_ib_stats_inc(s_ib_tx_stalled); - rds_ib_ring_free(&ic->i_send_ring, completed); - rds_ib_sub_signaled(ic, nr_sig); - nr_sig = 0; - - if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) || - test_bit(0, &conn->c_map_queued)) - queue_delayed_work(rds_wq, &conn->c_send_w, 0); - - /* We expect errors as the qp is drained during shutdown */ - if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) { - rds_ib_conn_error(conn, "send completion on %pI4 had status " - "%u (%s), disconnecting and reconnecting\n", - &conn->c_faddr, wc.status, - ib_wc_status_msg(wc.status)); + if (send->s_op) { + if (send->s_op == rm->m_final_op) { + /* If anyone waited for this message to get + * flushed out, wake them up now + */ + rds_message_unmapped(rm); + } + rds_message_put(rm); + send->s_op = NULL; } + + oldest = (oldest + 1) % ic->i_send_ring.w_nr; + } + + rds_ib_ring_free(&ic->i_send_ring, completed); + rds_ib_sub_signaled(ic, nr_sig); + nr_sig = 0; + + if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) || + test_bit(0, 
&conn->c_map_queued)) + queue_delayed_work(rds_wq, &conn->c_send_w, 0); + + /* We expect errors as the qp is drained during shutdown */ + if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) { + rds_ib_conn_error(conn, "send completion on %pI4 had status %u (%s), disconnecting and reconnecting\n", + &conn->c_faddr, wc->status, + ib_wc_status_msg(wc->status)); } } diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c index 2d5965d6e97c..d77e04473056 100644 --- a/net/rds/ib_stats.c +++ b/net/rds/ib_stats.c @@ -42,14 +42,14 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats); static const char *const rds_ib_stat_names[] = { "ib_connect_raced", "ib_listen_closed_stale", - "ib_tx_cq_call", + "s_ib_evt_handler_call", + "ib_tasklet_call", "ib_tx_cq_event", "ib_tx_ring_full", "ib_tx_throttle", "ib_tx_sg_mapping_failure", "ib_tx_stalled", "ib_tx_credit_updates", - "ib_rx_cq_call", "ib_rx_cq_event", "ib_rx_ring_empty", "ib_rx_refill_from_cq", @@ -61,12 +61,18 @@ static const char *const rds_ib_stat_names[] = { "ib_ack_send_delayed", "ib_ack_send_piggybacked", "ib_ack_received", - "ib_rdma_mr_alloc", - "ib_rdma_mr_free", - "ib_rdma_mr_used", - "ib_rdma_mr_pool_flush", - "ib_rdma_mr_pool_wait", - "ib_rdma_mr_pool_depleted", + "ib_rdma_mr_8k_alloc", + "ib_rdma_mr_8k_free", + "ib_rdma_mr_8k_used", + "ib_rdma_mr_8k_pool_flush", + "ib_rdma_mr_8k_pool_wait", + "ib_rdma_mr_8k_pool_depleted", + "ib_rdma_mr_1m_alloc", + "ib_rdma_mr_1m_free", + "ib_rdma_mr_1m_used", + "ib_rdma_mr_1m_pool_flush", + "ib_rdma_mr_1m_pool_wait", + "ib_rdma_mr_1m_pool_depleted", "ib_atomic_cswp", "ib_atomic_fadd", }; diff --git a/net/rds/rds.h b/net/rds/rds.h index afb4048d0cfd..543c308fcc2a 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -86,7 +86,9 @@ struct rds_connection { struct hlist_node c_hash_node; __be32 c_laddr; __be32 c_faddr; - unsigned int c_loopback:1; + unsigned int c_loopback:1, + c_outgoing:1, + c_pad_to_32:30; struct rds_connection *c_passive; struct rds_cong_map *c_lcong; @@ -603,6 +605,7 @@ extern wait_queue_head_t rds_poll_waitq; int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len); void rds_remove_bound(struct rds_sock *rs); struct rds_sock *rds_find_bound(__be32 addr, __be16 port); +void rds_bind_lock_init(void); /* cong.c */ int rds_cong_get_maps(struct rds_connection *conn); diff --git a/net/rds/send.c b/net/rds/send.c index 4df61a515b83..ee49c2556f47 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -38,6 +38,7 @@ #include <linux/list.h> #include <linux/ratelimit.h> #include <linux/export.h> +#include <linux/sizes.h> #include "rds.h" @@ -51,7 +52,7 @@ * it to 0 will restore the old behavior (where we looped until we had * drained the queue). 
*/ -static int send_batch_count = 64; +static int send_batch_count = SZ_1K; module_param(send_batch_count, int, 0444); MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue"); @@ -223,7 +224,7 @@ restart: * through a lot of messages, lets back off and see * if anyone else jumps in */ - if (batch_count >= 1024) + if (batch_count >= send_batch_count) goto over_batch; spin_lock_irqsave(&conn->c_lock, flags); @@ -423,12 +424,15 @@ over_batch: !list_empty(&conn->c_send_queue)) && send_gen == conn->c_send_gen) { rds_stats_inc(s_send_lock_queue_raced); - goto restart; + if (batch_count < send_batch_count) + goto restart; + queue_delayed_work(rds_wq, &conn->c_send_w, 1); } } out: return ret; } +EXPORT_SYMBOL_GPL(rds_send_xmit); static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm) { @@ -1120,8 +1124,9 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) */ rds_stats_inc(s_send_queued); - if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags)) - rds_send_xmit(conn); + ret = rds_send_xmit(conn); + if (ret == -ENOMEM || ret == -EAGAIN) + queue_delayed_work(rds_wq, &conn->c_send_w, 1); rds_message_put(rm); return payload_len; @@ -1177,8 +1182,9 @@ rds_send_pong(struct rds_connection *conn, __be16 dport) rds_stats_inc(s_send_queued); rds_stats_inc(s_send_pong); - if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags)) - queue_delayed_work(rds_wq, &conn->c_send_w, 0); + ret = rds_send_xmit(conn); + if (ret == -ENOMEM || ret == -EAGAIN) + queue_delayed_work(rds_wq, &conn->c_send_w, 1); rds_message_put(rm); return 0; diff --git a/net/rds/tcp.c b/net/rds/tcp.c index c42b60bf4c68..9d6ddbacd875 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -67,21 +67,13 @@ void rds_tcp_nonagle(struct socket *sock) set_fs(oldfs); } +/* All module specific customizations to the RDS-TCP socket should be done in + * rds_tcp_tune() and applied after socket creation. In general these + * customizations should be tunable via module_param() + */ void rds_tcp_tune(struct socket *sock) { - struct sock *sk = sock->sk; - rds_tcp_nonagle(sock); - - /* - * We're trying to saturate gigabit with the default, - * see svc_sock_setbufsize(). - */ - lock_sock(sk); - sk->sk_sndbuf = RDS_TCP_DEFAULT_BUFSIZE; - sk->sk_rcvbuf = RDS_TCP_DEFAULT_BUFSIZE; - sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK; - release_sock(sk); } u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc) diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 444d78d0bd77..0936a4a32b47 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@ -110,28 +110,27 @@ int rds_tcp_accept_one(struct socket *sock) goto out; } /* An incoming SYN request came in, and TCP just accepted it. - * We always create a new conn for listen side of TCP, and do not - * add it to the c_hash_list. * * If the client reboots, this conn will need to be cleaned up. 
* rds_tcp_state_change() will do that cleanup */ rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data; - WARN_ON(!rs_tcp || rs_tcp->t_sock); - - /* - * see the comment above rds_queue_delayed_reconnect() - */ - if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) { - if (rds_conn_state(conn) == RDS_CONN_UP) - rds_tcp_stats_inc(s_tcp_listen_closed_stale); - else - rds_tcp_stats_inc(s_tcp_connect_raced); - rds_conn_drop(conn); + if (rs_tcp->t_sock && + ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) { + struct sock *nsk = new_sock->sk; + + nsk->sk_user_data = NULL; + nsk->sk_prot->disconnect(nsk, 0); + tcp_done(nsk); + new_sock = NULL; ret = 0; goto out; + } else if (rs_tcp->t_sock) { + rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp); + conn->c_outgoing = 0; } + rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING); rds_tcp_set_callbacks(new_sock, conn); rds_connect_complete(conn); new_sock = NULL; diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c index 53b17ca0dff5..2894e6095e3b 100644 --- a/net/rds/tcp_send.c +++ b/net/rds/tcp_send.c @@ -83,6 +83,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, struct rds_tcp_connection *tc = conn->c_transport_data; int done = 0; int ret = 0; + int more; if (hdr_off == 0) { /* @@ -116,12 +117,15 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, goto out; } + more = rm->data.op_nents > 1 ? (MSG_MORE | MSG_SENDPAGE_NOTLAST) : 0; while (sg < rm->data.op_nents) { + int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more; + ret = tc->t_sock->ops->sendpage(tc->t_sock, sg_page(&rm->data.op_sg[sg]), rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off, - MSG_DONTWAIT|MSG_NOSIGNAL); + flags); rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]), rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off, ret); @@ -134,6 +138,8 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, off = 0; sg++; } + if (sg == rm->data.op_nents - 1) + more = 0; } out: diff --git a/net/rds/threads.c b/net/rds/threads.c index dc2402e871fd..454aa6d23327 100644 --- a/net/rds/threads.c +++ b/net/rds/threads.c @@ -162,7 +162,9 @@ void rds_send_worker(struct work_struct *work) int ret; if (rds_conn_state(conn) == RDS_CONN_UP) { + clear_bit(RDS_LL_SEND_FULL, &conn->c_flags); ret = rds_send_xmit(conn); + cond_resched(); rdsdebug("conn %p ret %d\n", conn, ret); switch (ret) { case -EAGAIN: diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c index 094a874b48bc..3fee70d9814f 100644 --- a/net/sched/sch_blackhole.c +++ b/net/sched/sch_blackhole.c @@ -11,7 +11,7 @@ * Note: Quantum tunneling is not supported. 
*/ -#include <linux/module.h> +#include <linux/init.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/skbuff.h> @@ -37,17 +37,8 @@ static struct Qdisc_ops blackhole_qdisc_ops __read_mostly = { .owner = THIS_MODULE, }; -static int __init blackhole_module_init(void) +static int __init blackhole_init(void) { return register_qdisc(&blackhole_qdisc_ops); } - -static void __exit blackhole_module_exit(void) -{ - unregister_qdisc(&blackhole_qdisc_ops); -} - -module_init(blackhole_module_init) -module_exit(blackhole_module_exit) - -MODULE_LICENSE("GPL"); +device_initcall(blackhole_init) diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 3386cce4751e..109b2322778f 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -225,6 +225,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) return &q->internal; /* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket + * or a listener (SYNCOOKIE mode) * 1) request sockets are not full blown, * they do not contain sk_pacing_rate * 2) They are not part of a 'flow' yet @@ -232,7 +233,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) * especially if the listener set SO_MAX_PACING_RATE * 4) We pretend they are orphaned */ - if (!sk || sk->sk_state == TCP_NEW_SYN_RECV) { + if (!sk || sk_listener(sk)) { unsigned long hash = skb_get_hash(skb) & q->orphan_mask; /* By forcing low order bit to 1, we make sure to not diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 7954c52e1794..763e06a55155 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2494,7 +2494,7 @@ static int sctp_process_param(struct sctp_association *asoc, __u16 sat; int retval = 1; sctp_scope_t scope; - time_t stale; + u32 stale; struct sctp_af *af; union sctp_addr_param *addr_param; struct sctp_transport *t; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index d7eaa7354cf7..6f46aa16cb76 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -2306,7 +2306,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; - time_t stale; + u32 stale; sctp_cookie_preserve_param_t bht; sctp_errhdr_t *err; struct sctp_chunk *reply; diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 6e4a4f9ad927..73e3895175cf 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -15,8 +15,10 @@ #include <linux/mutex.h> #include <linux/notifier.h> #include <linux/netdevice.h> +#include <linux/etherdevice.h> #include <linux/if_bridge.h> #include <linux/list.h> +#include <linux/workqueue.h> #include <net/ip_fib.h> #include <net/switchdev.h> @@ -92,6 +94,85 @@ static void switchdev_trans_items_warn_destroy(struct net_device *dev, switchdev_trans_items_destroy(trans); } +static LIST_HEAD(deferred); +static DEFINE_SPINLOCK(deferred_lock); + +typedef void switchdev_deferred_func_t(struct net_device *dev, + const void *data); + +struct switchdev_deferred_item { + struct list_head list; + struct net_device *dev; + switchdev_deferred_func_t *func; + unsigned long data[0]; +}; + +static struct switchdev_deferred_item *switchdev_deferred_dequeue(void) +{ + struct switchdev_deferred_item *dfitem; + + spin_lock_bh(&deferred_lock); + if (list_empty(&deferred)) { + dfitem = NULL; + goto unlock; + } + dfitem = list_first_entry(&deferred, + struct switchdev_deferred_item, list); + list_del(&dfitem->list); +unlock: + spin_unlock_bh(&deferred_lock); + return 
dfitem; +} + +/** + * switchdev_deferred_process - Process ops in deferred queue + * + * Called to flush the ops currently queued in deferred ops queue. + * rtnl_lock must be held. + */ +void switchdev_deferred_process(void) +{ + struct switchdev_deferred_item *dfitem; + + ASSERT_RTNL(); + + while ((dfitem = switchdev_deferred_dequeue())) { + dfitem->func(dfitem->dev, dfitem->data); + dev_put(dfitem->dev); + kfree(dfitem); + } +} +EXPORT_SYMBOL_GPL(switchdev_deferred_process); + +static void switchdev_deferred_process_work(struct work_struct *work) +{ + rtnl_lock(); + switchdev_deferred_process(); + rtnl_unlock(); +} + +static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work); + +static int switchdev_deferred_enqueue(struct net_device *dev, + const void *data, size_t data_len, + switchdev_deferred_func_t *func) +{ + struct switchdev_deferred_item *dfitem; + + dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC); + if (!dfitem) + return -ENOMEM; + dfitem->dev = dev; + dfitem->func = func; + memcpy(dfitem->data, data, data_len); + dev_hold(dev); + spin_lock_bh(&deferred_lock); + list_add_tail(&dfitem->list, &deferred); + spin_unlock_bh(&deferred_lock); + schedule_work(&deferred_process_work); + return 0; +} + /** * switchdev_port_attr_get - Get port attribute * @@ -135,7 +216,7 @@ int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr) EXPORT_SYMBOL_GPL(switchdev_port_attr_get); static int __switchdev_port_attr_set(struct net_device *dev, - struct switchdev_attr *attr, + const struct switchdev_attr *attr, struct switchdev_trans *trans) { const struct switchdev_ops *ops = dev->switchdev_ops; @@ -147,7 +228,7 @@ static int __switchdev_port_attr_set(struct net_device *dev, return ops->switchdev_port_attr_set(dev, attr, trans); if (attr->flags & SWITCHDEV_F_NO_RECURSE) - return err; + goto done; /* Switch device port(s) may be stacked under * bond/team/vlan dev, so recurse down to set attr on @@ -156,81 +237,26 @@ static int __switchdev_port_attr_set(struct net_device *dev, netdev_for_each_lower_dev(dev, lower_dev, iter) { err = __switchdev_port_attr_set(lower_dev, attr, trans); + if (err == -EOPNOTSUPP && + attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP) + continue; if (err) break; } - return err; -} - -struct switchdev_attr_set_work { - struct work_struct work; - struct net_device *dev; - struct switchdev_attr attr; -}; - -static void switchdev_port_attr_set_work(struct work_struct *work) -{ - struct switchdev_attr_set_work *asw = - container_of(work, struct switchdev_attr_set_work, work); - int err; - - rtnl_lock(); - err = switchdev_port_attr_set(asw->dev, &asw->attr); - if (err && err != -EOPNOTSUPP) - netdev_err(asw->dev, "failed (err=%d) to set attribute (id=%d)\n", - err, asw->attr.id); - rtnl_unlock(); - - dev_put(asw->dev); - kfree(work); -} - -static int switchdev_port_attr_set_defer(struct net_device *dev, - struct switchdev_attr *attr) -{ - struct switchdev_attr_set_work *asw; - - asw = kmalloc(sizeof(*asw), GFP_ATOMIC); - if (!asw) - return -ENOMEM; - - INIT_WORK(&asw->work, switchdev_port_attr_set_work); - - dev_hold(dev); - asw->dev = dev; - memcpy(&asw->attr, attr, sizeof(asw->attr)); - - schedule_work(&asw->work); +done: + if (err == -EOPNOTSUPP && attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP) + err = 0; - return 0; + return err; } -/** - * switchdev_port_attr_set - Set port attribute - * - * @dev: port device - * @attr: attribute to set - * - * Use a 2-phase prepare-commit transaction model to ensure - * system is not left in a partially 
updated state due to - * failure from driver/device. - */ -int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr) +static int switchdev_port_attr_set_now(struct net_device *dev, + const struct switchdev_attr *attr) { struct switchdev_trans trans; int err; - if (!rtnl_is_locked()) { - /* Running prepare-commit transaction across stacked - * devices requires nothing moves, so if rtnl_lock is - * not held, schedule a worker thread to hold rtnl_lock - * while setting attr. - */ - - return switchdev_port_attr_set_defer(dev, attr); - } - switchdev_trans_init(&trans); /* Phase I: prepare for attr set. Driver/device should fail @@ -267,6 +293,47 @@ int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr) return err; } + +static void switchdev_port_attr_set_deferred(struct net_device *dev, + const void *data) +{ + const struct switchdev_attr *attr = data; + int err; + + err = switchdev_port_attr_set_now(dev, attr); + if (err && err != -EOPNOTSUPP) + netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n", + err, attr->id); +} + +static int switchdev_port_attr_set_defer(struct net_device *dev, + const struct switchdev_attr *attr) +{ + return switchdev_deferred_enqueue(dev, attr, sizeof(*attr), + switchdev_port_attr_set_deferred); +} + +/** + * switchdev_port_attr_set - Set port attribute + * + * @dev: port device + * @attr: attribute to set + * + * Use a 2-phase prepare-commit transaction model to ensure + * system is not left in a partially updated state due to + * failure from driver/device. + * + * rtnl_lock must be held and must not be in atomic section, + * in case SWITCHDEV_F_DEFER flag is not set. + */ +int switchdev_port_attr_set(struct net_device *dev, + const struct switchdev_attr *attr) +{ + if (attr->flags & SWITCHDEV_F_DEFER) + return switchdev_port_attr_set_defer(dev, attr); + ASSERT_RTNL(); + return switchdev_port_attr_set_now(dev, attr); +} EXPORT_SYMBOL_GPL(switchdev_port_attr_set); static int __switchdev_port_obj_add(struct net_device *dev, @@ -295,21 +362,8 @@ static int __switchdev_port_obj_add(struct net_device *dev, return err; } -/** - * switchdev_port_obj_add - Add port object - * - * @dev: port device - * @id: object ID - * @obj: object to add - * - * Use a 2-phase prepare-commit transaction model to ensure - * system is not left in a partially updated state due to - * failure from driver/device. - * - * rtnl_lock must be held. 
- */ -int switchdev_port_obj_add(struct net_device *dev, - const struct switchdev_obj *obj) +static int switchdev_port_obj_add_now(struct net_device *dev, + const struct switchdev_obj *obj) { struct switchdev_trans trans; int err; @@ -351,18 +405,53 @@ int switchdev_port_obj_add(struct net_device *dev, return err; } -EXPORT_SYMBOL_GPL(switchdev_port_obj_add); + +static void switchdev_port_obj_add_deferred(struct net_device *dev, + const void *data) +{ + const struct switchdev_obj *obj = data; + int err; + + err = switchdev_port_obj_add_now(dev, obj); + if (err && err != -EOPNOTSUPP) + netdev_err(dev, "failed (err=%d) to add object (id=%d)\n", + err, obj->id); +} + +static int switchdev_port_obj_add_defer(struct net_device *dev, + const struct switchdev_obj *obj) +{ + return switchdev_deferred_enqueue(dev, obj, sizeof(*obj), + switchdev_port_obj_add_deferred); +} /** - * switchdev_port_obj_del - Delete port object + * switchdev_port_obj_add - Add port object * * @dev: port device * @id: object ID - * @obj: object to delete + * @obj: object to add + * + * Use a 2-phase prepare-commit transaction model to ensure + * system is not left in a partially updated state due to + * failure from driver/device. + * + * rtnl_lock must be held and must not be in atomic section, + * in case SWITCHDEV_F_DEFER flag is not set. */ -int switchdev_port_obj_del(struct net_device *dev, +int switchdev_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj) { + if (obj->flags & SWITCHDEV_F_DEFER) + return switchdev_port_obj_add_defer(dev, obj); + ASSERT_RTNL(); + return switchdev_port_obj_add_now(dev, obj); +} +EXPORT_SYMBOL_GPL(switchdev_port_obj_add); + +static int switchdev_port_obj_del_now(struct net_device *dev, + const struct switchdev_obj *obj) +{ const struct switchdev_ops *ops = dev->switchdev_ops; struct net_device *lower_dev; struct list_head *iter; @@ -377,13 +466,51 @@ int switchdev_port_obj_del(struct net_device *dev, */ netdev_for_each_lower_dev(dev, lower_dev, iter) { - err = switchdev_port_obj_del(lower_dev, obj); + err = switchdev_port_obj_del_now(lower_dev, obj); if (err) break; } return err; } + +static void switchdev_port_obj_del_deferred(struct net_device *dev, + const void *data) +{ + const struct switchdev_obj *obj = data; + int err; + + err = switchdev_port_obj_del_now(dev, obj); + if (err && err != -EOPNOTSUPP) + netdev_err(dev, "failed (err=%d) to del object (id=%d)\n", + err, obj->id); +} + +static int switchdev_port_obj_del_defer(struct net_device *dev, + const struct switchdev_obj *obj) +{ + return switchdev_deferred_enqueue(dev, obj, sizeof(*obj), + switchdev_port_obj_del_deferred); +} + +/** + * switchdev_port_obj_del - Delete port object + * + * @dev: port device + * @id: object ID + * @obj: object to delete + * + * rtnl_lock must be held and must not be in atomic section, + * in case SWITCHDEV_F_DEFER flag is not set. + */ +int switchdev_port_obj_del(struct net_device *dev, + const struct switchdev_obj *obj) +{ + if (obj->flags & SWITCHDEV_F_DEFER) + return switchdev_port_obj_del_defer(dev, obj); + ASSERT_RTNL(); + return switchdev_port_obj_del_now(dev, obj); +} EXPORT_SYMBOL_GPL(switchdev_port_obj_del); /** @@ -393,6 +520,8 @@ EXPORT_SYMBOL_GPL(switchdev_port_obj_del); * @id: object ID * @obj: object to dump * @cb: function to call with a filled object + * + * rtnl_lock must be held. 
*/ int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj, switchdev_obj_dump_cb_t *cb) @@ -402,6 +531,8 @@ int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj, struct list_head *iter; int err = -EOPNOTSUPP; + ASSERT_RTNL(); + if (ops && ops->switchdev_port_obj_dump) return ops->switchdev_port_obj_dump(dev, obj, cb); @@ -720,6 +851,9 @@ static int switchdev_port_br_afspec(struct net_device *dev, if (vlan.vid_begin) return -EINVAL; vlan.vid_begin = vinfo->vid; + /* don't allow range of pvids */ + if (vlan.flags & BRIDGE_VLAN_INFO_PVID) + return -EINVAL; } else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) { if (!vlan.vid_begin) return -EINVAL; @@ -822,10 +956,10 @@ int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], { struct switchdev_obj_port_fdb fdb = { .obj.id = SWITCHDEV_OBJ_ID_PORT_FDB, - .addr = addr, .vid = vid, }; + ether_addr_copy(fdb.addr, addr); return switchdev_port_obj_add(dev, &fdb.obj); } EXPORT_SYMBOL_GPL(switchdev_port_fdb_add); @@ -847,10 +981,10 @@ int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], { struct switchdev_obj_port_fdb fdb = { .obj.id = SWITCHDEV_OBJ_ID_PORT_FDB, - .addr = addr, .vid = vid, }; + ether_addr_copy(fdb.addr, addr); return switchdev_port_obj_del(dev, &fdb.obj); } EXPORT_SYMBOL_GPL(switchdev_port_fdb_del); @@ -967,6 +1101,8 @@ static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi) struct net_device *dev = NULL; int nhsel; + ASSERT_RTNL(); + /* For this route, all nexthop devs must be on the same switch. */ for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) { @@ -1012,7 +1148,6 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi, .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB, .dst = dst, .dst_len = dst_len, - .fi = fi, .tos = tos, .type = type, .nlflags = nlflags, @@ -1021,6 +1156,8 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi, struct net_device *dev; int err = 0; + memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi)); + /* Don't offload route if using custom ip rules or if * IPv4 FIB offloading has been disabled completely. */ @@ -1064,7 +1201,6 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi, .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB, .dst = dst, .dst_len = dst_len, - .fi = fi, .tos = tos, .type = type, .nlflags = 0, @@ -1073,6 +1209,8 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi, struct net_device *dev; int err = 0; + memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi)); + if (!(fi->fib_flags & RTNH_F_OFFLOAD)) return 0; @@ -1195,10 +1333,11 @@ void switchdev_port_fwd_mark_set(struct net_device *dev, u32 mark = dev->ifindex; u32 reset_mark = 0; - if (group_dev && joining) { - mark = switchdev_port_fwd_mark_get(dev, group_dev); - } else if (group_dev && !joining) { - if (dev->offload_fwd_mark == mark) + if (group_dev) { + ASSERT_RTNL(); + if (joining) + mark = switchdev_port_fwd_mark_get(dev, group_dev); + else if (dev->offload_fwd_mark == mark) /* Ohoh, this port was the mark reference port, * but it's leaving the group, so reset the * mark for the remaining ports in the group. 
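The switchdev hunks above replace the single-purpose switchdev_attr_set_work worker with a generic deferred-ops queue: switchdev_deferred_enqueue() copies the caller's attr or obj into a heap item under deferred_lock (so it can run in atomic context), and switchdev_deferred_process() drains the queue one item at a time under rtnl_lock, either when called directly or from the scheduled deferred_process_work. The stand-alone C sketch below shows the same enqueue/drain shape; the pthread lock and all names here are illustrative stand-ins for the kernel primitives, not the kernel API itself.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef void deferred_func_t(const void *data);

struct deferred_item {
	struct deferred_item *next;
	deferred_func_t *func;
	unsigned char data[];		/* flexible array, like dfitem->data[0] */
};

static struct deferred_item *head;
static struct deferred_item **tail = &head;
static pthread_mutex_t deferred_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analogue of switchdev_deferred_enqueue(): copy the caller's data so the
 * item outlives the caller's stack frame, then append under the lock. */
static int deferred_enqueue(const void *data, size_t len, deferred_func_t *func)
{
	struct deferred_item *item = malloc(sizeof(*item) + len);

	if (!item)
		return -1;
	item->next = NULL;
	item->func = func;
	memcpy(item->data, data, len);

	pthread_mutex_lock(&deferred_lock);
	*tail = item;
	tail = &item->next;
	pthread_mutex_unlock(&deferred_lock);
	return 0;
}

/* Analogue of switchdev_deferred_process(): pop one item per lock
 * acquisition so the lock is never held across the callback. */
static void deferred_process(void)
{
	for (;;) {
		struct deferred_item *item;

		pthread_mutex_lock(&deferred_lock);
		item = head;
		if (item) {
			head = item->next;
			if (!head)
				tail = &head;
		}
		pthread_mutex_unlock(&deferred_lock);

		if (!item)
			return;
		item->func(item->data);
		free(item);
	}
}

static void apply_attr(const void *data)
{
	printf("applying deferred attr id=%d\n", *(const int *)data);
}

int main(void)
{
	int attr_id = 42;

	deferred_enqueue(&attr_id, sizeof(attr_id), apply_attr);
	deferred_process();	/* the kernel runs this under rtnl_lock */
	return 0;
}

Popping one item per lock acquisition mirrors how switchdev_deferred_dequeue() is structured: the spinlock protects only list manipulation, never the callback, which is what lets the enqueue side stay safe in atomic context.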
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index ce9f7bfc0b92..82b278668ab7 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -362,6 +362,7 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr) b_ptr->media->disable_media(b_ptr); tipc_node_delete_links(net, b_ptr->identity); + RCU_INIT_POINTER(b_ptr->media_ptr, NULL); if (b_ptr->link_req) tipc_disc_delete(b_ptr->link_req); @@ -399,16 +400,13 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, /* tipc_disable_l2_media - detach TIPC bearer from an L2 interface * - * Mark L2 bearer as inactive so that incoming buffers are thrown away, - * then get worker thread to complete bearer cleanup. (Can't do cleanup - * here because cleanup code needs to sleep and caller holds spinlocks.) + * Mark L2 bearer as inactive so that incoming buffers are thrown away */ void tipc_disable_l2_media(struct tipc_bearer *b) { struct net_device *dev; dev = (struct net_device *)rtnl_dereference(b->media_ptr); - RCU_INIT_POINTER(b->media_ptr, NULL); RCU_INIT_POINTER(dev->tipc_ptr, NULL); synchronize_net(); dev_put(dev); @@ -554,7 +552,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, case NETDEV_CHANGE: if (netif_carrier_ok(dev)) break; - case NETDEV_DOWN: + case NETDEV_GOING_DOWN: case NETDEV_CHANGEMTU: tipc_reset_bearer(net, b_ptr); break; diff --git a/net/tipc/link.c b/net/tipc/link.c index 75db07c78a69..ff9b0b92e62e 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -120,11 +120,21 @@ bool tipc_link_is_up(struct tipc_link *l) return link_is_up(l); } +bool tipc_link_peer_is_down(struct tipc_link *l) +{ + return l->state == LINK_PEER_RESET; +} + bool tipc_link_is_reset(struct tipc_link *l) { return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING); } +bool tipc_link_is_establishing(struct tipc_link *l) +{ + return l->state == LINK_ESTABLISHING; +} + bool tipc_link_is_synching(struct tipc_link *l) { return l->state == LINK_SYNCHING; @@ -321,14 +331,15 @@ int tipc_link_fsm_evt(struct tipc_link *l, int evt) switch (evt) { case LINK_ESTABLISH_EVT: l->state = LINK_ESTABLISHED; - rc |= TIPC_LINK_UP_EVT; break; case LINK_FAILOVER_BEGIN_EVT: l->state = LINK_FAILINGOVER; break; - case LINK_PEER_RESET_EVT: case LINK_RESET_EVT: + l->state = LINK_RESET; + break; case LINK_FAILURE_EVT: + case LINK_PEER_RESET_EVT: case LINK_SYNCH_BEGIN_EVT: case LINK_FAILOVER_END_EVT: break; @@ -578,8 +589,6 @@ void tipc_link_purge_queues(struct tipc_link *l_ptr) void tipc_link_reset(struct tipc_link *l) { - tipc_link_fsm_evt(l, LINK_RESET_EVT); - /* Link is down, accept any session */ l->peer_session = WILDCARD_SESSION; @@ -953,7 +962,7 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb, case TIPC_HIGH_IMPORTANCE: case TIPC_CRITICAL_IMPORTANCE: case CONN_MANAGER: - __skb_queue_tail(inputq, skb); + skb_queue_tail(inputq, skb); return true; case NAME_DISTRIBUTOR: node->bclink.recv_permitted = true; @@ -982,6 +991,7 @@ static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb, struct tipc_msg *hdr = buf_msg(skb); struct sk_buff **reasm_skb = &l->reasm_buf; struct sk_buff *iskb; + struct sk_buff_head tmpq; int usr = msg_user(hdr); int rc = 0; int pos = 0; @@ -1006,10 +1016,12 @@ static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb, } if (usr == MSG_BUNDLER) { + skb_queue_head_init(&tmpq); l->stats.recv_bundles++; l->stats.recv_bundled += msg_msgcnt(hdr); while (tipc_msg_extract(skb, &iskb, &pos)) - tipc_data_input(l, iskb, inputq); + tipc_data_input(l, 
iskb, &tmpq); + tipc_skb_queue_splice_tail(&tmpq, inputq); return 0; } else if (usr == MSG_FRAGMENTER) { l->stats.recv_fragments++; @@ -1044,49 +1056,76 @@ static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked) return released; } +/* tipc_link_build_ack_msg: prepare link acknowledge message for transmission + */ +void tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq) +{ + l->rcv_unacked = 0; + l->stats.sent_acks++; + tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq); +} + +/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message + */ +void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq) +{ + int mtyp = RESET_MSG; + + if (l->state == LINK_ESTABLISHING) + mtyp = ACTIVATE_MSG; + + tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq); +} + +/* tipc_link_build_nack_msg: prepare link nack message for transmission + */ +static void tipc_link_build_nack_msg(struct tipc_link *l, + struct sk_buff_head *xmitq) +{ + u32 def_cnt = ++l->stats.deferred_recv; + + if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV)) + tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq); +} + /* tipc_link_rcv - process TIPC packets/messages arriving from off-node - * @link: the link that should handle the message + * @l: the link that should handle the message * @skb: TIPC packet * @xmitq: queue to place packets to be sent after this call */ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb, struct sk_buff_head *xmitq) { - struct sk_buff_head *arrvq = &l->deferdq; - struct sk_buff_head tmpq; + struct sk_buff_head *defq = &l->deferdq; struct tipc_msg *hdr; - u16 seqno, rcv_nxt; + u16 seqno, rcv_nxt, win_lim; int rc = 0; - __skb_queue_head_init(&tmpq); - - if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) { - if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV)) - tipc_link_build_proto_msg(l, STATE_MSG, 0, - 0, 0, 0, xmitq); - return rc; - } - - while ((skb = skb_peek(arrvq))) { + do { hdr = buf_msg(skb); + seqno = msg_seqno(hdr); + rcv_nxt = l->rcv_nxt; + win_lim = rcv_nxt + TIPC_MAX_LINK_WIN; /* Verify and update link state */ - if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) { - __skb_dequeue(arrvq); - rc = tipc_link_proto_rcv(l, skb, xmitq); - continue; - } + if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) + return tipc_link_proto_rcv(l, skb, xmitq); if (unlikely(!link_is_up(l))) { - rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT); - if (!link_is_up(l)) { - kfree_skb(__skb_dequeue(arrvq)); - goto exit; - } + if (l->state == LINK_ESTABLISHING) + rc = TIPC_LINK_UP_EVT; + goto drop; } + /* Don't send probe at next timeout expiration */ l->silent_intv_cnt = 0; + /* Drop if outside receive window */ + if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) { + l->stats.duplicates++; + goto drop; + } + /* Forward queues and wake up waiting users */ if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) { tipc_link_advance_backlog(l, xmitq); @@ -1094,39 +1133,28 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb, link_prepare_wakeup(l); } - /* Defer reception if there is a gap in the sequence */ - seqno = msg_seqno(hdr); - rcv_nxt = l->rcv_nxt; - if (unlikely(less(rcv_nxt, seqno))) { - l->stats.deferred_recv++; - goto exit; - } - - __skb_dequeue(arrvq); - - /* Drop if packet already received */ - if (unlikely(more(rcv_nxt, seqno))) { - l->stats.duplicates++; - kfree_skb(skb); - goto exit; + /* Defer delivery if sequence gap */ + if (unlikely(seqno != rcv_nxt)) { + __tipc_skb_queue_sorted(defq, seqno, skb); + 
tipc_link_build_nack_msg(l, xmitq); + break; } - /* Packet can be delivered */ + /* Deliver packet */ l->rcv_nxt++; l->stats.recv_info++; - if (unlikely(!tipc_data_input(l, skb, &tmpq))) - rc = tipc_link_input(l, skb, &tmpq); + if (!tipc_data_input(l, skb, l->inputq)) + rc = tipc_link_input(l, skb, l->inputq); + if (unlikely(rc)) + break; + if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) + tipc_link_build_ack_msg(l, xmitq); + + } while ((skb = __skb_dequeue(defq))); - /* Ack at regular intervals */ - if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) { - l->rcv_unacked = 0; - l->stats.sent_acks++; - tipc_link_build_proto_msg(l, STATE_MSG, - 0, 0, 0, 0, xmitq); - } - } -exit: - tipc_skb_queue_splice_tail(&tmpq, l->inputq); + return rc; +drop: + kfree_skb(skb); return rc; } @@ -1250,7 +1278,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, } /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets - * with contents of the link's tranmsit and backlog queues. + * with contents of the link's transmit and backlog queues. */ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl, int mtyp, struct sk_buff_head *xmitq) @@ -1331,6 +1359,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, u16 peers_tol = msg_link_tolerance(hdr); u16 peers_prio = msg_linkprio(hdr); u16 rcv_nxt = l->rcv_nxt; + int mtyp = msg_type(hdr); char *if_name; int rc = 0; @@ -1340,7 +1369,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, if (link_own_addr(l) > msg_prevnode(hdr)) l->net_plane = msg_net_plane(hdr); - switch (msg_type(hdr)) { + switch (mtyp) { case RESET_MSG: /* Ignore duplicate RESET with old session number */ @@ -1367,12 +1396,14 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI)) l->priority = peers_prio; - if (msg_type(hdr) == RESET_MSG) { - rc |= tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); - } else if (!link_is_up(l)) { - tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); - rc |= tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT); - } + /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ + if ((mtyp == RESET_MSG) || !link_is_up(l)) + rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); + + /* ACTIVATE_MSG takes up link if it was already locally reset */ + if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING)) + rc = TIPC_LINK_UP_EVT; + l->peer_session = msg_session(hdr); l->peer_bearer_id = msg_bearer_id(hdr); if (l->mtu > msg_max_pkt(hdr)) @@ -1389,9 +1420,12 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, l->stats.recv_states++; if (msg_probe(hdr)) l->stats.recv_probes++; - rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT); - if (!link_is_up(l)) + + if (!link_is_up(l)) { + if (l->state == LINK_ESTABLISHING) + rc = TIPC_LINK_UP_EVT; break; + } /* Send NACK if peer has sent pkts we haven't received yet */ if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l)) diff --git a/net/tipc/link.h b/net/tipc/link.h index 39ff8b6919a4..0201212cb49a 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -185,7 +185,7 @@ struct tipc_link { } backlog[5]; u16 snd_nxt; u16 last_retransm; - u32 window; + u16 window; u32 stale_count; /* Reception */ @@ -213,10 +213,13 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl, int mtyp, struct sk_buff_head *xmitq); void tipc_link_build_bcast_sync_msg(struct tipc_link *l, struct sk_buff_head *xmitq); +void 
tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq); int tipc_link_fsm_evt(struct tipc_link *l, int evt); void tipc_link_reset_fragments(struct tipc_link *l_ptr); bool tipc_link_is_up(struct tipc_link *l); +bool tipc_link_peer_is_down(struct tipc_link *l); bool tipc_link_is_reset(struct tipc_link *l); +bool tipc_link_is_establishing(struct tipc_link *l); bool tipc_link_is_synching(struct tipc_link *l); bool tipc_link_is_failingover(struct tipc_link *l); bool tipc_link_is_blocked(struct tipc_link *l); diff --git a/net/tipc/msg.c b/net/tipc/msg.c index c5ac436235e0..454f5ec275c8 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -590,3 +590,34 @@ error: kfree_skb(head); return NULL; } + +/* tipc_skb_queue_sorted(); sort pkt into list according to sequence number + * @list: list to be appended to + * @seqno: sequence number of buffer to add + * @skb: buffer to add + */ +void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno, + struct sk_buff *skb) +{ + struct sk_buff *_skb, *tmp; + + if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) { + __skb_queue_head(list, skb); + return; + } + + if (more(seqno, buf_seqno(skb_peek_tail(list)))) { + __skb_queue_tail(list, skb); + return; + } + + skb_queue_walk_safe(list, _skb, tmp) { + if (more(seqno, buf_seqno(_skb))) + continue; + if (seqno == buf_seqno(_skb)) + break; + __skb_queue_before(list, _skb, skb); + return; + } + kfree_skb(skb); +} diff --git a/net/tipc/msg.h b/net/tipc/msg.h index a82c5848d4bc..c784ba05f2aa 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -790,6 +790,8 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset, int dsz, int mtu, struct sk_buff_head *list); bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err); struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list); +void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno, + struct sk_buff *skb); static inline u16 buf_seqno(struct sk_buff *skb) { @@ -862,38 +864,6 @@ static inline struct sk_buff *tipc_skb_dequeue(struct sk_buff_head *list, return skb; } -/* tipc_skb_queue_sorted(); sort pkt into list according to sequence number - * @list: list to be appended to - * @skb: buffer to add - * Returns true if queue should treated further, otherwise false - */ -static inline bool __tipc_skb_queue_sorted(struct sk_buff_head *list, - struct sk_buff *skb) -{ - struct sk_buff *_skb, *tmp; - struct tipc_msg *hdr = buf_msg(skb); - u16 seqno = msg_seqno(hdr); - - if (skb_queue_empty(list) || (msg_user(hdr) == LINK_PROTOCOL)) { - __skb_queue_head(list, skb); - return true; - } - if (likely(less(seqno, buf_seqno(skb_peek(list))))) { - __skb_queue_head(list, skb); - return true; - } - if (!more(seqno, buf_seqno(skb_peek_tail(list)))) { - skb_queue_walk_safe(list, _skb, tmp) { - if (likely(less(seqno, buf_seqno(_skb)))) { - __skb_queue_before(list, _skb, skb); - return true; - } - } - } - __skb_queue_tail(list, skb); - return false; -} - /* tipc_skb_queue_splice_tail - append an skb list to lock protected list * @list: the new list to append. Not lock protected * @head: target list. Lock protected. 
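The new __tipc_skb_queue_sorted() in msg.c above inserts an out-of-order packet into the deferred queue by sequence number and frees exact duplicates (the trailing kfree_skb() runs only when an equal seqno is already queued). Its less()/more() helpers are not shown in these hunks; they compare 16-bit sequence numbers modulo 2^16, which the following stand-alone sketch models with RFC 1982-style serial arithmetic (an assumption about the implementation, not a copy of it):

#include <assert.h>
#include <stdint.h>

/* "a strictly precedes b" in serial-number arithmetic: true when a is
 * within half the 16-bit space behind b, so ordering survives wraparound. */
static int seqno_less(uint16_t a, uint16_t b)
{
	return (int16_t)(uint16_t)(a - b) < 0;
}

int main(void)
{
	assert(seqno_less(1, 2));
	assert(seqno_less(65535, 0));	/* 0 comes after 65535 across the wrap */
	assert(!seqno_less(0, 65000));	/* 0 is far "ahead" of 65000, not behind */
	assert(!seqno_less(7, 7));	/* equal seqnos: a duplicate, not "less" */
	return 0;
}

The cast to int16_t is what makes the comparison wraparound-safe: any sequence number within half the space ahead of b counts as "greater", so the ordering holds across the 65535 -> 0 rollover that a plain a < b would get wrong.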
diff --git a/net/tipc/node.c b/net/tipc/node.c index 703875fd6cde..d1f340116c84 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -317,7 +317,11 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id, struct tipc_link *ol = node_active_link(n, 0); struct tipc_link *nl = n->links[bearer_id].link; - if (!nl || !tipc_link_is_up(nl)) + if (!nl) + return; + + tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT); + if (!tipc_link_is_up(nl)) return; n->working_links++; @@ -416,7 +420,13 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id, } if (!tipc_node_is_up(n)) { + if (tipc_link_peer_is_down(l)) + tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT); + tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT); + tipc_link_fsm_evt(l, LINK_RESET_EVT); tipc_link_reset(l); + tipc_link_build_reset_msg(l, xmitq); + *maddr = &n->links[*bearer_id].maddr; node_lost_contact(n, &le->inputq); return; } @@ -428,6 +438,7 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id, n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1); tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq); tipc_link_reset(l); + tipc_link_fsm_evt(l, LINK_RESET_EVT); tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT); *maddr = &n->links[tnl->bearer_id].maddr; @@ -437,20 +448,28 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id, static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete) { struct tipc_link_entry *le = &n->links[bearer_id]; + struct tipc_link *l = le->link; struct tipc_media_addr *maddr; struct sk_buff_head xmitq; + if (!l) + return; + __skb_queue_head_init(&xmitq); tipc_node_lock(n); - __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr); - if (delete && le->link) { - kfree(le->link); - le->link = NULL; - n->link_cnt--; + if (!tipc_link_is_establishing(l)) { + __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr); + if (delete) { + kfree(l); + le->link = NULL; + n->link_cnt--; + } + } else { + /* Defuse pending tipc_node_link_up() */ + tipc_link_fsm_evt(l, LINK_RESET_EVT); } tipc_node_unlock(n); - tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr); tipc_sk_rcv(n->net, &le->inputq); } @@ -567,6 +586,7 @@ void tipc_node_check_dest(struct net *net, u32 onode, goto exit; } tipc_link_reset(l); + tipc_link_fsm_evt(l, LINK_RESET_EVT); if (n->state == NODE_FAILINGOVER) tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); le->link = l; @@ -579,7 +599,7 @@ void tipc_node_check_dest(struct net *net, u32 onode, memcpy(&le->maddr, maddr, sizeof(*maddr)); exit: tipc_node_unlock(n); - if (reset) + if (reset && !tipc_link_is_reset(l)) tipc_node_link_down(n, b->identity, false); tipc_node_put(n); } @@ -686,10 +706,10 @@ static void tipc_node_fsm_evt(struct tipc_node *n, int evt) break; case SELF_ESTABL_CONTACT_EVT: case PEER_LOST_CONTACT_EVT: - break; case NODE_SYNCH_END_EVT: - case NODE_SYNCH_BEGIN_EVT: case NODE_FAILOVER_BEGIN_EVT: + break; + case NODE_SYNCH_BEGIN_EVT: case NODE_FAILOVER_END_EVT: default: goto illegal_evt; @@ -849,9 +869,6 @@ static void node_lost_contact(struct tipc_node *n_ptr, tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT); } - /* Prevent re-contact with node until cleanup is done */ - tipc_node_fsm_evt(n_ptr, SELF_LOST_CONTACT_EVT); - /* Notify publications from this node */ n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN; diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index c170d3138953..9bc0b1e515fa 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -425,7 +425,6 @@ static void 
tipc_udp_disable(struct tipc_bearer *b) } if (ub->ubsock) sock_set_flag(ub->ubsock->sk, SOCK_DEAD); - RCU_INIT_POINTER(b->media_ptr, NULL); RCU_INIT_POINTER(ub->bearer, NULL); /* sock_release need to be done outside of rtnl lock */ diff --git a/net/wireless/core.c b/net/wireless/core.c index 3893409dee95..f223026ddb03 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -419,6 +419,7 @@ use_default_name: device_initialize(&rdev->wiphy.dev); rdev->wiphy.dev.class = &ieee80211_class; rdev->wiphy.dev.platform_data = rdev; + device_enable_async_suspend(&rdev->wiphy.dev); INIT_LIST_HEAD(&rdev->destroy_list); spin_lock_init(&rdev->destroy_list_lock); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 5d8748b4c8a2..f05ba8b7af61 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -3,6 +3,7 @@ * * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright 2015 Intel Deutschland GmbH */ #include <linux/if.h> @@ -2403,6 +2404,16 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag } } + if (rdev->ops->get_tx_power) { + int dbm, ret; + + ret = rdev_get_tx_power(rdev, wdev, &dbm); + if (ret == 0 && + nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL, + DBM_TO_MBM(dbm))) + goto nla_put_failure; + } + if (wdev->ssid_len) { if (nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid)) goto nla_put_failure; @@ -3998,7 +4009,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy, params->sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); } - if (statype != CFG80211_STA_TDLS_PEER_SETUP) { + if (statype != CFG80211_STA_TDLS_PEER_SETUP && + statype != CFG80211_STA_AP_CLIENT_UNASSOC) { /* reject other things that can't change */ if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) return -EINVAL; @@ -4010,7 +4022,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy, return -EINVAL; } - if (statype != CFG80211_STA_AP_CLIENT) { + if (statype != CFG80211_STA_AP_CLIENT && + statype != CFG80211_STA_AP_CLIENT_UNASSOC) { if (params->vlan) return -EINVAL; } @@ -4022,6 +4035,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy, return -EOPNOTSUPP; break; case CFG80211_STA_AP_CLIENT: + case CFG80211_STA_AP_CLIENT_UNASSOC: /* accept only the listed bits */ if (params->sta_flags_mask & ~(BIT(NL80211_STA_FLAG_AUTHORIZED) | @@ -9938,6 +9952,9 @@ static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info) if (!wdev->netdev && !wdev->p2p_started) return -ENETDOWN; } + + if (!vcmd->doit) + return -EOPNOTSUPP; } else { wdev = NULL; } @@ -9957,6 +9974,193 @@ static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info) return -EOPNOTSUPP; } +static int nl80211_prepare_vendor_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct cfg80211_registered_device **rdev, + struct wireless_dev **wdev) +{ + u32 vid, subcmd; + unsigned int i; + int vcmd_idx = -1; + int err; + void *data = NULL; + unsigned int data_len = 0; + + rtnl_lock(); + + if (cb->args[0]) { + /* subtract the 1 again here */ + struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); + struct wireless_dev *tmp; + + if (!wiphy) { + err = -ENODEV; + goto out_unlock; + } + *rdev = wiphy_to_rdev(wiphy); + *wdev = NULL; + + if (cb->args[1]) { + list_for_each_entry(tmp, &(*rdev)->wdev_list, list) { + if (tmp->identifier == cb->args[1] - 1) { + *wdev = tmp; + break; + } + } + } + + /* keep rtnl locked in successful case */ + return 0; + } + + err = nlmsg_parse(cb->nlh, 
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 3893409dee95..f223026ddb03 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -419,6 +419,7 @@ use_default_name:
 	device_initialize(&rdev->wiphy.dev);
 	rdev->wiphy.dev.class = &ieee80211_class;
 	rdev->wiphy.dev.platform_data = rdev;
+	device_enable_async_suspend(&rdev->wiphy.dev);
 
 	INIT_LIST_HEAD(&rdev->destroy_list);
 	spin_lock_init(&rdev->destroy_list_lock);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 5d8748b4c8a2..f05ba8b7af61 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3,6 +3,7 @@
 *
 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright 2015 Intel Deutschland GmbH
 */
 
 #include <linux/if.h>
@@ -2403,6 +2404,16 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
 		}
 	}
 
+	if (rdev->ops->get_tx_power) {
+		int dbm, ret;
+
+		ret = rdev_get_tx_power(rdev, wdev, &dbm);
+		if (ret == 0 &&
+		    nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL,
+				DBM_TO_MBM(dbm)))
+			goto nla_put_failure;
+	}
+
 	if (wdev->ssid_len) {
 		if (nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
 			goto nla_put_failure;
@@ -3998,7 +4009,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
 		params->sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
 	}
 
-	if (statype != CFG80211_STA_TDLS_PEER_SETUP) {
+	if (statype != CFG80211_STA_TDLS_PEER_SETUP &&
+	    statype != CFG80211_STA_AP_CLIENT_UNASSOC) {
 		/* reject other things that can't change */
 		if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD)
 			return -EINVAL;
@@ -4010,7 +4022,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
 			return -EINVAL;
 	}
 
-	if (statype != CFG80211_STA_AP_CLIENT) {
+	if (statype != CFG80211_STA_AP_CLIENT &&
+	    statype != CFG80211_STA_AP_CLIENT_UNASSOC) {
 		if (params->vlan)
 			return -EINVAL;
 	}
@@ -4022,6 +4035,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
 			return -EOPNOTSUPP;
 		break;
 	case CFG80211_STA_AP_CLIENT:
+	case CFG80211_STA_AP_CLIENT_UNASSOC:
 		/* accept only the listed bits */
 		if (params->sta_flags_mask &
 				~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
@@ -9938,6 +9952,9 @@ static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info)
 			if (!wdev->netdev && !wdev->p2p_started)
 				return -ENETDOWN;
 		}
+
+		if (!vcmd->doit)
+			return -EOPNOTSUPP;
 	} else {
 		wdev = NULL;
 	}
@@ -9957,6 +9974,193 @@ static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info)
 	return -EOPNOTSUPP;
 }
 
+static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
+				       struct netlink_callback *cb,
+				       struct cfg80211_registered_device **rdev,
+				       struct wireless_dev **wdev)
+{
+	u32 vid, subcmd;
+	unsigned int i;
+	int vcmd_idx = -1;
+	int err;
+	void *data = NULL;
+	unsigned int data_len = 0;
+
+	rtnl_lock();
+
+	if (cb->args[0]) {
+		/* subtract the 1 again here */
+		struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
+		struct wireless_dev *tmp;
+
+		if (!wiphy) {
+			err = -ENODEV;
+			goto out_unlock;
+		}
+		*rdev = wiphy_to_rdev(wiphy);
+		*wdev = NULL;
+
+		if (cb->args[1]) {
+			list_for_each_entry(tmp, &(*rdev)->wdev_list, list) {
+				if (tmp->identifier == cb->args[1] - 1) {
+					*wdev = tmp;
+					break;
+				}
+			}
+		}
+
+		/* keep rtnl locked in successful case */
+		return 0;
+	}
+
+	err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
+			  nl80211_fam.attrbuf, nl80211_fam.maxattr,
+			  nl80211_policy);
+	if (err)
+		goto out_unlock;
+
+	if (!nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_ID] ||
+	    !nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) {
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
+	*wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk),
+					   nl80211_fam.attrbuf);
+	if (IS_ERR(*wdev))
+		*wdev = NULL;
+
+	*rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk),
+					   nl80211_fam.attrbuf);
+	if (IS_ERR(*rdev)) {
+		err = PTR_ERR(*rdev);
+		goto out_unlock;
+	}
+
+	vid = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_ID]);
+	subcmd = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD]);
+
+	for (i = 0; i < (*rdev)->wiphy.n_vendor_commands; i++) {
+		const struct wiphy_vendor_command *vcmd;
+
+		vcmd = &(*rdev)->wiphy.vendor_commands[i];
+
+		if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd)
+			continue;
+
+		if (!vcmd->dumpit) {
+			err = -EOPNOTSUPP;
+			goto out_unlock;
+		}
+
+		vcmd_idx = i;
+		break;
+	}
+
+	if (vcmd_idx < 0) {
+		err = -EOPNOTSUPP;
+		goto out_unlock;
+	}
+
+	if (nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]) {
+		data = nla_data(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]);
+		data_len = nla_len(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]);
+	}
+
+	/* 0 is the first index - add 1 to parse only once */
+	cb->args[0] = (*rdev)->wiphy_idx + 1;
+	/* add 1 to know if it was NULL */
+	cb->args[1] = *wdev ? (*wdev)->identifier + 1 : 0;
+	cb->args[2] = vcmd_idx;
+	cb->args[3] = (unsigned long)data;
+	cb->args[4] = data_len;
+
+	/* keep rtnl locked in successful case */
+	return 0;
+ out_unlock:
+	rtnl_unlock();
+	return err;
+}
+
+static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
+				   struct netlink_callback *cb)
+{
+	struct cfg80211_registered_device *rdev;
+	struct wireless_dev *wdev;
+	unsigned int vcmd_idx;
+	const struct wiphy_vendor_command *vcmd;
+	void *data;
+	int data_len;
+	int err;
+	struct nlattr *vendor_data;
+
+	err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev);
+	if (err)
+		return err;
+
+	vcmd_idx = cb->args[2];
+	data = (void *)cb->args[3];
+	data_len = cb->args[4];
+	vcmd = &rdev->wiphy.vendor_commands[vcmd_idx];
+
+	if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV |
+			   WIPHY_VENDOR_CMD_NEED_NETDEV)) {
+		if (!wdev)
+			return -EINVAL;
+		if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV &&
+		    !wdev->netdev)
+			return -EINVAL;
+
+		if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) {
+			if (wdev->netdev &&
+			    !netif_running(wdev->netdev))
+				return -ENETDOWN;
+			if (!wdev->netdev && !wdev->p2p_started)
+				return -ENETDOWN;
+		}
+	}
+
+	while (1) {
+		void *hdr = nl80211hdr_put(skb, NETLINK_CB(cb->skb).portid,
+					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
+					   NL80211_CMD_VENDOR);
+		if (!hdr)
+			break;
+
+		if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+		    (wdev && nla_put_u64(skb, NL80211_ATTR_WDEV,
+					 wdev_id(wdev)))) {
+			genlmsg_cancel(skb, hdr);
+			break;
+		}
+
+		vendor_data = nla_nest_start(skb, NL80211_ATTR_VENDOR_DATA);
+		if (!vendor_data) {
+			genlmsg_cancel(skb, hdr);
+			break;
+		}
+
+		err = vcmd->dumpit(&rdev->wiphy, wdev, skb, data, data_len,
+				   (unsigned long *)&cb->args[5]);
+		nla_nest_end(skb, vendor_data);
+
+		if (err == -ENOBUFS || err == -ENOENT) {
+			genlmsg_cancel(skb, hdr);
+			break;
+		} else if (err) {
+			genlmsg_cancel(skb, hdr);
+			goto out;
+		}
+
+		genlmsg_end(skb, hdr);
+	}
+
+	err = skb->len;
+ out:
+	rtnl_unlock();
+	return err;
+}
+
 struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
 					   enum nl80211_commands cmd,
 					   enum nl80211_attrs attr,
@@ -10994,6 +11198,7 @@ static const struct genl_ops nl80211_ops[] = {
 	{
 		.cmd = NL80211_CMD_VENDOR,
 		.doit = nl80211_vendor_cmd,
+		.dumpit = nl80211_vendor_cmd_dump,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
 		.internal_flags = NL80211_FLAG_NEED_WIPHY |
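The new nl80211_prepare_vendor_dump() above shows the standard netlink dump idiom: attributes are parsed only on the first dump pass, and the results are cached in the zero-initialized cb->args[] scratch space with a +1 bias so that 0 can keep meaning "not parsed yet" (and, for the wdev slot, "no wdev"). A minimal userspace sketch of that resumable-state pattern follows; the types and names merely model the kernel's netlink_callback, they are not its API.

#include <stdio.h>

/* Model of netlink_callback's scratch space: zeroed before the
 * first dump pass, preserved between passes. */
struct callback {
	unsigned long args[6];
};

/* First pass only: "parse" and stash state with a +1 bias so that
 * 0 still means "nothing cached yet" (cf. wiphy_idx + 1 above). */
static void prepare(struct callback *cb, unsigned long idx)
{
	if (cb->args[0])	/* already parsed on an earlier pass */
		return;
	cb->args[0] = idx + 1;	/* subtract the 1 again when reading */
}

static void dump_pass(struct callback *cb)
{
	prepare(cb, 7);
	printf("using cached idx %lu\n", cb->args[0] - 1);
}

int main(void)
{
	struct callback cb = { { 0 } };

	dump_pass(&cb);	/* parses and caches */
	dump_pass(&cb);	/* reuses the cached value */
	return 0;
}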
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2510b231451e..7258246b7458 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1040,8 +1040,8 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
 	return ERR_PTR(-EINVAL);
 }
 
-const struct ieee80211_reg_rule *__freq_reg_info(struct wiphy *wiphy,
-						 u32 center_freq, u32 min_bw)
+static const struct ieee80211_reg_rule *
+__freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 min_bw)
 {
 	const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy);
 	const struct ieee80211_reg_rule *reg_rule = NULL;
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index c48a4b8582bb..cc3676eb6239 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -136,12 +136,12 @@ int xfrm_output_resume(struct sk_buff *skb, int err)
 	while (likely((err = xfrm_output_one(skb, err)) == 0)) {
 		nf_reset(skb);
 
-		err = skb_dst(skb)->ops->local_out(skb);
+		err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
 		if (unlikely(err != 1))
 			goto out;
 
 		if (!skb_dst(skb)->xfrm)
-			return dst_output(skb->sk, skb);
+			return dst_output(net, skb->sk, skb);
 
 		err = nf_hook(skb_dst(skb)->ops->family,
 			      NF_INET_POST_ROUTING, net,
 			      skb->sk, skb,
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 418daa038edf..09bfcbac63bb 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1887,6 +1887,7 @@ static void xfrm_policy_queue_process(unsigned long arg)
 	struct sock *sk;
 	struct dst_entry *dst;
 	struct xfrm_policy *pol = (struct xfrm_policy *)arg;
+	struct net *net = xp_net(pol);
 	struct xfrm_policy_queue *pq = &pol->polq;
 	struct flowi fl;
 	struct sk_buff_head list;
@@ -1903,8 +1904,7 @@ static void xfrm_policy_queue_process(unsigned long arg)
 	spin_unlock(&pq->hold_queue.lock);
 
 	dst_hold(dst->path);
-	dst = xfrm_lookup(xp_net(pol), dst->path, &fl,
-			  sk, 0);
+	dst = xfrm_lookup(net, dst->path, &fl, sk, 0);
 	if (IS_ERR(dst))
 		goto purge_queue;
@@ -1934,8 +1934,7 @@ static void xfrm_policy_queue_process(unsigned long arg)
 		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
 
 		dst_hold(skb_dst(skb)->path);
-		dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
-				  &fl, skb->sk, 0);
+		dst = xfrm_lookup(net, skb_dst(skb)->path, &fl, skb->sk, 0);
 		if (IS_ERR(dst)) {
 			kfree_skb(skb);
 			continue;
@@ -1945,7 +1944,7 @@ static void xfrm_policy_queue_process(unsigned long arg)
 		skb_dst_drop(skb);
 		skb_dst_set(skb, dst);
 
-		dst_output(skb->sk, skb);
+		dst_output(net, skb->sk, skb);
 	}
 
 out:
@@ -1958,7 +1957,7 @@ purge_queue:
 	xfrm_pol_put(pol);
 }
 
-static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
+static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	unsigned long sched_next;
 	struct dst_entry *dst = skb_dst(skb);
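The xfrm hunks follow the same net-next theme as the bridge conflict this merge resolves: struct net is resolved once (net = xp_net(pol) in xfrm_policy_queue_process()) and passed explicitly to xfrm_lookup(), dst_output() and the local_out hook, rather than being rederived at every call site from pointers such as skb->sk that may be NULL on the output path. A minimal sketch of that "pass the namespace explicitly" refactor, using illustrative stand-in types rather than the kernel's:

#include <stdio.h>

/* Illustrative stand-ins for struct net, struct sock, struct sk_buff. */
struct net { int id; };
struct sock { struct net *net; };
struct sk_buff { struct sock *sk; };

/* Old style: each hop rederives the namespace from the socket,
 * which breaks down when skb->sk is NULL. */
static int output_old(struct sock *sk, struct sk_buff *skb)
{
	struct net *net = sk->net;	/* repeated at every call site */
	return net->id;
}

/* New style: the caller resolves the namespace once and passes it
 * down, so callees no longer depend on a socket being attached. */
static int output_new(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return net->id;
}

int main(void)
{
	struct net net = { .id = 1 };
	struct sock sk = { .net = &net };
	struct sk_buff skb = { .sk = &sk };

	printf("%d %d\n", output_old(&sk, &skb), output_new(&net, &sk, &skb));
	return 0;
}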