| author | Ingo Molnar <mingo@elte.hu> | 2010-03-22 18:46:57 +0100 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2010-03-22 18:47:01 +0100 |
| commit | d2f1e15b661e71fd52111f51c99a6ce41384e9ef (patch) | |
| tree | 8731e7e772e6f825ebbc6eef7681bc46302149bd /net | |
| parent | 40b7e05e17eef31ff30fe08dfc2424ef653a792c (diff) | |
| parent | 220bf991b0366cc50a94feede3d7341fa5710ee4 (diff) | |
| download | linux-d2f1e15b661e71fd52111f51c99a6ce41384e9ef.tar.bz2 | |
Merge commit 'v2.6.34-rc2' into perf/core
Merge reason: Pick up latest perf fixes from upstream.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'net')
62 files changed, 630 insertions, 399 deletions
net/9p/client.c
net/9p/trans_virtio.c
net/bluetooth/hci_sysfs.c
net/bridge/Kconfig
net/bridge/br_device.c
net/bridge/br_forward.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/br_private.h
net/core/dev_mcast.c
net/core/ethtool.c
net/core/neighbour.c
net/core/netpoll.c
net/core/sock.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dccp/minisocks.c
net/dccp/proto.c
net/ipv4/ip_gre.c
net/ipv4/ipconfig.c
net/ipv4/proc.c
net/ipv4/route.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv4/xfrm4_policy.c
net/ipv6/addrconf.c
net/ipv6/fib6_rules.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/xfrm6_policy.c
net/llc/llc_c_ac.c
net/llc/llc_conn.c
net/mac80211/debugfs_netdev.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/rate.h
net/mac80211/sta_info.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/xt_hashlimit.c
net/packet/af_packet.c
net/phonet/pn_dev.c
net/phonet/pn_netlink.c
net/rfkill/input.c
net/sctp/input.c
net/sctp/sm_sideeffect.c
net/sctp/socket.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtsock.c
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/link.c
net/tipc/net.c
} } -static int net_init(void) -{ - memset(&tipc_net, 0, sizeof(tipc_net)); - tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC); - if (!tipc_net.zones) { - return -ENOMEM; - } - return 0; -} - static void net_stop(void) { u32 z_num; - if (!tipc_net.zones) - return; - - for (z_num = 1; z_num <= tipc_max_zones; z_num++) { + for (z_num = 1; z_num <= tipc_max_zones; z_num++) tipc_zone_delete(tipc_net.zones[z_num]); - } - kfree(tipc_net.zones); - tipc_net.zones = NULL; } static void net_route_named_msg(struct sk_buff *buf) @@ -282,9 +267,7 @@ int tipc_net_start(u32 addr) tipc_named_reinit(); tipc_port_reinit(); - if ((res = tipc_bearer_init()) || - (res = net_init()) || - (res = tipc_cltr_init()) || + if ((res = tipc_cltr_init()) || (res = tipc_bclink_init())) { return res; } diff --git a/net/tipc/ref.c b/net/tipc/ref.c index 414fc34b8bea..8dea66500cf5 100644 --- a/net/tipc/ref.c +++ b/net/tipc/ref.c @@ -153,11 +153,11 @@ void tipc_ref_table_stop(void) u32 tipc_ref_acquire(void *object, spinlock_t **lock) { - struct reference *entry; u32 index; u32 index_mask; u32 next_plus_upper; u32 ref; + struct reference *entry = NULL; if (!object) { err("Attempt to acquire reference to non-existent object\n"); @@ -175,30 +175,36 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock) index = tipc_ref_table.first_free; entry = &(tipc_ref_table.entries[index]); index_mask = tipc_ref_table.index_mask; - /* take lock in case a previous user of entry still holds it */ - spin_lock_bh(&entry->lock); next_plus_upper = entry->ref; tipc_ref_table.first_free = next_plus_upper & index_mask; ref = (next_plus_upper & ~index_mask) + index; - entry->ref = ref; - entry->object = object; - *lock = &entry->lock; } else if (tipc_ref_table.init_point < tipc_ref_table.capacity) { index = tipc_ref_table.init_point++; entry = &(tipc_ref_table.entries[index]); spin_lock_init(&entry->lock); - spin_lock_bh(&entry->lock); ref = tipc_ref_table.start_mask + index; - entry->ref = ref; - entry->object = object; - *lock = &entry->lock; } else { ref = 0; } write_unlock_bh(&ref_table_lock); + /* + * Grab the lock so no one else can modify this entry + * While we assign its ref value & object pointer + */ + if (entry) { + spin_lock_bh(&entry->lock); + entry->ref = ref; + entry->object = object; + *lock = &entry->lock; + /* + * keep it locked, the caller is responsible + * for unlocking this when they're done with it + */ + } + return ref; } diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 1ea64f09cc45..4b235fc1c70f 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -1322,8 +1322,10 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf) if (!sock_owned_by_user(sk)) { res = filter_rcv(sk, buf); } else { - sk_add_backlog(sk, buf); - res = TIPC_OK; + if (sk_add_backlog(sk, buf)) + res = TIPC_ERR_OVERLOAD; + else + res = TIPC_OK; } bh_unlock_sock(sk); diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index ac91f0dfa144..ff123e56114a 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -76,19 +76,6 @@ struct top_srv { static struct top_srv topsrv = { 0 }; /** - * htohl - convert value to endianness used by destination - * @in: value to convert - * @swap: non-zero if endianness must be reversed - * - * Returns converted value - */ - -static u32 htohl(u32 in, int swap) -{ - return swap ? 
swab32(in) : in; -} - -/** * subscr_send_event - send a message containing a tipc_event to the subscriber * * Note: Must not hold subscriber's server port lock, since tipc_send() will @@ -107,11 +94,11 @@ static void subscr_send_event(struct subscription *sub, msg_sect.iov_base = (void *)&sub->evt; msg_sect.iov_len = sizeof(struct tipc_event); - sub->evt.event = htohl(event, sub->swap); - sub->evt.found_lower = htohl(found_lower, sub->swap); - sub->evt.found_upper = htohl(found_upper, sub->swap); - sub->evt.port.ref = htohl(port_ref, sub->swap); - sub->evt.port.node = htohl(node, sub->swap); + sub->evt.event = htonl(event); + sub->evt.found_lower = htonl(found_lower); + sub->evt.found_upper = htonl(found_upper); + sub->evt.port.ref = htonl(port_ref); + sub->evt.port.node = htonl(node); tipc_send(sub->server_ref, 1, &msg_sect); } @@ -287,16 +274,23 @@ static void subscr_cancel(struct tipc_subscr *s, { struct subscription *sub; struct subscription *sub_temp; + __u32 type, lower, upper; int found = 0; /* Find first matching subscription, exit if not found */ + type = ntohl(s->seq.type); + lower = ntohl(s->seq.lower); + upper = ntohl(s->seq.upper); + list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, subscription_list) { - if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) { - found = 1; - break; - } + if ((type == sub->seq.type) && + (lower == sub->seq.lower) && + (upper == sub->seq.upper)) { + found = 1; + break; + } } if (!found) return; @@ -325,16 +319,10 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s, struct subscriber *subscriber) { struct subscription *sub; - int swap; - - /* Determine subscriber's endianness */ - - swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE)); /* Detect & process a subscription cancellation request */ - if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) { - s->filter &= ~htohl(TIPC_SUB_CANCEL, swap); + if (ntohl(s->filter) & TIPC_SUB_CANCEL) { subscr_cancel(s, subscriber); return NULL; } @@ -359,11 +347,11 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s, /* Initialize subscription object */ - sub->seq.type = htohl(s->seq.type, swap); - sub->seq.lower = htohl(s->seq.lower, swap); - sub->seq.upper = htohl(s->seq.upper, swap); - sub->timeout = htohl(s->timeout, swap); - sub->filter = htohl(s->filter, swap); + sub->seq.type = ntohl(s->seq.type); + sub->seq.lower = ntohl(s->seq.lower); + sub->seq.upper = ntohl(s->seq.upper); + sub->timeout = ntohl(s->timeout); + sub->filter = ntohl(s->filter); if ((!(sub->filter & TIPC_SUB_PORTS) == !(sub->filter & TIPC_SUB_SERVICE)) || (sub->seq.lower > sub->seq.upper)) { @@ -376,7 +364,6 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s, INIT_LIST_HEAD(&sub->nameseq_list); list_add(&sub->subscription_list, &subscriber->subscription_list); sub->server_ref = subscriber->port_ref; - sub->swap = swap; memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr)); atomic_inc(&topsrv.subscription_count); if (sub->timeout != TIPC_WAIT_FOREVER) { diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h index 45d89bf4d202..c20f496d95b2 100644 --- a/net/tipc/subscr.h +++ b/net/tipc/subscr.h @@ -53,7 +53,6 @@ typedef void (*tipc_subscr_event) (struct subscription *sub, * @nameseq_list: adjacent subscriptions in name sequence's subscription list * @subscription_list: adjacent subscriptions in subscriber's subscription list * @server_ref: object reference of server port associated with subscription - * @swap: indicates if subscriber uses opposite endianness in its 
messages * @evt: template for events generated by subscription */ @@ -66,7 +65,6 @@ struct subscription { struct list_head nameseq_list; struct list_head subscription_list; u32 server_ref; - int swap; struct tipc_event evt; }; diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index 3e1efe534645..52e304212241 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c @@ -53,7 +53,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) if (!sock_owned_by_user(sk)) { queued = x25_process_rx_frame(sk, skb); } else { - sk_add_backlog(sk, skb); + queued = !sk_add_backlog(sk, skb); } bh_unlock_sock(sk); sock_put(sk); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 34a5ef8316e7..843e066649cb 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1372,7 +1372,8 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, return err; } -static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev) +static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, + struct flowi *fl) { struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(xdst->u.dst.ops->family); @@ -1381,7 +1382,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev) if (!afinfo) return -EINVAL; - err = afinfo->fill_dst(xdst, dev); + err = afinfo->fill_dst(xdst, dev, fl); xfrm_policy_put_afinfo(afinfo); @@ -1486,7 +1487,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) { struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev; - err = xfrm_fill_dst(xdst, dev); + err = xfrm_fill_dst(xdst, dev, fl); if (err) goto free_dst; |
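The SCTP, TIPC and X.25 hunks above all adapt to sk_add_backlog() now reporting failure when the socket backlog is over its limit, so callers must check the result and free the dropped packet instead of assuming the enqueue succeeded (the sctp_init_sock hunk also sets sk->sk_backlog.limit). A minimal userspace sketch of that caller pattern, using hypothetical backlog_enqueue()/receive() stand-ins rather than the real kernel API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct packet { int len; };

/* Hypothetical bounded backlog, loosely analogous to sk->sk_backlog with a limit. */
struct backlog {
	struct packet *q[2];
	size_t used;
	size_t limit;
};

/* Stand-in for sk_add_backlog(): reports failure instead of growing without bound. */
static int backlog_enqueue(struct backlog *b, struct packet *p)
{
	if (b->used >= b->limit)
		return -ENOBUFS;
	b->q[b->used++] = p;
	return 0;
}

/* The caller pattern the SCTP/TIPC/X.25 hunks adopt: check the result, free on failure. */
static void receive(struct backlog *b, struct packet *p)
{
	if (backlog_enqueue(b, p)) {
		free(p);				/* drop the packet, do not leak it */
		puts("packet dropped: backlog over limit");
	} else {
		printf("packet queued (%zu/%zu)\n", b->used, b->limit);
	}
}

int main(void)
{
	struct backlog b = { .used = 0, .limit = 2 };
	for (int i = 0; i < 3; i++)
		receive(&b, calloc(1, sizeof(struct packet)));
	while (b.used)
		free(b.q[--b.used]);
	return 0;
}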
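The phonet pn_dev.c and pn_netlink.c hunks replace open-coded find_first_bit()/find_next_bit() loops with for_each_set_bit(). A rough userspace equivalent of that iteration over a 64-entry address bitmap, with for_each_set_bit64 as a simplified local stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's for_each_set_bit() over one 64-bit word.
 * The trailing "else" makes the statement that follows the macro act as the loop body. */
#define for_each_set_bit64(bit, word) \
	for ((bit) = 0; (bit) < 64; (bit)++) \
		if (!((word) & (1ULL << (bit)))) continue; else

int main(void)
{
	uint64_t addrs = 0;

	/* Mark a few phonet-style addresses as present. */
	addrs |= 1ULL << 4;
	addrs |= 1ULL << 10;
	addrs |= 1ULL << 60;

	unsigned int addr;
	for_each_set_bit64(addr, addrs)
		printf("notify RTM_DELADDR for address %u\n", addr);

	return 0;
}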
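The tipc/subscr.c hunks drop the per-subscriber htohl() conditional swap and its "swap" field, keeping subscription fields in network byte order via htonl()/ntohl(). A small standalone illustration of that round trip using the standard socket byte-order helpers (not TIPC code):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A subscription-style field as the host sees it. */
	uint32_t seq_lower = 0x12345678u;

	/* On the wire: always big-endian, regardless of either host's endianness. */
	uint32_t wire = htonl(seq_lower);

	/* Receiver converts back; no per-peer swap flag is needed. */
	uint32_t decoded = ntohl(wire);

	printf("host 0x%08x -> wire 0x%08x -> host 0x%08x\n",
	       seq_lower, wire, decoded);
	return 0;
}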
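The sunrpc transport.c and xprtsock.c hunks format RPC_DISPLAY_HEX_ADDR as a single eight-digit hex word with ntohl() and "%08x" instead of the removed NIPQUAD byte-by-byte format. A standalone sketch of the same formatting with an example address (inet_pton/ntohl from the standard socket API):

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	struct in_addr addr;
	char buf[16];

	if (inet_pton(AF_INET, "192.168.1.10", &addr) != 1)
		return 1;

	/* ntohl() yields the address bytes in network order, so the string
	 * reads the same on any host: 192.168.1.10 -> "c0a8010a". */
	snprintf(buf, sizeof(buf), "%08x", ntohl(addr.s_addr));
	printf("hex address: %s\n", buf);
	return 0;
}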