author     David S. Miller <davem@davemloft.net>	2015-06-01 22:33:25 -0700
committer  David S. Miller <davem@davemloft.net>	2015-06-01 22:51:30 -0700
commit     dda922c831d1661c11a3ae1051b7160236f6ffb0 (patch)
tree       2fe588dbc4dc90addaabf303713fb2c6af0dc19b /net
parent     db3397b9aa30d334be237170e048dcd96e1e951d (diff)
parent     c46a024ea5eb0165114dbbc8c82c29b7bcf66e71 (diff)
download   linux-dda922c831d1661c11a3ae1051b7160236f6ffb0.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/phy/amd-xgbe-phy.c
	drivers/net/wireless/iwlwifi/Kconfig
	include/net/mac80211.h

iwlwifi/Kconfig and mac80211.h were both trivial overlapping changes.

The drivers/net/phy/amd-xgbe-phy.c file got removed in 'net-next' and the
bug fix that happened on the 'net' side is already integrated into the
rest of the amd-xgbe driver.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--   net/bridge/br_multicast.c          2
-rw-r--r--   net/bridge/netfilter/ebtables.c    4
-rw-r--r--   net/caif/caif_socket.c             8
-rw-r--r--   net/ceph/osd_client.c             33
-rw-r--r--   net/core/ethtool.c                10
-rw-r--r--   net/dsa/dsa.c                      4
-rw-r--r--   net/ipv4/esp4.c                    3
-rw-r--r--   net/ipv4/ip_vti.c                 14
-rw-r--r--   net/ipv4/tcp_cong.c                5
-rw-r--r--   net/ipv4/tcp_minisocks.c           5
-rw-r--r--   net/ipv4/udp.c                     6
-rw-r--r--   net/ipv6/esp6.c                    3
-rw-r--r--   net/ipv6/ip6_vti.c                27
-rw-r--r--   net/ipv6/udp.c                     6
-rw-r--r--   net/mac80211/cfg.c                59
-rw-r--r--   net/mac80211/ieee80211_i.h         9
-rw-r--r--   net/mac80211/iface.c               6
-rw-r--r--   net/mac80211/key.c                82
-rw-r--r--   net/mac80211/key.h                 1
-rw-r--r--   net/mac80211/rx.c                  5
-rw-r--r--   net/mac80211/util.c                3
-rw-r--r--   net/sched/sch_api.c               10
-rw-r--r--   net/unix/af_unix.c                 8
-rw-r--r--   net/xfrm/xfrm_input.c             17
-rw-r--r--   net/xfrm/xfrm_replay.c             2
-rw-r--r--   net/xfrm/xfrm_state.c              2
26 files changed, 215 insertions, 119 deletions
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 7c78b8df1d81..0b38ee98024b 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1644,7 +1644,7 @@ static void br_multicast_query_expired(struct net_bridge *br,
if (query->startup_sent < br->multicast_startup_query_count)
query->startup_sent++;
- RCU_INIT_POINTER(querier, NULL);
+ RCU_INIT_POINTER(querier->port, NULL);
br_multicast_send_query(br, NULL, query);
spin_unlock(&br->multicast_lock);
}
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index d5aba394ff6f..5149d9e71114 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1117,8 +1117,6 @@ static int do_replace(struct net *net, const void __user *user,
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
return -ENOMEM;
- if (tmp.num_counters == 0)
- return -EINVAL;
tmp.name[sizeof(tmp.name) - 1] = 0;
@@ -2161,8 +2159,6 @@ static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
return -ENOMEM;
- if (tmp.num_counters == 0)
- return -EINVAL;
memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 78a04ebb113c..3cc71b9f5517 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -330,6 +330,10 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
+
+ if (sock_flag(sk, SOCK_DEAD))
+ break;
+
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
}
@@ -373,6 +377,10 @@ static int caif_stream_recvmsg(struct socket *sock, struct msghdr *msg,
struct sk_buff *skb;
lock_sock(sk);
+ if (sock_flag(sk, SOCK_DEAD)) {
+ err = -ECONNRESET;
+ goto unlock;
+ }
skb = skb_dequeue(&sk->sk_receive_queue);
caif_check_flow_release(sk);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 41a4abc7e98e..c4ec9239249a 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1306,8 +1306,6 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
if (list_empty(&req->r_osd_item))
req->r_osd = NULL;
}
-
- list_del_init(&req->r_req_lru_item); /* can be on notarget */
ceph_osdc_put_request(req);
}
@@ -2017,20 +2015,29 @@ static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
err = __map_request(osdc, req,
force_resend || force_resend_writes);
dout("__map_request returned %d\n", err);
- if (err == 0)
- continue; /* no change and no osd was specified */
if (err < 0)
continue; /* hrm! */
- if (req->r_osd == NULL) {
- dout("tid %llu maps to no valid osd\n", req->r_tid);
- needmap++; /* request a newer map */
- continue;
- }
+ if (req->r_osd == NULL || err > 0) {
+ if (req->r_osd == NULL) {
+ dout("lingering %p tid %llu maps to no osd\n",
+ req, req->r_tid);
+ /*
+ * A homeless lingering request makes
+ * no sense, as it's job is to keep
+ * a particular OSD connection open.
+ * Request a newer map and kick the
+ * request, knowing that it won't be
+ * resent until we actually get a map
+ * that can tell us where to send it.
+ */
+ needmap++;
+ }
- dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
- req->r_osd ? req->r_osd->o_osd : -1);
- __register_request(osdc, req);
- __unregister_linger_request(osdc, req);
+ dout("kicking lingering %p tid %llu osd%d\n", req,
+ req->r_tid, req->r_osd ? req->r_osd->o_osd : -1);
+ __register_request(osdc, req);
+ __unregister_linger_request(osdc, req);
+ }
}
reset_changed_osds(osdc);
mutex_unlock(&osdc->request_mutex);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 4f6a17ef0710..eb0c3ace7458 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -358,15 +358,7 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
int err;
struct ethtool_cmd cmd;
- if (!dev->ethtool_ops->get_settings)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
- return -EFAULT;
-
- cmd.cmd = ETHTOOL_GSET;
-
- err = dev->ethtool_ops->get_settings(dev, &cmd);
+ err = __ethtool_get_settings(dev, &cmd);
if (err < 0)
return err;
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index e6f6cc3a1bcf..392e29a0227d 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -359,7 +359,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
*/
ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL);
if (ds == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ds->dst = dst;
ds->index = index;
@@ -370,7 +370,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
ret = dsa_switch_setup_one(ds, parent);
if (ret)
- return NULL;
+ return ERR_PTR(ret);
return ds;
}
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 421a80b09b62..30b544f025ac 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -256,7 +256,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
aead_givcrypt_set_assoc(req, asg, assoclen);
aead_givcrypt_set_giv(req, esph->enc_data,
- XFRM_SKB_CB(skb)->seq.output.low);
+ XFRM_SKB_CB(skb)->seq.output.low +
+ ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
ESP_SKB_CB(skb)->tmp = tmp;
err = crypto_aead_givencrypt(req);
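The esp_output() hunk above seeds the IV with the full 64-bit extended sequence number rather than only the low 32 bits, adding the high half shifted up by 32. A minimal, self-contained sketch of that arithmetic (plain C, illustrative only; esn_compose() is a made-up helper, not a kernel function):

#include <stdint.h>
#include <stdio.h>

/* Combine the 32-bit halves of an extended sequence number, mirroring
 * the low + (hi << 32) expression passed to aead_givcrypt_set_giv(). */
static uint64_t esn_compose(uint32_t low, uint32_t hi)
{
	return (uint64_t)low + ((uint64_t)hi << 32);
}

int main(void)
{
	/* low = 1, hi = 2  ->  0x0000000200000001 */
	printf("0x%016llx\n", (unsigned long long)esn_compose(1, 2));
	return 0;
}

The matching esp6.c hunk below makes the same change, and the xfrm_replay.c hunks further down zero seq.output.hi on the non-ESN paths so the added high half stays zero there.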
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 9f7269f3c54a..0c152087ca15 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -65,7 +65,6 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
goto drop;
XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
- skb->mark = be32_to_cpu(tunnel->parms.i_key);
return xfrm_input(skb, nexthdr, spi, encap_type);
}
@@ -91,6 +90,8 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
struct pcpu_sw_netstats *tstats;
struct xfrm_state *x;
struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
+ u32 orig_mark = skb->mark;
+ int ret;
if (!tunnel)
return 1;
@@ -107,7 +108,11 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
x = xfrm_input_state(skb);
family = x->inner_mode->afinfo->family;
- if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
+ skb->mark = be32_to_cpu(tunnel->parms.i_key);
+ ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
+ skb->mark = orig_mark;
+
+ if (!ret)
return -EPERM;
skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev)));
@@ -216,8 +221,6 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
memset(&fl, 0, sizeof(fl));
- skb->mark = be32_to_cpu(tunnel->parms.o_key);
-
switch (skb->protocol) {
case htons(ETH_P_IP):
xfrm_decode_session(skb, &fl, AF_INET);
@@ -233,6 +236,9 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+ /* override mark with tunnel output key */
+ fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);
+
return vti_xmit(skb, dev, &fl);
}
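The ip_vti.c changes above stop writing the tunnel keys into skb->mark: on receive the i_key is applied only for the duration of the policy check and the original mark is restored afterwards, while on transmit the o_key now goes into fl.flowi_mark instead of skb->mark. A toy, self-contained sketch of that save/override/restore pattern (all names here are invented for illustration; this is not the kernel code):

#include <stdint.h>
#include <stdio.h>

struct toy_skb {
	uint32_t mark;
};

/* Stand-in for the policy lookup: succeeds only when the mark matches
 * the key the policy was configured with. */
static int toy_policy_check(const struct toy_skb *skb, uint32_t policy_mark)
{
	return skb->mark == policy_mark;
}

static int check_with_tunnel_key(struct toy_skb *skb, uint32_t i_key,
				 uint32_t policy_mark)
{
	uint32_t orig_mark = skb->mark;	/* remember the caller's mark */
	int ok;

	skb->mark = i_key;		/* the lookup sees the tunnel key */
	ok = toy_policy_check(skb, policy_mark);
	skb->mark = orig_mark;		/* restore before returning */

	return ok;
}

int main(void)
{
	struct toy_skb skb = { .mark = 5 };

	printf("check=%d mark=%u\n",
	       check_with_tunnel_key(&skb, 0x17, 0x17), skb.mark);
	return 0;
}

Restoring orig_mark means anything later in the receive path still sees the mark the packet arrived with, while the policy lookup itself is keyed by the tunnel's i_key; the corresponding ip6_vti.c hunks below apply the same pattern.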
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 7a5ae50c80c8..84be008c945c 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -187,6 +187,7 @@ static void tcp_reinit_congestion_control(struct sock *sk,
tcp_cleanup_congestion_control(sk);
icsk->icsk_ca_ops = ca;
+ icsk->icsk_ca_setsockopt = 1;
if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
icsk->icsk_ca_ops->init(sk);
@@ -335,8 +336,10 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
rcu_read_lock();
ca = __tcp_ca_find_autoload(name);
/* No change asking for existing value */
- if (ca == icsk->icsk_ca_ops)
+ if (ca == icsk->icsk_ca_ops) {
+ icsk->icsk_ca_setsockopt = 1;
goto out;
+ }
if (!ca)
err = -ENOENT;
else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index df7fe3c31162..4bc00cb79e60 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -420,7 +420,10 @@ void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
rcu_read_unlock();
}
- if (!ca_got_dst && !try_module_get(icsk->icsk_ca_ops->owner))
+ /* If no valid choice made yet, assign current system default ca. */
+ if (!ca_got_dst &&
+ (!icsk->icsk_ca_setsockopt ||
+ !try_module_get(icsk->icsk_ca_ops->owner)))
tcp_assign_congestion_control(sk);
tcp_set_ca_state(sk, TCP_CA_Open);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d10b7e0112eb..1c92ea67baef 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1345,10 +1345,8 @@ csum_copy_err:
}
unlock_sock_fast(sk, slow);
- if (noblock)
- return -EAGAIN;
-
- /* starting over for a new packet */
+ /* starting over for a new packet, but check if we need to yield */
+ cond_resched();
msg->msg_flags &= ~MSG_TRUNC;
goto try_again;
}
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 31f1b5d5e2ef..7c07ce36aae2 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -248,7 +248,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
aead_givcrypt_set_assoc(req, asg, assoclen);
aead_givcrypt_set_giv(req, esph->enc_data,
- XFRM_SKB_CB(skb)->seq.output.low);
+ XFRM_SKB_CB(skb)->seq.output.low +
+ ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
ESP_SKB_CB(skb)->tmp = tmp;
err = crypto_aead_givencrypt(req);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index ed9d681207fa..0224c032dca5 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -322,7 +322,6 @@ static int vti6_rcv(struct sk_buff *skb)
}
XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
- skb->mark = be32_to_cpu(t->parms.i_key);
rcu_read_unlock();
@@ -342,6 +341,8 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
struct pcpu_sw_netstats *tstats;
struct xfrm_state *x;
struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6;
+ u32 orig_mark = skb->mark;
+ int ret;
if (!t)
return 1;
@@ -358,7 +359,11 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
x = xfrm_input_state(skb);
family = x->inner_mode->afinfo->family;
- if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
+ skb->mark = be32_to_cpu(t->parms.i_key);
+ ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
+ skb->mark = orig_mark;
+
+ if (!ret)
return -EPERM;
skb_scrub_packet(skb, !net_eq(t->net, dev_net(skb->dev)));
@@ -430,6 +435,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
struct net_device *tdev;
struct xfrm_state *x;
int err = -1;
+ int mtu;
if (!dst)
goto tx_err_link_failure;
@@ -463,6 +469,19 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
skb_dst_set(skb, dst);
skb->dev = skb_dst(skb)->dev;
+ mtu = dst_mtu(dst);
+ if (!skb->ignore_df && skb->len > mtu) {
+ skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
+
+ if (skb->protocol == htons(ETH_P_IPV6))
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ else
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(mtu));
+
+ return -EMSGSIZE;
+ }
+
err = dst_output(skb);
if (net_xmit_eval(err) == 0) {
struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
@@ -495,7 +514,6 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
int ret;
memset(&fl, 0, sizeof(fl));
- skb->mark = be32_to_cpu(t->parms.o_key);
switch (skb->protocol) {
case htons(ETH_P_IPV6):
@@ -516,6 +534,9 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_err;
}
+ /* override mark with tunnel output key */
+ fl.flowi_mark = be32_to_cpu(t->parms.o_key);
+
ret = vti6_xmit(skb, dev, &fl);
if (ret < 0)
goto tx_err;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index c2ec41617a35..e51fc3eee6db 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -525,10 +525,8 @@ csum_copy_err:
}
unlock_sock_fast(sk, slow);
- if (noblock)
- return -EAGAIN;
-
- /* starting over for a new packet */
+ /* starting over for a new packet, but check if we need to yield */
+ cond_resched();
msg->msg_flags &= ~MSG_TRUNC;
goto try_again;
}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index bb9f83640b46..690b9f640b41 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2539,51 +2539,22 @@ static bool ieee80211_coalesce_started_roc(struct ieee80211_local *local,
struct ieee80211_roc_work *new_roc,
struct ieee80211_roc_work *cur_roc)
{
- unsigned long j = jiffies;
- unsigned long cur_roc_end = cur_roc->hw_start_time +
- msecs_to_jiffies(cur_roc->duration);
- struct ieee80211_roc_work *next_roc;
- int new_dur;
+ unsigned long now = jiffies;
+ unsigned long remaining = cur_roc->hw_start_time +
+ msecs_to_jiffies(cur_roc->duration) -
+ now;
if (WARN_ON(!cur_roc->started || !cur_roc->hw_begun))
return false;
- if (time_after(j + IEEE80211_ROC_MIN_LEFT, cur_roc_end))
+ /* if it doesn't fit entirely, schedule a new one */
+ if (new_roc->duration > jiffies_to_msecs(remaining))
return false;
ieee80211_handle_roc_started(new_roc);
- new_dur = new_roc->duration - jiffies_to_msecs(cur_roc_end - j);
-
- /* cur_roc is long enough - add new_roc to the dependents list. */
- if (new_dur <= 0) {
- list_add_tail(&new_roc->list, &cur_roc->dependents);
- return true;
- }
-
- new_roc->duration = new_dur;
-
- /*
- * if cur_roc was already coalesced before, we might
- * want to extend the next roc instead of adding
- * a new one.
- */
- next_roc = list_entry(cur_roc->list.next,
- struct ieee80211_roc_work, list);
- if (&next_roc->list != &local->roc_list &&
- next_roc->chan == new_roc->chan &&
- next_roc->sdata == new_roc->sdata &&
- !WARN_ON(next_roc->started)) {
- list_add_tail(&new_roc->list, &next_roc->dependents);
- next_roc->duration = max(next_roc->duration,
- new_roc->duration);
- next_roc->type = max(next_roc->type, new_roc->type);
- return true;
- }
-
- /* add right after cur_roc */
- list_add(&new_roc->list, &cur_roc->list);
-
+ /* add to dependents so we send the expired event properly */
+ list_add_tail(&new_roc->list, &cur_roc->dependents);
return true;
}
@@ -2696,17 +2667,9 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
* In the offloaded ROC case, if it hasn't begun, add
* this new one to the dependent list to be handled
* when the master one begins. If it has begun,
- * check that there's still a minimum time left and
- * if so, start this one, transmitting the frame, but
- * add it to the list directly after this one with
- * a reduced time so we'll ask the driver to execute
- * it right after finishing the previous one, in the
- * hope that it'll also be executed right afterwards,
- * effectively extending the old one.
- * If there's no minimum time left, just add it to the
- * normal list.
- * TODO: the ROC type is ignored here, assuming that it
- * is better to immediately use the current ROC.
+ * check if it fits entirely within the existing one,
+ * in which case it will just be dependent as well.
+ * Otherwise, schedule it by itself.
*/
if (!tmp->hw_begun) {
list_add_tail(&roc->list, &tmp->dependents);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 2c4fe45ea38a..b12f61507f9f 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -202,6 +202,8 @@ enum ieee80211_packet_rx_flags {
* @IEEE80211_RX_CMNTR: received on cooked monitor already
* @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
* to cfg80211_report_obss_beacon().
+ * @IEEE80211_RX_REORDER_TIMER: this frame is released by the
+ * reorder buffer timeout timer, not the normal RX path
*
* These flags are used across handling multiple interfaces
* for a single frame.
@@ -209,6 +211,7 @@ enum ieee80211_packet_rx_flags {
enum ieee80211_rx_flags {
IEEE80211_RX_CMNTR = BIT(0),
IEEE80211_RX_BEACON_REPORTED = BIT(1),
+ IEEE80211_RX_REORDER_TIMER = BIT(2),
};
struct ieee80211_rx_data {
@@ -322,12 +325,6 @@ struct mesh_preq_queue {
u8 flags;
};
-#if HZ/100 == 0
-#define IEEE80211_ROC_MIN_LEFT 1
-#else
-#define IEEE80211_ROC_MIN_LEFT (HZ/100)
-#endif
-
struct ieee80211_roc_work {
struct list_head list;
struct list_head dependents;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 4ee8fea263ed..b2e85ffca7ed 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -522,6 +522,12 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
sizeof(sdata->vif.hw_queue));
sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
+
+ mutex_lock(&local->key_mtx);
+ sdata->crypto_tx_tailroom_needed_cnt +=
+ master->crypto_tx_tailroom_needed_cnt;
+ mutex_unlock(&local->key_mtx);
+
break;
}
case NL80211_IFTYPE_AP:
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 2e677376c958..577a11a13cdf 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -58,6 +58,22 @@ static void assert_key_lock(struct ieee80211_local *local)
lockdep_assert_held(&local->key_mtx);
}
+static void
+update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta)
+{
+ struct ieee80211_sub_if_data *vlan;
+
+ if (sdata->vif.type != NL80211_IFTYPE_AP)
+ return;
+
+ mutex_lock(&sdata->local->mtx);
+
+ list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+ vlan->crypto_tx_tailroom_needed_cnt += delta;
+
+ mutex_unlock(&sdata->local->mtx);
+}
+
static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
{
/*
@@ -79,6 +95,8 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
* http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
*/
+ update_vlan_tailroom_need_count(sdata, 1);
+
if (!sdata->crypto_tx_tailroom_needed_cnt++) {
/*
* Flush all XMIT packets currently using HW encryption or no
@@ -88,6 +106,15 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
}
}
+static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
+ int delta)
+{
+ WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta);
+
+ update_vlan_tailroom_need_count(sdata, -delta);
+ sdata->crypto_tx_tailroom_needed_cnt -= delta;
+}
+
static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
{
struct ieee80211_sub_if_data *sdata;
@@ -144,7 +171,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
(key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
- sdata->crypto_tx_tailroom_needed_cnt--;
+ decrease_tailroom_need_count(sdata, 1);
WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV));
@@ -545,7 +572,7 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key,
schedule_delayed_work(&sdata->dec_tailroom_needed_wk,
HZ/2);
} else {
- sdata->crypto_tx_tailroom_needed_cnt--;
+ decrease_tailroom_need_count(sdata, 1);
}
}
@@ -635,6 +662,7 @@ void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom)
void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_key *key;
+ struct ieee80211_sub_if_data *vlan;
ASSERT_RTNL();
@@ -643,7 +671,14 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
mutex_lock(&sdata->local->key_mtx);
- sdata->crypto_tx_tailroom_needed_cnt = 0;
+ WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
+ sdata->crypto_tx_tailroom_pending_dec);
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP) {
+ list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+ WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt ||
+ vlan->crypto_tx_tailroom_pending_dec);
+ }
list_for_each_entry(key, &sdata->key_list, list) {
increment_tailroom_need_count(sdata);
@@ -653,6 +688,22 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
mutex_unlock(&sdata->local->key_mtx);
}
+void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_sub_if_data *vlan;
+
+ mutex_lock(&sdata->local->key_mtx);
+
+ sdata->crypto_tx_tailroom_needed_cnt = 0;
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP) {
+ list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+ vlan->crypto_tx_tailroom_needed_cnt = 0;
+ }
+
+ mutex_unlock(&sdata->local->key_mtx);
+}
+
void ieee80211_iter_keys(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
void (*iter)(struct ieee80211_hw *hw,
@@ -692,8 +743,8 @@ static void ieee80211_free_keys_iface(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_key *key, *tmp;
- sdata->crypto_tx_tailroom_needed_cnt -=
- sdata->crypto_tx_tailroom_pending_dec;
+ decrease_tailroom_need_count(sdata,
+ sdata->crypto_tx_tailroom_pending_dec);
sdata->crypto_tx_tailroom_pending_dec = 0;
ieee80211_debugfs_key_remove_mgmt_default(sdata);
@@ -713,6 +764,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_sub_if_data *vlan;
+ struct ieee80211_sub_if_data *master;
struct ieee80211_key *key, *tmp;
LIST_HEAD(keys);
@@ -732,8 +784,20 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
list_for_each_entry_safe(key, tmp, &keys, list)
__ieee80211_key_destroy(key, false);
- WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
- sdata->crypto_tx_tailroom_pending_dec);
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+ if (sdata->bss) {
+ master = container_of(sdata->bss,
+ struct ieee80211_sub_if_data,
+ u.ap);
+
+ WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt !=
+ master->crypto_tx_tailroom_needed_cnt);
+ }
+ } else {
+ WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
+ sdata->crypto_tx_tailroom_pending_dec);
+ }
+
if (sdata->vif.type == NL80211_IFTYPE_AP) {
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt ||
@@ -797,8 +861,8 @@ void ieee80211_delayed_tailroom_dec(struct work_struct *wk)
*/
mutex_lock(&sdata->local->key_mtx);
- sdata->crypto_tx_tailroom_needed_cnt -=
- sdata->crypto_tx_tailroom_pending_dec;
+ decrease_tailroom_need_count(sdata,
+ sdata->crypto_tx_tailroom_pending_dec);
sdata->crypto_tx_tailroom_pending_dec = 0;
mutex_unlock(&sdata->local->key_mtx);
}
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index df430a618764..2119526db2f4 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -160,6 +160,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
void ieee80211_free_sta_keys(struct ieee80211_local *local,
struct sta_info *sta);
void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata);
+void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata);
#define key_mtx_dereference(local, ref) \
rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx)))
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index aa35977a9c4d..7d85f7516324 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2108,7 +2108,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
/* deliver to local stack */
skb->protocol = eth_type_trans(skb, dev);
memset(skb->cb, 0, sizeof(skb->cb));
- if (rx->local->napi)
+ if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) &&
+ rx->local->napi)
napi_gro_receive(rx->local->napi, skb);
else
netif_receive_skb(skb);
@@ -3215,7 +3216,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
/* This is OK -- must be QoS data frame */
.security_idx = tid,
.seqno_idx = tid,
- .flags = 0,
+ .flags = IEEE80211_RX_REORDER_TIMER,
};
struct tid_ampdu_rx *tid_agg_rx;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 79412f16b61d..b864ebc6ab8f 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2023,6 +2023,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/* add back keys */
list_for_each_entry(sdata, &local->interfaces, list)
+ ieee80211_reset_crypto_tx_tailroom(sdata);
+
+ list_for_each_entry(sdata, &local->interfaces, list)
if (ieee80211_sdata_running(sdata))
ieee80211_enable_keys(sdata);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 0b74dc0ede9c..c5b9db84d069 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -815,10 +815,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
if (dev->flags & IFF_UP)
dev_deactivate(dev);
- if (new && new->ops->attach) {
- new->ops->attach(new);
- num_q = 0;
- }
+ if (new && new->ops->attach)
+ goto skip;
for (i = 0; i < num_q; i++) {
struct netdev_queue *dev_queue = dev_ingress_queue(dev);
@@ -834,12 +832,16 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
qdisc_destroy(old);
}
+skip:
if (!ingress) {
notify_and_destroy(net, skb, n, classid,
dev->qdisc, new);
if (new && !new->ops->attach)
atomic_inc(&new->refcnt);
dev->qdisc = new ? : &noop_qdisc;
+
+ if (new && new->ops->attach)
+ new->ops->attach(new);
} else {
notify_and_destroy(net, skb, n, classid, old, new);
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index b8c44076c776..f25e1675b865 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1984,6 +1984,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
unix_state_unlock(sk);
timeo = freezable_schedule_timeout(timeo);
unix_state_lock(sk);
+
+ if (sock_flag(sk, SOCK_DEAD))
+ break;
+
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
}
@@ -2055,6 +2059,10 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
struct sk_buff *skb, *last;
unix_state_lock(sk);
+ if (sock_flag(sk, SOCK_DEAD)) {
+ err = -ECONNRESET;
+ goto unlock;
+ }
last = skb = skb_peek(&sk->sk_receive_queue);
last_len = last ? last->len : 0;
again:
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 1858a45f008b..60ce7014e1b0 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -13,6 +13,8 @@
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
+#include <net/ip_tunnels.h>
+#include <net/ip6_tunnel.h>
static struct kmem_cache *secpath_cachep __read_mostly;
@@ -186,6 +188,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
struct xfrm_state *x = NULL;
xfrm_address_t *daddr;
struct xfrm_mode *inner_mode;
+ u32 mark = skb->mark;
unsigned int family;
int decaps = 0;
int async = 0;
@@ -203,6 +206,18 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
XFRM_SPI_SKB_CB(skb)->daddroff);
family = XFRM_SPI_SKB_CB(skb)->family;
+ /* if tunnel is present override skb->mark value with tunnel i_key */
+ if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) {
+ switch (family) {
+ case AF_INET:
+ mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
+ break;
+ case AF_INET6:
+ mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
+ break;
+ }
+ }
+
/* Allocate new secpath or COW existing one. */
if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
struct sec_path *sp;
@@ -229,7 +244,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
goto drop;
}
- x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family);
+ x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
if (x == NULL) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
xfrm_audit_state_notfound(skb, family, spi, seq);
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index dab57daae408..4fd725a0c500 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -99,6 +99,7 @@ static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq;
+ XFRM_SKB_CB(skb)->seq.output.hi = 0;
if (unlikely(x->replay.oseq == 0)) {
x->replay.oseq--;
xfrm_audit_state_replay_overflow(x, skb);
@@ -177,6 +178,7 @@ static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq;
+ XFRM_SKB_CB(skb)->seq.output.hi = 0;
if (unlikely(replay_esn->oseq == 0)) {
replay_esn->oseq--;
xfrm_audit_state_replay_overflow(x, skb);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index e47e4980b35c..9895a8c56d8c 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -927,8 +927,8 @@ struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
x->id.spi != spi)
continue;
- spin_unlock_bh(&net->xfrm.xfrm_state_lock);
xfrm_state_hold(x);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return x;
}
spin_unlock_bh(&net->xfrm.xfrm_state_lock);