author    Linus Torvalds <torvalds@linux-foundation.org>  2008-08-23 12:14:42 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-08-23 12:14:42 -0700
commit    6450f65168bcf3c03b5fb44c2fe96682c0d3086b (patch)
tree      c6d2c3e0885ef3f73893c7e6d22ea9454d073ee6
parent    7a8fc9b248e77a4eab0613acf30a6811799786b3 (diff)
parent    f410a1fba7afa79d2992620e874a343fdba28332 (diff)
download  linux-6450f65168bcf3c03b5fb44c2fe96682c0d3086b.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  ipv6: protocol for address routes
  icmp: icmp_sk() should not use smp_processor_id() in preemptible code
  pkt_sched: Fix qdisc list locking
  pkt_sched: Fix qdisc_watchdog() vs. dev_deactivate() race
  sctp: fix potential panics in the SCTP-AUTH API.
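
One of the fixes below removes a use of smp_processor_id() from preemptible context. The general rule: a raw smp_processor_id() is only valid while the task cannot migrate to another CPU; otherwise the returned CPU number may already be stale when it is used. A minimal, hedged sketch of the two accepted patterns follows (generic illustration, not taken from this patch; use_per_cpu_data() is a hypothetical consumer):

#include <linux/smp.h>
#include <linux/interrupt.h>

static void use_per_cpu_data(int cpu) { /* hypothetical consumer */ }

static void per_cpu_access_sketch(void)
{
	int cpu;

	/* Pattern 1: pin the task explicitly around the per-CPU access. */
	cpu = get_cpu();                       /* disables preemption     */
	use_per_cpu_data(cpu);
	put_cpu();                             /* re-enables preemption   */

	/* Pattern 2: a bottom-half-disabled section also forbids
	 * migration; this is what the icmp_sk() fix below relies on. */
	local_bh_disable();
	use_per_cpu_data(smp_processor_id());
	local_bh_enable();
}
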
-rw-r--r--  include/net/pkt_sched.h     1
-rw-r--r--  include/net/sch_generic.h   5
-rw-r--r--  net/ipv4/icmp.c            22
-rw-r--r--  net/ipv6/addrconf.c         1
-rw-r--r--  net/ipv6/icmp.c            23
-rw-r--r--  net/sched/sch_api.c        48
-rw-r--r--  net/sched/sch_cbq.c         4
-rw-r--r--  net/sched/sch_generic.c     5
-rw-r--r--  net/sctp/endpointola.c      4
-rw-r--r--  net/sctp/socket.c          85
10 files changed, 149 insertions(+), 49 deletions(-)
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 853fe83d9f37..b786a5b09253 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -78,6 +78,7 @@ extern struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
extern int register_qdisc(struct Qdisc_ops *qops);
extern int unregister_qdisc(struct Qdisc_ops *qops);
+extern void qdisc_list_del(struct Qdisc *q);
extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 84d25f2e6188..b1d2cfea89c5 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -193,6 +193,11 @@ static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
return qdisc->dev_queue->qdisc;
}
+static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
+{
+ return qdisc->dev_queue->qdisc_sleeping;
+}
+
/* The qdisc root lock is a mechanism by which to top level
* of a qdisc tree can be locked from any qdisc node in the
* forest. This allows changing the configuration of some
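
The new qdisc_root_sleeping() helper returns the qdisc configured on the device queue (qdisc_sleeping) rather than the one currently attached for transmit, which is what a child qdisc must consult while the device is being deactivated. A hedged sketch of the intended use, mirroring the __QDISC_STATE_DEACTIVATED checks added later in this merge (my_qdisc_root_is_down is a hypothetical name):

#include <net/sch_generic.h>

static bool my_qdisc_root_is_down(struct Qdisc *q)
{
	/* The sleeping root keeps its state bits across dev_deactivate(),
	 * so this check stays valid even while the active qdisc is being
	 * swapped out underneath us. */
	return test_bit(__QDISC_STATE_DEACTIVATED,
			&qdisc_root_sleeping(q)->state);
}
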
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 860558633b2c..55c355e63234 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -204,18 +204,22 @@ static struct sock *icmp_sk(struct net *net)
return net->ipv4.icmp_sk[smp_processor_id()];
}
-static inline int icmp_xmit_lock(struct sock *sk)
+static inline struct sock *icmp_xmit_lock(struct net *net)
{
+ struct sock *sk;
+
local_bh_disable();
+ sk = icmp_sk(net);
+
if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
/* This can happen if the output path signals a
* dst_link_failure() for an outgoing ICMP packet.
*/
local_bh_enable();
- return 1;
+ return NULL;
}
- return 0;
+ return sk;
}
static inline void icmp_xmit_unlock(struct sock *sk)
@@ -354,15 +358,17 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
struct ipcm_cookie ipc;
struct rtable *rt = skb->rtable;
struct net *net = dev_net(rt->u.dst.dev);
- struct sock *sk = icmp_sk(net);
- struct inet_sock *inet = inet_sk(sk);
+ struct sock *sk;
+ struct inet_sock *inet;
__be32 daddr;
if (ip_options_echo(&icmp_param->replyopts, skb))
return;
- if (icmp_xmit_lock(sk))
+ sk = icmp_xmit_lock(net);
+ if (sk == NULL)
return;
+ inet = inet_sk(sk);
icmp_param->data.icmph.checksum = 0;
@@ -419,7 +425,6 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
if (!rt)
goto out;
net = dev_net(rt->u.dst.dev);
- sk = icmp_sk(net);
/*
* Find the original header. It is expected to be valid, of course.
@@ -483,7 +488,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
}
}
- if (icmp_xmit_lock(sk))
+ sk = icmp_xmit_lock(net);
+ if (sk == NULL)
return;
/*
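
With the signature change above, callers no longer look up the per-CPU ICMP socket themselves; icmp_xmit_lock() performs the lookup only after local_bh_disable() has pinned the task to a CPU, and returns the locked socket or NULL. A hedged sketch of the resulting caller pattern, simplified from the icmp_reply()/icmp_send() hunks (icmp_xmit_unlock(), unchanged by this patch, releases the socket lock and re-enables bottom halves):

static void icmp_tx_sketch(struct net *net)
{
	struct sock *sk;

	sk = icmp_xmit_lock(net);   /* BHs off, this CPU's socket locked */
	if (sk == NULL)
		return;             /* output path already owns the lock */

	/* ... build and transmit the ICMP message on sk ... */

	icmp_xmit_unlock(sk);       /* unlock and re-enable bottom halves */
}
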
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e2d3b7580b76..7b6a584b62dd 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1688,6 +1688,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
.fc_dst_len = plen,
.fc_flags = RTF_UP | flags,
.fc_nlinfo.nl_net = dev_net(dev),
+ .fc_protocol = RTPROT_KERNEL,
};
ipv6_addr_copy(&cfg.fc_dst, pfx);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index abedf95fdf2d..b3157a0cc15d 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -91,19 +91,22 @@ static struct inet6_protocol icmpv6_protocol = {
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
-static __inline__ int icmpv6_xmit_lock(struct sock *sk)
+static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
{
+ struct sock *sk;
+
local_bh_disable();
+ sk = icmpv6_sk(net);
if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
/* This can happen if the output path (f.e. SIT or
* ip6ip6 tunnel) signals dst_link_failure() for an
* outgoing ICMP6 packet.
*/
local_bh_enable();
- return 1;
+ return NULL;
}
- return 0;
+ return sk;
}
static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
@@ -392,11 +395,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
fl.fl_icmp_code = code;
security_skb_classify_flow(skb, &fl);
- sk = icmpv6_sk(net);
- np = inet6_sk(sk);
-
- if (icmpv6_xmit_lock(sk))
+ sk = icmpv6_xmit_lock(net);
+ if (sk == NULL)
return;
+ np = inet6_sk(sk);
if (!icmpv6_xrlim_allow(sk, type, &fl))
goto out;
@@ -539,11 +541,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
security_skb_classify_flow(skb, &fl);
- sk = icmpv6_sk(net);
- np = inet6_sk(sk);
-
- if (icmpv6_xmit_lock(sk))
+ sk = icmpv6_xmit_lock(net);
+ if (sk == NULL)
return;
+ np = inet6_sk(sk);
if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
fl.oif = np->mcast_oif;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index ef0efeca6352..e7fb9e0d21b4 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -199,19 +199,53 @@ struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
return NULL;
}
+/*
+ * This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen()
+ * without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue()
+ */
+static DEFINE_SPINLOCK(qdisc_list_lock);
+
+static void qdisc_list_add(struct Qdisc *q)
+{
+ if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+ spin_lock_bh(&qdisc_list_lock);
+ list_add_tail(&q->list, &qdisc_root_sleeping(q)->list);
+ spin_unlock_bh(&qdisc_list_lock);
+ }
+}
+
+void qdisc_list_del(struct Qdisc *q)
+{
+ if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+ spin_lock_bh(&qdisc_list_lock);
+ list_del(&q->list);
+ spin_unlock_bh(&qdisc_list_lock);
+ }
+}
+EXPORT_SYMBOL(qdisc_list_del);
+
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
unsigned int i;
+ struct Qdisc *q;
+
+ spin_lock_bh(&qdisc_list_lock);
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- struct Qdisc *q, *txq_root = txq->qdisc_sleeping;
+ struct Qdisc *txq_root = txq->qdisc_sleeping;
q = qdisc_match_from_root(txq_root, handle);
if (q)
- return q;
+ goto unlock;
}
- return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+ q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+unlock:
+ spin_unlock_bh(&qdisc_list_lock);
+
+ return q;
}
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
@@ -444,6 +478,10 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
{
ktime_t time;
+ if (test_bit(__QDISC_STATE_DEACTIVATED,
+ &qdisc_root_sleeping(wd->qdisc)->state))
+ return;
+
wd->qdisc->flags |= TCQ_F_THROTTLED;
time = ktime_set(0, 0);
time = ktime_add_ns(time, PSCHED_US2NS(expires));
@@ -806,8 +844,8 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
goto err_out3;
}
}
- if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS))
- list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list);
+
+ qdisc_list_add(sch);
return sch;
}
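
The qdisc_list_lock comment above states the underlying constraint: some qdiscs call qdisc_tree_decrease_qlen() from their dequeue path without holding the RTNL, so qdisc list membership must be protected by a lock that is safe against softirq context. The _bh spinlock idiom used here is sketched below with hypothetical names (example_lock/example_list), purely as an illustration of the pattern as it might appear in a small module:

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);   /* hypothetical */
static LIST_HEAD(example_list);         /* hypothetical */

static void example_add(struct list_head *node)
{
	/* The _bh variants also block softirqs on this CPU, so a dequeue
	 * path running from the TX softirq cannot deadlock against us. */
	spin_lock_bh(&example_lock);
	list_add_tail(node, &example_list);
	spin_unlock_bh(&example_lock);
}

static void example_del(struct list_head *node)
{
	spin_lock_bh(&example_lock);
	list_del(node);
	spin_unlock_bh(&example_lock);
}
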
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 47ef492c4ff4..8fa90d68ec6d 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -521,6 +521,10 @@ static void cbq_ovl_delay(struct cbq_class *cl)
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
psched_tdiff_t delay = cl->undertime - q->now;
+ if (test_bit(__QDISC_STATE_DEACTIVATED,
+ &qdisc_root_sleeping(cl->qdisc)->state))
+ return;
+
if (!cl->delayed) {
psched_time_t sched = q->now;
ktime_t expires;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index c3ed4d44fc14..5f0ade7806a7 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -526,10 +526,9 @@ void qdisc_destroy(struct Qdisc *qdisc)
!atomic_dec_and_test(&qdisc->refcnt))
return;
- if (qdisc->parent)
- list_del(&qdisc->list);
-
#ifdef CONFIG_NET_SCHED
+ qdisc_list_del(qdisc);
+
qdisc_put_stab(qdisc->stab);
#endif
gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index e39a0cdef184..4c8d9f45ce09 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -103,6 +103,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
/* Initialize the CHUNKS parameter */
auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS;
+ auth_chunks->param_hdr.length = htons(sizeof(sctp_paramhdr_t));
/* If the Add-IP functionality is enabled, we must
* authenticate, ASCONF and ASCONF-ACK chunks
@@ -110,8 +111,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
if (sctp_addip_enable) {
auth_chunks->chunks[0] = SCTP_CID_ASCONF;
auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
- auth_chunks->param_hdr.length =
- htons(sizeof(sctp_paramhdr_t) + 2);
+ auth_chunks->param_hdr.length += htons(2);
}
}
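
The endpointola.c change makes the CHUNKS parameter length valid even when Add-IP is disabled: the length now starts at the bare parameter-header size and grows by two bytes only when the two ASCONF chunk identifiers are appended, whereas previously it was written only inside the sctp_addip_enable branch and otherwise stayed zero, letting the getsockopt paths below compute a bogus chunk count. A small worked example of the arithmetic, as a hedged sketch assuming sizeof(sctp_paramhdr_t) == 4:

#include <linux/types.h>
#include <linux/sctp.h>

static void chunks_param_len_example(void)
{
	__be16 len;

	len = htons(sizeof(sctp_paramhdr_t)); /* htons(4): header only          */
	len += htons(2);                      /* htons(6): header + 2 chunk ids */
	/* Adding the big-endian values directly is safe here because the
	 * sum of the meaningful bytes (4 + 2) never carries into the next
	 * byte, so the result equals htons(4 + 2). */
}
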
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index dbb79adf8f3c..bb5c9ef13046 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3055,6 +3055,9 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
{
struct sctp_authchunk val;
+ if (!sctp_auth_enable)
+ return -EACCES;
+
if (optlen != sizeof(struct sctp_authchunk))
return -EINVAL;
if (copy_from_user(&val, optval, optlen))
@@ -3085,6 +3088,9 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
struct sctp_hmacalgo *hmacs;
int err;
+ if (!sctp_auth_enable)
+ return -EACCES;
+
if (optlen < sizeof(struct sctp_hmacalgo))
return -EINVAL;
@@ -3123,6 +3129,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
struct sctp_association *asoc;
int ret;
+ if (!sctp_auth_enable)
+ return -EACCES;
+
if (optlen <= sizeof(struct sctp_authkey))
return -EINVAL;
@@ -3160,6 +3169,9 @@ static int sctp_setsockopt_active_key(struct sock *sk,
struct sctp_authkeyid val;
struct sctp_association *asoc;
+ if (!sctp_auth_enable)
+ return -EACCES;
+
if (optlen != sizeof(struct sctp_authkeyid))
return -EINVAL;
if (copy_from_user(&val, optval, optlen))
@@ -3185,6 +3197,9 @@ static int sctp_setsockopt_del_key(struct sock *sk,
struct sctp_authkeyid val;
struct sctp_association *asoc;
+ if (!sctp_auth_enable)
+ return -EACCES;
+
if (optlen != sizeof(struct sctp_authkeyid))
return -EINVAL;
if (copy_from_user(&val, optval, optlen))
@@ -5197,19 +5212,29 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
+ struct sctp_hmacalgo __user *p = (void __user *)optval;
struct sctp_hmac_algo_param *hmacs;
- __u16 param_len;
+ __u16 data_len = 0;
+ u32 num_idents;
+
+ if (!sctp_auth_enable)
+ return -EACCES;
hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
- param_len = ntohs(hmacs->param_hdr.length);
+ data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
- if (len < param_len)
+ if (len < sizeof(struct sctp_hmacalgo) + data_len)
return -EINVAL;
+
+ len = sizeof(struct sctp_hmacalgo) + data_len;
+ num_idents = data_len / sizeof(u16);
+
if (put_user(len, optlen))
return -EFAULT;
- if (copy_to_user(optval, hmacs->hmac_ids, len))
+ if (put_user(num_idents, &p->shmac_num_idents))
+ return -EFAULT;
+ if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
return -EFAULT;
-
return 0;
}
@@ -5219,6 +5244,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
struct sctp_authkeyid val;
struct sctp_association *asoc;
+ if (!sctp_auth_enable)
+ return -EACCES;
+
if (len < sizeof(struct sctp_authkeyid))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
@@ -5233,6 +5261,12 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
else
val.scact_keynumber = sctp_sk(sk)->ep->active_key_id;
+ len = sizeof(struct sctp_authkeyid);
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+
return 0;
}
@@ -5243,13 +5277,16 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
struct sctp_authchunks val;
struct sctp_association *asoc;
struct sctp_chunks_param *ch;
- u32 num_chunks;
+ u32 num_chunks = 0;
char __user *to;
- if (len <= sizeof(struct sctp_authchunks))
+ if (!sctp_auth_enable)
+ return -EACCES;
+
+ if (len < sizeof(struct sctp_authchunks))
return -EINVAL;
- if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
+ if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
return -EFAULT;
to = p->gauth_chunks;
@@ -5258,20 +5295,21 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
return -EINVAL;
ch = asoc->peer.peer_chunks;
+ if (!ch)
+ goto num;
/* See if the user provided enough room for all the data */
num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
if (len < num_chunks)
return -EINVAL;
- len = num_chunks;
- if (put_user(len, optlen))
+ if (copy_to_user(to, ch->chunks, num_chunks))
return -EFAULT;
+num:
+ len = sizeof(struct sctp_authchunks) + num_chunks;
+ if (put_user(len, optlen))
+ return -EFAULT;
if (put_user(num_chunks, &p->gauth_number_of_chunks))
return -EFAULT;
- if (copy_to_user(to, ch->chunks, len))
- return -EFAULT;
-
return 0;
}
@@ -5282,13 +5320,16 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
struct sctp_authchunks val;
struct sctp_association *asoc;
struct sctp_chunks_param *ch;
- u32 num_chunks;
+ u32 num_chunks = 0;
char __user *to;
- if (len <= sizeof(struct sctp_authchunks))
+ if (!sctp_auth_enable)
+ return -EACCES;
+
+ if (len < sizeof(struct sctp_authchunks))
return -EINVAL;
- if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
+ if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
return -EFAULT;
to = p->gauth_chunks;
@@ -5301,17 +5342,21 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
else
ch = sctp_sk(sk)->ep->auth_chunk_list;
+ if (!ch)
+ goto num;
+
num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
- if (len < num_chunks)
+ if (len < sizeof(struct sctp_authchunks) + num_chunks)
return -EINVAL;
- len = num_chunks;
+ if (copy_to_user(to, ch->chunks, num_chunks))
+ return -EFAULT;
+num:
+ len = sizeof(struct sctp_authchunks) + num_chunks;
if (put_user(len, optlen))
return -EFAULT;
if (put_user(num_chunks, &p->gauth_number_of_chunks))
return -EFAULT;
- if (copy_to_user(to, ch->chunks, len))
- return -EFAULT;
return 0;
}
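
After this merge, sctp_getsockopt_hmac_ident() reports the option length as sizeof(struct sctp_hmacalgo) plus the identifier payload and fills in shmac_num_idents, instead of copying raw parameter bytes over the caller's buffer. A hedged userspace sketch of consuming the new layout follows; it assumes the lksctp-tools header <netinet/sctp.h> exposes the same struct sctp_hmacalgo fields used in the hunks above and the SCTP_HMAC_IDENT option name, and it is not taken from this patch:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int main(void)
{
	/* Room for the header plus a generous number of HMAC identifiers. */
	size_t buflen = sizeof(struct sctp_hmacalgo) + 64 * sizeof(uint16_t);
	struct sctp_hmacalgo *algo = malloc(buflen);
	socklen_t len = buflen;
	int fd;

	if (algo == NULL)
		return EXIT_FAILURE;

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
	if (fd < 0) {
		perror("socket");
		return EXIT_FAILURE;
	}
	/* Fails with EACCES when net.sctp.auth_enable is off -- the guard
	 * this patch adds to every SCTP-AUTH socket option. */
	if (getsockopt(fd, IPPROTO_SCTP, SCTP_HMAC_IDENT, algo, &len) < 0) {
		perror("getsockopt(SCTP_HMAC_IDENT)");
		return EXIT_FAILURE;
	}
	/* len now covers sizeof(struct sctp_hmacalgo) plus the identifier
	 * bytes, and the count is reported explicitly. */
	for (uint32_t i = 0; i < algo->shmac_num_idents; i++)
		printf("supported HMAC id: %u\n", (unsigned)algo->shmac_idents[i]);

	free(algo);
	return 0;
}
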