author		David S. Miller <davem@davemloft.net>	2008-08-17 23:55:36 -0700
committer	David S. Miller <davem@davemloft.net>	2008-08-18 00:39:41 -0700
commit		69747650c814a8a79fef412c7416adf823293a3e (patch)
tree		c1c83e9ae903b0e93029f0071fdca544d2add340 /net
parent		96d203169d1d851ac1468f7d4459a09581be364c (diff)
download	linux-69747650c814a8a79fef412c7416adf823293a3e.tar.bz2
pkt_sched: Fix return value corruption in HTB and TBF.
Based upon a bug report by Josip Rodin.

Packet schedulers should only return NET_XMIT_DROP iff the packet
really was dropped.  If the packet does reach the device after we
return NET_XMIT_DROP then TCP can crash because it depends upon the
enqueue path return values being accurate.

Signed-off-by: David S. Miller <davem@davemloft.net>
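To make that contract concrete, here is a minimal C sketch of the rule the
patch enforces; it is illustrative only (the outer_enqueue() helper and its
explicit child argument are hypothetical, not kernel code, and the usual
qdisc headers such as linux/skbuff.h and net/sch_generic.h are assumed).
The outer qdisc may account a failed enqueue locally, but it must hand the
inner qdisc's verdict back unchanged, because NET_XMIT_DROP is a promise to
the caller that the skb was freed.

/*
 * Hypothetical sketch, not kernel source: propagate the child qdisc's
 * enqueue verdict instead of overwriting it with NET_XMIT_DROP.
 */
static int outer_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct Qdisc *child)
{
	int ret = qdisc_enqueue(skb, child);

	if (ret != NET_XMIT_SUCCESS) {
		/* Account the failure against this qdisc ... */
		sch->qstats.drops++;
		/*
		 * ... but report exactly what the child said.  TCP's
		 * transmit path trusts this value, so claiming
		 * NET_XMIT_DROP for a packet that may still reach the
		 * device corrupts its bookkeeping.
		 */
		return ret;
	}
	return NET_XMIT_SUCCESS;
}

This mirrors what the HTB hunks below do: the drop counters are still
bumped, but the hard-coded return NET_XMIT_DROP becomes return ret.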
Diffstat (limited to 'net')
-rw-r--r--	net/sched/sch_htb.c	4
-rw-r--r--	net/sched/sch_tbf.c	11
2 files changed, 4 insertions, 11 deletions
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 6febd245e62b..0df0df202ed0 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -577,7 +577,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			sch->qstats.drops++;
 			cl->qstats.drops++;
 		}
-		return NET_XMIT_DROP;
+		return ret;
 	} else {
 		cl->bstats.packets +=
 			skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
@@ -623,7 +623,7 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 			sch->qstats.drops++;
 			cl->qstats.drops++;
 		}
-		return NET_XMIT_DROP;
+		return ret;
 	} else
 		htb_activate(q, cl);
 
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 7d3b7ff3bf07..94c61598b86a 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -123,15 +123,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	int ret;
 
-	if (qdisc_pkt_len(skb) > q->max_size) {
-		sch->qstats.drops++;
-#ifdef CONFIG_NET_CLS_ACT
-		if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
-#endif
-			kfree_skb(skb);
-
-		return NET_XMIT_DROP;
-	}
+	if (qdisc_pkt_len(skb) > q->max_size)
+		return qdisc_reshape_fail(skb, sch);
 
 	ret = qdisc_enqueue(skb, q->qdisc);
 	if (ret != 0) {