path: root/net/sched
author	Eric Dumazet <eric.dumazet@gmail.com>	2010-12-22 11:39:59 -0800
committer	David S. Miller <davem@davemloft.net>	2010-12-22 11:39:59 -0800
commit	ee09b3c1cff0335137dc1b146488e4352f640f13 (patch)
tree	4f21be5938205963ab73f8ca2ebdd0f4a65f8b17 /net/sched
parent	503b1a529a6b62b31904bab4699752c523cf76b2 (diff)
sfq: fix sfq class stats handling
sfq_walk() runs without the qdisc lock. By the time it selects a non-empty hash slot and sfq_dump_class_stats() is run (with the lock held), the slot might have been freed: we then access q->slots[SFQ_EMPTY_SLOT], out of bounds, and crash in slot_queue_walk().

On previous kernels the bug is also present, but the out-of-bounds qs[SFQ_DEPTH] and allot[SFQ_DEPTH] entries are located inside struct sfq_sched_data, so no illegal memory access happens; only possibly wrong data is reported to the user.

Also, slot_dequeue_tail() should make sure the slot skb chain is correctly terminated, or sfq_dump_class_stats() can access freed skbs.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
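The fix (second hunk below) makes the stats dump check the hash-table entry against SFQ_EMPTY_SLOT before dereferencing q->slots[]. The standalone C sketch that follows only illustrates that guard pattern: the struct layouts, field subset, constant values and the sfq_index typedef are simplified stand-ins for the real definitions in net/sched/sch_sfq.c, not the kernel code itself.

/*
 * Minimal userspace sketch of the guarded stats lookup.
 * All types and constants here are simplified stand-ins, not the
 * real sch_sfq.c definitions.
 */
#include <stdio.h>

#define SFQ_SLOTS      128
#define SFQ_EMPTY_SLOT 255          /* sentinel: this class has no active slot */

typedef unsigned char sfq_index;

struct sfq_slot {
	unsigned int qlen;
	unsigned int allot;
};

struct sfq_sched_data {
	sfq_index ht[16];               /* hash table: class -> slot index */
	struct sfq_slot slots[SFQ_SLOTS];
};

/* Dump per-class stats; the slot may have been emptied since the class
 * was selected, so check the index before touching q->slots[]. */
static void dump_class_stats(const struct sfq_sched_data *q, unsigned long cl)
{
	sfq_index idx = q->ht[cl - 1];

	if (idx == SFQ_EMPTY_SLOT) {    /* slot was freed in the meantime */
		printf("class %lu: empty\n", cl);
		return;
	}
	printf("class %lu: qlen=%u allot=%u\n",
	       cl, q->slots[idx].qlen, q->slots[idx].allot);
}

int main(void)
{
	struct sfq_sched_data q = { .ht = { [0] = SFQ_EMPTY_SLOT, [1] = 3 } };

	q.slots[3].qlen = 2;
	q.slots[3].allot = 1514;
	dump_class_stats(&q, 1);        /* hits the empty-slot guard */
	dump_class_stats(&q, 2);        /* prints the real slot stats */
	return 0;
}

Without the guard, an empty class would index slots[] with the sentinel value, which is exactly the out-of-bounds access described above.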
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/sch_sfq.c | 16
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 13322e8a0456..6a2f88fea6d8 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -281,6 +281,7 @@ static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
 	struct sk_buff *skb = slot->skblist_prev;
 
 	slot->skblist_prev = skb->prev;
+	skb->prev->next = (struct sk_buff *)slot;
 	skb->next = skb->prev = NULL;
 	return skb;
 }
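The single added line above keeps the slot's circular skb list correctly terminated after a tail dequeue, which is what lets slot_queue_walk() in the stats path stop at the slot instead of following a pointer into a freed skb. Below is a minimal userspace C model of that invariant; struct node and struct list are illustrative stand-ins for struct sk_buff and the sfq_slot list head, and the helper names are made up for the sketch rather than taken from sch_sfq.c.

#include <stdio.h>

struct node {
	struct node *next;
	struct node *prev;
	int len;
};

/*
 * The list head doubles as the list terminator, mirroring the trick
 * sch_sfq.c uses when it casts the sfq_slot to (struct sk_buff *).
 * Only the first two members are ever touched through the cast.
 */
struct list {
	struct node *next;	/* head of the queue */
	struct node *prev;	/* tail of the queue */
};

#define SENTINEL(l)	((struct node *)(l))

static void list_init(struct list *l)
{
	l->next = l->prev = SENTINEL(l);
}

static void enqueue_head(struct list *l, struct node *n)
{
	n->prev = SENTINEL(l);
	n->next = l->next;
	l->next->prev = n;	/* old head (or sentinel) now points back at n */
	l->next = n;
}

static struct node *dequeue_tail(struct list *l)
{
	struct node *n = l->prev;

	l->prev = n->prev;
	/* The line the patch adds: make the new tail point back at the
	 * sentinel, so a walk from the head terminates instead of running
	 * into the node we just removed. */
	n->prev->next = SENTINEL(l);
	n->next = n->prev = NULL;
	return n;
}

int main(void)
{
	struct list l;
	struct node a = { .len = 100 }, b = { .len = 200 };
	struct node *walk;

	list_init(&l);
	enqueue_head(&l, &a);
	enqueue_head(&l, &b);	/* queue is now b -> a */
	dequeue_tail(&l);	/* removes a, re-terminates b->next */

	/* Counterpart of slot_queue_walk(): stop when the sentinel comes back. */
	for (walk = l.next; walk != SENTINEL(&l); walk = walk->next)
		printf("len=%d\n", walk->len);	/* prints only len=200 */
	return 0;
}

If dequeue_tail() skipped the re-termination, the walk would step from the remaining node into the one just removed, the userspace analogue of touching a freed skb.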
@@ -608,14 +609,19 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 				struct gnet_dump *d)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
-	const struct sfq_slot *slot = &q->slots[q->ht[cl - 1]];
-	struct gnet_stats_queue qs = { .qlen = slot->qlen };
-	struct tc_sfq_xstats xstats = { .allot = slot->allot };
+	sfq_index idx = q->ht[cl - 1];
+	struct gnet_stats_queue qs = { 0 };
+	struct tc_sfq_xstats xstats = { 0 };
 	struct sk_buff *skb;
 
-	slot_queue_walk(slot, skb)
-		qs.backlog += qdisc_pkt_len(skb);
+	if (idx != SFQ_EMPTY_SLOT) {
+		const struct sfq_slot *slot = &q->slots[idx];
+		xstats.allot = slot->allot;
+		qs.qlen = slot->qlen;
+		slot_queue_walk(slot, skb)
+			qs.backlog += qdisc_pkt_len(skb);
+	}
 	if (gnet_stats_copy_queue(d, &qs) < 0)
 		return -1;
 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
 }