commit    b01ac095c740fc21f4bb21abe900b0f5b3042cf9
tree      bd055fb59d32845be92e7c52f030778055b10c96
parent    7e66016f2c65bfc1181f42274fcb7f1183ab1bb5
author    John Fastabend <john.fastabend@gmail.com>  2017-12-07 09:57:20 -0800
committer David S. Miller <davem@davemloft.net>      2017-12-08 13:32:26 -0500
net: sched: add support for TCQ_F_NOLOCK subqueues to sch_mq
The sch_mq qdisc creates a sub-qdisc per tx queue; these sub-qdiscs are
then called independently for enqueue and dequeue operations. Their
statistics, however, are aggregated and pushed up to the "master" qdisc.
This patch adds support for any of the sub-qdiscs to be per-CPU
statistics qdiscs. To handle this case, add a check when calculating
stats and aggregate the per-CPU stats if needed.
Also export __gnet_stats_copy_queue() for use as a helper function.
Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
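With TCQ_F_NOLOCK set, a sub-qdisc keeps lock-free statistics in one counter slot per CPU, so the dump path must fold every CPU's slot into a single total instead of reading one locked counter. Below is a minimal userspace sketch of that folding step, assuming a toy stats struct, a fixed NR_CPUS, and sample values; it is illustrative only, not kernel code.

/*
 * Illustrative userspace sketch (not kernel code): fold per-CPU
 * counter slots into one aggregate, the way a stats dump must when
 * a qdisc runs with per-CPU statistics.
 */
#include <stdio.h>

#define NR_CPUS 4   /* assumption for the sketch */

struct toy_qstats {
	unsigned int qlen;
	unsigned int backlog;
	unsigned int drops;
};

/* One slot per CPU; each writer only ever touches its own slot. */
static struct toy_qstats per_cpu_stats[NR_CPUS] = {
	{ 3, 4500, 0 }, { 1, 1500, 2 }, { 0, 0, 0 }, { 2, 3000, 1 },
};

static void fold_percpu_stats(struct toy_qstats *total)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		total->qlen    += per_cpu_stats[cpu].qlen;
		total->backlog += per_cpu_stats[cpu].backlog;
		total->drops   += per_cpu_stats[cpu].drops;
	}
}

int main(void)
{
	struct toy_qstats total = { 0, 0, 0 };

	fold_percpu_stats(&total);
	printf("qlen=%u backlog=%u drops=%u\n",
	       total.qlen, total.backlog, total.drops);
	return 0;
}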
Diffstat (limited to 'net/sched/sch_mq.c')
-rw-r--r--  net/sched/sch_mq.c | 25 ++++++++++++++++++-------
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 213b586a06a0..bc59f05e1a0f 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -17,6 +17,7 @@
 #include <linux/skbuff.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/sch_generic.h>
 
 struct mq_sched {
 	struct Qdisc		**qdiscs;
@@ -103,15 +104,25 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
 	memset(&sch->qstats, 0, sizeof(sch->qstats));
 
 	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+		struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
+		struct gnet_stats_queue __percpu *cpu_qstats = NULL;
+		__u32 qlen = 0;
+
 		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
 		spin_lock_bh(qdisc_lock(qdisc));
-		sch->q.qlen		+= qdisc->q.qlen;
-		sch->bstats.bytes	+= qdisc->bstats.bytes;
-		sch->bstats.packets	+= qdisc->bstats.packets;
-		sch->qstats.backlog	+= qdisc->qstats.backlog;
-		sch->qstats.drops	+= qdisc->qstats.drops;
-		sch->qstats.requeues	+= qdisc->qstats.requeues;
-		sch->qstats.overlimits	+= qdisc->qstats.overlimits;
+
+		if (qdisc_is_percpu_stats(qdisc)) {
+			cpu_bstats = qdisc->cpu_bstats;
+			cpu_qstats = qdisc->cpu_qstats;
+		}
+
+		qlen = qdisc_qlen_sum(qdisc);
+
+		__gnet_stats_copy_basic(NULL, &sch->bstats,
+					cpu_bstats, &qdisc->bstats);
+		__gnet_stats_copy_queue(&sch->qstats,
+					cpu_qstats, &qdisc->qstats, qlen);
+
 		spin_unlock_bh(qdisc_lock(qdisc));
 	}
 	return 0;
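The diff relies on qdisc_qlen_sum() to obtain a queue length that is valid for both flavors of sub-qdisc: a NOLOCK qdisc has no single authoritative qlen, so the length is summed across each CPU's private counter, while a locked qdisc simply reads its one counter. A userspace model of that idea follows; the toy types, NR_CPUS, and sample values are assumptions for illustration, not the kernel implementation.

/*
 * Userspace model of the qdisc_qlen_sum() idea (illustrative only):
 * pick the queue-length source based on whether the qdisc keeps a
 * single locked counter or one counter per CPU.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4   /* assumption for the sketch */

struct toy_qdisc {
	bool nolock;                     /* models TCQ_F_NOLOCK */
	unsigned int qlen;               /* locked case: one counter */
	unsigned int cpu_qlen[NR_CPUS];  /* NOLOCK case: one per CPU */
};

static unsigned int toy_qlen_sum(const struct toy_qdisc *q)
{
	unsigned int qlen = 0;
	int cpu;

	if (!q->nolock)
		return q->qlen;          /* single authoritative counter */

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		qlen += q->cpu_qlen[cpu];
	return qlen;
}

int main(void)
{
	struct toy_qdisc locked = { .nolock = false, .qlen = 5 };
	struct toy_qdisc nolock = { .nolock = true,
				    .cpu_qlen = { 2, 0, 1, 3 } };

	printf("locked qlen=%u, nolock qlen=%u\n",
	       toy_qlen_sum(&locked), toy_qlen_sum(&nolock));
	return 0;
}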