author		Eric Dumazet <edumazet@google.com>	2016-12-04 09:48:16 -0800
committer	David S. Miller <davem@davemloft.net>	2016-12-05 15:21:59 -0500
commit		1c0d32fde5bdf1184bc274f864c09799278a1114 (patch)
tree		47367d46dfc125e19294c3f5fa9a021520bd5660 /net/sched
parent		a6e169312971219a34927e8fdece60046fafb8ba (diff)
download	linux-1c0d32fde5bdf1184bc274f864c09799278a1114.tar.bz2
net_sched: gen_estimator: complete rewrite of rate estimators
1) Old code was hard to maintain, due to complex lock chains.
   (We probably will be able to remove some kfree_rcu() in callers)

2) Using a single timer to update all estimators does not scale.

3) Code was buggy on 32-bit kernels: WRITE_ONCE() on a 64-bit quantity
   is not atomic there, so readers could observe torn values
   (a minimal sketch of the problem follows below).
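For context, here is a minimal sketch of the torn-read problem and of the
seqcount pattern the rewrite adopts. The names (rate_sample, sample_update(),
sample_read()) are illustrative, not code from this patch; the real structure
lives in net/core/gen_estimator.c.

#include <linux/seqlock.h>
#include <linux/types.h>

struct rate_sample {
	seqcount_t	seq;	/* guards the 64-bit fields below */
	u64		bps;	/* bytes per second */
	u64		pps;	/* packets per second */
};

/* Writer (e.g. the estimator timer): on a 32-bit kernel each u64 store
 * compiles to two 32-bit stores, so the update is bracketed by the
 * seqcount instead of relying on WRITE_ONCE(). */
static void sample_update(struct rate_sample *s, u64 bps, u64 pps)
{
	write_seqcount_begin(&s->seq);
	s->bps = bps;
	s->pps = pps;
	write_seqcount_end(&s->seq);
}

/* Reader: retry whenever a writer ran concurrently, so a torn 64-bit
 * value can never be returned. */
static void sample_read(struct rate_sample *s, u64 *bps, u64 *pps)
{
	unsigned int start;

	do {
		start = read_seqcount_begin(&s->seq);
		*bps = s->bps;
		*pps = s->pps;
	} while (read_seqcount_retry(&s->seq, start));
}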
In this rewrite:

- I removed the RB tree that had to be scanned in
  gen_estimator_active(). qdisc dumps should be much faster.

- Each estimator has its own timer.

- Estimates are maintained in a net_rate_estimator structure,
  instead of dirtying the qdisc. Minor, but part of the simplification.

- Reading the estimator uses RCU and a seqcount to provide proper
  support for 32-bit kernels.

- We reduce memory needs when estimators are not used, since
  we store a pointer instead of the bytes/packets counters
  (see the sketch after this list).

- xt_rateest_mt() no longer has to grab a spinlock.
  (In the future, xt_rateest_tg() could be switched to per-CPU counters)
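To illustrate the pointer-based layout described above, here is a hedged
sketch; est_sketch and its helpers are illustrative names (the real type is
net_rate_estimator, managed by gen_new_estimator()/gen_estimator_active()).
An object with no estimator configured pays only for one NULL pointer, and
readers sample the pointer under RCU without taking any lock.

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct est_sketch {
	u64		avbps;	/* smoothed bytes per second */
	u64		avpps;	/* smoothed packets per second */
	struct rcu_head	rcu;	/* for deferred freeing */
};

/* Attach: allocate, then publish the fully initialized estimator so
 * concurrent readers see either NULL or a complete object. */
static int est_sketch_attach(struct est_sketch __rcu **slot)
{
	struct est_sketch *est = kzalloc(sizeof(*est), GFP_KERNEL);

	if (!est)
		return -ENOBUFS;
	rcu_assign_pointer(*slot, est);
	return 0;
}

/* Read side: a NULL pointer simply means "no estimator configured",
 * which replaces the old RB-tree scan in gen_estimator_active(). */
static bool est_sketch_active(struct est_sketch __rcu **slot)
{
	bool active;

	rcu_read_lock();
	active = rcu_dereference(*slot) != NULL;
	rcu_read_unlock();
	return active;
}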
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')

-rw-r--r--	net/sched/act_api.c	|  9
-rw-r--r--	net/sched/act_police.c	| 21
-rw-r--r--	net/sched/sch_api.c	|  2
-rw-r--r--	net/sched/sch_cbq.c	|  6
-rw-r--r--	net/sched/sch_drr.c	|  6
-rw-r--r--	net/sched/sch_generic.c	|  2
-rw-r--r--	net/sched/sch_hfsc.c	|  6
-rw-r--r--	net/sched/sch_htb.c	|  6
-rw-r--r--	net/sched/sch_qfq.c	|  8

9 files changed, 33 insertions(+), 33 deletions(-)
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index f893d180da1c..2095c83ce773 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -41,8 +41,7 @@ static void tcf_hash_destroy(struct tcf_hashinfo *hinfo, struct tc_action *p)
 	spin_lock_bh(&hinfo->lock);
 	hlist_del(&p->tcfa_head);
 	spin_unlock_bh(&hinfo->lock);
-	gen_kill_estimator(&p->tcfa_bstats,
-			   &p->tcfa_rate_est);
+	gen_kill_estimator(&p->tcfa_rate_est);
 	/*
 	 * gen_estimator est_timer() might access p->tcfa_lock
 	 * or bstats, wait a RCU grace period before freeing p
@@ -237,8 +236,7 @@ EXPORT_SYMBOL(tcf_hash_check);
 void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est)
 {
 	if (est)
-		gen_kill_estimator(&a->tcfa_bstats,
-				   &a->tcfa_rate_est);
+		gen_kill_estimator(&a->tcfa_rate_est);
 	call_rcu(&a->tcfa_rcu, free_tcf);
 }
 EXPORT_SYMBOL(tcf_hash_cleanup);
@@ -670,8 +668,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
 		goto errout;

 	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
-	    gnet_stats_copy_rate_est(&d, &p->tcfa_bstats,
-				     &p->tcfa_rate_est) < 0 ||
+	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
 	    gnet_stats_copy_queue(&d, p->cpu_qstats,
 				  &p->tcfa_qstats,
 				  p->tcfa_qstats.qlen) < 0)
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index c990b73a6c85..0ba91d1ce994 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -142,8 +142,7 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
 			goto failure_unlock;
 	} else if (tb[TCA_POLICE_AVRATE] &&
 		   (ret == ACT_P_CREATED ||
-		    !gen_estimator_active(&police->tcf_bstats,
-					  &police->tcf_rate_est))) {
+		    !gen_estimator_active(&police->tcf_rate_est))) {
 		err = -EINVAL;
 		goto failure_unlock;
 	}
@@ -216,13 +215,17 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
 	bstats_update(&police->tcf_bstats, skb);
 	tcf_lastuse_update(&police->tcf_tm);

-	if (police->tcfp_ewma_rate &&
-	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
-		police->tcf_qstats.overlimits++;
-		if (police->tcf_action == TC_ACT_SHOT)
-			police->tcf_qstats.drops++;
-		spin_unlock(&police->tcf_lock);
-		return police->tcf_action;
+	if (police->tcfp_ewma_rate) {
+		struct gnet_stats_rate_est64 sample;
+
+		if (!gen_estimator_read(&police->tcf_rate_est, &sample) ||
+		    sample.bps >= police->tcfp_ewma_rate) {
+			police->tcf_qstats.overlimits++;
+			if (police->tcf_action == TC_ACT_SHOT)
+				police->tcf_qstats.drops++;
+			spin_unlock(&police->tcf_lock);
+			return police->tcf_action;
+		}
 	}

 	if (qdisc_pkt_len(skb) <= police->tcfp_mtu) {
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index f337f1bdd1d4..d7b93429f0cc 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1395,7 +1395,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,

 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
 				  &d, cpu_bstats, &q->bstats) < 0 ||
-	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
+	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
 	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
 		goto nla_put_failure;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index beb554aa8cfb..9ffe1c220b02 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -122,7 +122,7 @@ struct cbq_class {
 	psched_time_t		penalized;
 	struct gnet_stats_basic_packed bstats;
 	struct gnet_stats_queue qstats;
-	struct gnet_stats_rate_est64 rate_est;
+	struct net_rate_estimator __rcu *rate_est;
 	struct tc_cbq_xstats	xstats;

 	struct tcf_proto __rcu	*filter_list;
@@ -1346,7 +1346,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,

 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl->bstats) < 0 ||
-	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
+	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
 	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
 		return -1;
@@ -1405,7 +1405,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
 	tcf_destroy_chain(&cl->filter_list);
 	qdisc_destroy(cl->q);
 	qdisc_put_rtab(cl->R_tab);
-	gen_kill_estimator(&cl->bstats, &cl->rate_est);
+	gen_kill_estimator(&cl->rate_est);
 	if (cl != &q->link)
 		kfree(cl);
 }
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 8af5c59eef84..bb4cbdf75004 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -25,7 +25,7 @@ struct drr_class {

 	struct gnet_stats_basic_packed		bstats;
 	struct gnet_stats_queue			qstats;
-	struct gnet_stats_rate_est64		rate_est;
+	struct net_rate_estimator __rcu		*rate_est;
 	struct list_head			alist;
 	struct Qdisc				*qdisc;
@@ -142,7 +142,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,

 static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
 {
-	gen_kill_estimator(&cl->bstats, &cl->rate_est);
+	gen_kill_estimator(&cl->rate_est);
 	qdisc_destroy(cl->qdisc);
 	kfree(cl);
 }
@@ -283,7 +283,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,

 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl->bstats) < 0 ||
-	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
+	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
 	    gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
 		return -1;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 6cfb6e9038c2..6eb9c8e88519 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -709,7 +709,7 @@ void qdisc_destroy(struct Qdisc *qdisc)

 	qdisc_put_stab(rtnl_dereference(qdisc->stab));
 #endif
-	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
+	gen_kill_estimator(&qdisc->rate_est);
 	if (ops->reset)
 		ops->reset(qdisc);
 	if (ops->destroy)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 000f1d36128e..3ffaa6fb0990 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -114,7 +114,7 @@ struct hfsc_class {

 	struct gnet_stats_basic_packed bstats;
 	struct gnet_stats_queue qstats;
-	struct gnet_stats_rate_est64 rate_est;
+	struct net_rate_estimator __rcu *rate_est;
 	struct tcf_proto __rcu *filter_list; /* filter list */
 	unsigned int	filter_cnt;	/* filter count */
 	unsigned int	level;		/* class level in hierarchy */
@@ -1091,7 +1091,7 @@ hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)

 	tcf_destroy_chain(&cl->filter_list);
 	qdisc_destroy(cl->qdisc);
-	gen_kill_estimator(&cl->bstats, &cl->rate_est);
+	gen_kill_estimator(&cl->rate_est);
 	if (cl != &q->root)
 		kfree(cl);
 }
@@ -1348,7 +1348,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	xstats.rtwork  = cl->cl_cumul;

 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
-	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
+	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
 	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
 		return -1;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 9926fe4f3b6f..760f39e7caee 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -111,7 +111,7 @@ struct htb_class {
 	unsigned int		children;
 	struct htb_class	*parent;	/* parent class */

-	struct gnet_stats_rate_est64 rate_est;
+	struct net_rate_estimator __rcu *rate_est;

 	/*
 	 * Written often fields
@@ -1145,7 +1145,7 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)

 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl->bstats) < 0 ||
-	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
+	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
 	    gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
 		return -1;
@@ -1228,7 +1228,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
 		WARN_ON(!cl->un.leaf.q);
 		qdisc_destroy(cl->un.leaf.q);
 	}
-	gen_kill_estimator(&cl->bstats, &cl->rate_est);
+	gen_kill_estimator(&cl->rate_est);
 	tcf_destroy_chain(&cl->filter_list);
 	kfree(cl);
 }
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index ca0516e6f743..f9e712ce2d15 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -137,7 +137,7 @@ struct qfq_class {

 	struct gnet_stats_basic_packed bstats;
 	struct gnet_stats_queue qstats;
-	struct gnet_stats_rate_est64 rate_est;
+	struct net_rate_estimator __rcu *rate_est;
 	struct Qdisc *qdisc;
 	struct list_head alist;		/* Link for active-classes list. */
 	struct qfq_aggregate *agg;	/* Parent aggregate. */
@@ -508,7 +508,7 @@ set_change_agg:
 		new_agg = kzalloc(sizeof(*new_agg), GFP_KERNEL);
 		if (new_agg == NULL) {
 			err = -ENOBUFS;
-			gen_kill_estimator(&cl->bstats, &cl->rate_est);
+			gen_kill_estimator(&cl->rate_est);
 			goto destroy_class;
 		}
 		sch_tree_lock(sch);
@@ -533,7 +533,7 @@ static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
 	struct qfq_sched *q = qdisc_priv(sch);

 	qfq_rm_from_agg(q, cl);
-	gen_kill_estimator(&cl->bstats, &cl->rate_est);
+	gen_kill_estimator(&cl->rate_est);
 	qdisc_destroy(cl->qdisc);
 	kfree(cl);
 }
@@ -667,7 +667,7 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,

 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl->bstats) < 0 ||
-	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
+	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
 	    gnet_stats_copy_queue(d, NULL,
 				  &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
 		return -1;
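A note on the repeated one-argument gen_kill_estimator() calls above: because
the estimator now hangs off each object as an RCU-managed pointer, teardown
can detach it with an atomic pointer swap, quiesce its private timer, and free
it after a grace period. Below is a hedged sketch of that pattern; est_sketch
and est_sketch_kill() are illustrative names, the real implementation lives in
net/core/gen_estimator.c (outside this diffstat).

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/types.h>

struct est_sketch {
	struct timer_list	timer;	/* per-estimator update timer */
	struct rcu_head		rcu;	/* for deferred freeing */
	u64			avbps;
	u64			avpps;
};

static void est_sketch_kill(struct est_sketch __rcu **slot)
{
	struct est_sketch *est;

	/* Atomically steal the pointer so new readers see NULL */
	est = xchg((__force struct est_sketch **)slot, NULL);
	if (est) {
		del_timer_sync(&est->timer);	/* stop the private timer */
		kfree_rcu(est, rcu);	/* free once in-flight RCU readers finish */
	}
}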