author     Qais Yousef <qais.yousef@arm.com>   2019-06-04 12:14:57 +0100
committer  Ingo Molnar <mingo@kernel.org>      2019-06-24 19:23:42 +0200
commit     8de6242cca17d9299e654e29c966d8612d397272 (patch)
tree       143affeeb4eff7f3a549003fc7b1e29704a88ea7 /kernel/sched
parent     ba19f51fcb549c7ee6261da243eea55a47e98d78 (diff)
sched/debug: Add new tracepoint to track PELT at se level
The new tracepoint allows tracking PELT signals at sched_entity level,
which is supported for CFS tasks and task groups only.

Signed-off-by: Qais Yousef <qais.yousef@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Pavankumar Kondeti <pkondeti@codeaurora.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Perret <quentin.perret@arm.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Link: https://lkml.kernel.org/r/20190604111459.2862-5-qais.yousef@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
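The call sites in the diff below fire the new tracepoint right after a sched_entity's PELT signals have been recomputed. pelt_se_tp is a bare tracepoint (DECLARE_TRACE, no trace event), so a consumer has to attach its own probe. A minimal sketch, assuming the tracepoint is exported to modules and using the register_trace_pelt_se_tp()/unregister_trace_pelt_se_tp() helpers generated from the declaration in include/trace/events/sched.h; the probe and module names here are purely illustrative:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative module probe for the pelt_se_tp tracepoint. */
#include <linux/module.h>
#include <linux/sched.h>
#include <trace/events/sched.h>

/* Probe signature: (void *data, <tracepoint args>). */
static void probe_pelt_se(void *data, struct sched_entity *se)
{
	/* se->avg holds the PELT signals updated just before the call. */
	pr_debug("pelt_se: load_avg=%lu util_avg=%lu\n",
		 se->avg.load_avg, se->avg.util_avg);
}

static int __init pelt_se_probe_init(void)
{
	return register_trace_pelt_se_tp(probe_pelt_se, NULL);
}

static void __exit pelt_se_probe_exit(void)
{
	unregister_trace_pelt_se_tp(probe_pelt_se, NULL);
	tracepoint_synchronize_unregister();
}

module_init(pelt_se_probe_init);
module_exit(pelt_se_probe_exit);
MODULE_LICENSE("GPL");

Note that the probe runs in scheduler context with preemption disabled, so it must not sleep; pr_debug() stands in for whatever lightweight aggregation a real consumer would do.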
Diffstat (limited to 'kernel/sched')
-rw-r--r--   kernel/sched/fair.c   1
-rw-r--r--   kernel/sched/pelt.c   2
2 files changed, 3 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e883d7e17e36..75218ab1fa07 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3348,6 +3348,7 @@ static inline int propagate_entity_load_avg(struct sched_entity *se)
 	update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
 
 	trace_pelt_cfs_tp(cfs_rq);
+	trace_pelt_se_tp(se);
 
 	return 1;
 }
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 4e961b55b5ea..a96db50d40e0 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -267,6 +267,7 @@ int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
 {
 	if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
 		___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
+		trace_pelt_se_tp(se);
 		return 1;
 	}
@@ -280,6 +281,7 @@ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 		___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
 		cfs_se_util_change(&se->avg);
+		trace_pelt_se_tp(se);
 		return 1;
 	}