| author | Linus Torvalds <torvalds@linux-foundation.org> | 2020-03-30 17:01:51 -0700 | 
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-03-30 17:01:51 -0700 | 
| commit | 642e53ead6aea8740a219ede509a5d138fd4f780 (patch) | |
| tree | 5c4680d0c07315dab24fe7333c62f56bc19ec4e4 /include/trace/events | |
| parent | 9b82f05f869a823d43ea4186f5f732f2924d3693 (diff) | |
| parent | 313f16e2e35abb833eab5bdebc6ae30699adca18 (diff) | |
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
 "The main changes in this cycle are:
   - Various NUMA scheduling updates: harmonize the load-balancer and
     NUMA placement logic to not work against each other. The intended
     result is better locality, better utilization and fewer migrations.
   - Introduce Thermal Pressure tracking and optimizations, to improve
     task placement on thermally overloaded systems.
   - Implement frequency invariant scheduler accounting on (some) x86
     CPUs. This is done by observing and sampling the 'recent' CPU
     frequency average at ~tick boundaries. The CPU provides this data
     via the APERF/MPERF MSRs. This hopefully makes our capacity
     estimates more precise and keeps tasks on the same CPU better even
     if it might seem overloaded at a lower momentary frequency. (As
     usual, turbo mode is a complication that we resolve by observing
     the maximum frequency and renormalizing to it; see the sketch just
     after this changelog.)
   - Add asymmetric CPU capacity wakeup scan to improve capacity
     utilization on asymmetric topologies (big.LITTLE systems).
   - PSI fixes and optimizations.
   - RT scheduling capacity awareness fixes & improvements.
   - Optimize the CONFIG_RT_GROUP_SCHED constraints code.
   - Misc fixes, cleanups and optimizations - see the changelog for
     details"
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (62 commits)
  threads: Update PID limit comment according to futex UAPI change
  sched/fair: Fix condition of avg_load calculation
  sched/rt: cpupri_find: Trigger a full search as fallback
  kthread: Do not preempt current task if it is going to call schedule()
  sched/fair: Improve spreading of utilization
  sched: Avoid scale real weight down to zero
  psi: Move PF_MEMSTALL out of task->flags
  MAINTAINERS: Add maintenance information for psi
  psi: Optimize switching tasks inside shared cgroups
  psi: Fix cpu.pressure for cpu.max and competing cgroups
  sched/core: Distribute tasks within affinity masks
  sched/fair: Fix enqueue_task_fair warning
  thermal/cpu-cooling, sched/core: Move the arch_set_thermal_pressure() API to generic scheduler code
  sched/rt: Remove unnecessary push for unfit tasks
  sched/rt: Allow pulling unfitting task
  sched/rt: Optimize cpupri_find() on non-heterogenous systems
  sched/rt: Re-instate old behavior in select_task_rq_rt()
  sched/rt: cpupri_find: Implement fallback mechanism for !fit case
  sched/fair: Fix reordering of enqueue/dequeue_task_fair()
  sched/fair: Fix runnable_avg for throttled cfs
  ...
Diffstat (limited to 'include/trace/events')
| -rw-r--r-- | include/trace/events/sched.h | 53 | 
1 file changed, 31 insertions(+), 22 deletions(-)
```diff
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 420e80e56e55..ed168b0e2c53 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -487,7 +487,11 @@ TRACE_EVENT(sched_process_hang,
 );
 
 #endif /* CONFIG_DETECT_HUNG_TASK */
 
-DECLARE_EVENT_CLASS(sched_move_task_template,
+/*
+ * Tracks migration of tasks from one runqueue to another. Can be used to
+ * detect if automatic NUMA balancing is bouncing between nodes.
+ */
+TRACE_EVENT(sched_move_numa,
 
 	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
@@ -519,23 +523,7 @@ DECLARE_EVENT_CLASS(sched_move_task_template,
 			__entry->dst_cpu, __entry->dst_nid)
 );
 
-/*
- * Tracks migration of tasks from one runqueue to another. Can be used to
- * detect if automatic NUMA balancing is bouncing between nodes
- */
-DEFINE_EVENT(sched_move_task_template, sched_move_numa,
-	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
-
-	TP_ARGS(tsk, src_cpu, dst_cpu)
-);
-
-DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
-	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
-
-	TP_ARGS(tsk, src_cpu, dst_cpu)
-);
-
-TRACE_EVENT(sched_swap_numa,
+DECLARE_EVENT_CLASS(sched_numa_pair_template,
 
 	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
 		 struct task_struct *dst_tsk, int dst_cpu),
@@ -561,11 +549,11 @@ TRACE_EVENT(sched_swap_numa,
 		__entry->src_ngid	= task_numa_group_id(src_tsk);
 		__entry->src_cpu	= src_cpu;
 		__entry->src_nid	= cpu_to_node(src_cpu);
-		__entry->dst_pid	= task_pid_nr(dst_tsk);
-		__entry->dst_tgid	= task_tgid_nr(dst_tsk);
-		__entry->dst_ngid	= task_numa_group_id(dst_tsk);
+		__entry->dst_pid	= dst_tsk ? task_pid_nr(dst_tsk) : 0;
+		__entry->dst_tgid	= dst_tsk ? task_tgid_nr(dst_tsk) : 0;
+		__entry->dst_ngid	= dst_tsk ? task_numa_group_id(dst_tsk) : 0;
 		__entry->dst_cpu	= dst_cpu;
-		__entry->dst_nid	= cpu_to_node(dst_cpu);
+		__entry->dst_nid	= dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
 	),
 
 	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
@@ -575,6 +563,23 @@ TRACE_EVENT(sched_swap_numa,
 			__entry->dst_cpu, __entry->dst_nid)
 );
 
+DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,
+
+	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
+		 struct task_struct *dst_tsk, int dst_cpu),
+
+	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
+);
+
+DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,
+
+	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
+		 struct task_struct *dst_tsk, int dst_cpu),
+
+	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
+);
+
+
 /*
  * Tracepoint for waking a polling cpu without an IPI.
  */
@@ -613,6 +618,10 @@ DECLARE_TRACE(pelt_dl_tp,
 	TP_PROTO(struct rq *rq),
 	TP_ARGS(rq));
 
+DECLARE_TRACE(pelt_thermal_tp,
+	TP_PROTO(struct rq *rq),
+	TP_ARGS(rq));
+
 DECLARE_TRACE(pelt_irq_tp,
 	TP_PROTO(struct rq *rq),
 	TP_ARGS(rq));
```
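
Two details of the diff above are worth a closer look. First, the dst_* NULL guards exist because sched_stick_numa now shares the pair-shaped event class but fires when NUMA balancing failed to migrate or swap a task, a case where there may be no destination task or CPU at all. A hedged sketch of such a call site follows; the helper name is illustrative, while trace_sched_stick_numa() is the wrapper that DEFINE_EVENT() generates:

```c
#include <linux/sched.h>
#include <trace/events/sched.h>

/* Illustrative helper, not an actual kernel function. */
static void report_stuck_numa_task(struct task_struct *p, int src_cpu)
{
	/*
	 * No swap partner or destination CPU was found; the event class
	 * tolerates dst_tsk == NULL and dst_cpu == -1 thanks to the guards.
	 */
	trace_sched_stick_numa(p, src_cpu, NULL, -1);
}
```

Second, pelt_thermal_tp is a bare DECLARE_TRACE() tracepoint like its pelt_*_tp siblings: it has no ftrace event and produces nothing until a probe is attached. A sketch of a module attaching one, assuming the tracepoint is exported to modules as the other PELT tracepoints are; note that struct rq is scheduler-private, so a real probe would need the kernel's internal headers to inspect it:

```c
#include <linux/module.h>
#include <trace/events/sched.h>

/* Probes for bare tracepoints receive the registration cookie first. */
static void probe_pelt_thermal(void *data, struct rq *rq)
{
	/* examine the rq's thermal-pressure PELT state here */
}

static int __init thermal_probe_init(void)
{
	return register_trace_pelt_thermal_tp(probe_pelt_thermal, NULL);
}

static void __exit thermal_probe_exit(void)
{
	unregister_trace_pelt_thermal_tp(probe_pelt_thermal, NULL);
	tracepoint_synchronize_unregister();	/* wait for in-flight probes */
}

module_init(thermal_probe_init);
module_exit(thermal_probe_exit);
MODULE_LICENSE("GPL");
```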