author     Linus Torvalds <torvalds@linux-foundation.org>  2017-04-02 09:25:10 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-04-02 09:25:10 -0700
commit     128c434a70996a3738c7ca4aa2ee91942e4c48f0 (patch)
tree       65646fafea62ef939e49ef57a31edec4c5b049fb /kernel
parent     0a89b5eb818aee2efe8b092bb303aec56486a3d3 (diff)
parent     658b299580da2830a69eea44a8b6da1c2579a32e (diff)
download   linux-128c434a70996a3738c7ca4aa2ee91942e4c48f0.tar.bz2
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Thomas Gleixner:
"This update provides:
- make the scheduler clock switch to unstable mode smooth so the
timestamps stay at microseconds granularity instead of switching to
tick granularity.
- unbreak perf test tsc by taking the new offset into account which
was added in order to proveide better sched clock continuity
- switching sched clock to unstable mode runs all clock related
computations which affect the sched clock output itself from a work
queue. In case of preemption sched clock uses half updated data and
provides wrong timestamps. Keep the math in the protected context
and delegate only the static key switch to workqueue context.
- remove a duplicate header include"
* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/headers: Remove duplicate #include <linux/sched/debug.h> line
sched/clock: Fix broken stable to unstable transfer
sched/clock, x86/perf: Fix "perf test tsc"
sched/clock: Fix clear_sched_clock_stable() preempt wobbly
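The first two fixes both revolve around the two offsets kept in kernel/sched/clock.c: __gtod_offset (applied to the ktime/GTOD based clock) and __sched_clock_offset (applied to the raw sched_clock() value). Each stable/unstable transition picks its offset so that the value reported just after the switch equals the value reported just before it. A stand-alone toy model of that arithmetic follows (plain user-space C, an illustration only; the names merely mirror the kernel variables and the numbers are made up):

/*
 * Toy user-space model of the offset continuity math in
 * kernel/sched/clock.c (illustration only, not the kernel code).
 * Invariant maintained across switches:
 *
 *	gtod + gtod_offset == raw + sched_clock_offset
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t sched_clock_offset;	/* models __sched_clock_offset */
static uint64_t gtod_offset;		/* models __gtod_offset */

/* unstable->stable: make raw + sched_clock_offset continue gtod + gtod_offset */
static void mark_stable(uint64_t raw, uint64_t gtod)
{
	sched_clock_offset = (gtod + gtod_offset) - raw;
}

/* stable->unstable: make gtod + gtod_offset continue raw + sched_clock_offset */
static void mark_unstable(uint64_t raw, uint64_t gtod)
{
	gtod_offset = (raw + sched_clock_offset) - gtod;
}

int main(void)
{
	uint64_t raw = 1000, gtod = 5000;	/* made-up nanosecond readings */

	mark_stable(raw, gtod);
	printf("stable:   %llu\n",
	       (unsigned long long)(raw + sched_clock_offset));	/* 5000 */

	raw = 1500;				/* later: clock declared unstable */
	gtod = 5600;
	mark_unstable(raw, gtod);
	printf("unstable: %llu\n",
	       (unsigned long long)(gtod + gtod_offset));		/* 5500, no jump */
	return 0;
}

The same __sched_clock_offset is what the 'perf test tsc' fix has to take into account: with this merge, sched_clock_cpu() returns sched_clock() + __sched_clock_offset in the stable case, which is also why __sched_clock_offset loses its 'static' in the diff below (the x86 perf side of that change lives outside this kernel/ diffstat).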
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched/clock.c   46
1 file changed, 27 insertions(+), 19 deletions(-)
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index a08795e21628..00a45c45beca 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -96,10 +96,10 @@ static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
 static int __sched_clock_stable_early = 1;
 
 /*
- * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset
+ * We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
  */
-static __read_mostly u64 raw_offset;
-static __read_mostly u64 gtod_offset;
+__read_mostly u64 __sched_clock_offset;
+static __read_mostly u64 __gtod_offset;
 
 struct sched_clock_data {
 	u64			tick_raw;
@@ -131,17 +131,24 @@ static void __set_sched_clock_stable(void)
 	/*
 	 * Attempt to make the (initial) unstable->stable transition continuous.
 	 */
-	raw_offset = (scd->tick_gtod + gtod_offset) - (scd->tick_raw);
+	__sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
 
 	printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
-			scd->tick_gtod, gtod_offset,
-			scd->tick_raw,  raw_offset);
+			scd->tick_gtod, __gtod_offset,
+			scd->tick_raw,  __sched_clock_offset);
 
 	static_branch_enable(&__sched_clock_stable);
 	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
 }
 
-static void __clear_sched_clock_stable(struct work_struct *work)
+static void __sched_clock_work(struct work_struct *work)
+{
+	static_branch_disable(&__sched_clock_stable);
+}
+
+static DECLARE_WORK(sched_clock_work, __sched_clock_work);
+
+static void __clear_sched_clock_stable(void)
 {
 	struct sched_clock_data *scd = this_scd();
 
@@ -154,17 +161,17 @@ static void __clear_sched_clock_stable(struct work_struct *work)
 	 *
 	 * Still do what we can.
 	 */
-	gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod);
+	__gtod_offset = (scd->tick_raw + __sched_clock_offset) - (scd->tick_gtod);
 
 	printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
-			scd->tick_gtod, gtod_offset,
-			scd->tick_raw,  raw_offset);
+			scd->tick_gtod, __gtod_offset,
+			scd->tick_raw,  __sched_clock_offset);
 
-	static_branch_disable(&__sched_clock_stable);
 	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
-}
 
-static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);
+	if (sched_clock_stable())
+		schedule_work(&sched_clock_work);
+}
 
 void clear_sched_clock_stable(void)
 {
@@ -173,7 +180,7 @@ void clear_sched_clock_stable(void)
 	smp_mb(); /* matches sched_clock_init_late() */
 
 	if (sched_clock_running == 2)
-		schedule_work(&sched_clock_work);
+		__clear_sched_clock_stable();
 }
 
 void sched_clock_init_late(void)
@@ -214,7 +221,7 @@ static inline u64 wrap_max(u64 x, u64 y)
  */
 static u64 sched_clock_local(struct sched_clock_data *scd)
 {
-	u64 now, clock, old_clock, min_clock, max_clock;
+	u64 now, clock, old_clock, min_clock, max_clock, gtod;
 	s64 delta;
 
 again:
@@ -231,9 +238,10 @@ again:
 	 *		      scd->tick_gtod + TICK_NSEC);
 	 */
 
-	clock = scd->tick_gtod + gtod_offset + delta;
-	min_clock = wrap_max(scd->tick_gtod, old_clock);
-	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);
+	gtod = scd->tick_gtod + __gtod_offset;
+	clock = gtod + delta;
+	min_clock = wrap_max(gtod, old_clock);
+	max_clock = wrap_max(old_clock, gtod + TICK_NSEC);
 
 	clock = wrap_max(clock, min_clock);
 	clock = wrap_min(clock, max_clock);
@@ -317,7 +325,7 @@ u64 sched_clock_cpu(int cpu)
 	u64 clock;
 
 	if (sched_clock_stable())
-		return sched_clock() + raw_offset;
+		return sched_clock() + __sched_clock_offset;
 
 	if (unlikely(!sched_clock_running))
 		return 0ull;
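On the unstable path, the sched_clock_local() hunk above is where __gtod_offset enters the calculation: the per-CPU clock becomes the offset-adjusted GTOD tick base plus the raw sched_clock() delta since that tick, clamped so it never goes backwards and never runs more than one tick ahead. A stand-alone sketch of that clamping logic (toy user-space C, not the kernel code; TICK_NSEC is hard-coded here purely for the example):

/*
 * Toy user-space sketch of the clamping in sched_clock_local()
 * (illustration only, not the kernel code).
 */
#include <stdint.h>

#define TICK_NSEC 1000000ULL	/* assumed 1 ms tick, for the example only */

static uint64_t wrap_max(uint64_t x, uint64_t y) { return (int64_t)(x - y) > 0 ? x : y; }
static uint64_t wrap_min(uint64_t x, uint64_t y) { return (int64_t)(x - y) < 0 ? x : y; }

/*
 * tick_gtod + gtod_offset is the GTOD base of the last tick, delta is the
 * raw sched_clock() progress since that tick, old_clock is the last value
 * returned on this CPU.
 */
static uint64_t local_clock_model(uint64_t tick_gtod, uint64_t gtod_offset,
				  uint64_t old_clock, uint64_t delta)
{
	uint64_t gtod = tick_gtod + gtod_offset;
	uint64_t clock = gtod + delta;			/* fine-grained estimate */
	uint64_t min_clock = wrap_max(gtod, old_clock);	/* never go backwards */
	uint64_t max_clock = wrap_max(old_clock, gtod + TICK_NSEC); /* <= one tick ahead */

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);
	return clock;
}

int main(void)
{
	/* last tick at gtod = 10,000,000 ns; 300,000 ns of raw time since then */
	uint64_t now = local_clock_model(10000000ULL, 0, 10000000ULL, 300000ULL);

	return now == 10300000ULL ? 0 : 1;	/* value stays within the tick window */
}

Note also how __clear_sched_clock_stable() in the diff now does the offset math directly in the caller's context and defers only static_branch_disable() to sched_clock_work via schedule_work(), which is the preemption fix described in the third item of the pull message.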