path: root/kernel/time
author     Thomas Gleixner <tglx@linutronix.de>  2015-05-26 22:50:35 +0000
committer  Thomas Gleixner <tglx@linutronix.de>  2015-06-19 15:18:28 +0200
commit     683be13a284720205228e29207ef11a1c3c322b9 (patch)
tree       02a925c3d34bd79de893c152d793aae46218a113 /kernel/time
parent     bc7a34b8b9ebfb0f4b8a35a72a0b134fd6c5ef50 (diff)
download   linux-683be13a284720205228e29207ef11a1c3c322b9.tar.bz2
timer: Minimize nohz off overhead
If nohz is disabled on the kernel command line the [hr]timer code still calls
wake_up_nohz_cpu() and tick_nohz_full_cpu(), a pretty pointless exercise.
Cache nohz_active in [hr]timer per cpu bases and avoid the overhead.

Before:
  48.10%  hog       [.] main
  15.25%  [kernel]  [k] _raw_spin_lock_irqsave
   9.76%  [kernel]  [k] _raw_spin_unlock_irqrestore
   6.50%  [kernel]  [k] mod_timer
   6.44%  [kernel]  [k] lock_timer_base.isra.38
   3.87%  [kernel]  [k] detach_if_pending
   3.80%  [kernel]  [k] del_timer
   2.67%  [kernel]  [k] internal_add_timer
   1.33%  [kernel]  [k] __internal_add_timer
   0.73%  [kernel]  [k] timerfn
   0.54%  [kernel]  [k] wake_up_nohz_cpu

After:
  48.73%  hog       [.] main
  15.36%  [kernel]  [k] _raw_spin_lock_irqsave
   9.77%  [kernel]  [k] _raw_spin_unlock_irqrestore
   6.61%  [kernel]  [k] lock_timer_base.isra.38
   6.42%  [kernel]  [k] mod_timer
   3.90%  [kernel]  [k] detach_if_pending
   3.76%  [kernel]  [k] del_timer
   2.41%  [kernel]  [k] internal_add_timer
   1.39%  [kernel]  [k] __internal_add_timer
   0.76%  [kernel]  [k] timerfn

We probably should have a cached value for nohz full in the per cpu bases as
well to avoid the cpumask check. The base cache line is hot already, the
cpumask not necessarily.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Joonwoo Park <joonwoop@codeaurora.org>
Cc: Wenbo Wang <wenbo.wang@memblaze.com>
Link: http://lkml.kernel.org/r/20150526224512.207378134@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
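For orientation, the pattern the patch applies is: check a flag that is cached in the per-CPU base (a cache line the enqueue path already owns) before touching the nohz-full cpumask or sending a remote kick. The following is a minimal standalone C sketch of that idea, not kernel code; `struct timer_base`, `cpu_is_nohz_full()`, `remote_kick()` and `add_timer_on_base()` are placeholder names standing in for the kernel facilities the diff below actually touches.

	#include <stdbool.h>
	#include <stdio.h>

	/* Placeholder stand-in for the per-CPU timer base. */
	struct timer_base {
		int  cpu;
		bool nohz_active;	/* set once when nohz becomes active, never cleared */
	};

	static bool cpu_is_nohz_full(int cpu)
	{
		/* In the kernel this is a cpumask test on a possibly cold cache line. */
		return false;
	}

	static void remote_kick(int cpu)
	{
		/* Stands in for wake_up_nohz_cpu(): tell the target to re-evaluate timers. */
		printf("kick cpu %d\n", cpu);
	}

	/* Shape of the patched enqueue path. */
	static void add_timer_on_base(struct timer_base *base, bool deferrable)
	{
		/*
		 * base->nohz_active sits in the already-hot base cache line, so with
		 * nohz disabled the whole block is skipped without further cost.
		 */
		if (base->nohz_active) {
			if (!deferrable || cpu_is_nohz_full(base->cpu))
				remote_kick(base->cpu);
		}
	}

	int main(void)
	{
		struct timer_base base = { .cpu = 1, .nohz_active = false };

		add_timer_on_base(&base, false);	/* nohz off: no cpumask check, no kick */
		base.nohz_active = true;		/* done once, cf. timers_update_migration(true) */
		add_timer_on_base(&base, false);	/* nohz on: remote CPU gets kicked */
		return 0;
	}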
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/hrtimer.c        |  3
-rw-r--r--  kernel/time/tick-internal.h  |  4
-rw-r--r--  kernel/time/tick-sched.c     |  2
-rw-r--r--  kernel/time/timer.c          | 16
4 files changed, 17 insertions, 8 deletions
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 6115f4df119b..db5c9508ed95 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -994,7 +994,8 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
                  * Kick to reschedule the next tick to handle the new timer
                  * on dynticks target.
                  */
-                wake_up_nohz_cpu(new_base->cpu_base->cpu);
+                if (new_base->cpu_base->nohz_active)
+                        wake_up_nohz_cpu(new_base->cpu_base->cpu);
         } else {
                 hrtimer_reprogram(timer, new_base);
         }
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 2edde84744df..966a5a6fdd0a 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -156,9 +156,9 @@ extern unsigned long tick_nohz_active;
 #endif

 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-extern void timers_update_migration(void);
+extern void timers_update_migration(bool update_nohz);
 #else
-static inline void timers_update_migration(void) { }
+static inline void timers_update_migration(bool update_nohz) { }
 #endif

 DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index b1cb01699355..c792429e98c6 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -963,7 +963,7 @@ static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
         ts->nohz_mode = mode;
         /* One update is enough */
         if (!test_and_set_bit(0, &tick_nohz_active))
-                timers_update_migration();
+                timers_update_migration(true);
 }

 /**
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 343142ed996a..520499dd85af 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -86,6 +86,7 @@ struct tvec_base {
         unsigned long all_timers;
         int cpu;
         bool migration_enabled;
+        bool nohz_active;
         struct tvec_root tv1;
         struct tvec tv2;
         struct tvec tv3;
@@ -99,7 +100,7 @@ static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 unsigned int sysctl_timer_migration = 1;

-void timers_update_migration(void)
+void timers_update_migration(bool update_nohz)
 {
         bool on = sysctl_timer_migration && tick_nohz_active;
         unsigned int cpu;
@@ -111,6 +112,10 @@ void timers_update_migration(void)
         for_each_possible_cpu(cpu) {
                 per_cpu(tvec_bases.migration_enabled, cpu) = on;
                 per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
+                if (!update_nohz)
+                        continue;
+                per_cpu(tvec_bases.nohz_active, cpu) = true;
+                per_cpu(hrtimer_bases.nohz_active, cpu) = true;
         }
 }
@@ -124,7 +129,7 @@ int timer_migration_handler(struct ctl_table *table, int write,
         mutex_lock(&mutex);
         ret = proc_dointvec(table, write, buffer, lenp, ppos);
         if (!ret && write)
-                timers_update_migration();
+                timers_update_migration(false);
         mutex_unlock(&mutex);
         return ret;
 }
@@ -436,8 +441,11 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
          * require special care against races with idle_cpu(), lets deal
          * with that later.
          */
-        if (!(timer->flags & TIMER_DEFERRABLE) || tick_nohz_full_cpu(base->cpu))
-                wake_up_nohz_cpu(base->cpu);
+        if (base->nohz_active) {
+                if (!(timer->flags & TIMER_DEFERRABLE) ||
+                    tick_nohz_full_cpu(base->cpu))
+                        wake_up_nohz_cpu(base->cpu);
+        }
 }

 #ifdef CONFIG_TIMER_STATS