Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/clocksource.c    |   3
-rw-r--r--  kernel/time/jiffies.c        |   1
-rw-r--r--  kernel/time/ntp.c            |  93
-rw-r--r--  kernel/time/tick-broadcast.c |  13
-rw-r--r--  kernel/time/tick-internal.h  |   2
-rw-r--r--  kernel/time/tick-sched.c     | 101
-rw-r--r--  kernel/time/timekeeping.c    | 122
-rw-r--r--  kernel/time/timer_list.c     |  20
8 files changed, 234 insertions(+), 121 deletions(-)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 093d4acf993b..9ed2eec97526 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -325,6 +325,9 @@ int clocksource_register(struct clocksource *c)
unsigned long flags;
int ret;
+ /* save mult_orig on registration */
+ c->mult_orig = c->mult;
+
spin_lock_irqsave(&clocksource_lock, flags);
ret = clocksource_enqueue(c);
if (!ret)
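
The new mult_orig field keeps a pristine copy of the multiplier from registration
time: NTP continuously steers ->mult, so the un-steered value must be saved if a
raw, NTP-independent clock (added later in this series) is to be derived from the
same counter. A minimal userspace sketch of the two conversions; the 1 MHz counter
and the NTP tweak below are invented for illustration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* hypothetical clocksource: ns = (cycles * mult) >> shift,
		 * scaled for a 1 MHz counter (1000 ns per cycle) */
		uint32_t shift     = 10;
		uint32_t mult_orig = 1000 << 10;	/* saved at registration */
		uint32_t mult      = mult_orig + 7;	/* pretend NTP steered it */
		uint64_t cycle_delta = 12345;

		uint64_t ns_ntp = (cycle_delta * mult)      >> shift;
		uint64_t ns_raw = (cycle_delta * mult_orig) >> shift;

		printf("NTP-adjusted: %llu ns, raw: %llu ns\n",
		       (unsigned long long)ns_ntp,
		       (unsigned long long)ns_raw);
		return 0;
	}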
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 4c256fdb8875..1ca99557e929 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -61,6 +61,7 @@ struct clocksource clocksource_jiffies = {
.read = jiffies_read,
.mask = 0xffffffff, /*32bits*/
.mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
+ .mult_orig = NSEC_PER_JIFFY << JIFFIES_SHIFT,
.shift = JIFFIES_SHIFT,
};
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 9c114b726ab3..8ff15e5d486b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -10,13 +10,13 @@
#include <linux/mm.h>
#include <linux/time.h>
-#include <linux/timer.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/capability.h>
#include <linux/math64.h>
#include <linux/clocksource.h>
+#include <linux/workqueue.h>
#include <asm/timex.h>
/*
@@ -217,11 +217,11 @@ void second_overflow(void)
/* Disable the cmos update - used by virtualization and embedded */
int no_sync_cmos_clock __read_mostly;
-static void sync_cmos_clock(unsigned long dummy);
+static void sync_cmos_clock(struct work_struct *work);
-static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
+static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
-static void sync_cmos_clock(unsigned long dummy)
+static void sync_cmos_clock(struct work_struct *work)
{
struct timespec now, next;
int fail = 1;
@@ -257,13 +257,13 @@ static void sync_cmos_clock(unsigned long dummy)
next.tv_sec++;
next.tv_nsec -= NSEC_PER_SEC;
}
- mod_timer(&sync_cmos_timer, jiffies + timespec_to_jiffies(&next));
+ schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
}
static void notify_cmos_timer(void)
{
if (!no_sync_cmos_clock)
- mod_timer(&sync_cmos_timer, jiffies + 1);
+ schedule_delayed_work(&sync_cmos_work, 0);
}
#else
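
Converting the CMOS sync from a kernel timer to delayed work moves the handler
into process context; the usual motivation for this conversion (an assumption
here, not stated in the patch) is that updating the persistent clock may sleep,
e.g. when the RTC sits behind I2C, which is illegal in a timer callback. The
pattern in miniature, as a module-style sketch with made-up names:

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static void my_sync(struct work_struct *work);
	static DECLARE_DELAYED_WORK(my_sync_work, my_sync);

	static void my_sync(struct work_struct *work)
	{
		/* process context: sleeping is allowed here, unlike in a
		 * timer callback; rearm ourselves one second from now */
		schedule_delayed_work(&my_sync_work, HZ);
	}

	static int __init my_init(void)
	{
		schedule_delayed_work(&my_sync_work, 0);	/* run ASAP */
		return 0;
	}

	static void __exit my_exit(void)
	{
		cancel_delayed_work_sync(&my_sync_work);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");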
@@ -276,38 +276,50 @@ static inline void notify_cmos_timer(void) { }
int do_adjtimex(struct timex *txc)
{
struct timespec ts;
- long save_adjust, sec;
int result;
- /* In order to modify anything, you gotta be super-user! */
- if (txc->modes && !capable(CAP_SYS_TIME))
- return -EPERM;
-
- /* Now we validate the data before disabling interrupts */
-
- if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) {
+ /* Validate the data before disabling interrupts */
+ if (txc->modes & ADJ_ADJTIME) {
/* singleshot must not be used with any other mode bits */
- if (txc->modes & ~ADJ_OFFSET_SS_READ)
+ if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
return -EINVAL;
+ if (!(txc->modes & ADJ_OFFSET_READONLY) &&
+ !capable(CAP_SYS_TIME))
+ return -EPERM;
+ } else {
+ /* In order to modify anything, you gotta be super-user! */
+ if (txc->modes && !capable(CAP_SYS_TIME))
+ return -EPERM;
+
+ /* if the quartz is off by more than 10% something is VERY wrong! */
+ if (txc->modes & ADJ_TICK &&
+ (txc->tick < 900000/USER_HZ ||
+ txc->tick > 1100000/USER_HZ))
+ return -EINVAL;
+
+ if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
+ hrtimer_cancel(&leap_timer);
}
- /* if the quartz is off by more than 10% something is VERY wrong ! */
- if (txc->modes & ADJ_TICK)
- if (txc->tick < 900000/USER_HZ ||
- txc->tick > 1100000/USER_HZ)
- return -EINVAL;
-
- if (time_state != TIME_OK && txc->modes & ADJ_STATUS)
- hrtimer_cancel(&leap_timer);
getnstimeofday(&ts);
write_seqlock_irq(&xtime_lock);
- /* Save for later - semantics of adjtime is to return old value */
- save_adjust = time_adjust;
-
/* If there are input parameters, then process them */
+ if (txc->modes & ADJ_ADJTIME) {
+ long save_adjust = time_adjust;
+
+ if (!(txc->modes & ADJ_OFFSET_READONLY)) {
+ /* adjtime() is independent from ntp_adjtime() */
+ time_adjust = txc->offset;
+ ntp_update_frequency();
+ }
+ txc->offset = save_adjust;
+ goto adj_done;
+ }
if (txc->modes) {
+ long sec;
+
if (txc->modes & ADJ_STATUS) {
if ((time_status & STA_PLL) &&
!(txc->status & STA_PLL)) {
@@ -374,13 +386,8 @@ int do_adjtimex(struct timex *txc)
if (txc->modes & ADJ_TAI && txc->constant > 0)
time_tai = txc->constant;
- if (txc->modes & ADJ_OFFSET) {
- if (txc->modes == ADJ_OFFSET_SINGLESHOT)
- /* adjtime() is independent from ntp_adjtime() */
- time_adjust = txc->offset;
- else
- ntp_update_offset(txc->offset);
- }
+ if (txc->modes & ADJ_OFFSET)
+ ntp_update_offset(txc->offset);
if (txc->modes & ADJ_TICK)
tick_usec = txc->tick;
@@ -388,22 +395,18 @@ int do_adjtimex(struct timex *txc)
ntp_update_frequency();
}
+ txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
+ NTP_SCALE_SHIFT);
+ if (!(time_status & STA_NANO))
+ txc->offset /= NSEC_PER_USEC;
+
+adj_done:
result = time_state; /* mostly `TIME_OK' */
if (time_status & (STA_UNSYNC|STA_CLOCKERR))
result = TIME_ERROR;
- if ((txc->modes == ADJ_OFFSET_SINGLESHOT) ||
- (txc->modes == ADJ_OFFSET_SS_READ))
- txc->offset = save_adjust;
- else {
- txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
- NTP_SCALE_SHIFT);
- if (!(time_status & STA_NANO))
- txc->offset /= NSEC_PER_USEC;
- }
- txc->freq = shift_right((s32)(time_freq >> PPM_SCALE_INV_SHIFT) *
- (s64)PPM_SCALE_INV,
- NTP_SCALE_SHIFT);
+ txc->freq = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
+ (s64)PPM_SCALE_INV, NTP_SCALE_SHIFT);
txc->maxerror = time_maxerror;
txc->esterror = time_esterror;
txc->status = time_status;
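
The rework routes the old adjtime() interface (ADJ_ADJTIME, i.e. the
ADJ_OFFSET_SINGLESHOT path) around the NTP state entirely, and
ADJ_OFFSET_READONLY allows unprivileged reads of the outstanding offset. From
userspace the read-only query is spelled ADJ_OFFSET_SS_READ
(== ADJ_OFFSET_SINGLESHOT | ADJ_OFFSET_READONLY). A small probe, assuming a
libc that exposes that constant:

	#include <stdio.h>
	#include <sys/timex.h>

	int main(void)
	{
		/* read the remaining adjtime() slew without changing it;
		 * no CAP_SYS_TIME is required for a pure read */
		struct timex tx = { .modes = ADJ_OFFSET_SS_READ };

		if (adjtimex(&tx) == -1) {
			perror("adjtimex");
			return 1;
		}
		printf("remaining adjtime() offset: %ld usec\n", tx.offset);
		return 0;
	}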
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index cb01cd8f919b..f98a1b7b16e9 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -384,6 +384,19 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
}
/*
+ * Called from irq_enter() when idle was interrupted to reenable the
+ * per cpu device.
+ */
+void tick_check_oneshot_broadcast(int cpu)
+{
+ if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
+ struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
+
+ clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
+ }
+}
+
+/*
* Handle oneshot mode broadcasting
*/
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 469248782c23..b1c05bf75ee0 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -36,6 +36,7 @@ extern void tick_broadcast_switch_to_oneshot(void);
extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
extern int tick_broadcast_oneshot_active(void);
+extern void tick_check_oneshot_broadcast(int cpu);
# else /* BROADCAST */
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
@@ -45,6 +46,7 @@ static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
static inline void tick_broadcast_switch_to_oneshot(void) { }
static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
static inline int tick_broadcast_oneshot_active(void) { return 0; }
+static inline void tick_check_oneshot_broadcast(int cpu) { }
# endif /* !BROADCAST */
#else /* !ONESHOT */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a547be11cf97..5bbb1044f847 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -155,7 +155,7 @@ void tick_nohz_update_jiffies(void)
touch_softlockup_watchdog();
}
-void tick_nohz_stop_idle(int cpu)
+static void tick_nohz_stop_idle(int cpu)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
@@ -377,6 +377,32 @@ ktime_t tick_nohz_get_sleep_length(void)
return ts->sleep_length;
}
+static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
+{
+ hrtimer_cancel(&ts->sched_timer);
+ hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);
+
+ while (1) {
+ /* Forward the time to expire in the future */
+ hrtimer_forward(&ts->sched_timer, now, tick_period);
+
+ if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
+ hrtimer_start_expires(&ts->sched_timer,
+ HRTIMER_MODE_ABS);
+ /* Check, if the timer was already in the past */
+ if (hrtimer_active(&ts->sched_timer))
+ break;
+ } else {
+ if (!tick_program_event(
+ hrtimer_get_expires(&ts->sched_timer), 0))
+ break;
+ }
+ /* Update jiffies and reread time */
+ tick_do_update_jiffies64(now);
+ now = ktime_get();
+ }
+}
+
/**
* tick_nohz_restart_sched_tick - restart the idle tick from the idle task
*
@@ -430,28 +456,9 @@ void tick_nohz_restart_sched_tick(void)
*/
ts->tick_stopped = 0;
ts->idle_exittime = now;
- hrtimer_cancel(&ts->sched_timer);
- hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);
- while (1) {
- /* Forward the time to expire in the future */
- hrtimer_forward(&ts->sched_timer, now, tick_period);
+ tick_nohz_restart(ts, now);
- if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
- hrtimer_start_expires(&ts->sched_timer,
- HRTIMER_MODE_ABS);
- /* Check, if the timer was already in the past */
- if (hrtimer_active(&ts->sched_timer))
- break;
- } else {
- if (!tick_program_event(
- hrtimer_get_expires(&ts->sched_timer), 0))
- break;
- }
- /* Update jiffies and reread time */
- tick_do_update_jiffies64(now);
- now = ktime_get();
- }
local_irq_enable();
}
@@ -503,10 +510,6 @@ static void tick_nohz_handler(struct clock_event_device *dev)
update_process_times(user_mode(regs));
profile_tick(CPU_PROFILING);
- /* Do not restart, when we are in the idle loop */
- if (ts->tick_stopped)
- return;
-
while (tick_nohz_reprogram(ts, now)) {
now = ktime_get();
tick_do_update_jiffies64(now);
@@ -552,6 +555,37 @@ static void tick_nohz_switch_to_nohz(void)
smp_processor_id());
}
+/*
+ * When NOHZ is enabled and the tick is stopped, we need to kick the
+ * tick timer from irq_enter() so that the jiffies update is kept
+ * alive during long running softirqs. That's ugly as hell, but
+ * correctness is key even if we need to fix the offending softirq in
+ * the first place.
+ *
+ * Note, this is different to tick_nohz_restart. We just kick the
+ * timer and do not touch the other magic bits which need to be done
+ * when idle is left.
+ */
+static void tick_nohz_kick_tick(int cpu)
+{
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ ktime_t delta, now;
+
+ if (!ts->tick_stopped)
+ return;
+
+ /*
+ * Do not touch the tick device, when the next expiry is either
+ * already reached or less/equal than the tick period.
+ */
+ now = ktime_get();
+ delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
+ if (delta.tv64 <= tick_period.tv64)
+ return;
+
+ tick_nohz_restart(ts, now);
+}
+
#else
static inline void tick_nohz_switch_to_nohz(void) { }
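
tick_nohz_kick_tick() deliberately leaves the clock event device alone when the
programmed expiry is at most one tick period away: the device is about to fire
anyway, and reprogramming it would only add overhead. The guard in isolation,
as a standalone demo with invented nanosecond values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t tick_period = 1000000;		/* 1 ms tick (HZ=1000) */
		int64_t now         = 40000000;
		int64_t expires     = 40500000;		/* pending expiry */

		if (expires - now <= tick_period)
			printf("expiry imminent: do not touch the device\n");
		else
			printf("kick: reprogram via tick_nohz_restart()\n");
		return 0;
	}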
@@ -559,6 +593,19 @@ static inline void tick_nohz_switch_to_nohz(void) { }
#endif /* NO_HZ */
/*
+ * Called from irq_enter to notify about the possible interruption of idle()
+ */
+void tick_check_idle(int cpu)
+{
+ tick_check_oneshot_broadcast(cpu);
+#ifdef CONFIG_NO_HZ
+ tick_nohz_stop_idle(cpu);
+ tick_nohz_update_jiffies();
+ tick_nohz_kick_tick(cpu);
+#endif
+}
+
+/*
* High resolution timer specific code
*/
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -611,10 +658,6 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
profile_tick(CPU_PROFILING);
}
- /* Do not restart, when we are in the idle loop */
- if (ts->tick_stopped)
- return HRTIMER_NORESTART;
-
hrtimer_forward(timer, now, tick_period);
return HRTIMER_RESTART;
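
All three NOHZ fixups now funnel through the single tick_check_idle() hook
called from irq_enter(). A sketch of the expected call site, reconstructed from
kernels of this generation (treat the exact body as an approximation, not part
of this patch):

	void irq_enter(void)
	{
		int cpu = smp_processor_id();

		/* an interrupt hitting an idle CPU with the tick stopped
		 * must fix up jiffies, idle accounting and the tick device
		 * before any softirq gets to run */
		if (idle_cpu(cpu) && !in_interrupt())
			tick_check_idle(cpu);

		__irq_enter();
	}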
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e91c29f961c9..e7acfb482a68 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -58,27 +58,26 @@ struct clocksource *clock;
#ifdef CONFIG_GENERIC_TIME
/**
- * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
+ * clocksource_forward_now - update clock to the current time
*
- * private function, must hold xtime_lock lock when being
- * called. Returns the number of nanoseconds since the
- * last call to update_wall_time() (adjusted by NTP scaling)
+ * Forward the current clock to update its state since the last call to
+ * update_wall_time(). This is useful before significant clock changes,
+ * as it avoids having to deal with this time offset explicitly.
*/
-static inline s64 __get_nsec_offset(void)
+static void clocksource_forward_now(void)
{
cycle_t cycle_now, cycle_delta;
- s64 ns_offset;
+ s64 nsec;
- /* read clocksource: */
cycle_now = clocksource_read(clock);
-
- /* calculate the delta since the last update_wall_time: */
cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+ clock->cycle_last = cycle_now;
- /* convert to nanoseconds: */
- ns_offset = cyc2ns(clock, cycle_delta);
+ nsec = cyc2ns(clock, cycle_delta);
+ timespec_add_ns(&xtime, nsec);
- return ns_offset;
+ nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
+ clock->raw_time.tv_nsec += nsec;
}
/**
@@ -89,6 +88,7 @@ static inline s64 __get_nsec_offset(void)
*/
void getnstimeofday(struct timespec *ts)
{
+ cycle_t cycle_now, cycle_delta;
unsigned long seq;
s64 nsecs;
@@ -96,7 +96,15 @@ void getnstimeofday(struct timespec *ts)
seq = read_seqbegin(&xtime_lock);
*ts = xtime;
- nsecs = __get_nsec_offset();
+
+ /* read clocksource: */
+ cycle_now = clocksource_read(clock);
+
+ /* calculate the delta since the last update_wall_time: */
+ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+ /* convert to nanoseconds: */
+ nsecs = cyc2ns(clock, cycle_delta);
} while (read_seqretry(&xtime_lock, seq));
@@ -129,22 +137,22 @@ EXPORT_SYMBOL(do_gettimeofday);
*/
int do_settimeofday(struct timespec *tv)
{
+ struct timespec ts_delta;
unsigned long flags;
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec, nsec = tv->tv_nsec;
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
write_seqlock_irqsave(&xtime_lock, flags);
- nsec -= __get_nsec_offset();
+ clocksource_forward_now();
+
+ ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
+ ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
+ wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+ xtime = *tv;
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
update_xtime_cache(0);
clock->error = 0;
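
The rewritten do_settimeofday() makes the invariant explicit: CLOCK_MONOTONIC is
xtime plus wall_to_monotonic, so whatever delta is added to xtime must be
subtracted from wall_to_monotonic -- exactly what the old open-coded
wtm_sec/wtm_nsec arithmetic achieved less readably. A toy check in plain
seconds, with invented values:

	#include <stdio.h>

	int main(void)
	{
		long xtime = 1000, wall_to_monotonic = -400;
		long monotonic_before = xtime + wall_to_monotonic;

		long new_time = 1234;			/* settimeofday(new_time) */
		wall_to_monotonic -= new_time - xtime;	/* timespec_sub() step */
		xtime = new_time;

		/* prints "monotonic: 600 -> 600" */
		printf("monotonic: %ld -> %ld\n",
		       monotonic_before, xtime + wall_to_monotonic);
		return 0;
	}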
@@ -170,22 +178,19 @@ EXPORT_SYMBOL(do_settimeofday);
static void change_clocksource(void)
{
struct clocksource *new;
- cycle_t now;
- u64 nsec;
new = clocksource_get_next();
if (clock == new)
return;
- new->cycle_last = 0;
- now = clocksource_read(new);
- nsec = __get_nsec_offset();
- timespec_add_ns(&xtime, nsec);
+ clocksource_forward_now();
- clock = new;
- clock->cycle_last = now;
+ new->raw_time = clock->raw_time;
+ clock = new;
+ clock->cycle_last = 0;
+ clock->cycle_last = clocksource_read(new);
clock->error = 0;
clock->xtime_nsec = 0;
clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -200,11 +205,44 @@ static void change_clocksource(void)
*/
}
#else
+static inline void clocksource_forward_now(void) { }
static inline void change_clocksource(void) { }
-static inline s64 __get_nsec_offset(void) { return 0; }
#endif
/**
+ * getrawmonotonic - Returns the raw monotonic time in a timespec
+ * @ts: pointer to the timespec to be set
+ *
+ * Returns the raw monotonic time (completely un-modified by ntp)
+ */
+void getrawmonotonic(struct timespec *ts)
+{
+ unsigned long seq;
+ s64 nsecs;
+ cycle_t cycle_now, cycle_delta;
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+
+ /* read clocksource: */
+ cycle_now = clocksource_read(clock);
+
+ /* calculate the delta since the last update_wall_time: */
+ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+ /* convert to nanoseconds: */
+ nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
+
+ *ts = clock->raw_time;
+
+ } while (read_seqretry(&xtime_lock, seq));
+
+ timespec_add_ns(ts, nsecs);
+}
+EXPORT_SYMBOL(getrawmonotonic);
+
+
+/**
* timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
*/
int timekeeping_valid_for_hres(void)
@@ -265,8 +303,6 @@ void __init timekeeping_init(void)
static int timekeeping_suspended;
/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;
-/* xtime offset when we went into suspend */
-static s64 timekeeping_suspend_nsecs;
/**
* timekeeping_resume - Resumes the generic timekeeping subsystem.
@@ -292,8 +328,6 @@ static int timekeeping_resume(struct sys_device *dev)
wall_to_monotonic.tv_sec -= sleep_length;
total_sleep_time += sleep_length;
}
- /* Make sure that we have the correct xtime reference */
- timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
update_xtime_cache(0);
/* re-base the last cycle value */
clock->cycle_last = 0;
@@ -319,8 +353,7 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
timekeeping_suspend_time = read_persistent_clock();
write_seqlock_irqsave(&xtime_lock, flags);
- /* Get the current xtime offset */
- timekeeping_suspend_nsecs = __get_nsec_offset();
+ clocksource_forward_now();
timekeeping_suspended = 1;
write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -454,23 +487,29 @@ void update_wall_time(void)
#else
offset = clock->cycle_interval;
#endif
- clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
+ clock->xtime_nsec = (s64)xtime.tv_nsec << clock->shift;
/* normally this loop will run just once, however in the
* case of lost or late ticks, it will accumulate correctly.
*/
while (offset >= clock->cycle_interval) {
/* accumulate one interval */
- clock->xtime_nsec += clock->xtime_interval;
- clock->cycle_last += clock->cycle_interval;
offset -= clock->cycle_interval;
+ clock->cycle_last += clock->cycle_interval;
+ clock->xtime_nsec += clock->xtime_interval;
if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
xtime.tv_sec++;
second_overflow();
}
+ clock->raw_time.tv_nsec += clock->raw_interval;
+ if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) {
+ clock->raw_time.tv_nsec -= NSEC_PER_SEC;
+ clock->raw_time.tv_sec++;
+ }
+
/* accumulate error between NTP and clock interval */
clock->error += tick_length;
clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
@@ -479,9 +518,12 @@ void update_wall_time(void)
/* correct the clock when NTP error is too big */
clocksource_adjust(offset);
- /* store full nanoseconds into xtime */
- xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
+ /* store full nanoseconds into xtime after rounding it up and
+ * add the remainder to the error difference.
+ */
+ xtime.tv_nsec = ((s64)clock->xtime_nsec >> clock->shift) + 1;
clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
+ clock->error += clock->xtime_nsec << (NTP_SCALE_SHIFT - clock->shift);
update_xtime_cache(cyc2ns(clock, offset));
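
getrawmonotonic() is the backend for the CLOCK_MONOTONIC_RAW posix clock
introduced alongside this series: monotonic time driven by mult_orig and
raw_time, untouched by NTP frequency corrections. A quick userspace consumer,
assuming a kernel with this series and a libc that defines the constant (link
with -lrt on older systems):

	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec mono, raw;

		clock_gettime(CLOCK_MONOTONIC, &mono);		/* NTP-steered */
		clock_gettime(CLOCK_MONOTONIC_RAW, &raw);	/* un-steered */

		printf("monotonic:     %ld.%09ld\n",
		       (long)mono.tv_sec, mono.tv_nsec);
		printf("monotonic_raw: %ld.%09ld\n",
		       (long)raw.tv_sec, raw.tv_nsec);
		return 0;
	}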
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 122ee751d2d1..a999b92a1277 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -47,13 +47,14 @@ static void print_name_offset(struct seq_file *m, void *sym)
}
static void
-print_timer(struct seq_file *m, struct hrtimer *timer, int idx, u64 now)
+print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
+ int idx, u64 now)
{
#ifdef CONFIG_TIMER_STATS
char tmp[TASK_COMM_LEN + 1];
#endif
SEQ_printf(m, " #%d: ", idx);
- print_name_offset(m, timer);
+ print_name_offset(m, taddr);
SEQ_printf(m, ", ");
print_name_offset(m, timer->function);
SEQ_printf(m, ", S:%02lx", timer->state);
@@ -101,7 +102,7 @@ next_one:
tmp = *timer;
spin_unlock_irqrestore(&base->cpu_base->lock, flags);
- print_timer(m, &tmp, i, now);
+ print_timer(m, timer, &tmp, i, now);
next++;
goto next_one;
}
@@ -111,6 +112,7 @@ next_one:
static void
print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
{
+ SEQ_printf(m, " .base: %p\n", base);
SEQ_printf(m, " .index: %d\n",
base->index);
SEQ_printf(m, " .resolution: %Lu nsecs\n",
@@ -185,12 +187,16 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
#ifdef CONFIG_GENERIC_CLOCKEVENTS
static void
-print_tickdevice(struct seq_file *m, struct tick_device *td)
+print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
{
struct clock_event_device *dev = td->evtdev;
SEQ_printf(m, "\n");
SEQ_printf(m, "Tick Device: mode: %d\n", td->mode);
+ if (cpu < 0)
+ SEQ_printf(m, "Broadcast device\n");
+ else
+ SEQ_printf(m, "Per CPU device: %d\n", cpu);
SEQ_printf(m, "Clock Event Device: ");
if (!dev) {
@@ -224,7 +230,7 @@ static void timer_list_show_tickdevices(struct seq_file *m)
int cpu;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
- print_tickdevice(m, tick_get_broadcast_device());
+ print_tickdevice(m, tick_get_broadcast_device(), -1);
SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
tick_get_broadcast_mask()->bits[0]);
#ifdef CONFIG_TICK_ONESHOT
@@ -234,7 +240,7 @@ static void timer_list_show_tickdevices(struct seq_file *m)
SEQ_printf(m, "\n");
#endif
for_each_online_cpu(cpu)
- print_tickdevice(m, tick_get_device(cpu));
+ print_tickdevice(m, tick_get_device(cpu), cpu);
SEQ_printf(m, "\n");
}
#else
@@ -246,7 +252,7 @@ static int timer_list_show(struct seq_file *m, void *v)
u64 now = ktime_to_ns(ktime_get());
int cpu;
- SEQ_printf(m, "Timer List Version: v0.3\n");
+ SEQ_printf(m, "Timer List Version: v0.4\n");
SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);