author    Thomas Gleixner <tglx@linutronix.de>    2014-07-16 21:05:16 +0000
committer John Stultz <john.stultz@linaro.org>    2014-07-23 15:01:53 -0700
commit    d28ede83791defee9a81e558540699dc46dbbe13 (patch)
tree      40df9738db287097241fe25493df982ee33b046f /kernel/time/timekeeping.c
parent    6d3aadf3e180e09dbefab16478c6876b584ce16e (diff)
timekeeping: Create struct tk_read_base and use it in struct timekeeper
The members of the new struct are the required ones for the new NMI-safe
accessor to clock monotonic. In order to reuse the existing timekeeping
code and to make the update of the fast NMI-safe timekeepers a simple
memcpy, use the struct for the timekeeper as well and convert all users.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
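For orientation: every field the converted code below reaches through
tk->tkr.* lives in the new read base. A sketch of struct tk_read_base,
reconstructed from those accesses (the struct definition itself is added
in include/linux/timekeeper_internal.h, outside this file's diff):

/* Sketch inferred from the tk->tkr.* users in this file; the
 * authoritative definition is in include/linux/timekeeper_internal.h. */
struct tk_read_base {
	struct clocksource	*clock;		/* current clocksource */
	cycle_t			(*read)(struct clocksource *cs);
	cycle_t			mask;		/* mask for counter wraparound */
	cycle_t			cycle_last;	/* cycle value at last update */
	u32			mult;		/* NTP-adjusted multiplier */
	u32			shift;		/* clock shift factor */
	u64			xtime_nsec;	/* shifted (scaled) nanoseconds */
	ktime_t			base_mono;	/* monotonic base */
};

struct timekeeper then embeds one of these as its tkr member, which is
why every hunk in the diff is a mechanical tk->X to tk->tkr.X rename.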
Diffstat (limited to 'kernel/time/timekeeping.c')
-rw-r--r--  kernel/time/timekeeping.c | 132
1 file changed, 66 insertions(+), 66 deletions(-)
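The payoff named in the commit message, updating the fast NMI-safe
timekeeper with "a simple memcpy", lands in a follow-up patch. As a
rough, illustrative sketch (the tk_fast type and update_fast_timekeeper
helper are named here for illustration, not taken from this patch), a
seqcount latch over two copies of the read base lets an NMI reader pick
a stable copy without ever spinning on the writer:

/* Illustrative only: a latched pair of read-base copies that an
 * NMI-safe accessor can read without waiting for the updater. */
struct tk_fast {
	seqcount_t		seq;
	struct tk_read_base	base[2];
};

static struct tk_fast tk_fast_mono ____cacheline_aligned;

static void update_fast_timekeeper(struct timekeeper *tk)
{
	struct tk_read_base *base = tk_fast_mono.base;

	/* Force NMI readers off to base[1] */
	raw_write_seqcount_latch(&tk_fast_mono.seq);

	/* The whole point of struct tk_read_base: one memcpy */
	memcpy(base, &tk->tkr, sizeof(*base));

	/* Force NMI readers back to the fresh base[0] */
	raw_write_seqcount_latch(&tk_fast_mono.seq);

	/* Bring base[1] up to date for the next switch */
	memcpy(base + 1, base, sizeof(*base));
}

A reader samples seq, uses base[seq & 1], and retries if seq changed
underneath it; because it never waits for the writer, it is usable from
NMI context.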
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 14b7367e6b94..ccb69980ef7e 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -52,8 +52,8 @@ bool __read_mostly persistent_clock_exist = false;
static inline void tk_normalize_xtime(struct timekeeper *tk)
{
- while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
- tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
+ while (tk->tkr.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr.shift)) {
+ tk->tkr.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr.shift;
tk->xtime_sec++;
}
}
@@ -63,20 +63,20 @@ static inline struct timespec64 tk_xtime(struct timekeeper *tk)
struct timespec64 ts;
ts.tv_sec = tk->xtime_sec;
- ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
+ ts.tv_nsec = (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
return ts;
}
static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
tk->xtime_sec = ts->tv_sec;
- tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
+ tk->tkr.xtime_nsec = (u64)ts->tv_nsec << tk->tkr.shift;
}
static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
tk->xtime_sec += ts->tv_sec;
- tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
+ tk->tkr.xtime_nsec += (u64)ts->tv_nsec << tk->tkr.shift;
tk_normalize_xtime(tk);
}
@@ -119,11 +119,11 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
u64 tmp, ntpinterval;
struct clocksource *old_clock;
- old_clock = tk->clock;
- tk->clock = clock;
- tk->read = clock->read;
- tk->mask = clock->mask;
- tk->cycle_last = tk->read(clock);
+ old_clock = tk->tkr.clock;
+ tk->tkr.clock = clock;
+ tk->tkr.read = clock->read;
+ tk->tkr.mask = clock->mask;
+ tk->tkr.cycle_last = tk->tkr.read(clock);
/* Do the ns -> cycle conversion first, using original mult */
tmp = NTP_INTERVAL_LENGTH;
@@ -147,11 +147,11 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
if (old_clock) {
int shift_change = clock->shift - old_clock->shift;
if (shift_change < 0)
- tk->xtime_nsec >>= -shift_change;
+ tk->tkr.xtime_nsec >>= -shift_change;
else
- tk->xtime_nsec <<= shift_change;
+ tk->tkr.xtime_nsec <<= shift_change;
}
- tk->shift = clock->shift;
+ tk->tkr.shift = clock->shift;
tk->ntp_error = 0;
tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
@@ -161,7 +161,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
* active clocksource. These values will be adjusted via NTP
* to counteract clock drifting.
*/
- tk->mult = clock->mult;
+ tk->tkr.mult = clock->mult;
}
/* Timekeeper helper functions. */
@@ -179,13 +179,13 @@ static inline s64 timekeeping_get_ns(struct timekeeper *tk)
s64 nsec;
/* read clocksource: */
- cycle_now = tk->read(tk->clock);
+ cycle_now = tk->tkr.read(tk->tkr.clock);
/* calculate the delta since the last update_wall_time: */
- delta = clocksource_delta(cycle_now, tk->cycle_last, tk->mask);
+ delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
- nsec = delta * tk->mult + tk->xtime_nsec;
- nsec >>= tk->shift;
+ nsec = delta * tk->tkr.mult + tk->tkr.xtime_nsec;
+ nsec >>= tk->tkr.shift;
/* If arch requires, add in get_arch_timeoffset() */
return nsec + arch_gettimeoffset();
@@ -193,15 +193,15 @@ static inline s64 timekeeping_get_ns(struct timekeeper *tk)
static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
{
- struct clocksource *clock = tk->clock;
+ struct clocksource *clock = tk->tkr.clock;
cycle_t cycle_now, delta;
s64 nsec;
/* read clocksource: */
- cycle_now = tk->read(clock);
+ cycle_now = tk->tkr.read(clock);
/* calculate the delta since the last update_wall_time: */
- delta = clocksource_delta(cycle_now, tk->cycle_last, tk->mask);
+ delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
/* convert delta to nanoseconds. */
nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
@@ -217,8 +217,8 @@ static inline void update_vsyscall(struct timekeeper *tk)
struct timespec xt;
xt = tk_xtime(tk);
- update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult,
- tk->cycle_last);
+ update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->tkr.clock, tk->tkr.mult,
+ tk->tkr.cycle_last);
}
static inline void old_vsyscall_fixup(struct timekeeper *tk)
@@ -235,11 +235,11 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
* (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
* users are removed, this can be killed.
*/
- remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
- tk->xtime_nsec -= remainder;
- tk->xtime_nsec += 1ULL << tk->shift;
+ remainder = tk->tkr.xtime_nsec & ((1ULL << tk->tkr.shift) - 1);
+ tk->tkr.xtime_nsec -= remainder;
+ tk->tkr.xtime_nsec += 1ULL << tk->tkr.shift;
tk->ntp_error += remainder << tk->ntp_error_shift;
- tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
+ tk->ntp_error -= (1ULL << tk->tkr.shift) << tk->ntp_error_shift;
}
#else
#define old_vsyscall_fixup(tk)
@@ -304,7 +304,7 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
nsec = (s64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
nsec *= NSEC_PER_SEC;
nsec += tk->wall_to_monotonic.tv_nsec;
- tk->base_mono = ns_to_ktime(nsec);
+ tk->tkr.base_mono = ns_to_ktime(nsec);
/* Update the monotonic raw base */
tk->base_raw = timespec64_to_ktime(tk->raw_time);
@@ -336,18 +336,18 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
*/
static void timekeeping_forward_now(struct timekeeper *tk)
{
- struct clocksource *clock = tk->clock;
+ struct clocksource *clock = tk->tkr.clock;
cycle_t cycle_now, delta;
s64 nsec;
- cycle_now = tk->read(clock);
- delta = clocksource_delta(cycle_now, tk->cycle_last, tk->mask);
- tk->cycle_last = cycle_now;
+ cycle_now = tk->tkr.read(clock);
+ delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
+ tk->tkr.cycle_last = cycle_now;
- tk->xtime_nsec += delta * tk->mult;
+ tk->tkr.xtime_nsec += delta * tk->tkr.mult;
/* If arch requires, add in get_arch_timeoffset() */
- tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift;
+ tk->tkr.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr.shift;
tk_normalize_xtime(tk);
@@ -412,7 +412,7 @@ ktime_t ktime_get(void)
do {
seq = read_seqcount_begin(&tk_core.seq);
- base = tk->base_mono;
+ base = tk->tkr.base_mono;
nsecs = timekeeping_get_ns(tk);
} while (read_seqcount_retry(&tk_core.seq, seq));
@@ -438,7 +438,7 @@ ktime_t ktime_get_with_offset(enum tk_offsets offs)
do {
seq = read_seqcount_begin(&tk_core.seq);
- base = ktime_add(tk->base_mono, *offset);
+ base = ktime_add(tk->tkr.base_mono, *offset);
nsecs = timekeeping_get_ns(tk);
} while (read_seqcount_retry(&tk_core.seq, seq));
@@ -731,7 +731,7 @@ static int change_clocksource(void *data)
*/
if (try_module_get(new->owner)) {
if (!new->enable || new->enable(new) == 0) {
- old = tk->clock;
+ old = tk->tkr.clock;
tk_setup_internals(tk, new);
if (old->disable)
old->disable(old);
@@ -759,11 +759,11 @@ int timekeeping_notify(struct clocksource *clock)
{
struct timekeeper *tk = &tk_core.timekeeper;
- if (tk->clock == clock)
+ if (tk->tkr.clock == clock)
return 0;
stop_machine(change_clocksource, clock, NULL);
tick_clock_notify();
- return tk->clock == clock ? 0 : -1;
+ return tk->tkr.clock == clock ? 0 : -1;
}
/**
@@ -803,7 +803,7 @@ int timekeeping_valid_for_hres(void)
do {
seq = read_seqcount_begin(&tk_core.seq);
- ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
+ ret = tk->tkr.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
} while (read_seqcount_retry(&tk_core.seq, seq));
@@ -822,7 +822,7 @@ u64 timekeeping_max_deferment(void)
do {
seq = read_seqcount_begin(&tk_core.seq);
- ret = tk->clock->max_idle_ns;
+ ret = tk->tkr.clock->max_idle_ns;
} while (read_seqcount_retry(&tk_core.seq, seq));
@@ -989,7 +989,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
static void timekeeping_resume(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
- struct clocksource *clock = tk->clock;
+ struct clocksource *clock = tk->tkr.clock;
unsigned long flags;
struct timespec64 ts_new, ts_delta;
struct timespec tmp;
@@ -1017,16 +1017,16 @@ static void timekeeping_resume(void)
* The less preferred source will only be tried if there is no better
* usable source. The rtc part is handled separately in rtc core code.
*/
- cycle_now = tk->read(clock);
+ cycle_now = tk->tkr.read(clock);
if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
- cycle_now > tk->cycle_last) {
+ cycle_now > tk->tkr.cycle_last) {
u64 num, max = ULLONG_MAX;
u32 mult = clock->mult;
u32 shift = clock->shift;
s64 nsec = 0;
- cycle_delta = clocksource_delta(cycle_now, tk->cycle_last,
- tk->mask);
+ cycle_delta = clocksource_delta(cycle_now, tk->tkr.cycle_last,
+ tk->tkr.mask);
/*
* "cycle_delta * mutl" may cause 64 bits overflow, if the
@@ -1052,7 +1052,7 @@ static void timekeeping_resume(void)
__timekeeping_inject_sleeptime(tk, &ts_delta);
/* Re-base the last cycle value */
- tk->cycle_last = cycle_now;
+ tk->tkr.cycle_last = cycle_now;
tk->ntp_error = 0;
timekeeping_suspended = 0;
timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
@@ -1239,12 +1239,12 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
}
}
- if (unlikely(tk->clock->maxadj &&
- (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
+ if (unlikely(tk->tkr.clock->maxadj &&
+ (tk->tkr.mult + adj > tk->tkr.clock->mult + tk->tkr.clock->maxadj))) {
printk_deferred_once(KERN_WARNING
"Adjusting %s more than 11%% (%ld vs %ld)\n",
- tk->clock->name, (long)tk->mult + adj,
- (long)tk->clock->mult + tk->clock->maxadj);
+ tk->tkr.clock->name, (long)tk->tkr.mult + adj,
+ (long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
}
/*
* So the following can be confusing.
@@ -1295,9 +1295,9 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
*
* XXX - TODO: Doc ntp_error calculation.
*/
- tk->mult += adj;
+ tk->tkr.mult += adj;
tk->xtime_interval += interval;
- tk->xtime_nsec -= offset;
+ tk->tkr.xtime_nsec -= offset;
tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
out_adjust:
@@ -1315,9 +1315,9 @@ out_adjust:
* We'll correct this error next time through this function, when
* xtime_nsec is not as small.
*/
- if (unlikely((s64)tk->xtime_nsec < 0)) {
- s64 neg = -(s64)tk->xtime_nsec;
- tk->xtime_nsec = 0;
+ if (unlikely((s64)tk->tkr.xtime_nsec < 0)) {
+ s64 neg = -(s64)tk->tkr.xtime_nsec;
+ tk->tkr.xtime_nsec = 0;
tk->ntp_error += neg << tk->ntp_error_shift;
}
@@ -1333,13 +1333,13 @@ out_adjust:
*/
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
- u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
+ u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr.shift;
unsigned int clock_set = 0;
- while (tk->xtime_nsec >= nsecps) {
+ while (tk->tkr.xtime_nsec >= nsecps) {
int leap;
- tk->xtime_nsec -= nsecps;
+ tk->tkr.xtime_nsec -= nsecps;
tk->xtime_sec++;
/* Figure out if it's a leap sec and apply if needed */
@@ -1384,9 +1384,9 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
/* Accumulate one shifted interval */
offset -= interval;
- tk->cycle_last += interval;
+ tk->tkr.cycle_last += interval;
- tk->xtime_nsec += tk->xtime_interval << shift;
+ tk->tkr.xtime_nsec += tk->xtime_interval << shift;
*clock_set |= accumulate_nsecs_to_secs(tk);
/* Accumulate raw time */
@@ -1429,8 +1429,8 @@ void update_wall_time(void)
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
offset = real_tk->cycle_interval;
#else
- offset = clocksource_delta(tk->read(tk->clock), tk->cycle_last,
- tk->mask);
+ offset = clocksource_delta(tk->tkr.read(tk->tkr.clock),
+ tk->tkr.cycle_last, tk->tkr.mask);
#endif
/* Check if there's really nothing to do */
@@ -1591,8 +1591,8 @@ ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
do {
seq = read_seqcount_begin(&tk_core.seq);
- base = tk->base_mono;
- nsecs = tk->xtime_nsec >> tk->shift;
+ base = tk->tkr.base_mono;
+ nsecs = tk->tkr.xtime_nsec >> tk->tkr.shift;
*offs_real = tk->offs_real;
*offs_boot = tk->offs_boot;
@@ -1623,7 +1623,7 @@ ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
do {
seq = read_seqcount_begin(&tk_core.seq);
- base = tk->base_mono;
+ base = tk->tkr.base_mono;
nsecs = timekeeping_get_ns(tk);
*offs_real = tk->offs_real;