Diffstat (limited to 'kernel/time/timekeeping.c')
 kernel/time/timekeeping.c | 94
 1 file changed, 92 insertions(+), 2 deletions(-)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 169479994755..e6a5a6bc2769 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -131,7 +131,7 @@ static inline s64 timekeeping_get_ns_raw(void)
/* calculate the delta since the last update_wall_time: */
cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
- /* return delta convert to nanoseconds using ntp adjusted mult. */
+ /* return delta converted to nanoseconds. */
return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
}
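
For reference, clocksource_cyc2ns() converts a cycle delta to nanoseconds as (cycles * mult) >> shift. A minimal standalone sketch of that arithmetic, using a made-up mult/shift pair for a hypothetical 10 MHz clocksource (not part of this patch):

#include <stdio.h>
#include <stdint.h>

/* Same form as clocksource_cyc2ns(): scale cycles by mult, then shift down. */
static int64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return ((uint64_t)cycles * mult) >> shift;
}

int main(void)
{
	uint32_t shift = 22;			/* hypothetical clocksource shift */
	uint32_t mult  = 100u << shift;		/* i.e. 100 ns per cycle (10 MHz) */
	uint64_t cycle_delta = 12345;		/* cycles since cycle_last        */

	/* 12345 cycles * 100 ns/cycle = 1234500 ns */
	printf("%lld ns\n", (long long)cyc2ns(cycle_delta, mult, shift));
	return 0;
}

Real clocksources choose mult/shift so the product stays within 64 bits over the longest expected interval between updates.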
@@ -249,6 +249,8 @@ ktime_t ktime_get(void)
secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
nsecs += timekeeping_get_ns();
+ /* If arch requires, add in gettimeoffset() */
+ nsecs += arch_gettimeoffset();
} while (read_seqretry(&xtime_lock, seq));
/*
@@ -280,6 +282,8 @@ void ktime_get_ts(struct timespec *ts)
*ts = xtime;
tomono = wall_to_monotonic;
nsecs = timekeeping_get_ns();
+ /* If arch requires, add in gettimeoffset() */
+ nsecs += arch_gettimeoffset();
} while (read_seqretry(&xtime_lock, seq));
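
The arch_gettimeoffset() calls added here are unconditional, which only works because the hook collapses to 0 on architectures that do not use it. A rough sketch of that fallback pattern, assuming the usual CONFIG_ARCH_USES_GETTIMEOFFSET arrangement (the authoritative definition lives in include/linux/time.h, not here):

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
extern u32 arch_gettimeoffset(void);	/* arch provides an extra ns offset */
#else
static inline u32 arch_gettimeoffset(void) { return 0; }	/* folds away */
#endif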
@@ -802,14 +806,44 @@ static void timekeeping_adjust(s64 offset)
s64 error, interval = timekeeper.cycle_interval;
int adj;
+ /*
+ * The point of this is to check if the error is greater than half
+ * an interval.
+ *
+ * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
+ *
+ * Note we subtract one in the shift, so that error is really error*2.
+ * This "saves" dividing (shifting) interval twice, but keeps the
+ * (error > interval) comparison still measuring whether error is
+ * larger than half an interval.
+ *
+ * Note: It does not "save" on aggravation when reading the code.
+ */
error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
if (error > interval) {
+ /*
+ * We now divide error by 4 (via shift), which checks if
+ * the error is greater than twice the interval.
+ * If it is greater, we need a bigadjust; if it's smaller,
+ * we can adjust by 1.
+ */
error >>= 2;
+ /*
+ * XXX - In update_wall_time, we round up to the next
+ * nanosecond, and store the amount rounded up into
+ * the error. This causes the likely() below to be unlikely.
+ *
+ * The proper fix is to avoid rounding up by using
+ * the high precision timekeeper.xtime_nsec instead of
+ * xtime.tv_nsec everywhere. Fixing this will take some
+ * time.
+ */
if (likely(error <= interval))
adj = 1;
else
adj = timekeeping_bigadjust(error, &interval, &offset);
} else if (error < -interval) {
+ /* See comment above, this is just switched for the negative */
error >>= 2;
if (likely(error >= -interval)) {
adj = -1;
@@ -817,9 +851,65 @@ static void timekeeping_adjust(s64 offset)
offset = -offset;
} else
adj = timekeeping_bigadjust(error, &interval, &offset);
- } else
+ } else /* No adjustment needed */
return;
+ WARN_ONCE(timekeeper.clock->maxadj &&
+ (timekeeper.mult + adj > timekeeper.clock->mult +
+ timekeeper.clock->maxadj),
+ "Adjusting %s more than 11%% (%ld vs %ld)\n",
+ timekeeper.clock->name, (long)timekeeper.mult + adj,
+ (long)timekeeper.clock->mult +
+ timekeeper.clock->maxadj);
+ /*
+ * So the following can be confusing.
+ *
+ * To keep things simple, let's assume adj == 1 for now.
+ *
+ * When adj != 1, remember that the interval and offset values
+ * have been appropriately scaled so the math is the same.
+ *
+ * The basic idea here is that we're increasing the multiplier
+ * by one, which causes the xtime_interval to be incremented by
+ * one cycle_interval. This is because:
+ * xtime_interval = cycle_interval * mult
+ * So if mult is being incremented by one:
+ * xtime_interval = cycle_interval * (mult + 1)
+ * It's the same as:
+ * xtime_interval = (cycle_interval * mult) + cycle_interval
+ * Which can be shortened to:
+ * xtime_interval += cycle_interval
+ *
+ * So offset stores the non-accumulated cycles. Thus the current
+ * time (in shifted nanoseconds) is:
+ * now = (offset * adj) + xtime_nsec
+ * Now, even though we're adjusting the clock frequency, we have
+ * to keep time consistent. In other words, we can't jump back
+ * in time, and we also want to avoid jumping forward in time.
+ *
+ * So given the same offset value, we need the time to be the same
+ * both before and after the freq adjustment.
+ * now = (offset * adj_1) + xtime_nsec_1
+ * now = (offset * adj_2) + xtime_nsec_2
+ * So:
+ * (offset * adj_1) + xtime_nsec_1 =
+ * (offset * adj_2) + xtime_nsec_2
+ * And we know:
+ * adj_2 = adj_1 + 1
+ * So:
+ * (offset * adj_1) + xtime_nsec_1 =
+ * (offset * (adj_1+1)) + xtime_nsec_2
+ * (offset * adj_1) + xtime_nsec_1 =
+ * (offset * adj_1) + offset + xtime_nsec_2
+ * Canceling the sides:
+ * xtime_nsec_1 = offset + xtime_nsec_2
+ * Which gives us:
+ * xtime_nsec_2 = xtime_nsec_1 - offset
+ * Which simplifies to:
+ * xtime_nsec -= offset
+ *
+ * XXX - TODO: Doc ntp_error calculation.
+ */
timekeeper.mult += adj;
timekeeper.xtime_interval += interval;
timekeeper.xtime_nsec -= offset;
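
A standalone worked example of the shift tricks described in the timekeeping_adjust() comments above; the numbers are invented, only the shift-and-compare logic mirrors the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int ntp_error_shift = 10;		/* assumed scale difference      */
	int64_t interval = 1 << 20;		/* cycle_interval in shifted ns  */
	int64_t true_error = interval * 3 / 5;	/* a 0.6 * interval error        */
	int64_t ntp_error = true_error << ntp_error_shift;

	/* Shifting down by (ntp_error_shift - 1) leaves error * 2 ...          */
	int64_t error = ntp_error >> (ntp_error_shift - 1);

	/* ... so (error > interval) really tests (true_error > interval / 2).  */
	printf("adjust needed: %s\n", error > interval ? "yes" : "no");

	/* error >>= 2 leaves true_error / 2, so the next comparison against    */
	/* interval tests (true_error > 2 * interval), the bigadjust case.      */
	error >>= 2;
	printf("bigadjust needed: %s\n", error > interval ? "yes" : "no");
	return 0;
}

With a 0.6 * interval error this prints "adjust needed: yes" and "bigadjust needed: no", i.e. the adj = 1 path.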
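
And a standalone sketch of the bookkeeping identity the long comment above derives, keeping to the adj == 1 case as that comment does; the values are made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t offset = 12345;		/* non-accumulated cycles        */
	uint32_t mult = 1000;			/* current multiplier            */
	uint64_t xtime_nsec = 987654321;	/* accumulated shifted ns        */
	int adj = 1;				/* frequency adjustment          */

	uint64_t before = offset * mult + xtime_nsec;

	/* The mult and xtime_nsec updates from the end of timekeeping_adjust() */
	mult += adj;
	xtime_nsec -= offset;		/* keeps offset * mult + xtime_nsec constant */

	uint64_t after = offset * mult + xtime_nsec;

	/* before == after: the frequency change causes no jump in time. */
	printf("before=%llu after=%llu\n",
	       (unsigned long long)before, (unsigned long long)after);
	return 0;
}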