author     Linus Torvalds <torvalds@linux-foundation.org>  2017-11-13 12:15:40 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-11-13 12:15:40 -0800
commit     f08d8bcc12de5a153e587027e77de83662eefb8a (patch)
tree       301fedc4a3ba056db2699b471f4cb69edc2151d3 /arch/ia64
parent     f3573b8f902c507c721999cc669fbb7e045081b8 (diff)
parent     d4d1fc61eb38ff8e5af657e2d2f2290859a277f2 (diff)
Merge tag 'please-pull-gettime_vsyscall_update' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux
Pull ia64 update from Tony Luck:
 "Stop ia64 being the last holdout using GENERIC_TIME_VSYSCALL_OLD so
  that John Stultz can drop that code"

* tag 'please-pull-gettime_vsyscall_update' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux:
  ia64: Update fsyscall gettime to use modern vsyscall_update
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                      |  2
-rw-r--r--  arch/ia64/kernel/asm-offsets.c         |  2
-rw-r--r--  arch/ia64/kernel/fsys.S                |  8
-rw-r--r--  arch/ia64/kernel/fsyscall_gtod_data.h  | 10
-rw-r--r--  arch/ia64/kernel/time.c                | 40
5 files changed, 36 insertions(+), 26 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1efc444f5fa1..49583c5a5d44 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -47,7 +47,7 @@ config IA64
select ARCH_TASK_STRUCT_ALLOCATOR
select ARCH_THREAD_STACK_ALLOCATOR
select ARCH_CLOCKSOURCE_DATA
- select GENERIC_TIME_VSYSCALL_OLD
+ select GENERIC_TIME_VSYSCALL
select SYSCTL_ARCH_UNALIGN_NO_WARN
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
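
The Kconfig switch determines which update hook the timekeeping core expects the
architecture to implement. A minimal sketch of the two interfaces, assuming the
declarations in include/linux/timekeeper_internal.h for this kernel generation
(the old signature also appears verbatim in the time.c hunk below):

	/* old interface, only available with GENERIC_TIME_VSYSCALL_OLD */
	extern void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
					struct clocksource *c, u32 mult,
					u64 cycle_last);

	/* modern interface: the architecture pulls everything from the timekeeper */
	extern void update_vsyscall(struct timekeeper *tk);
	extern void update_vsyscall_tz(void);

With GENERIC_TIME_VSYSCALL selected, the core no longer pre-splits wall/monotonic
time for the arch, which is why time.c below reads tk->xtime_sec,
tk->tkr_mono.xtime_nsec and friends directly.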
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index b385ff2bf6ce..f7693f49c573 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -212,6 +212,8 @@ void foo(void)
BLANK();
DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET,
offsetof (struct timespec, tv_nsec));
+ DEFINE(IA64_TIME_SN_SPEC_SNSEC_OFFSET,
+ offsetof (struct time_sn_spec, snsec));
DEFINE(CLONE_SETTLS_BIT, 19);
#if CLONE_SETTLS != (1<<19)
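
Each DEFINE() in asm-offsets.c becomes a #define in the generated
include/generated/asm-offsets.h, which is how fsys.S addresses C struct members by
constant offset. A hypothetical excerpt of the generated header after this change,
with the values following from struct time_sn_spec being two consecutive u64 fields
(and timespec being two 64-bit fields on ia64):

	#define IA64_TIMESPEC_TV_NSEC_OFFSET 8      /* offsetof (struct timespec, tv_nsec) */
	#define IA64_TIME_SN_SPEC_SNSEC_OFFSET 8    /* offsetof (struct time_sn_spec, snsec) */

fsys.S then uses the constant as a post-increment on the ld8 that fetches the
seconds field, as in the hunk below.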
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index c0e7c9af2bb9..fe742ffafc7a 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -236,9 +236,9 @@ ENTRY(fsys_gettimeofday)
MOV_FROM_ITC(p8, p6, r2, r10) // CPU_TIMER. 36 clocks latency!!!
(p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues..
(p13) ld8 r25 = [r19] // get itc_lastcycle value
- ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET // tv_sec
+ ld8 r9 = [r22],IA64_TIME_SN_SPEC_SNSEC_OFFSET // sec
;;
- ld8 r8 = [r22],-IA64_TIMESPEC_TV_NSEC_OFFSET // tv_nsec
+ ld8 r8 = [r22],-IA64_TIME_SN_SPEC_SNSEC_OFFSET // snsec
(p13) sub r3 = r25,r2 // Diff needed before comparison (thanks davidm)
;;
(p13) cmp.gt.unc p6,p7 = r3,r0 // check if it is less than last. p6,p7 cleared
@@ -266,9 +266,9 @@ EX(.fail_efault, probe.w.fault r31, 3)
mf
;;
ld4 r10 = [r20] // gtod_lock.sequence
- shr.u r2 = r2,r23 // shift by factor
- ;;
add r8 = r8,r2 // Add xtime.nsecs
+ ;;
+ shr.u r8 = r8,r23 // shift by factor
cmp4.ne p7,p0 = r28,r10
(p7) br.cond.dpnt.few .time_redo // sequence number changed, redo
// End critical section.
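
The instruction reordering follows from the switch to shifted nanoseconds: the
stored value is already scaled by 2^clk_shift, so the scaled clocksource delta
(cycles * mult, which r2 holds at this point) can be added first and the sum shifted
down once. A standalone C rendering of the before/after arithmetic, with
illustrative parameter names rather than the real register assignments:

	#include <stdint.h>

	/* before: xtime.tv_nsec held plain nanoseconds */
	static uint64_t gettime_nsec_old(uint64_t tv_nsec, uint64_t delta_cycles,
					 uint64_t clk_mult, unsigned clk_shift)
	{
		return tv_nsec + ((delta_cycles * clk_mult) >> clk_shift);
	}

	/* after: snsec holds "shifted" nanoseconds, i.e. nsec << clk_shift */
	static uint64_t gettime_nsec_new(uint64_t snsec, uint64_t delta_cycles,
					 uint64_t clk_mult, unsigned clk_shift)
	{
		return (snsec + delta_cycles * clk_mult) >> clk_shift;
	}

Deferring the single right shift to the end keeps the sub-shift bits of xtime_nsec
in play, which matches how the generic timekeeping code accumulates time.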
diff --git a/arch/ia64/kernel/fsyscall_gtod_data.h b/arch/ia64/kernel/fsyscall_gtod_data.h
index 0914c02a1eb0..cc2861445965 100644
--- a/arch/ia64/kernel/fsyscall_gtod_data.h
+++ b/arch/ia64/kernel/fsyscall_gtod_data.h
@@ -6,10 +6,16 @@
* fsyscall gettimeofday data
*/
+/* like timespec, but includes "shifted nanoseconds" */
+struct time_sn_spec {
+ u64 sec;
+ u64 snsec;
+};
+
struct fsyscall_gtod_data_t {
seqcount_t seq;
- struct timespec wall_time;
- struct timespec monotonic_time;
+ struct time_sn_spec wall_time;
+ struct time_sn_spec monotonic_time;
u64 clk_mask;
u32 clk_mult;
u32 clk_shift;
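
For illustration, here is roughly what a lock-free read of this structure looks like
in C; the real consumer is the hand-written assembly in fsys.S, which checks
gtod_lock.sequence itself, so this is a hypothetical helper, not kernel code:

	#include <linux/seqlock.h>
	#include <linux/time64.h>
	#include <linux/types.h>

	static struct timespec64 fsyscall_read_wall(struct fsyscall_gtod_data_t *g)
	{
		struct timespec64 ts;
		unsigned int seq;
		u64 snsec;

		do {
			seq = read_seqcount_begin(&g->seq);
			ts.tv_sec = g->wall_time.sec;
			snsec = g->wall_time.snsec;
			/* the real fast path also adds the scaled clocksource delta here */
		} while (read_seqcount_retry(&g->seq, seq));

		ts.tv_nsec = snsec >> g->clk_shift;	/* shifted nsec back to nsec */
		return ts;
	}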
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index aa7be020a904..c6ecb97151a2 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -430,30 +430,32 @@ void update_vsyscall_tz(void)
{
}
-void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
- struct clocksource *c, u32 mult, u64 cycle_last)
+void update_vsyscall(struct timekeeper *tk)
{
write_seqcount_begin(&fsyscall_gtod_data.seq);
- /* copy fsyscall clock data */
- fsyscall_gtod_data.clk_mask = c->mask;
- fsyscall_gtod_data.clk_mult = mult;
- fsyscall_gtod_data.clk_shift = c->shift;
- fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
- fsyscall_gtod_data.clk_cycle_last = cycle_last;
-
- /* copy kernel time structures */
- fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
- fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
- fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
- + wall->tv_sec;
- fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
- + wall->tv_nsec;
+ /* copy vsyscall data */
+ fsyscall_gtod_data.clk_mask = tk->tkr_mono.mask;
+ fsyscall_gtod_data.clk_mult = tk->tkr_mono.mult;
+ fsyscall_gtod_data.clk_shift = tk->tkr_mono.shift;
+ fsyscall_gtod_data.clk_fsys_mmio = tk->tkr_mono.clock->archdata.fsys_mmio;
+ fsyscall_gtod_data.clk_cycle_last = tk->tkr_mono.cycle_last;
+
+ fsyscall_gtod_data.wall_time.sec = tk->xtime_sec;
+ fsyscall_gtod_data.wall_time.snsec = tk->tkr_mono.xtime_nsec;
+
+ fsyscall_gtod_data.monotonic_time.sec = tk->xtime_sec
+ + tk->wall_to_monotonic.tv_sec;
+ fsyscall_gtod_data.monotonic_time.snsec = tk->tkr_mono.xtime_nsec
+ + ((u64)tk->wall_to_monotonic.tv_nsec
+ << tk->tkr_mono.shift);
/* normalize */
- while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
- fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
- fsyscall_gtod_data.monotonic_time.tv_sec++;
+ while (fsyscall_gtod_data.monotonic_time.snsec >=
+ (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
+ fsyscall_gtod_data.monotonic_time.snsec -=
+ ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
+ fsyscall_gtod_data.monotonic_time.sec++;
}
write_seqcount_end(&fsyscall_gtod_data.seq);
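
The while loop exists because the monotonic snsec is built as xtime_nsec plus
wall_to_monotonic.tv_nsec shifted up, so it can exceed one second expressed in
shifted units. A self-contained sketch of that normalization with made-up example
values (the clk_shift, seconds and nanoseconds here are illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000ULL

	int main(void)
	{
		unsigned shift = 20;				/* example clk_shift */
		uint64_t sec = 1510600000;			/* example xtime_sec + wtm.tv_sec */
		uint64_t snsec = (900000000ULL << shift)	/* example xtime_nsec (already shifted) */
			       + (700000000ULL << shift);	/* example wtm.tv_nsec << shift */

		while (snsec >= (NSEC_PER_SEC << shift)) {	/* carry whole seconds out of snsec */
			snsec -= NSEC_PER_SEC << shift;
			sec++;
		}
		printf("monotonic: %llu s + %llu ns\n",
		       (unsigned long long)sec,
		       (unsigned long long)(snsec >> shift));
		return 0;
	}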