Diffstat (limited to 'arch')
 arch/arm/Kconfig                                   |  1
 arch/arm/include/asm/clocksource.h                 |  7
 arch/arm/include/asm/cp15.h                        | 20
 arch/arm/include/asm/processor.h                   | 11
 arch/arm/include/asm/vdso/clocksource.h            |  8
 arch/arm/include/asm/vdso/cp15.h                   | 38
 arch/arm/include/asm/vdso/gettimeofday.h           | 22
 arch/arm/include/asm/vdso/processor.h              | 22
 arch/arm/include/asm/vdso/vsyscall.h               | 35
 arch/arm64/Kconfig                                 |  1
 arch/arm64/include/asm/clocksource.h               |  4
 arch/arm64/include/asm/processor.h                 |  7
 arch/arm64/include/asm/vdso/clocksource.h          |  8
 arch/arm64/include/asm/vdso/compat_gettimeofday.h  | 21
 arch/arm64/include/asm/vdso/gettimeofday.h         | 12
 arch/arm64/include/asm/vdso/processor.h            | 17
 arch/arm64/include/asm/vdso/vsyscall.h             |  9
 arch/arm64/kernel/vdso/vgettimeofday.c             |  2
 arch/arm64/kernel/vdso32/Makefile                  | 11
 arch/arm64/kernel/vdso32/vgettimeofday.c           | 14
 arch/mips/Kconfig                                  |  1
 arch/mips/include/asm/clocksource.h                | 16
 arch/mips/include/asm/processor.h                  | 16
 arch/mips/include/asm/vdso/clocksource.h           |  9
 arch/mips/include/asm/vdso/gettimeofday.h          | 39
 arch/mips/include/asm/vdso/processor.h             | 27
 arch/mips/include/asm/vdso/vsyscall.h              |  9
 arch/mips/kernel/csrc-r4k.c                        |  2
 arch/x86/Kconfig                                   |  1
 arch/x86/entry/vdso/vma.c                          |  8
 arch/x86/include/asm/clocksource.h                 | 20
 arch/x86/include/asm/mshyperv.h                    |  4
 arch/x86/include/asm/processor.h                   | 12
 arch/x86/include/asm/vdso/clocksource.h            | 10
 arch/x86/include/asm/vdso/gettimeofday.h           |  6
 arch/x86/include/asm/vdso/processor.h              | 23
 arch/x86/include/asm/vdso/vsyscall.h               | 15
 arch/x86/include/asm/vgtod.h                       | 12
 arch/x86/kernel/kvmclock.c                         |  9
 arch/x86/kernel/pvclock.c                          |  2
 arch/x86/kernel/time.c                             | 12
 arch/x86/kernel/tsc.c                              | 32
 arch/x86/kvm/trace.h                               |  4
 arch/x86/kvm/x86.c                                 | 22
 arch/x86/xen/time.c                                | 36
 45 files changed, 326 insertions(+), 291 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 97864aabc2a6..03bbfc312fe7 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -3,7 +3,6 @@ config ARM
bool
default y
select ARCH_32BIT_OFF_T
- select ARCH_CLOCKSOURCE_DATA
select ARCH_HAS_BINFMT_FLAT
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEVMEM_IS_ALLOWED
diff --git a/arch/arm/include/asm/clocksource.h b/arch/arm/include/asm/clocksource.h
index 0b350a7e26f3..13651c731a81 100644
--- a/arch/arm/include/asm/clocksource.h
+++ b/arch/arm/include/asm/clocksource.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_CLOCKSOURCE_H
#define _ASM_CLOCKSOURCE_H
-struct arch_clocksource_data {
- bool vdso_direct; /* Usable for direct VDSO access? */
-};
+#include <asm/vdso/clocksource.h>
-#endif
+#endif /* _ASM_CLOCKSOURCE_H */
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index d2453e2d3f1f..a54230e65647 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -50,25 +50,7 @@
#ifdef CONFIG_CPU_CP15
-#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
- "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
-#define __ACCESS_CP15_64(Op1, CRm) \
- "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
-
-#define __read_sysreg(r, w, c, t) ({ \
- t __val; \
- asm volatile(r " " c : "=r" (__val)); \
- __val; \
-})
-#define read_sysreg(...) __read_sysreg(__VA_ARGS__)
-
-#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
-#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
-
-#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
-#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
-
-#define CNTVCT __ACCESS_CP15_64(1, c14)
+#include <asm/vdso/cp15.h>
extern unsigned long cr_alignment; /* defined in entry-armv.S */
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 614bf829e454..b9241051e5cb 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -14,6 +14,7 @@
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/unified.h>
+#include <asm/vdso/processor.h>
#ifdef __KERNEL__
#define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \
@@ -85,16 +86,6 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
-#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
-#define cpu_relax() \
- do { \
- smp_mb(); \
- __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
- } while (0)
-#else
-#define cpu_relax() barrier()
-#endif
-
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
diff --git a/arch/arm/include/asm/vdso/clocksource.h b/arch/arm/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000000..50c0b19fb755
--- /dev/null
+++ b/arch/arm/include/asm/vdso/clocksource.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_ARCHTIMER
+
+#endif /* __ASM_VDSOCLOCKSOURCE_H */
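Each architecture's VDSO_ARCH_CLOCKMODES list is spliced into one generic enum, so all architectures share VDSO_CLOCKMODE_NONE and VDSO_CLOCKMODE_MAX. A sketch of the consumer side, which this series introduces as include/vdso/clocksource.h (outside arch/, hence absent from this diffstat; details approximate):

/* include/vdso/clocksource.h, approximate sketch */
enum vdso_clock_mode {
	VDSO_CLOCKMODE_NONE,
#ifdef CONFIG_GENERIC_GETTIMEOFDAY
	VDSO_ARCH_CLOCKMODES,	/* e.g. VDSO_CLOCKMODE_ARCHTIMER on arm */
#endif
	VDSO_CLOCKMODE_MAX,
};

VDSO_CLOCKMODE_MAX is what the BUILD_BUG_ON() in the x86 vma.c hunk below checks against the 32-bit vclocks_used mask.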
diff --git a/arch/arm/include/asm/vdso/cp15.h b/arch/arm/include/asm/vdso/cp15.h
new file mode 100644
index 000000000000..bed16fa1865e
--- /dev/null
+++ b/arch/arm/include/asm/vdso/cp15.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_VDSO_CP15_H
+#define __ASM_VDSO_CP15_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_CPU_CP15
+
+#include <linux/stringify.h>
+
+#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
+ "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
+#define __ACCESS_CP15_64(Op1, CRm) \
+ "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
+
+#define __read_sysreg(r, w, c, t) ({ \
+ t __val; \
+ asm volatile(r " " c : "=r" (__val)); \
+ __val; \
+})
+#define read_sysreg(...) __read_sysreg(__VA_ARGS__)
+
+#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
+#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
+
+#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
+#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
+
+#define CNTVCT __ACCESS_CP15_64(1, c14)
+
+#endif /* CONFIG_CPU_CP15 */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_CP15_H */
diff --git a/arch/arm/include/asm/vdso/gettimeofday.h b/arch/arm/include/asm/vdso/gettimeofday.h
index fe6e1f65932d..36dc18553ed8 100644
--- a/arch/arm/include/asm/vdso/gettimeofday.h
+++ b/arch/arm/include/asm/vdso/gettimeofday.h
@@ -7,9 +7,9 @@
#ifndef __ASSEMBLY__
-#include <asm/barrier.h>
-#include <asm/cp15.h>
+#include <asm/errno.h>
#include <asm/unistd.h>
+#include <asm/vdso/cp15.h>
#include <uapi/linux/time.h>
#define VDSO_HAS_CLOCK_GETRES 1
@@ -106,20 +106,32 @@ static __always_inline int clock_getres32_fallback(
return ret;
}
+static inline bool arm_vdso_hres_capable(void)
+{
+ return IS_ENABLED(CONFIG_ARM_ARCH_TIMER);
+}
+#define __arch_vdso_hres_capable arm_vdso_hres_capable
+
static __always_inline u64 __arch_get_hw_counter(int clock_mode)
{
#ifdef CONFIG_ARM_ARCH_TIMER
u64 cycle_now;
- if (!clock_mode)
- return -EINVAL;
+ /*
+ * Core checks for mode already, so this raced against a concurrent
+ * update. Return something. Core will do another round and then
+ * see the mode change and fall back to the syscall.
+ */
+ if (clock_mode == VDSO_CLOCKMODE_NONE)
+ return 0;
isb();
cycle_now = read_sysreg(CNTVCT);
return cycle_now;
#else
- return -EINVAL; /* use fallback */
+ /* Make GCC happy. This is compiled out anyway */
+ return 0;
#endif
}
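Returning 0 for VDSO_CLOCKMODE_NONE instead of the old -EINVAL sentinel is safe because of how the generic code consumes the counter. A heavily simplified sketch of the read loop in lib/vdso/gettimeofday.c (paraphrased; names approximate):

/* do_hres() in lib/vdso/gettimeofday.c, simplified sketch */
do {
	seq = vdso_read_begin(vd);
	if (unlikely(vd->clock_mode == VDSO_CLOCKMODE_NONE))
		return -1;	/* caller issues the real syscall */
	cycles = __arch_get_hw_counter(vd->clock_mode);
	ns = vdso_ts->nsec;
	last = vd->cycle_last;
	ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
	ns >>= vd->shift;
	sec = vdso_ts->sec;
} while (unlikely(vdso_read_retry(vd, seq)));

A bogus 0 read during a concurrent mode change never escapes the loop: the seqcount retry re-runs the body, the mode check then sees VDSO_CLOCKMODE_NONE, and the caller falls back to the syscall, exactly as the new comment describes.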
diff --git a/arch/arm/include/asm/vdso/processor.h b/arch/arm/include/asm/vdso/processor.h
new file mode 100644
index 000000000000..45efb3ff511c
--- /dev/null
+++ b/arch/arm/include/asm/vdso/processor.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
+#define cpu_relax() \
+ do { \
+ smp_mb(); \
+ __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
+ } while (0)
+#else
+#define cpu_relax() barrier()
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/arm/include/asm/vdso/vsyscall.h b/arch/arm/include/asm/vdso/vsyscall.h
index cff87d8d30da..47e41ae8ccd0 100644
--- a/arch/arm/include/asm/vdso/vsyscall.h
+++ b/arch/arm/include/asm/vdso/vsyscall.h
@@ -11,18 +11,6 @@
extern struct vdso_data *vdso_data;
extern bool cntvct_ok;
-static __always_inline
-bool tk_is_cntvct(const struct timekeeper *tk)
-{
- if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
- return false;
-
- if (!tk->tkr_mono.clock->archdata.vdso_direct)
- return false;
-
- return true;
-}
-
/*
* Update the vDSO data page to keep in sync with kernel timekeeping.
*/
@@ -34,29 +22,6 @@ struct vdso_data *__arm_get_k_vdso_data(void)
#define __arch_get_k_vdso_data __arm_get_k_vdso_data
static __always_inline
-bool __arm_update_vdso_data(void)
-{
- return cntvct_ok;
-}
-#define __arch_update_vdso_data __arm_update_vdso_data
-
-static __always_inline
-int __arm_get_clock_mode(struct timekeeper *tk)
-{
- u32 __tk_is_cntvct = tk_is_cntvct(tk);
-
- return __tk_is_cntvct;
-}
-#define __arch_get_clock_mode __arm_get_clock_mode
-
-static __always_inline
-int __arm_use_vsyscall(struct vdso_data *vdata)
-{
- return vdata[CS_HRES_COARSE].clock_mode;
-}
-#define __arch_use_vsyscall __arm_use_vsyscall
-
-static __always_inline
void __arm_sync_vdso_data(struct vdso_data *vdata)
{
flush_dcache_page(virt_to_page(vdata));
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0b30e884e088..c6c32fb7f546 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -9,7 +9,6 @@ config ARM64
select ACPI_MCFG if (ACPI && PCI)
select ACPI_SPCR_TABLE if ACPI
select ACPI_PPTT if ACPI
- select ARCH_CLOCKSOURCE_DATA
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_PREP_COHERENT
diff --git a/arch/arm64/include/asm/clocksource.h b/arch/arm64/include/asm/clocksource.h
index 0ece64a26c8c..482185566b0c 100644
--- a/arch/arm64/include/asm/clocksource.h
+++ b/arch/arm64/include/asm/clocksource.h
@@ -2,8 +2,6 @@
#ifndef _ASM_CLOCKSOURCE_H
#define _ASM_CLOCKSOURCE_H
-struct arch_clocksource_data {
- bool vdso_direct; /* Usable for direct VDSO access? */
-};
+#include <asm/vdso/clocksource.h>
#endif
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 5ba63204d078..e51ef2dc5749 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -28,6 +28,8 @@
#include <linux/string.h>
#include <linux/thread_info.h>
+#include <vdso/processor.h>
+
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/hw_breakpoint.h>
@@ -256,11 +258,6 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
-static inline void cpu_relax(void)
-{
- asm volatile("yield" ::: "memory");
-}
-
/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
struct task_struct *next);
diff --git a/arch/arm64/include/asm/vdso/clocksource.h b/arch/arm64/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000000..df6ea65c1dec
--- /dev/null
+++ b/arch/arm64/include/asm/vdso/clocksource.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_ARCHTIMER
+
+#endif
diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
index 537b1e695365..b6907ae78e53 100644
--- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
@@ -8,12 +8,10 @@
#ifndef __ASSEMBLY__
#include <asm/unistd.h>
-#include <uapi/linux/time.h>
+#include <asm/errno.h>
#include <asm/vdso/compat_barrier.h>
-#define __VDSO_USE_SYSCALL ULLONG_MAX
-
#define VDSO_HAS_CLOCK_GETRES 1
#define BUILD_VDSO32 1
@@ -78,10 +76,6 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
register long ret asm ("r0");
register long nr asm("r7") = __NR_compat_clock_getres_time64;
- /* The checks below are required for ABI consistency with arm */
- if ((_clkid >= MAX_CLOCKS) && (_ts == NULL))
- return -EINVAL;
-
asm volatile(
" swi #0\n"
: "=r" (ret)
@@ -99,10 +93,6 @@ int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
register long ret asm ("r0");
register long nr asm("r7") = __NR_compat_clock_getres;
- /* The checks below are required for ABI consistency with arm */
- if ((_clkid >= MAX_CLOCKS) && (_ts == NULL))
- return -EINVAL;
-
asm volatile(
" swi #0\n"
: "=r" (ret)
@@ -117,11 +107,12 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
u64 res;
/*
- * clock_mode == 0 implies that vDSO are enabled otherwise
- * fallback on syscall.
+ * Core checks for mode already, so this raced against a concurrent
+ * update. Return something. Core will do another round and then
+ * see the mode change and fall back to the syscall.
*/
- if (clock_mode)
- return __VDSO_USE_SYSCALL;
+ if (clock_mode == VDSO_CLOCKMODE_NONE)
+ return 0;
/*
* This isb() is required to prevent that the counter value
diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h
index b08f476b72b4..afba6ba332f8 100644
--- a/arch/arm64/include/asm/vdso/gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/gettimeofday.h
@@ -8,9 +8,6 @@
#ifndef __ASSEMBLY__
#include <asm/unistd.h>
-#include <uapi/linux/time.h>
-
-#define __VDSO_USE_SYSCALL ULLONG_MAX
#define VDSO_HAS_CLOCK_GETRES 1
@@ -71,11 +68,12 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
u64 res;
/*
- * clock_mode == 0 implies that vDSO are enabled otherwise
- * fallback on syscall.
+ * Core checks for mode already, so this raced against a concurrent
+ * update. Return something. Core will do another round and then
+ * see the mode change and fall back to the syscall.
*/
- if (clock_mode)
- return __VDSO_USE_SYSCALL;
+ if (clock_mode == VDSO_CLOCKMODE_NONE)
+ return 0;
/*
* This isb() is required to prevent that the counter value
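The hunk cuts off inside an unchanged comment; for context, the rest of __arch_get_hw_counter() reads the counter roughly like this (paraphrased from the arm64 sources of this period, not part of the patch):

	 * is speculated.
	 */
	isb();
	asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
	/*
	 * A second isb() prevents the seq lock read from being
	 * speculated ahead of the counter read.
	 */
	isb();

	return res;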
diff --git a/arch/arm64/include/asm/vdso/processor.h b/arch/arm64/include/asm/vdso/processor.h
new file mode 100644
index 000000000000..ff830b766ad2
--- /dev/null
+++ b/arch/arm64/include/asm/vdso/processor.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+static inline void cpu_relax(void)
+{
+ asm volatile("yield" ::: "memory");
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/vdso/vsyscall.h b/arch/arm64/include/asm/vdso/vsyscall.h
index 0c20a7c1bee5..f94b1457c117 100644
--- a/arch/arm64/include/asm/vdso/vsyscall.h
+++ b/arch/arm64/include/asm/vdso/vsyscall.h
@@ -22,15 +22,6 @@ struct vdso_data *__arm64_get_k_vdso_data(void)
#define __arch_get_k_vdso_data __arm64_get_k_vdso_data
static __always_inline
-int __arm64_get_clock_mode(struct timekeeper *tk)
-{
- u32 use_syscall = !tk->tkr_mono.clock->archdata.vdso_direct;
-
- return use_syscall;
-}
-#define __arch_get_clock_mode __arm64_get_clock_mode
-
-static __always_inline
void __arm64_update_vsyscall(struct vdso_data *vdata, struct timekeeper *tk)
{
vdata[CS_HRES_COARSE].mask = VDSO_PRECISION_MASK;
diff --git a/arch/arm64/kernel/vdso/vgettimeofday.c b/arch/arm64/kernel/vdso/vgettimeofday.c
index 747635501a14..4236cf34d7d9 100644
--- a/arch/arm64/kernel/vdso/vgettimeofday.c
+++ b/arch/arm64/kernel/vdso/vgettimeofday.c
@@ -5,8 +5,6 @@
* Copyright (C) 2018 ARM Limited
*
*/
-#include <linux/time.h>
-#include <linux/types.h>
int __kernel_clock_gettime(clockid_t clock,
struct __kernel_timespec *ts)
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 04df57b43cb1..3964738ebbde 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -10,7 +10,18 @@ include $(srctree)/lib/vdso/Makefile
# Same as cc-*option, but using CC_COMPAT instead of CC
ifeq ($(CONFIG_CC_IS_CLANG), y)
+COMPAT_GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE_COMPAT)elfedit))
+COMPAT_GCC_TOOLCHAIN := $(realpath $(COMPAT_GCC_TOOLCHAIN_DIR)/..)
+
+CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%))
+CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)
+CC_COMPAT_CLANG_FLAGS += -no-integrated-as -Qunused-arguments
+ifneq ($(COMPAT_GCC_TOOLCHAIN),)
+CC_COMPAT_CLANG_FLAGS += --gcc-toolchain=$(COMPAT_GCC_TOOLCHAIN)
+endif
+
CC_COMPAT ?= $(CC)
+CC_COMPAT += $(CC_COMPAT_CLANG_FLAGS)
else
CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc
endif
diff --git a/arch/arm64/kernel/vdso32/vgettimeofday.c b/arch/arm64/kernel/vdso32/vgettimeofday.c
index 54fc1c2ce93f..5acff29c5991 100644
--- a/arch/arm64/kernel/vdso32/vgettimeofday.c
+++ b/arch/arm64/kernel/vdso32/vgettimeofday.c
@@ -5,26 +5,16 @@
* Copyright (C) 2018 ARM Limited
*
*/
-#include <linux/time.h>
-#include <linux/types.h>
int __vdso_clock_gettime(clockid_t clock,
struct old_timespec32 *ts)
{
- /* The checks below are required for ABI consistency with arm */
- if ((u32)ts >= TASK_SIZE_32)
- return -EFAULT;
-
return __cvdso_clock_gettime32(clock, ts);
}
int __vdso_clock_gettime64(clockid_t clock,
struct __kernel_timespec *ts)
{
- /* The checks below are required for ABI consistency with arm */
- if ((u32)ts >= TASK_SIZE_32)
- return -EFAULT;
-
return __cvdso_clock_gettime(clock, ts);
}
@@ -37,10 +27,6 @@ int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
int __vdso_clock_getres(clockid_t clock_id,
struct old_timespec32 *res)
{
- /* The checks below are required for ABI consistency with arm */
- if ((u32)res >= TASK_SIZE_32)
- return -EFAULT;
-
return __cvdso_clock_getres_time32(clock_id, res);
}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2589d4760e45..66e94278bd10 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -4,7 +4,6 @@ config MIPS
default y
select ARCH_32BIT_OFF_T if !64BIT
select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
- select ARCH_CLOCKSOURCE_DATA
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KCOV
select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI)
diff --git a/arch/mips/include/asm/clocksource.h b/arch/mips/include/asm/clocksource.h
index cab9ae9f1e14..2f1ebbea3d72 100644
--- a/arch/mips/include/asm/clocksource.h
+++ b/arch/mips/include/asm/clocksource.h
@@ -3,23 +3,9 @@
* Copyright (C) 2015 Imagination Technologies
* Author: Alex Smith <alex.smith@imgtec.com>
*/
-
#ifndef __ASM_CLOCKSOURCE_H
#define __ASM_CLOCKSOURCE_H
-#include <linux/types.h>
-
-/* VDSO clocksources. */
-#define VDSO_CLOCK_NONE 0 /* No suitable clocksource. */
-#define VDSO_CLOCK_R4K 1 /* Use the coprocessor 0 count. */
-#define VDSO_CLOCK_GIC 2 /* Use the GIC. */
-
-/**
- * struct arch_clocksource_data - Architecture-specific clocksource information.
- * @vdso_clock_mode: Method the VDSO should use to access the clocksource.
- */
-struct arch_clocksource_data {
- u8 vdso_clock_mode;
-};
+#include <asm/vdso/clocksource.h>
#endif /* __ASM_CLOCKSOURCE_H */
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 7619ad319400..4c9cc667f3ed 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -22,6 +22,7 @@
#include <asm/dsemul.h>
#include <asm/mipsregs.h>
#include <asm/prefetch.h>
+#include <asm/vdso/processor.h>
/*
* System setup and hardware flags..
@@ -385,21 +386,6 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
-#ifdef CONFIG_CPU_LOONGSON64
-/*
- * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a
- * tight read loop is executed, because reads take priority over writes & the
- * hardware (incorrectly) doesn't ensure that writes will eventually occur.
- *
- * Since spin loops of any kind should have a cpu_relax() in them, force an SFB
- * flush from cpu_relax() such that any pending writes will become visible as
- * expected.
- */
-#define cpu_relax() smp_mb()
-#else
-#define cpu_relax() barrier()
-#endif
-
/*
* Return_address is a replacement for __builtin_return_address(count)
* which on certain architectures cannot reasonably be implemented in GCC
diff --git a/arch/mips/include/asm/vdso/clocksource.h b/arch/mips/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000000..510e1671d898
--- /dev/null
+++ b/arch/mips/include/asm/vdso/clocksource.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_R4K, \
+ VDSO_CLOCKMODE_GIC
+
+#endif /* __ASM_VDSOCLOCKSOURCE_H */
diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h
index a58687e26c5d..c63ddcaea54c 100644
--- a/arch/mips/include/asm/vdso/gettimeofday.h
+++ b/arch/mips/include/asm/vdso/gettimeofday.h
@@ -13,19 +13,13 @@
#ifndef __ASSEMBLY__
-#include <linux/compiler.h>
-#include <linux/time.h>
-
#include <asm/vdso/vdso.h>
#include <asm/clocksource.h>
-#include <asm/io.h>
#include <asm/unistd.h>
#include <asm/vdso.h>
#define VDSO_HAS_CLOCK_GETRES 1
-#define __VDSO_USE_SYSCALL ULLONG_MAX
-
static __always_inline long gettimeofday_fallback(
struct __kernel_old_timeval *_tv,
struct timezone *_tz)
@@ -175,29 +169,28 @@ static __always_inline u64 read_gic_count(const struct vdso_data *data)
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
{
-#ifdef CONFIG_CLKSRC_MIPS_GIC
- const struct vdso_data *data = get_vdso_data();
-#endif
- u64 cycle_now;
-
- switch (clock_mode) {
#ifdef CONFIG_CSRC_R4K
- case VDSO_CLOCK_R4K:
- cycle_now = read_r4k_count();
- break;
+ if (clock_mode == VDSO_CLOCKMODE_R4K)
+ return read_r4k_count();
#endif
#ifdef CONFIG_CLKSRC_MIPS_GIC
- case VDSO_CLOCK_GIC:
- cycle_now = read_gic_count(data);
- break;
+ if (clock_mode == VDSO_CLOCKMODE_GIC)
+ return read_gic_count(get_vdso_data());
#endif
- default:
- cycle_now = __VDSO_USE_SYSCALL;
- break;
- }
+ /*
+ * Core checks mode already. So this raced against a concurrent
+ * update. Return something. Core will do another round, see the
+ * change and fall back to the syscall.
+ */
+ return 0;
+}
- return cycle_now;
+static inline bool mips_vdso_hres_capable(void)
+{
+ return IS_ENABLED(CONFIG_CSRC_R4K) ||
+ IS_ENABLED(CONFIG_CLKSRC_MIPS_GIC);
}
+#define __arch_vdso_hres_capable mips_vdso_hres_capable
static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
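The new mips_vdso_hres_capable() hook lets the generic code compile the whole high-resolution path out when neither CSRC_R4K nor CLKSRC_MIPS_GIC is configured. The consumer side works roughly like this (paraphrased from lib/vdso/gettimeofday.c; default and call site approximate):

/* lib/vdso/gettimeofday.c, approximate sketch */
#ifndef __arch_vdso_hres_capable
static inline bool __arch_vdso_hres_capable(void)
{
	return true;	/* default: assume a usable clocksource exists */
}
#endif

static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
				   struct __kernel_timespec *ts)
{
	/* Constant-folds to "return -1" when hres can never work */
	if (!__arch_vdso_hres_capable())
		return -1;
	/* ... seqcount read loop as sketched earlier ... */
}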
diff --git a/arch/mips/include/asm/vdso/processor.h b/arch/mips/include/asm/vdso/processor.h
new file mode 100644
index 000000000000..511c95d735e6
--- /dev/null
+++ b/arch/mips/include/asm/vdso/processor.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_CPU_LOONGSON64
+/*
+ * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a
+ * tight read loop is executed, because reads take priority over writes & the
+ * hardware (incorrectly) doesn't ensure that writes will eventually occur.
+ *
+ * Since spin loops of any kind should have a cpu_relax() in them, force an SFB
+ * flush from cpu_relax() such that any pending writes will become visible as
+ * expected.
+ */
+#define cpu_relax() smp_mb()
+#else
+#define cpu_relax() barrier()
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/mips/include/asm/vdso/vsyscall.h b/arch/mips/include/asm/vdso/vsyscall.h
index 00d41b94ba31..47168aaf1eff 100644
--- a/arch/mips/include/asm/vdso/vsyscall.h
+++ b/arch/mips/include/asm/vdso/vsyscall.h
@@ -19,15 +19,6 @@ struct vdso_data *__mips_get_k_vdso_data(void)
}
#define __arch_get_k_vdso_data __mips_get_k_vdso_data
-static __always_inline
-int __mips_get_clock_mode(struct timekeeper *tk)
-{
- u32 clock_mode = tk->tkr_mono.clock->archdata.vdso_clock_mode;
-
- return clock_mode;
-}
-#define __arch_get_clock_mode __mips_get_clock_mode
-
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
index eed099f35bf1..437dda64fd7a 100644
--- a/arch/mips/kernel/csrc-r4k.c
+++ b/arch/mips/kernel/csrc-r4k.c
@@ -78,7 +78,7 @@ int __init init_r4k_clocksource(void)
* by the VDSO (HWREna is configured by configure_hwrena()).
*/
if (cpu_has_mips_r2_r6 && rdhwr_count_usable())
- clocksource_mips.archdata.vdso_clock_mode = VDSO_CLOCK_R4K;
+ clocksource_mips.vdso_clock_mode = VDSO_CLOCKMODE_R4K;
clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
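The GIC clocksource lives under drivers/clocksource/ and is therefore outside this arch/-limited diffstat; its conversion in the same series is the analogous one-liner (approximate):

--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
-	gic_clocksource.archdata.vdso_clock_mode = VDSO_CLOCK_GIC;
+	gic_clocksource.vdso_clock_mode = VDSO_CLOCKMODE_GIC;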
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 51da546e0745..e2e326a96788 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -57,7 +57,6 @@ config X86
select ACPI_LEGACY_TABLES_LOOKUP if ACPI
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_32BIT_OFF_T if X86_32
- select ARCH_CLOCKSOURCE_DATA
select ARCH_CLOCKSOURCE_INIT
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_DEBUG_VIRTUAL
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index c1b8496b5606..43428cc514c8 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -38,6 +38,8 @@ struct vdso_data *arch_get_vdso_data(void *vvar_page)
}
#undef EMIT_VVAR
+unsigned int vclocks_used __read_mostly;
+
#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif
@@ -219,7 +221,7 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
} else if (sym_offset == image->sym_pvclock_page) {
struct pvclock_vsyscall_time_info *pvti =
pvclock_get_pvti_cpu0_va();
- if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
+ if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)) {
return vmf_insert_pfn_prot(vma, vmf->address,
__pa(pvti) >> PAGE_SHIFT,
pgprot_decrypted(vma->vm_page_prot));
@@ -227,7 +229,7 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
} else if (sym_offset == image->sym_hvclock_page) {
struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();
- if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK))
+ if (tsc_pg && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
return vmf_insert_pfn(vma, vmf->address,
virt_to_phys(tsc_pg) >> PAGE_SHIFT);
} else if (sym_offset == image->sym_timens_page) {
@@ -445,6 +447,8 @@ __setup("vdso=", vdso_setup);
static int __init init_vdso(void)
{
+ BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32);
+
init_vdso_image(&vdso_image_64);
#ifdef CONFIG_X86_X32_ABI
diff --git a/arch/x86/include/asm/clocksource.h b/arch/x86/include/asm/clocksource.h
index dc4cfc888d6d..dc9dc7b3911a 100644
--- a/arch/x86/include/asm/clocksource.h
+++ b/arch/x86/include/asm/clocksource.h
@@ -4,14 +4,18 @@
#ifndef _ASM_X86_CLOCKSOURCE_H
#define _ASM_X86_CLOCKSOURCE_H
-#define VCLOCK_NONE 0 /* No vDSO clock available. */
-#define VCLOCK_TSC 1 /* vDSO should use vread_tsc. */
-#define VCLOCK_PVCLOCK 2 /* vDSO should use vread_pvclock. */
-#define VCLOCK_HVCLOCK 3 /* vDSO should use vread_hvclock. */
-#define VCLOCK_MAX 3
+#include <asm/vdso/clocksource.h>
-struct arch_clocksource_data {
- int vclock_mode;
-};
+extern unsigned int vclocks_used;
+
+static inline bool vclock_was_used(int vclock)
+{
+ return READ_ONCE(vclocks_used) & (1U << vclock);
+}
+
+static inline void vclocks_set_used(unsigned int which)
+{
+ WRITE_ONCE(vclocks_used, READ_ONCE(vclocks_used) | (1 << which));
+}
#endif /* _ASM_X86_CLOCKSOURCE_H */
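vclocks_used is now maintained from clocksource ->enable() callbacks (see the kvmclock, tsc and xen hunks below) instead of being recomputed on every timekeeping update. The callback fires when a clocksource is actually selected; roughly, from change_clocksource() in kernel/time/timekeeping.c (paraphrased excerpt):

/* change_clocksource(), paraphrased sketch */
if (try_module_get(new->owner)) {
	if (!new->enable || new->enable(new) == 0) {
		old = tk->tkr_mono.clock;
		tk_setup_internals(tk, new);	/* switch timekeeping over */
		if (old->disable)
			old->disable(old);
		module_put(old->owner);
	} else {
		module_put(new->owner);
	}
}

Once set, a bit in vclocks_used stays set; vvar_fault() above only needs to know whether a mode was ever used to decide whether the pvclock/hvclock pages may be mapped into the VDSO.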
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 6b79515abb82..edc2c581704a 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -46,7 +46,9 @@ typedef int (*hyperv_fill_flush_list_func)(
#define hv_set_reference_tsc(val) \
wrmsrl(HV_X64_MSR_REFERENCE_TSC, val)
#define hv_set_clocksource_vdso(val) \
- ((val).archdata.vclock_mode = VCLOCK_HVCLOCK)
+ ((val).vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK)
+#define hv_enable_vdso_clocksource() \
+ vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
#define hv_get_raw_timer() rdtsc_ordered()
void hyperv_callback_vector(void);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 09705ccc393c..94789db550df 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -26,6 +26,7 @@ struct vm86;
#include <asm/fpu/types.h>
#include <asm/unwind_hints.h>
#include <asm/vmxfeatures.h>
+#include <asm/vdso/processor.h>
#include <linux/personality.h>
#include <linux/cache.h>
@@ -677,17 +678,6 @@ static inline unsigned int cpuid_edx(unsigned int op)
return edx;
}
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static __always_inline void rep_nop(void)
-{
- asm volatile("rep; nop" ::: "memory");
-}
-
-static __always_inline void cpu_relax(void)
-{
- rep_nop();
-}
-
/*
* This function forces the icache and prefetched instruction stream to
* catch up with reality in two very specific cases:
diff --git a/arch/x86/include/asm/vdso/clocksource.h b/arch/x86/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000000..119ac8612d89
--- /dev/null
+++ b/arch/x86/include/asm/vdso/clocksource.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_CLOCKSOURCE_H
+#define __ASM_VDSO_CLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_TSC, \
+ VDSO_CLOCKMODE_PVCLOCK, \
+ VDSO_CLOCKMODE_HVCLOCK
+
+#endif /* __ASM_VDSO_CLOCKSOURCE_H */
diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h
index 6ee1f7dba34b..9a6dc9b4ec99 100644
--- a/arch/x86/include/asm/vdso/gettimeofday.h
+++ b/arch/x86/include/asm/vdso/gettimeofday.h
@@ -243,7 +243,7 @@ static u64 vread_hvclock(void)
static inline u64 __arch_get_hw_counter(s32 clock_mode)
{
- if (clock_mode == VCLOCK_TSC)
+ if (likely(clock_mode == VDSO_CLOCKMODE_TSC))
return (u64)rdtsc_ordered();
/*
* For any memory-mapped vclock type, we need to make sure that gcc
@@ -252,13 +252,13 @@ static inline u64 __arch_get_hw_counter(s32 clock_mode)
* question isn't enabled, which will segfault. Hence the barriers.
*/
#ifdef CONFIG_PARAVIRT_CLOCK
- if (clock_mode == VCLOCK_PVCLOCK) {
+ if (clock_mode == VDSO_CLOCKMODE_PVCLOCK) {
barrier();
return vread_pvclock();
}
#endif
#ifdef CONFIG_HYPERV_TIMER
- if (clock_mode == VCLOCK_HVCLOCK) {
+ if (clock_mode == VDSO_CLOCKMODE_HVCLOCK) {
barrier();
return vread_hvclock();
}
diff --git a/arch/x86/include/asm/vdso/processor.h b/arch/x86/include/asm/vdso/processor.h
new file mode 100644
index 000000000000..57b1a7034c64
--- /dev/null
+++ b/arch/x86/include/asm/vdso/processor.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static __always_inline void rep_nop(void)
+{
+ asm volatile("rep; nop" ::: "memory");
+}
+
+static __always_inline void cpu_relax(void)
+{
+ rep_nop();
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/x86/include/asm/vdso/vsyscall.h b/arch/x86/include/asm/vdso/vsyscall.h
index 0026ab2123ce..be199a9b2676 100644
--- a/arch/x86/include/asm/vdso/vsyscall.h
+++ b/arch/x86/include/asm/vdso/vsyscall.h
@@ -10,8 +10,6 @@
#include <asm/vgtod.h>
#include <asm/vvar.h>
-int vclocks_used __read_mostly;
-
DEFINE_VVAR(struct vdso_data, _vdso_data);
/*
* Update the vDSO data page to keep in sync with kernel timekeeping.
@@ -23,19 +21,6 @@ struct vdso_data *__x86_get_k_vdso_data(void)
}
#define __arch_get_k_vdso_data __x86_get_k_vdso_data
-static __always_inline
-int __x86_get_clock_mode(struct timekeeper *tk)
-{
- int vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
-
- /* Mark the new vclock used. */
- BUILD_BUG_ON(VCLOCK_MAX >= 32);
- WRITE_ONCE(vclocks_used, READ_ONCE(vclocks_used) | (1 << vclock_mode));
-
- return vclock_mode;
-}
-#define __arch_get_clock_mode __x86_get_clock_mode
-
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index a2638c6124ed..7aa38b2ad8a9 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -2,6 +2,11 @@
#ifndef _ASM_X86_VGTOD_H
#define _ASM_X86_VGTOD_H
+/*
+ * This check is required to prevent ARCH=um from including
+ * unwanted headers.
+ */
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
#include <linux/compiler.h>
#include <asm/clocksource.h>
#include <vdso/datapage.h>
@@ -14,11 +19,6 @@ typedef u64 gtod_long_t;
#else
typedef unsigned long gtod_long_t;
#endif
-
-extern int vclocks_used;
-static inline bool vclock_was_used(int vclock)
-{
- return READ_ONCE(vclocks_used) & (1 << vclock);
-}
+#endif /* CONFIG_GENERIC_GETTIMEOFDAY */
#endif /* _ASM_X86_VGTOD_H */
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 904494b924c1..34b18f6eeb2c 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -159,12 +159,19 @@ bool kvm_check_and_clear_guest_paused(void)
return ret;
}
+static int kvm_cs_enable(struct clocksource *cs)
+{
+ vclocks_set_used(VDSO_CLOCKMODE_PVCLOCK);
+ return 0;
+}
+
struct clocksource kvm_clock = {
.name = "kvm-clock",
.read = kvm_clock_get_cycles,
.rating = 400,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .enable = kvm_cs_enable,
};
EXPORT_SYMBOL_GPL(kvm_clock);
@@ -272,7 +279,7 @@ static int __init kvm_setup_vsyscall_timeinfo(void)
if (!(flags & PVCLOCK_TSC_STABLE_BIT))
return 0;
- kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
+ kvm_clock.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
#endif
kvmclock_init_mem();
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 10125358b9c4..11065dc03f5b 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -145,7 +145,7 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti)
{
- WARN_ON(vclock_was_used(VCLOCK_PVCLOCK));
+ WARN_ON(vclock_was_used(VDSO_CLOCKMODE_PVCLOCK));
pvti_cpu0_va = pvti;
}
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index d8673d8a779b..4d545db52efc 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -122,18 +122,12 @@ void __init time_init(void)
*/
void clocksource_arch_init(struct clocksource *cs)
{
- if (cs->archdata.vclock_mode == VCLOCK_NONE)
+ if (cs->vdso_clock_mode == VDSO_CLOCKMODE_NONE)
return;
- if (cs->archdata.vclock_mode > VCLOCK_MAX) {
- pr_warn("clocksource %s registered with invalid vclock_mode %d. Disabling vclock.\n",
- cs->name, cs->archdata.vclock_mode);
- cs->archdata.vclock_mode = VCLOCK_NONE;
- }
-
if (cs->mask != CLOCKSOURCE_MASK(64)) {
- pr_warn("clocksource %s registered with invalid mask %016llx. Disabling vclock.\n",
+ pr_warn("clocksource %s registered with invalid mask %016llx for VDSO. Disabling VDSO support.\n",
cs->name, cs->mask);
- cs->archdata.vclock_mode = VCLOCK_NONE;
+ cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
}
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 7e322e2daaf5..971d6f0216df 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1108,17 +1108,24 @@ static void tsc_cs_tick_stable(struct clocksource *cs)
sched_clock_tick_stable();
}
+static int tsc_cs_enable(struct clocksource *cs)
+{
+ vclocks_set_used(VDSO_CLOCKMODE_TSC);
+ return 0;
+}
+
/*
* .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
*/
static struct clocksource clocksource_tsc_early = {
- .name = "tsc-early",
- .rating = 299,
- .read = read_tsc,
- .mask = CLOCKSOURCE_MASK(64),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS |
+ .name = "tsc-early",
+ .rating = 299,
+ .read = read_tsc,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS |
CLOCK_SOURCE_MUST_VERIFY,
- .archdata = { .vclock_mode = VCLOCK_TSC },
+ .vdso_clock_mode = VDSO_CLOCKMODE_TSC,
+ .enable = tsc_cs_enable,
.resume = tsc_resume,
.mark_unstable = tsc_cs_mark_unstable,
.tick_stable = tsc_cs_tick_stable,
@@ -1131,14 +1138,15 @@ static struct clocksource clocksource_tsc_early = {
* been found good.
*/
static struct clocksource clocksource_tsc = {
- .name = "tsc",
- .rating = 300,
- .read = read_tsc,
- .mask = CLOCKSOURCE_MASK(64),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS |
+ .name = "tsc",
+ .rating = 300,
+ .read = read_tsc,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS |
CLOCK_SOURCE_VALID_FOR_HRES |
CLOCK_SOURCE_MUST_VERIFY,
- .archdata = { .vclock_mode = VCLOCK_TSC },
+ .vdso_clock_mode = VDSO_CLOCKMODE_TSC,
+ .enable = tsc_cs_enable,
.resume = tsc_resume,
.mark_unstable = tsc_cs_mark_unstable,
.tick_stable = tsc_cs_tick_stable,
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index f194dd058470..cef5a344fedb 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -815,8 +815,8 @@ TRACE_EVENT(kvm_write_tsc_offset,
#ifdef CONFIG_X86_64
#define host_clocks \
- {VCLOCK_NONE, "none"}, \
- {VCLOCK_TSC, "tsc"} \
+ {VDSO_CLOCKMODE_NONE, "none"}, \
+ {VDSO_CLOCKMODE_TSC, "tsc"} \
TRACE_EVENT(kvm_update_master_clock,
TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cf95c36cb4f4..bf8564d73fc3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1634,7 +1634,7 @@ static void update_pvclock_gtod(struct timekeeper *tk)
write_seqcount_begin(&vdata->seq);
/* copy pvclock gtod data */
- vdata->clock.vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
+ vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode;
vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
vdata->clock.mask = tk->tkr_mono.mask;
vdata->clock.mult = tk->tkr_mono.mult;
@@ -1642,7 +1642,7 @@ static void update_pvclock_gtod(struct timekeeper *tk)
vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec;
vdata->clock.offset = tk->tkr_mono.base;
- vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->archdata.vclock_mode;
+ vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode;
vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last;
vdata->raw_clock.mask = tk->tkr_raw.mask;
vdata->raw_clock.mult = tk->tkr_raw.mult;
@@ -1843,7 +1843,7 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
static inline int gtod_is_based_on_tsc(int mode)
{
- return mode == VCLOCK_TSC || mode == VCLOCK_HVCLOCK;
+ return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
}
static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
@@ -1936,7 +1936,7 @@ static inline bool kvm_check_tsc_unstable(void)
* TSC is marked unstable when we're running on Hyper-V,
* 'TSC page' clocksource is good.
*/
- if (pvclock_gtod_data.clock.vclock_mode == VCLOCK_HVCLOCK)
+ if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK)
return false;
#endif
return check_tsc_unstable();
@@ -2091,30 +2091,30 @@ static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
u64 tsc_pg_val;
switch (clock->vclock_mode) {
- case VCLOCK_HVCLOCK:
+ case VDSO_CLOCKMODE_HVCLOCK:
tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(),
tsc_timestamp);
if (tsc_pg_val != U64_MAX) {
/* TSC page valid */
- *mode = VCLOCK_HVCLOCK;
+ *mode = VDSO_CLOCKMODE_HVCLOCK;
v = (tsc_pg_val - clock->cycle_last) &
clock->mask;
} else {
/* TSC page invalid */
- *mode = VCLOCK_NONE;
+ *mode = VDSO_CLOCKMODE_NONE;
}
break;
- case VCLOCK_TSC:
- *mode = VCLOCK_TSC;
+ case VDSO_CLOCKMODE_TSC:
+ *mode = VDSO_CLOCKMODE_TSC;
*tsc_timestamp = read_tsc();
v = (*tsc_timestamp - clock->cycle_last) &
clock->mask;
break;
default:
- *mode = VCLOCK_NONE;
+ *mode = VDSO_CLOCKMODE_NONE;
}
- if (*mode == VCLOCK_NONE)
+ if (*mode == VDSO_CLOCKMODE_NONE)
*tsc_timestamp = v = 0;
return v * clock->mult;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index befbdd8b17f0..c8897aad13cd 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -145,12 +145,19 @@ static struct notifier_block xen_pvclock_gtod_notifier = {
.notifier_call = xen_pvclock_gtod_notify,
};
+static int xen_cs_enable(struct clocksource *cs)
+{
+ vclocks_set_used(VDSO_CLOCKMODE_PVCLOCK);
+ return 0;
+}
+
static struct clocksource xen_clocksource __read_mostly = {
- .name = "xen",
- .rating = 400,
- .read = xen_clocksource_get_cycles,
- .mask = ~0,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .name = "xen",
+ .rating = 400,
+ .read = xen_clocksource_get_cycles,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .enable = xen_cs_enable,
};
/*
@@ -412,12 +419,13 @@ void xen_restore_time_memory_area(void)
ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
/*
- * We don't disable VCLOCK_PVCLOCK entirely if it fails to register the
- * secondary time info with Xen or if we migrated to a host without the
- * necessary flags. On both of these cases what happens is either
- * process seeing a zeroed out pvti or seeing no PVCLOCK_TSC_STABLE_BIT
- * bit set. Userspace checks the latter and if 0, it discards the data
- * in pvti and fallbacks to a system call for a reliable timestamp.
+ * We don't disable VDSO_CLOCKMODE_PVCLOCK entirely if it fails to
+ * register the secondary time info with Xen or if we migrated to a
+ * host without the necessary flags. In both of these cases what
+ * happens is either the process seeing a zeroed out pvti or seeing
+ * no PVCLOCK_TSC_STABLE_BIT set. Userspace checks the latter and
+ * if 0, it discards the data in pvti and falls back to a system
+ * call for a reliable timestamp.
*/
if (ret != 0)
pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
@@ -443,7 +451,7 @@ static void xen_setup_vsyscall_time_info(void)
ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
if (ret) {
- pr_notice("xen: VCLOCK_PVCLOCK not supported (err %d)\n", ret);
+ pr_notice("xen: VDSO_CLOCKMODE_PVCLOCK not supported (err %d)\n", ret);
free_page((unsigned long)ti);
return;
}
@@ -460,14 +468,14 @@ static void xen_setup_vsyscall_time_info(void)
if (!ret)
free_page((unsigned long)ti);
- pr_notice("xen: VCLOCK_PVCLOCK not supported (tsc unstable)\n");
+ pr_notice("xen: VDSO_CLOCKMODE_PVCLOCK not supported (tsc unstable)\n");
return;
}
xen_clock = ti;
pvclock_set_pvti_cpu0_va(xen_clock);
- xen_clocksource.archdata.vclock_mode = VCLOCK_PVCLOCK;
+ xen_clocksource.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
}
static void __init xen_time_init(void)