-rw-r--r--  arch/x86/kernel/tsc_32.c  | 19
-rw-r--r--  arch/x86/kernel/tsc_64.c  | 27
-rw-r--r--  include/asm-x86/nops.h    | 20
3 files changed, 16 insertions(+), 50 deletions(-)
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index d7498b34c8e9..c2241e04ea5f 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -256,9 +256,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
ref_freq, freq->new);
if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
tsc_khz = cpu_khz;
- preempt_disable();
- set_cyc2ns_scale(cpu_khz, smp_processor_id());
- preempt_enable();
+ set_cyc2ns_scale(cpu_khz, freq->cpu);
/*
* TSC based sched_clock turns
* to junk w/ cpufreq
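For context, the scale that set_cyc2ns_scale() recomputes in the hunk above drives the cycles-to-nanoseconds conversion behind the TSC sched_clock. The following is a minimal userspace sketch of that fixed-point conversion, assuming the 2.6.25-era shift of 10 and a per-CPU scale of (10^6 << 10) / cpu_khz; the _demo names are illustrative stand-ins, not kernel API.

#include <stdint.h>
#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10  /* assumed shift, matching the kernel of this era */

/* Illustrative stand-in for the kernel's per-CPU cyc2ns scale. */
static unsigned long cyc2ns_scale_demo;

static void set_cyc2ns_scale_demo(unsigned long cpu_khz)
{
    /* ns per cycle in fixed point: 10^6 / khz == ns/cycle */
    cyc2ns_scale_demo = (1000000UL << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static uint64_t cycles_2_ns_demo(uint64_t cyc)
{
    return (cyc * cyc2ns_scale_demo) >> CYC2NS_SCALE_FACTOR;
}

int main(void)
{
    set_cyc2ns_scale_demo(2000000);  /* 2 GHz */
    printf("%llu ns\n", (unsigned long long)cycles_2_ns_demo(2000000000ULL));
    /* Prints ~1000000000 ns: 2e9 cycles at 2 GHz is one second.  When
     * cpufreq changes the frequency, the scale must be recomputed for
     * the CPU the notification is about -- which is why the notifier
     * above now passes freq->cpu instead of smp_processor_id(). */
    return 0;
}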
@@ -287,27 +285,14 @@ core_initcall(cpufreq_tsc);
/* clock source code */
static unsigned long current_tsc_khz = 0;
-static struct clocksource clocksource_tsc;
-/*
- * We compare the TSC to the cycle_last value in the clocksource
- * structure to avoid a nasty time-warp issue. This can be observed in
- * a very small window right after one CPU updated cycle_last under
- * xtime lock and the other CPU reads a TSC value which is smaller
- * than the cycle_last reference value due to a TSC which is slighty
- * behind. This delta is nowhere else observable, but in that case it
- * results in a forward time jump in the range of hours due to the
- * unsigned delta calculation of the time keeping core code, which is
- * necessary to support wrapping clocksources like pm timer.
- */
static cycle_t read_tsc(void)
{
cycle_t ret;
rdtscll(ret);
- return ret >= clocksource_tsc.cycle_last ?
- ret : clocksource_tsc.cycle_last;
+ return ret;
}
static struct clocksource clocksource_tsc = {
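The comment deleted above describes the failure mode the old clamp papered over: if this CPU's TSC is slightly behind the cycle_last value another CPU just stored, the timekeeping core's unsigned delta wraps to a huge value. A small standalone illustration of that wrap (plain C, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t cycle_last = 1000;   /* last value recorded under xtime lock */
    uint64_t tsc        =  998;   /* this CPU's TSC, slightly behind */

    /* Unsigned subtraction wraps, so the "elapsed" delta is enormous
     * instead of negative -- the forward time jump the deleted comment
     * warned about. */
    uint64_t delta = tsc - cycle_last;
    printf("raw delta: %llu cycles\n", (unsigned long long)delta);

    /* The clamp removed by this patch avoided the wrap by never
     * returning a value below cycle_last: */
    uint64_t clamped = tsc >= cycle_last ? tsc : cycle_last;
    printf("clamped:   %llu\n", (unsigned long long)clamped);
    return 0;
}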
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index 01fc9f0c39e2..d3bebaaad842 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -11,7 +11,6 @@
#include <asm/hpet.h>
#include <asm/timex.h>
#include <asm/timer.h>
-#include <asm/vgtod.h>
static int notsc __initdata = 0;
@@ -149,9 +148,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
mark_tsc_unstable("cpufreq changes");
}
- preempt_disable();
- set_cyc2ns_scale(tsc_khz_ref, smp_processor_id());
- preempt_enable();
+ set_cyc2ns_scale(tsc_khz_ref, freq->cpu);
return 0;
}
@@ -291,34 +288,18 @@ int __init notsc_setup(char *s)
__setup("notsc", notsc_setup);
-static struct clocksource clocksource_tsc;
-/*
- * We compare the TSC to the cycle_last value in the clocksource
- * structure to avoid a nasty time-warp. This can be observed in a
- * very small window right after one CPU updated cycle_last under
- * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
- * is smaller than the cycle_last reference value due to a TSC which
- * is slighty behind. This delta is nowhere else observable, but in
- * that case it results in a forward time jump in the range of hours
- * due to the unsigned delta calculation of the time keeping core
- * code, which is necessary to support wrapping clocksources like pm
- * timer.
- */
+/* clock source code: */
static cycle_t read_tsc(void)
{
cycle_t ret = (cycle_t)get_cycles();
-
- return ret >= clocksource_tsc.cycle_last ?
- ret : clocksource_tsc.cycle_last;
+ return ret;
}
static cycle_t __vsyscall_fn vread_tsc(void)
{
cycle_t ret = (cycle_t)vget_cycles();
-
- return ret >= __vsyscall_gtod_data.clock.cycle_last ?
- ret : __vsyscall_gtod_data.clock.cycle_last;
+ return ret;
}
static struct clocksource clocksource_tsc = {
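After this change both read_tsc() and the vsyscall vread_tsc() hand back the raw counter from get_cycles()/vget_cycles() with no clamping. A hedged userspace approximation of such a raw read on x86 follows; this is not the kernel's helper, just an RDTSC illustration.

#include <stdint.h>
#include <stdio.h>

/* Userspace approximation of a raw TSC read; the kernel's
 * get_cycles()/vget_cycles() are conceptually similar but live in the
 * kernel's asm headers and the vsyscall page. */
static inline uint64_t rdtsc_raw(void)
{
    uint32_t lo, hi;
    asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
    return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
    uint64_t a = rdtsc_raw();
    uint64_t b = rdtsc_raw();
    printf("two back-to-back reads: %llu -> %llu (delta %llu)\n",
           (unsigned long long)a, (unsigned long long)b,
           (unsigned long long)(b - a));
    return 0;
}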
diff --git a/include/asm-x86/nops.h b/include/asm-x86/nops.h
index e3b2bce0aff8..b3930ae539b3 100644
--- a/include/asm-x86/nops.h
+++ b/include/asm-x86/nops.h
@@ -73,16 +73,7 @@
#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n"
#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
-#if defined(CONFIG_MK8)
-#define ASM_NOP1 K8_NOP1
-#define ASM_NOP2 K8_NOP2
-#define ASM_NOP3 K8_NOP3
-#define ASM_NOP4 K8_NOP4
-#define ASM_NOP5 K8_NOP5
-#define ASM_NOP6 K8_NOP6
-#define ASM_NOP7 K8_NOP7
-#define ASM_NOP8 K8_NOP8
-#elif defined(CONFIG_MK7)
+#if defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
@@ -100,6 +91,15 @@
#define ASM_NOP6 P6_NOP6
#define ASM_NOP7 P6_NOP7
#define ASM_NOP8 P6_NOP8
+#elif defined(CONFIG_X86_64)
+#define ASM_NOP1 K8_NOP1
+#define ASM_NOP2 K8_NOP2
+#define ASM_NOP3 K8_NOP3
+#define ASM_NOP4 K8_NOP4
+#define ASM_NOP5 K8_NOP5
+#define ASM_NOP6 K8_NOP6
+#define ASM_NOP7 K8_NOP7
+#define ASM_NOP8 K8_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
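The net effect of the hunk above is that 64-bit builds now select the K8 NOPs via CONFIG_X86_64 rather than via CONFIG_MK8, CONFIG_MK7 keeps the K7 variants, and everything else falls back to the generic ones. A hedged sketch of how an ASM_NOPn macro is typically consumed as instruction padding; the stand-in defines below exist only so the snippet compiles outside the kernel tree, the real strings come from <asm/nops.h>.

/* Stand-in definitions (assumption) so this builds standalone; in the
 * kernel the #if ladder above picks the right byte sequence. */
#define GENERIC_NOP1 ".byte 0x90\n"   /* single-byte NOP */
#define ASM_NOP1     GENERIC_NOP1

static inline void one_byte_pad(void)
{
    /* ASM_NOPn expands to a ".byte ..." string, so it drops straight
     * into inline asm as padding of a known length. */
    asm volatile(ASM_NOP1);
}

int main(void)
{
    one_byte_pad();
    return 0;
}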