author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-20 10:32:09 -0700
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-20 10:32:09 -0700
commit | 161f7a7161191ab9c2e97f787829ef8dd2b95771 (patch) |
tree | 9776d3f963c7f0d247b7fb324eab4811a1302f67 /arch/x86 |
parent | 2ba68940c893c8f0bfc8573c041254251bb6aeab (diff) |
parent | a078c6d0e6288fad6d83fb6d5edd91ddb7b6ab33 (diff) |
download | linux-161f7a7161191ab9c2e97f787829ef8dd2b95771.tar.bz2 |
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer changes for v3.4 from Ingo Molnar
* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (32 commits)
ntp: Fix integer overflow when setting time
math: Introduce div64_long
cs5535-clockevt: Allow the MFGPT IRQ to be shared
cs5535-clockevt: Don't ignore MFGPT on SMP-capable kernels
x86/time: Eliminate unused irq0_irqs counter
clocksource: scx200_hrt: Fix the build
x86/tsc: Reduce the TSC sync check time for core-siblings
timer: Fix bad idle check on irq entry
nohz: Remove ts->inidle checks before restarting the tick
nohz: Remove update_ts_time_stat from tick_nohz_start_idle
clockevents: Leave the broadcast device in shutdown mode when not needed
clocksource: Load the ACPI PM clocksource asynchronously
clocksource: scx200_hrt: Convert scx200 to use clocksource_register_hz
clocksource: Get rid of clocksource_calc_mult_shift()
clocksource: dbx500: convert to clocksource_register_hz()
clocksource: scx200_hrt: use pr_<level> instead of printk
time: Move common updates to a function
time: Reorder so the hot data is together
time: Remove most of xtime_lock usage in timekeeping.c
ntp: Add ntp_lock to replace xtime_locking
...
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/include/asm/hardirq.h | 1 |
-rw-r--r-- | arch/x86/kernel/time.c | 3 |
-rw-r--r-- | arch/x86/kernel/tsc_sync.c | 29 |
3 files changed, 24 insertions(+), 9 deletions(-)
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index da0b3ca815b7..382f75d735f3 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -7,7 +7,6 @@ typedef struct {
 	unsigned int __softirq_pending;
 	unsigned int __nmi_count;	/* arch dependent */
-	unsigned int irq0_irqs;
 #ifdef CONFIG_X86_LOCAL_APIC
 	unsigned int apic_timer_irqs;	/* arch dependent */
 	unsigned int irq_spurious_count;
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index dd5fbf4101fc..c6eba2b42673 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -57,9 +57,6 @@ EXPORT_SYMBOL(profile_pc);
  */
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
-	/* Keep nmi watchdog up to date */
-	inc_irq_stat(irq0_irqs);
-
 	global_clock_event->event_handler(global_clock_event);
 
 	/* MCA bus quirk: Acknowledge irq0 by setting bit 7 in port 0x61 */
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 9eba29b46cb7..fc25e60a5884 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -42,7 +42,7 @@ static __cpuinitdata int nr_warps;
 /*
  * TSC-warp measurement loop running on both CPUs:
  */
-static __cpuinit void check_tsc_warp(void)
+static __cpuinit void check_tsc_warp(unsigned int timeout)
 {
 	cycles_t start, now, prev, end;
 	int i;
@@ -51,9 +51,9 @@ static __cpuinit void check_tsc_warp(void)
 	start = get_cycles();
 	rdtsc_barrier();
 	/*
-	 * The measurement runs for 20 msecs:
+	 * The measurement runs for 'timeout' msecs:
 	 */
-	end = start + tsc_khz * 20ULL;
+	end = start + (cycles_t) tsc_khz * timeout;
 	now = start;
 
 	for (i = 0; ; i++) {
@@ -99,6 +99,25 @@ static __cpuinit void check_tsc_warp(void)
 }
 
 /*
+ * If the target CPU coming online doesn't have any of its core-siblings
+ * online, a timeout of 20msec will be used for the TSC-warp measurement
+ * loop. Otherwise a smaller timeout of 2msec will be used, as we have some
+ * information about this socket already (and this information grows as we
+ * have more and more logical-siblings in that socket).
+ *
+ * Ideally we should be able to skip the TSC sync check on the other
+ * core-siblings, if the first logical CPU in a socket passed the sync test.
+ * But as the TSC is per-logical CPU and can potentially be modified wrongly
+ * by the bios, TSC sync test for smaller duration should be able
+ * to catch such errors. Also this will catch the condition where all the
+ * cores in the socket doesn't get reset at the same time.
+ */
+static inline unsigned int loop_timeout(int cpu)
+{
+	return (cpumask_weight(cpu_core_mask(cpu)) > 1) ? 2 : 20;
+}
+
+/*
  * Source CPU calls into this - it waits for the freshly booted
  * target CPU to arrive and then starts the measurement:
  */
@@ -135,7 +154,7 @@ void __cpuinit check_tsc_sync_source(int cpu)
 	 */
 	atomic_inc(&start_count);
 
-	check_tsc_warp();
+	check_tsc_warp(loop_timeout(cpu));
 
 	while (atomic_read(&stop_count) != cpus-1)
 		cpu_relax();
@@ -183,7 +202,7 @@ void __cpuinit check_tsc_sync_target(void)
 	while (atomic_read(&start_count) != cpus)
 		cpu_relax();
 
-	check_tsc_warp();
+	check_tsc_warp(loop_timeout(smp_processor_id()));
 
 	/*
 	 * Ok, we are done:
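The core of the tsc_sync.c change is the loop_timeout() heuristic plus the deadline arithmetic in check_tsc_warp(). A minimal user-space sketch of that logic follows; core_mask_weight and tsc_khz here are mocked stand-ins for what the kernel gets from cpumask_weight(cpu_core_mask(cpu)) and TSC calibration, so this is illustrative only, not kernel code.

```c
#include <stdio.h>

/* Hypothetical stand-ins for kernel state: the number of the incoming
 * CPU's core-siblings already online (including itself), and the
 * calibrated TSC frequency in kHz (i.e. cycles per millisecond). */
static unsigned int core_mask_weight = 3;
static unsigned long long tsc_khz = 2500000ULL;	/* 2.5 GHz part */

/* Mirrors the heuristic from the patch: run the long 20 ms warp check
 * only when no core-sibling is online yet; once siblings from the same
 * socket have already passed the check, a 2 ms run is considered enough
 * to catch a BIOS-mangled TSC. */
static unsigned int loop_timeout(void)
{
	return (core_mask_weight > 1) ? 2 : 20;
}

int main(void)
{
	unsigned int timeout_ms = loop_timeout();

	/* Same deadline arithmetic as the patched check_tsc_warp():
	 * tsc_khz counts cycles per millisecond, so the warp loop ends
	 * once tsc_khz * timeout_ms cycles have elapsed since 'start'. */
	unsigned long long start = 0;	/* stands in for get_cycles() */
	unsigned long long end = start + tsc_khz * timeout_ms;

	printf("timeout = %u ms -> deadline %llu cycles after start\n",
	       timeout_ms, end);
	return 0;
}
```

With at least one core-sibling already online the per-CPU check drops from 20 ms to 2 ms, which is where the boot-time saving in the "x86/tsc: Reduce the TSC sync check time for core-siblings" commit comes from.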