author		Andi Kleen <ak@suse.de>			2007-05-02 19:27:20 +0200
committer	Andi Kleen <andi@basil.nowhere.org>	2007-05-02 19:27:20 +0200
commit		05cb007dac9a50148daf87d0b9469e0cd05fd5e7
tree		06d013ba2ce29441b58cb458d739083bc87af148 /arch/x86_64
parent		09198e68501a7e34737cd9264d266f42429abcdc
download	linux-05cb007dac9a50148daf87d0b9469e0cd05fd5e7.tar.bz2
[PATCH] x86-64: Use the 32bit wd_ops for 64bit too.
This mainly removes a lot of code, replacing it with calls into the new 32bit
perfctr-watchdog.c
Signed-off-by: Andi Kleen <ak@suse.de>
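
For orientation before the diff: after this patch the 64-bit nmi.c drives the local-APIC watchdog entirely through four entry points from the shared i386 perfctr-watchdog.c. A minimal sketch of that interface follows, with the prototypes inferred from the call sites visible in the diff below rather than quoted from the real header, so treat them as an approximation:

/*
 * Sketch of the shared lapic-watchdog interface (prototypes inferred
 * from the call sites in the new arch/x86_64/kernel/nmi.c; the
 * authoritative declarations live on the i386 side).
 */
extern unsigned lapic_adjust_nmi_hz(unsigned hz);	/* clamp hz for narrow counters */
extern int lapic_watchdog_init(unsigned nmi_hz);	/* reserve + program perfctr; < 0 on error */
extern void lapic_watchdog_stop(void);			/* disable and release the counter */
extern int lapic_wd_event(unsigned nmi_hz);		/* NMI-time check/rearm; nonzero if it fired */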
Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/kernel/Makefile |   4
-rw-r--r--  arch/x86_64/kernel/nmi.c    | 678
2 files changed, 36 insertions(+), 646 deletions(-)
diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index a613e13d0e75..4d94c51803d8 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -8,7 +8,8 @@ obj-y	:= process.o signal.o entry.o traps.o irq.o \
 		ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \
 		x8664_ksyms.o i387.o syscall.o vsyscall.o \
 		setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \
-		pci-dma.o pci-nommu.o alternative.o hpet.o tsc.o bugs.o
+		pci-dma.o pci-nommu.o alternative.o hpet.o tsc.o bugs.o \
+		perfctr-watchdog.o
 
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_X86_MCE)		+= mce.o therm_throt.o
@@ -57,3 +58,4 @@ i8237-y				+= ../../i386/kernel/i8237.o
 msr-$(subst m,y,$(CONFIG_X86_MSR))  += ../../i386/kernel/msr.o
 alternative-y			+= ../../i386/kernel/alternative.o
 pcspeaker-y			+= ../../i386/kernel/pcspeaker.o
+perfctr-watchdog-y		+= ../../i386/kernel/cpu/perfctr-watchdog.o
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 010d3d9bd56a..6cd2b30e2ffc 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -27,28 +27,11 @@
 #include <asm/proto.h>
 #include <asm/kdebug.h>
 #include <asm/mce.h>
-#include <asm/intel_arch_perfmon.h>
 
 int unknown_nmi_panic;
 int nmi_watchdog_enabled;
 int panic_on_unrecovered_nmi;
 
-/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
- * evtsel_nmi_owner tracks the ownership of the event selection
- * - different performance counters/ event selection may be reserved for
- *   different subsystems this reservation system just tries to coordinate
- *   things a little
- */
-
-/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
- * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
- */
-#define NMI_MAX_COUNTER_BITS 66
-#define NMI_MAX_COUNTER_LONGS BITS_TO_LONGS(NMI_MAX_COUNTER_BITS)
-
-static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner[NMI_MAX_COUNTER_LONGS]);
-static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[NMI_MAX_COUNTER_LONGS]);
-
 static cpumask_t backtrace_mask = CPU_MASK_NONE;
 
 /* nmi_active:
@@ -63,191 +46,11 @@ int panic_on_timeout;
 unsigned int nmi_watchdog = NMI_DEFAULT;
 static unsigned int nmi_hz = HZ;
 
-struct nmi_watchdog_ctlblk {
-	int enabled;
-	u64 check_bit;
-	unsigned int cccr_msr;
-	unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
-	unsigned int evntsel_msr;  /* the MSR to select the events to handle */
-};
-static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
+static DEFINE_PER_CPU(short, wd_enabled);
 
 /* local prototypes */
 static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
 
-/* converts an msr to an appropriate reservation bit */
-static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
-{
-	/* returns the bit offset of the performance counter register */
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_AMD:
-		return (msr - MSR_K7_PERFCTR0);
-	case X86_VENDOR_INTEL:
-		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-			return (msr - MSR_ARCH_PERFMON_PERFCTR0);
-		else
-			return (msr - MSR_P4_BPU_PERFCTR0);
-	}
-	return 0;
-}
-
-/* converts an msr to an appropriate reservation bit */
-static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
-{
-	/* returns the bit offset of the event selection register */
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_AMD:
-		return (msr - MSR_K7_EVNTSEL0);
-	case X86_VENDOR_INTEL:
-		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-			return (msr - MSR_ARCH_PERFMON_EVENTSEL0);
-		else
-			return (msr - MSR_P4_BSU_ESCR0);
-	}
-	return 0;
-}
-
-/* checks for a bit availability (hack for oprofile) */
-int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
-{
-	int cpu;
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-	for_each_possible_cpu (cpu) {
-		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
-			return 0;
-	}
-	return 1;
-}
-
-/* checks the an msr for availability */
-int avail_to_resrv_perfctr_nmi(unsigned int msr)
-{
-	unsigned int counter;
-	int cpu;
-
-	counter = nmi_perfctr_msr_to_bit(msr);
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	for_each_possible_cpu (cpu) {
-		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
-			return 0;
-	}
-	return 1;
-}
-
-static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
-{
-	unsigned int counter;
-	if (cpu < 0)
-		cpu = smp_processor_id();
-
-	counter = nmi_perfctr_msr_to_bit(msr);
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
-		return 1;
-	return 0;
-}
-
-static void __release_perfctr_nmi(int cpu, unsigned int msr)
-{
-	unsigned int counter;
-	if (cpu < 0)
-		cpu = smp_processor_id();
-
-	counter = nmi_perfctr_msr_to_bit(msr);
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
-}
-
-int reserve_perfctr_nmi(unsigned int msr)
-{
-	int cpu, i;
-	for_each_possible_cpu (cpu) {
-		if (!__reserve_perfctr_nmi(cpu, msr)) {
-			for_each_possible_cpu (i) {
-				if (i >= cpu)
-					break;
-				__release_perfctr_nmi(i, msr);
-			}
-			return 0;
-		}
-	}
-	return 1;
-}
-
-void release_perfctr_nmi(unsigned int msr)
-{
-	int cpu;
-	for_each_possible_cpu (cpu)
-		__release_perfctr_nmi(cpu, msr);
-}
-
-int __reserve_evntsel_nmi(int cpu, unsigned int msr)
-{
-	unsigned int counter;
-	if (cpu < 0)
-		cpu = smp_processor_id();
-
-	counter = nmi_evntsel_msr_to_bit(msr);
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
-		return 1;
-	return 0;
-}
-
-static void __release_evntsel_nmi(int cpu, unsigned int msr)
-{
-	unsigned int counter;
-	if (cpu < 0)
-		cpu = smp_processor_id();
-
-	counter = nmi_evntsel_msr_to_bit(msr);
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
-}
-
-int reserve_evntsel_nmi(unsigned int msr)
-{
-	int cpu, i;
-	for_each_possible_cpu (cpu) {
-		if (!__reserve_evntsel_nmi(cpu, msr)) {
-			for_each_possible_cpu (i) {
-				if (i >= cpu)
-					break;
-				__release_evntsel_nmi(i, msr);
-			}
-			return 0;
-		}
-	}
-	return 1;
-}
-
-void release_evntsel_nmi(unsigned int msr)
-{
-	int cpu;
-	for_each_possible_cpu (cpu) {
-		__release_evntsel_nmi(cpu, msr);
-	}
-}
-
-static __cpuinit inline int nmi_known_cpu(void)
-{
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_AMD:
-		return boot_cpu_data.x86 == 15 || boot_cpu_data.x86 == 16;
-	case X86_VENDOR_INTEL:
-		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-			return 1;
-		else
-			return (boot_cpu_data.x86 == 15);
-	}
-	return 0;
-}
-
 /* Run after command line and cpu_init init, but before all other checks */
 void nmi_watchdog_default(void)
 {
@@ -277,23 +80,6 @@ static __init void nmi_cpu_busy(void *data)
 }
 #endif
 
-static unsigned int adjust_for_32bit_ctr(unsigned int hz)
-{
-	unsigned int retval = hz;
-
-	/*
-	 * On Intel CPUs with ARCH_PERFMON only 32 bits in the counter
-	 * are writable, with higher bits sign extending from bit 31.
-	 * So, we can only program the counter with 31 bit values and
-	 * 32nd bit should be 1, for 33.. to be 1.
-	 * Find the appropriate nmi_hz
-	 */
-	if ((((u64)cpu_khz * 1000) / retval) > 0x7fffffffULL) {
-		retval = ((u64)cpu_khz * 1000) / 0x7fffffffUL + 1;
-	}
-	return retval;
-}
-
 int __init check_nmi_watchdog (void)
 {
 	int *counts;
@@ -322,14 +108,14 @@ int __init check_nmi_watchdog (void)
 	mdelay((20*1000)/nmi_hz); // wait 20 ticks
 
 	for_each_online_cpu(cpu) {
-		if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
+		if (!per_cpu(wd_enabled, cpu))
 			continue;
 		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
 			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
 			       cpu,
 			       counts[cpu],
 			       cpu_pda(cpu)->__nmi_count);
-			per_cpu(nmi_watchdog_ctlblk, cpu).enabled = 0;
+			per_cpu(wd_enabled, cpu) = 0;
 			atomic_dec(&nmi_active);
 		}
 	}
@@ -344,13 +130,8 @@ int __init check_nmi_watchdog (void)
 
 	/* now that we know it works we can reduce NMI frequency to
 	   something more reasonable; makes a difference in some configs */
-	if (nmi_watchdog == NMI_LOCAL_APIC) {
-		struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-		nmi_hz = 1;
-		if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1)
-			nmi_hz = adjust_for_32bit_ctr(nmi_hz);
-	}
+	if (nmi_watchdog == NMI_LOCAL_APIC)
+		nmi_hz = lapic_adjust_nmi_hz(1);
 
 	kfree(counts);
 	return 0;
@@ -379,57 +160,6 @@ int __init setup_nmi_watchdog(char *str)
 
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
-static void disable_lapic_nmi_watchdog(void)
-{
-	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);
-
-	if (atomic_read(&nmi_active) <= 0)
-		return;
-
-	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
-
-	BUG_ON(atomic_read(&nmi_active) != 0);
-}
-
-static void enable_lapic_nmi_watchdog(void)
-{
-	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);
-
-	/* are we already enabled */
-	if (atomic_read(&nmi_active) != 0)
-		return;
-
-	/* are we lapic aware */
-	if (nmi_known_cpu() <= 0)
-		return;
-
-	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
-	touch_nmi_watchdog();
-}
-
-void disable_timer_nmi_watchdog(void)
-{
-	BUG_ON(nmi_watchdog != NMI_IO_APIC);
-
-	if (atomic_read(&nmi_active) <= 0)
-		return;
-
-	disable_irq(0);
-	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
-
-	BUG_ON(atomic_read(&nmi_active) != 0);
-}
-
-void enable_timer_nmi_watchdog(void)
-{
-	BUG_ON(nmi_watchdog != NMI_IO_APIC);
-
-	if (atomic_read(&nmi_active) == 0) {
-		touch_nmi_watchdog();
-		on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
-		enable_irq(0);
-	}
-}
 
 static void __acpi_nmi_disable(void *__unused)
 {
@@ -515,275 +245,9 @@ late_initcall(init_lapic_nmi_sysfs);
 
 #endif	/* CONFIG_PM */
 
-/*
- * Activate the NMI watchdog via the local APIC.
- * Original code written by Keith Owens.
- */
-
-/* Note that these events don't tick when the CPU idles. This means
-   the frequency varies with CPU load. */
-
-#define K7_EVNTSEL_ENABLE	(1 << 22)
-#define K7_EVNTSEL_INT		(1 << 20)
-#define K7_EVNTSEL_OS		(1 << 17)
-#define K7_EVNTSEL_USR		(1 << 16)
-#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
-#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
-
-static int setup_k7_watchdog(void)
-{
-	unsigned int perfctr_msr, evntsel_msr;
-	unsigned int evntsel;
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-	perfctr_msr = MSR_K7_PERFCTR0;
-	evntsel_msr = MSR_K7_EVNTSEL0;
-	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
-		goto fail;
-
-	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
-		goto fail1;
-
-	/* Simulator may not support it */
-	if (checking_wrmsrl(evntsel_msr, 0UL))
-		goto fail2;
-	wrmsrl(perfctr_msr, 0UL);
-
-	evntsel = K7_EVNTSEL_INT
-		| K7_EVNTSEL_OS
-		| K7_EVNTSEL_USR
-		| K7_NMI_EVENT;
-
-	/* setup the timer */
-	wrmsr(evntsel_msr, evntsel, 0);
-	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
-	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	evntsel |= K7_EVNTSEL_ENABLE;
-	wrmsr(evntsel_msr, evntsel, 0);
-
-	wd->perfctr_msr = perfctr_msr;
-	wd->evntsel_msr = evntsel_msr;
-	wd->cccr_msr = 0;  //unused
-	wd->check_bit = 1ULL<<63;
-	return 1;
-fail2:
-	__release_evntsel_nmi(-1, evntsel_msr);
-fail1:
-	__release_perfctr_nmi(-1, perfctr_msr);
-fail:
-	return 0;
-}
-
-static void stop_k7_watchdog(void)
-{
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-	wrmsr(wd->evntsel_msr, 0, 0);
-
-	__release_evntsel_nmi(-1, wd->evntsel_msr);
-	__release_perfctr_nmi(-1, wd->perfctr_msr);
-}
-
-/* Note that these events don't tick when the CPU idles. This means
-   the frequency varies with CPU load. */
-
-#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
-#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
-#define P4_ESCR_OS		(1<<3)
-#define P4_ESCR_USR		(1<<2)
-#define P4_CCCR_OVF_PMI0	(1<<26)
-#define P4_CCCR_OVF_PMI1	(1<<27)
-#define P4_CCCR_THRESHOLD(N)	((N)<<20)
-#define P4_CCCR_COMPLEMENT	(1<<19)
-#define P4_CCCR_COMPARE		(1<<18)
-#define P4_CCCR_REQUIRED	(3<<16)
-#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
-#define P4_CCCR_ENABLE		(1<<12)
-#define P4_CCCR_OVF		(1<<31)
-/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
-   CRU_ESCR0 (with any non-null event selector) through a complemented
-   max threshold. [IA32-Vol3, Section 14.9.9] */
-static int setup_p4_watchdog(void)
-{
-	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
-	unsigned int evntsel, cccr_val;
-	unsigned int misc_enable, dummy;
-	unsigned int ht_num;
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-	rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
-	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
-		return 0;
-
-#ifdef CONFIG_SMP
-	/* detect which hyperthread we are on */
-	if (smp_num_siblings == 2) {
-		unsigned int ebx, apicid;
-
-		ebx = cpuid_ebx(1);
-		apicid = (ebx >> 24) & 0xff;
-		ht_num = apicid & 1;
-	} else
-#endif
-		ht_num = 0;
-
-	/* performance counters are shared resources
-	 * assign each hyperthread its own set
-	 * (re-use the ESCR0 register, seems safe
-	 * and keeps the cccr_val the same)
-	 */
-	if (!ht_num) {
-		/* logical cpu 0 */
-		perfctr_msr = MSR_P4_IQ_PERFCTR0;
-		evntsel_msr = MSR_P4_CRU_ESCR0;
-		cccr_msr = MSR_P4_IQ_CCCR0;
-		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
-	} else {
-		/* logical cpu 1 */
-		perfctr_msr = MSR_P4_IQ_PERFCTR1;
-		evntsel_msr = MSR_P4_CRU_ESCR0;
-		cccr_msr = MSR_P4_IQ_CCCR1;
-		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
-	}
-
-	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
-		goto fail;
-
-	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
-		goto fail1;
-
-	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
-		| P4_ESCR_OS
-		| P4_ESCR_USR;
-
-	cccr_val |= P4_CCCR_THRESHOLD(15)
-		 | P4_CCCR_COMPLEMENT
-		 | P4_CCCR_COMPARE
-		 | P4_CCCR_REQUIRED;
-
-	wrmsr(evntsel_msr, evntsel, 0);
-	wrmsr(cccr_msr, cccr_val, 0);
-	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
-	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	cccr_val |= P4_CCCR_ENABLE;
-	wrmsr(cccr_msr, cccr_val, 0);
-
-	wd->perfctr_msr = perfctr_msr;
-	wd->evntsel_msr = evntsel_msr;
-	wd->cccr_msr = cccr_msr;
-	wd->check_bit = 1ULL<<39;
-	return 1;
-fail1:
-	__release_perfctr_nmi(-1, perfctr_msr);
-fail:
-	return 0;
-}
-
-static void stop_p4_watchdog(void)
-{
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-	wrmsr(wd->cccr_msr, 0, 0);
-	wrmsr(wd->evntsel_msr, 0, 0);
-
-	__release_evntsel_nmi(-1, wd->evntsel_msr);
-	__release_perfctr_nmi(-1, wd->perfctr_msr);
-}
-
-#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
-#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
-
-static int setup_intel_arch_watchdog(void)
-{
-	unsigned int ebx;
-	union cpuid10_eax eax;
-	unsigned int unused;
-	unsigned int perfctr_msr, evntsel_msr;
-	unsigned int evntsel;
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-	/*
-	 * Check whether the Architectural PerfMon supports
-	 * Unhalted Core Cycles Event or not.
-	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
-	 */
-	cpuid(10, &(eax.full), &ebx, &unused, &unused);
-	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
-	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-		goto fail;
-
-	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR1;
-	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL1;
-
-	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
-		goto fail;
-
-	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
-		goto fail1;
-
-	wrmsrl(perfctr_msr, 0UL);
-
-	evntsel = ARCH_PERFMON_EVENTSEL_INT
-		| ARCH_PERFMON_EVENTSEL_OS
-		| ARCH_PERFMON_EVENTSEL_USR
-		| ARCH_PERFMON_NMI_EVENT_SEL
-		| ARCH_PERFMON_NMI_EVENT_UMASK;
-
-	/* setup the timer */
-	wrmsr(evntsel_msr, evntsel, 0);
-
-	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
-	wrmsr(perfctr_msr, (u32)(-((u64)cpu_khz * 1000 / nmi_hz)), 0);
-
-	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-	wrmsr(evntsel_msr, evntsel, 0);
-
-	wd->perfctr_msr = perfctr_msr;
-	wd->evntsel_msr = evntsel_msr;
-	wd->cccr_msr = 0;  //unused
-	wd->check_bit = 1ULL << (eax.split.bit_width - 1);
-	return 1;
-fail1:
-	__release_perfctr_nmi(-1, perfctr_msr);
-fail:
-	return 0;
-}
-
-static void stop_intel_arch_watchdog(void)
-{
-	unsigned int ebx;
-	union cpuid10_eax eax;
-	unsigned int unused;
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-	/*
-	 * Check whether the Architectural PerfMon supports
-	 * Unhalted Core Cycles Event or not.
-	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
-	 */
-	cpuid(10, &(eax.full), &ebx, &unused, &unused);
-	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
-	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-		return;
-
-	wrmsr(wd->evntsel_msr, 0, 0);
-
-	__release_evntsel_nmi(-1, wd->evntsel_msr);
-	__release_perfctr_nmi(-1, wd->perfctr_msr);
-}
-
 void setup_apic_nmi_watchdog(void *unused)
 {
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-	/* only support LOCAL and IO APICs for now */
-	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
-	    (nmi_watchdog != NMI_IO_APIC))
-		return;
-
-	if (wd->enabled == 1)
+	if (__get_cpu_var(wd_enabled) == 1)
 		return;
 
 	/* cheap hack to support suspend/resume */
@@ -791,62 +255,31 @@ void setup_apic_nmi_watchdog(void *unused)
 	if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
 		return;
 
-	if (nmi_watchdog == NMI_LOCAL_APIC) {
-		switch (boot_cpu_data.x86_vendor) {
-		case X86_VENDOR_AMD:
-			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
-				return;
-			if (!setup_k7_watchdog())
-				return;
-			break;
-		case X86_VENDOR_INTEL:
-			if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-				if (!setup_intel_arch_watchdog())
-					return;
-				break;
-			}
-			if (!setup_p4_watchdog())
-				return;
-			break;
-		default:
+	switch (nmi_watchdog) {
+	case NMI_LOCAL_APIC:
+		__get_cpu_var(wd_enabled) = 1;
+		if (lapic_watchdog_init(nmi_hz) < 0) {
+			__get_cpu_var(wd_enabled) = 0;
 			return;
 		}
+		/* FALL THROUGH */
+	case NMI_IO_APIC:
+		__get_cpu_var(wd_enabled) = 1;
+		atomic_inc(&nmi_active);
 	}
-	wd->enabled = 1;
-	atomic_inc(&nmi_active);
 }
 
 void stop_apic_nmi_watchdog(void *unused)
 {
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
 	/* only support LOCAL and IO APICs for now */
 	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
 	    (nmi_watchdog != NMI_IO_APIC))
 		return;
-
-	if (wd->enabled == 0)
+	if (__get_cpu_var(wd_enabled) == 0)
 		return;
-
-	if (nmi_watchdog == NMI_LOCAL_APIC) {
-		switch (boot_cpu_data.x86_vendor) {
-		case X86_VENDOR_AMD:
-			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
-				return;
-			stop_k7_watchdog();
-			break;
-		case X86_VENDOR_INTEL:
-			if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-				stop_intel_arch_watchdog();
-				break;
-			}
-			stop_p4_watchdog();
-			break;
-		default:
-			return;
-		}
-	}
+	if (nmi_watchdog == NMI_LOCAL_APIC)
+		lapic_watchdog_stop();
+	__get_cpu_var(wd_enabled) = 0;
 	atomic_dec(&nmi_active);
 }
 
@@ -885,9 +318,7 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 	int sum;
 	int touched = 0;
 	int cpu = smp_processor_id();
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-	u64 dummy;
-	int rc=0;
+	int rc = 0;
 
 	/* check for other users first */
 	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
@@ -934,55 +365,20 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 	}
 
 	/* see if the nmi watchdog went off */
-	if (wd->enabled) {
-		if (nmi_watchdog == NMI_LOCAL_APIC) {
-			rdmsrl(wd->perfctr_msr, dummy);
-			if (dummy & wd->check_bit){
-				/* this wasn't a watchdog timer interrupt */
-				goto done;
-			}
-
-			/* only Intel uses the cccr msr */
-			if (wd->cccr_msr != 0) {
-				/*
-				 * P4 quirks:
-				 * - An overflown perfctr will assert its interrupt
-				 *   until the OVF flag in its CCCR is cleared.
-				 * - LVTPC is masked on interrupt and must be
-				 *   unmasked by the LVTPC handler.
-				 */
-				rdmsrl(wd->cccr_msr, dummy);
-				dummy &= ~P4_CCCR_OVF;
-				wrmsrl(wd->cccr_msr, dummy);
-				apic_write(APIC_LVTPC, APIC_DM_NMI);
-				/* start the cycle over again */
-				wrmsrl(wd->perfctr_msr,
-				       -((u64)cpu_khz * 1000 / nmi_hz));
-			} else if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1) {
-				/*
-				 * ArchPerfom/Core Duo needs to re-unmask
-				 * the apic vector
-				 */
-				apic_write(APIC_LVTPC, APIC_DM_NMI);
-				/* ARCH_PERFMON has 32 bit counter writes */
-				wrmsr(wd->perfctr_msr,
-				      (u32)(-((u64)cpu_khz * 1000 / nmi_hz)), 0);
-			} else {
-				/* start the cycle over again */
-				wrmsrl(wd->perfctr_msr,
-				       -((u64)cpu_khz * 1000 / nmi_hz));
-			}
-			rc = 1;
-		} else if (nmi_watchdog == NMI_IO_APIC) {
-			/* don't know how to accurately check for this.
-			 * just assume it was a watchdog timer interrupt
-			 * This matches the old behaviour.
-			 */
-			rc = 1;
-		} else
-			printk(KERN_WARNING "Unknown enabled NMI hardware?!\n");
+	if (!__get_cpu_var(wd_enabled))
+		return rc;
+	switch (nmi_watchdog) {
+	case NMI_LOCAL_APIC:
+		rc |= lapic_wd_event(nmi_hz);
+		break;
+	case NMI_IO_APIC:
+		/* don't know how to accurately check for this.
+		 * just assume it was a watchdog timer interrupt
+		 * This matches the old behaviour.
+		 */
+		rc = 1;
+		break;
 	}
-done:
 	return rc;
 }
 
@@ -1067,12 +463,4 @@ void __trigger_all_cpu_backtrace(void)
 
 EXPORT_SYMBOL(nmi_active);
 EXPORT_SYMBOL(nmi_watchdog);
-EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
-EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
-EXPORT_SYMBOL(reserve_perfctr_nmi);
-EXPORT_SYMBOL(release_perfctr_nmi);
-EXPORT_SYMBOL(reserve_evntsel_nmi);
-EXPORT_SYMBOL(release_evntsel_nmi);
-EXPORT_SYMBOL(disable_timer_nmi_watchdog);
-EXPORT_SYMBOL(enable_timer_nmi_watchdog);
 EXPORT_SYMBOL(touch_nmi_watchdog);
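
The wd_ops of the title is the per-vendor dispatch table inside the shared perfctr-watchdog.c; it is what replaces the open-coded K7/P4/ArchPerfmon switch statements deleted above. A rough sketch of its shape, reconstructed from the duties the removed setup/stop/rearm code performed (field names are illustrative, not verbatim from the i386 file):

#include <linux/types.h>	/* u64 - kernel types assumed */

/* Hypothetical sketch: one such table per CPU vendor, selected at init. */
struct wd_ops {
	int	(*reserve)(void);		/* claim perfctr + evntsel MSRs */
	void	(*unreserve)(void);		/* release them */
	int	(*setup)(unsigned nmi_hz);	/* program counter to overflow nmi_hz times/sec */
	void	(*rearm)(unsigned nmi_hz);	/* NMI-time restart of the count cycle */
	void	(*stop)(void);			/* quiesce the counter */
	unsigned perfctr;			/* e.g. MSR_K7_PERFCTR0 */
	unsigned evntsel;			/* e.g. MSR_K7_EVNTSEL0 */
	u64	checkbit;			/* overflow bit tested in lapic_wd_event() */
};

With the MSR bookkeeping behind one such table, i386 and x86-64 share a single copy of the reservation, setup, and rearm logic instead of the ~650 duplicated lines this patch deletes from the 64-bit tree.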