author     Linus Torvalds <torvalds@linux-foundation.org>   2013-04-30 08:42:45 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-04-30 08:42:45 -0700
commit     5a5a1bf099d6942399ea0b34a62e5f0bc4c5c36e (patch)
tree       df094aa1544281ec0894eee48ad60c9d000a18ba /arch
parent     74c7d2f5200a340ae6655e9adcf990381e387937 (diff)
parent     5379f8c0d72cab43bbe6d974ceb3ad84dddc2b8e (diff)
download   linux-5a5a1bf099d6942399ea0b34a62e5f0bc4c5c36e.tar.bz2
Merge branch 'x86-ras-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 RAS changes from Ingo Molnar:
- Add an Intel CMCI hotplug fix
- Add AMD family 16h EDAC support
- Make the AMD MCE banks code more flexible for virtual environments (a short sketch of the MCG_CAP-based bank counting follows below)
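(Not part of the pull itself, but a rough sketch of what "more flexible" means here: the number of MCA banks is read from the low 8 bits of the MCG_CAP MSR rather than being hard-coded, and the MCE core caches it in mca_cfg.banks, which the AMD threshold code in the diff below then uses instead of NR_BANKS. The helper name is illustrative only.)

```c
#include <asm/mce.h>	/* MCG_BANKCNT_MASK */
#include <asm/msr.h>	/* rdmsrl(), MSR_IA32_MCG_CAP */

/*
 * Illustrative sketch only -- not code from this pull request.
 * MCG_CAP[7:0] reports how many MCA banks the CPU (or hypervisor)
 * exposes; the MCE core stores this in mca_cfg.banks, which the AMD
 * threshold code below now loops over instead of a fixed NR_BANKS.
 */
static unsigned int example_mce_bank_count(void)
{
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	return cap & MCG_BANKCNT_MASK;	/* bits 7:0 = number of banks */
}
```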
* 'x86-ras-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
amd64_edac: Add Family 16h support
x86/mce: Rework cmci_rediscover() to play well with CPU hotplug
x86, MCE, AMD: Use MCG_CAP MSR to find out number of banks on AMD
x86, MCE, AMD: Replace shared_bank array with is_shared_bank() helper
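(Again only as an illustration, not code from these commits: the cmci_rediscover() rework above drops the per-CPU work_on_cpu() loop, which interacts badly with CPU hotplug, in favor of a single on_each_cpu() call that runs a short, IPI-safe callback on every online CPU. A minimal sketch of that pattern, with hypothetical function names:)

```c
#include <linux/smp.h>	/* on_each_cpu() */

/* Illustrative only: the callback must be cheap and safe to run from IPI context. */
static void rediscover_one_cpu(void *info)
{
	/* per-CPU rediscovery work, e.g. re-running CMCI bank discovery */
}

static void rediscover_all_cpus(void)
{
	/* Run the callback on every online CPU; the final 1 means "wait for completion". */
	on_each_cpu(rediscover_one_cpu, NULL, 1);
}
```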
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/include/asm/mce.h               |  4
-rw-r--r--   arch/x86/kernel/amd_nb.c                 |  3
-rw-r--r--   arch/x86/kernel/cpu/mcheck/mce.c         |  2
-rw-r--r--   arch/x86/kernel/cpu/mcheck/mce_amd.c     | 39
-rw-r--r--   arch/x86/kernel/cpu/mcheck/mce_intel.c   | 25
5 files changed, 34 insertions, 39 deletions
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index f4076af1f4ed..fa5f71e021d5 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -146,13 +146,13 @@ DECLARE_PER_CPU(struct device *, mce_device);
 void mce_intel_feature_init(struct cpuinfo_x86 *c);
 void cmci_clear(void);
 void cmci_reenable(void);
-void cmci_rediscover(int dying);
+void cmci_rediscover(void);
 void cmci_recheck(void);
 #else
 static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
 static inline void cmci_clear(void) {}
 static inline void cmci_reenable(void) {}
-static inline void cmci_rediscover(int dying) {}
+static inline void cmci_rediscover(void) {}
 static inline void cmci_recheck(void) {}
 #endif
 
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 3684129be947..3048ded1b598 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -20,12 +20,14 @@ const struct pci_device_id amd_nb_misc_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
 	{}
 };
 EXPORT_SYMBOL(amd_nb_misc_ids);
 
 static const struct pci_device_id amd_nb_link_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
 	{}
 };
 
@@ -81,7 +83,6 @@ int amd_cache_northbridges(void)
 			next_northbridge(link, amd_nb_link_ids);
 	}
 
-	/* some CPU families (e.g. family 0x11) do not support GART */
 	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
 	    boot_cpu_data.x86 == 0x15)
 		amd_northbridges.flags |= AMD_NB_GART;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 7bc126346ace..9239504b41cb 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2358,7 +2358,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
 	if (action == CPU_POST_DEAD) {
 		/* intentionally ignoring frozen here */
-		cmci_rediscover(cpu);
+		cmci_rediscover();
 	}
 
 	return NOTIFY_OK;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 1ac581f38dfa..9cb52767999a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -33,7 +33,6 @@
 #include <asm/mce.h>
 #include <asm/msr.h>
 
-#define NR_BANKS	6
 #define NR_BLOCKS	9
 #define THRESHOLD_MAX	0xFFF
 #define INT_TYPE_APIC	0x00020000
@@ -57,12 +56,7 @@ static const char * const th_names[] = {
 	"execution_unit",
 };
 
-static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
-
-static unsigned char shared_bank[NR_BANKS] = {
-	0, 0, 0, 0, 1
-};
-
+static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
 static DEFINE_PER_CPU(unsigned char, bank_map);	/* see which banks are on */
 
 static void amd_threshold_interrupt(void);
@@ -79,6 +73,12 @@ struct thresh_restart {
 	u16			old_limit;
 };
 
+static inline bool is_shared_bank(int bank)
+{
+	/* Bank 4 is for northbridge reporting and is thus shared */
+	return (bank == 4);
+}
+
 static const char * const bank4_names(struct threshold_block *b)
 {
 	switch (b->address) {
@@ -214,7 +214,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 	unsigned int bank, block;
 	int offset = -1;
 
-	for (bank = 0; bank < NR_BANKS; ++bank) {
+	for (bank = 0; bank < mca_cfg.banks; ++bank) {
 		for (block = 0; block < NR_BLOCKS; ++block) {
 			if (block == 0)
 				address = MSR_IA32_MC0_MISC + bank * 4;
@@ -276,7 +276,7 @@ static void amd_threshold_interrupt(void)
 	mce_setup(&m);
 
 	/* assume first bank caused it */
-	for (bank = 0; bank < NR_BANKS; ++bank) {
+	for (bank = 0; bank < mca_cfg.banks; ++bank) {
 		if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
 			continue;
 		for (block = 0; block < NR_BLOCKS; ++block) {
@@ -467,7 +467,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
 	u32 low, high;
 	int err;
 
-	if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
+	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
 		return 0;
 
 	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
@@ -575,7 +575,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	const char *name = th_names[bank];
 	int err = 0;
 
-	if (shared_bank[bank]) {
+	if (is_shared_bank(bank)) {
 		nb = node_to_amd_nb(amd_get_nb_id(cpu));
 
 		/* threshold descriptor already initialized on this node? */
@@ -609,7 +609,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 	per_cpu(threshold_banks, cpu)[bank] = b;
 
-	if (shared_bank[bank]) {
+	if (is_shared_bank(bank)) {
 		atomic_set(&b->cpus, 1);
 
 		/* nb is already initialized, see above */
@@ -635,9 +635,17 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 static __cpuinit int threshold_create_device(unsigned int cpu)
 {
 	unsigned int bank;
+	struct threshold_bank **bp;
 	int err = 0;
 
-	for (bank = 0; bank < NR_BANKS; ++bank) {
+	bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
+		     GFP_KERNEL);
+	if (!bp)
+		return -ENOMEM;
+
+	per_cpu(threshold_banks, cpu) = bp;
+
+	for (bank = 0; bank < mca_cfg.banks; ++bank) {
 		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
 			continue;
 		err = threshold_create_bank(cpu, bank);
@@ -691,7 +699,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 	if (!b->blocks)
 		goto free_out;
 
-	if (shared_bank[bank]) {
+	if (is_shared_bank(bank)) {
 		if (!atomic_dec_and_test(&b->cpus)) {
 			__threshold_remove_blocks(b);
 			per_cpu(threshold_banks, cpu)[bank] = NULL;
@@ -719,11 +727,12 @@ static void threshold_remove_device(unsigned int cpu)
 {
 	unsigned int bank;
 
-	for (bank = 0; bank < NR_BANKS; ++bank) {
+	for (bank = 0; bank < mca_cfg.banks; ++bank) {
 		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
 			continue;
 		threshold_remove_bank(cpu, bank);
 	}
+	kfree(per_cpu(threshold_banks, cpu));
 }
 
 /* get notified when a cpu comes on/off */
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 402c454fbff0..ae1697c2afe3 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -285,39 +285,24 @@ void cmci_clear(void)
 	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
-static long cmci_rediscover_work_func(void *arg)
+static void cmci_rediscover_work_func(void *arg)
 {
 	int banks;
 
 	/* Recheck banks in case CPUs don't all have the same */
 	if (cmci_supported(&banks))
 		cmci_discover(banks);
-
-	return 0;
 }
 
-/*
- * After a CPU went down cycle through all the others and rediscover
- * Must run in process context.
- */
-void cmci_rediscover(int dying)
+/* After a CPU went down cycle through all the others and rediscover */
+void cmci_rediscover(void)
 {
-	int cpu, banks;
+	int banks;
 
 	if (!cmci_supported(&banks))
 		return;
 
-	for_each_online_cpu(cpu) {
-		if (cpu == dying)
-			continue;
-
-		if (cpu == smp_processor_id()) {
-			cmci_rediscover_work_func(NULL);
-			continue;
-		}
-
-		work_on_cpu(cpu, cmci_rediscover_work_func, NULL);
-	}
+	on_each_cpu(cmci_rediscover_work_func, NULL, 1);
 }
 
 /*