-rw-r--r--  Documentation/kernel-parameters.txt |  2
-rw-r--r--  arch/x86/kernel/cpu/common.c        | 30
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c     | 22
-rw-r--r--  arch/x86/kernel/e820_32.c           | 11
-rw-r--r--  arch/x86/kernel/setup_32.c          |  6
-rw-r--r--  include/asm-x86/e820_32.h           |  3
6 files changed, 64 insertions, 10 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 50d564dabb13..fe3031d56431 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -583,7 +583,7 @@ and is between 256 and 4096 characters. It is defined in the file
See drivers/char/README.epca and
Documentation/digiepca.txt.
- disable_mtrr_trim [X86-64, Intel only]
+ disable_mtrr_trim [X86, Intel and AMD only]
By default the kernel will trim any uncacheable
memory out of your available memory pool based on
MTRR settings. This parameter disables that behavior,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 56cc341cc586..bba850b05d0e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -278,6 +278,33 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
}
}
+static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
+{
+ u32 tfms, xlvl;
+ int ebx;
+
+ memset(&c->x86_capability, 0, sizeof c->x86_capability);
+ if (have_cpuid_p()) {
+ /* Intel-defined flags: level 0x00000001 */
+ if (c->cpuid_level >= 0x00000001) {
+ u32 capability, excap;
+ cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
+ c->x86_capability[0] = capability;
+ c->x86_capability[4] = excap;
+ }
+
+ /* AMD-defined flags: level 0x80000001 */
+ xlvl = cpuid_eax(0x80000000);
+ if ((xlvl & 0xffff0000) == 0x80000000) {
+ if (xlvl >= 0x80000001) {
+ c->x86_capability[1] = cpuid_edx(0x80000001);
+ c->x86_capability[6] = cpuid_ecx(0x80000001);
+ }
+ }
+
+ }
+
+}
/* Do minimum CPU detection early.
Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
@@ -306,6 +333,8 @@ static void __init early_cpu_detect(void)
early_init_intel(c);
break;
}
+
+ early_get_cap(c);
}
static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
@@ -485,7 +514,6 @@ void __init identify_boot_cpu(void)
identify_cpu(&boot_cpu_data);
sysenter_setup();
enable_sep_cpu();
- mtrr_bp_init();
}
void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
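The new early_get_cap() above fills the x86_capability words from CPUID leaves 0x00000001 and 0x80000001 during early detection, so code that now runs before full identify_cpu() (notably the MTRR setup moved into setup_arch) can already test feature bits. A minimal user-space sketch of the same two reads, using GCC/Clang's <cpuid.h> helpers rather than the kernel's cpuid() wrappers; the guard on leaf 0x80000000 mirrors the xlvl check in the hunk above:

/* User-space sketch, not kernel code: read the standard and extended
 * capability leaves that early_get_cap() consults. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, xlvl;

	/* Intel-defined flags: level 0x00000001 */
	if (__get_cpuid(0x00000001, &eax, &ebx, &ecx, &edx))
		printf("leaf 0x00000001: edx=%08x ecx=%08x\n", edx, ecx);

	/* Extended leaves only exist if leaf 0x80000000 reports them. */
	xlvl = __get_cpuid_max(0x80000000, NULL);
	if (xlvl >= 0x80000001 &&
	    __get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
		printf("leaf 0x80000001: edx=%08x ecx=%08x\n", edx, ecx);

	return 0;
}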
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index ccd36ed2187b..ac4b6338f3f4 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -624,7 +624,6 @@ static struct sysdev_driver mtrr_sysdev_driver = {
.resume = mtrr_restore,
};
-#ifdef CONFIG_X86_64
static int disable_mtrr_trim;
static int __init disable_mtrr_trim_setup(char *str)
@@ -643,13 +642,10 @@ early_param("disable_mtrr_trim", disable_mtrr_trim_setup);
#define Tom2Enabled (1U << 21)
#define Tom2ForceMemTypeWB (1U << 22)
-static __init int amd_special_default_mtrr(unsigned long end_pfn)
+static __init int amd_special_default_mtrr(void)
{
u32 l, h;
- /* Doesn't apply to memory < 4GB */
- if (end_pfn <= (0xffffffff >> PAGE_SHIFT))
- return 0;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return 0;
if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
@@ -687,9 +683,14 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
* Make sure we only trim uncachable memory on machines that
* support the Intel MTRR architecture:
*/
+ if (!is_cpu(INTEL) || disable_mtrr_trim)
+ return 0;
rdmsr(MTRRdefType_MSR, def, dummy);
def &= 0xff;
- if (!is_cpu(INTEL) || disable_mtrr_trim || def != MTRR_TYPE_UNCACHABLE)
+ if (def != MTRR_TYPE_UNCACHABLE)
+ return 0;
+
+ if (amd_special_default_mtrr())
return 0;
/* Find highest cached pfn */
@@ -703,8 +704,14 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
highest_addr = base + size;
}
- if (amd_special_default_mtrr(end_pfn))
+ /* kvm/qemu doesn't have mtrr set right, don't trim them all */
+ if (!highest_addr) {
+ printk(KERN_WARNING "***************\n");
+ printk(KERN_WARNING "**** WARNING: likely strange cpu\n");
+ printk(KERN_WARNING "**** MTRRs all blank, cpu in qemu?\n");
+ printk(KERN_WARNING "***************\n");
return 0;
+ }
if ((highest_addr >> PAGE_SHIFT) < end_pfn) {
printk(KERN_WARNING "***************\n");
@@ -726,7 +733,6 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
return 0;
}
-#endif
/**
* mtrr_bp_init - initialize mtrrs on the boot CPU
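With this change mtrr_trim_uncached_memory() bails out early for non-Intel-style MTRRs, disable_mtrr_trim, a non-uncachable default type, or the AMD Tom2 force-WB case, and only then scans the variable ranges, warning instead of trimming when every MTRR is blank (the kvm/qemu case). A rough user-space model of the remaining decision, finding the highest address covered by a write-back variable MTRR and comparing it against end_pfn; the ranges and sizes below are made up for illustration:

/* Illustrative user-space sketch, not the kernel implementation. */
#include <stdio.h>

#define PAGE_SHIFT 12

struct wb_range { unsigned long long base, size; };

static unsigned long long highest_cached(const struct wb_range *r, int n)
{
	unsigned long long highest = 0;
	int i;

	for (i = 0; i < n; i++)
		if (r[i].base + r[i].size > highest)
			highest = r[i].base + r[i].size;
	return highest;
}

int main(void)
{
	/* Hypothetical machine: one 4GB WB MTRR, but 6GB of RAM reported. */
	struct wb_range wb[] = { { 0, 0x100000000ULL } };
	unsigned long long end_pfn = 0x180000000ULL >> PAGE_SHIFT;
	unsigned long long highest = highest_cached(wb, 1);

	if (!highest)
		printf("MTRRs all blank, leaving the memory map alone\n");
	else if ((highest >> PAGE_SHIFT) < end_pfn)
		printf("would trim %llu pages above %#llx\n",
		       end_pfn - (highest >> PAGE_SHIFT), highest);
	else
		printf("all RAM covered by WB MTRRs\n");
	return 0;
}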
diff --git a/arch/x86/kernel/e820_32.c b/arch/x86/kernel/e820_32.c
index 931934a7b353..4e16ef4a2659 100644
--- a/arch/x86/kernel/e820_32.c
+++ b/arch/x86/kernel/e820_32.c
@@ -749,3 +749,14 @@ static int __init parse_memmap(char *arg)
return 0;
}
early_param("memmap", parse_memmap);
+void __init update_e820(void)
+{
+ u8 nr_map;
+
+ nr_map = e820.nr_map;
+ if (sanitize_e820_map(e820.map, &nr_map))
+ return;
+ e820.nr_map = nr_map;
+ printk(KERN_INFO "modified physical RAM map:\n");
+ print_memory_map("modified");
+}
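update_e820() commits a re-sanitized map only when sanitize_e820_map() succeeds, and the header change further down also exports add_memory_region(), presumably so the trim path can reserve the range not covered by WB MTRRs before re-sanitizing. A small user-space model of that expected usage; the constants, the stub table, and the trimmed range are made up for illustration and stand in for the kernel's e820:

/* User-space model, not the kernel API: append a reserved region for
 * the trimmed range, then print the table.  In the kernel, update_e820()
 * would re-sanitize the map at this point; here we only print it. */
#include <stdio.h>

#define E820_RAM	1
#define E820_RESERVED	2
#define E820MAX		32

struct e820entry { unsigned long long addr, size; int type; };
static struct e820entry map[E820MAX];
static int nr_map;

static void add_memory_region(unsigned long long start,
			      unsigned long long size, int type)
{
	if (nr_map < E820MAX) {
		map[nr_map].addr = start;
		map[nr_map].size = size;
		map[nr_map].type = type;
		nr_map++;
	}
}

int main(void)
{
	int i;

	add_memory_region(0, 0x100000000ULL, E820_RAM);	/* 4GB of RAM */
	/* Pretend the MTRR scan found 2GB above 4GB uncovered by WB. */
	add_memory_region(0x100000000ULL, 0x80000000ULL, E820_RESERVED);

	printf("modified physical RAM map:\n");
	for (i = 0; i < nr_map; i++)
		printf("  %#llx - %#llx type %d\n", map[i].addr,
		       map[i].addr + map[i].size, map[i].type);
	return 0;
}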
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 26a56f714d34..83ba3ca5f431 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -48,6 +48,7 @@
#include <video/edid.h>
+#include <asm/mtrr.h>
#include <asm/apic.h>
#include <asm/e820.h>
#include <asm/mpspec.h>
@@ -758,6 +759,11 @@ void __init setup_arch(char **cmdline_p)
max_low_pfn = setup_memory();
+ /* update e820 for memory not covered by WB MTRRs */
+ mtrr_bp_init();
+ if (mtrr_trim_uncached_memory(max_pfn))
+ max_low_pfn = setup_memory();
+
#ifdef CONFIG_VMI
/*
* Must be after max_low_pfn is determined, and before kernel
diff --git a/include/asm-x86/e820_32.h b/include/asm-x86/e820_32.h
index e2faf5f3a0bb..f1da7ebd1905 100644
--- a/include/asm-x86/e820_32.h
+++ b/include/asm-x86/e820_32.h
@@ -19,12 +19,15 @@
#ifndef __ASSEMBLY__
extern struct e820map e820;
+extern void update_e820(void);
extern int e820_all_mapped(unsigned long start, unsigned long end,
unsigned type);
extern int e820_any_mapped(u64 start, u64 end, unsigned type);
extern void find_max_pfn(void);
extern void register_bootmem_low_pages(unsigned long max_low_pfn);
+extern void add_memory_region(unsigned long long start,
+ unsigned long long size, int type);
extern void e820_register_memory(void);
extern void limit_regions(unsigned long long size);
extern void print_memory_map(char *who);