author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-07 15:36:08 -0800
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-07 15:36:08 -0800
commit    21d37bbc65e39a26856de6b14be371ff24e0d03f (patch)
tree      a04bb72e191cae13f47462c57bb1641c42b7b52b /arch/x86_64
parent    bff288c19e8b6217ddd660d4fa42c29a0ab1d58c (diff)
parent    57e1c5c87db512629dd44ddeb882a5aaf0e4299e (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6: (140 commits)
  ACPICA: reduce table header messages to fit within 80 columns
  asus-laptop: merge with ACPICA table update
  ACPI: bay: Convert ACPI Bay driver to be compatible with sysfs update.
  ACPI: bay: new driver is EXPERIMENTAL
  ACPI: bay: make drive_bays static
  ACPI: bay: make bay a platform driver
  ACPI: bay: remove prototype procfs code
  ACPI: bay: delete unused variable
  ACPI: bay: new driver adding removable drive bay support
  ACPI: dock: check if parent is on dock
  ACPICA: fix gcc build warnings
  Altix: Add ACPI SSDT PCI device support (hotplug)
  Altix: ACPI SSDT PCI device support
  ACPICA: reduce conflicts with Altix patch series
  ACPI_NUMA: fix HP IA64 simulator issue with extended memory domain
  ACPI: fix HP RX2600 IA64 boot
  ACPI: build fix for IBM x440 - CONFIG_X86_SUMMIT
  ACPICA: Update version to 20070126
  ACPICA: Fix for incorrect parameter passed to AcpiTbDeleteTable during table load.
  ACPICA: Update copyright to 2007.
  ...
Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/kernel/early-quirks.c  |   4
-rw-r--r--  arch/x86_64/kernel/genapic.c       |   4
-rw-r--r--  arch/x86_64/kernel/mpparse.c       |   2
-rw-r--r--  arch/x86_64/kernel/time.c          |  18
-rw-r--r--  arch/x86_64/mm/srat.c              |  48
-rw-r--r--  arch/x86_64/pci/mmconfig.c         |  29
6 files changed, 54 insertions, 51 deletions
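
The common thread in the arch/x86_64 changes below is the ACPICA table-manager rework: acpi_table_parse() handlers now receive a mapped struct acpi_table_header * instead of a (phys, size) pair, table signatures are passed as strings such as ACPI_SIG_HPET, and the old acpi_fadt global with bit-field flags is replaced by acpi_gbl_FADT with flag masks. A minimal kernel-side sketch of the new handler style, modeled on the nvidia_hpet_check() change below (the example_* names are illustrative, not part of the patch):

#include <linux/acpi.h>
#include <linux/init.h>

static int example_hpet_found __initdata;

/* New-style handler: gets the mapped table header rather than (phys, size). */
static int __init example_hpet_check(struct acpi_table_header *header)
{
	example_hpet_found = 1;
	return 0;
}

static void __init example_probe_hpet(void)
{
	/* Signatures are now 4-character strings; ACPI_SIG_HPET is "HPET". */
	acpi_table_parse(ACPI_SIG_HPET, example_hpet_check);

	/*
	 * FADT fields are likewise read through the acpi_gbl_FADT global,
	 * e.g. acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL, as in the
	 * genapic.c hunk below.
	 */
}
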
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index 49802f1bee94..bd30d138113f 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -32,7 +32,7 @@ static void via_bugs(void)
static int nvidia_hpet_detected __initdata;
-static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+static int __init nvidia_hpet_check(struct acpi_table_header *header)
{
nvidia_hpet_detected = 1;
return 0;
@@ -53,7 +53,7 @@ static void nvidia_bugs(void)
return;
nvidia_hpet_detected = 0;
- acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
+ acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
if (nvidia_hpet_detected == 0) {
acpi_skip_timer_override = 1;
printk(KERN_INFO "Nvidia board "
diff --git a/arch/x86_64/kernel/genapic.c b/arch/x86_64/kernel/genapic.c
index b007433f96bb..0b3603adf56d 100644
--- a/arch/x86_64/kernel/genapic.c
+++ b/arch/x86_64/kernel/genapic.c
@@ -58,8 +58,8 @@ void __init clustered_apic_check(void)
* Some x86_64 machines use physical APIC mode regardless of how many
* procs/clusters are present (x86_64 ES7000 is an example).
*/
- if (acpi_fadt.revision > FADT2_REVISION_ID)
- if (acpi_fadt.force_apic_physical_destination_mode) {
+ if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID)
+ if (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) {
genapic = &apic_cluster;
goto print;
}
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 08072568847d..50dd8bef850e 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -798,7 +798,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
return gsi;
/* Don't set up the ACPI SCI because it's already set up */
- if (acpi_fadt.sci_int == gsi)
+ if (acpi_gbl_FADT.sci_interrupt == gsi)
return gsi;
ioapic = mp_find_ioapic(gsi);
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 5cc76d0d331f..335cc91c49b7 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -498,7 +498,7 @@ static unsigned long get_cmos_time(void)
{
unsigned int year, mon, day, hour, min, sec;
unsigned long flags;
- unsigned extyear = 0;
+ unsigned century = 0;
spin_lock_irqsave(&rtc_lock, flags);
@@ -510,9 +510,9 @@ static unsigned long get_cmos_time(void)
mon = CMOS_READ(RTC_MONTH);
year = CMOS_READ(RTC_YEAR);
#ifdef CONFIG_ACPI
- if (acpi_fadt.revision >= FADT2_REVISION_ID &&
- acpi_fadt.century)
- extyear = CMOS_READ(acpi_fadt.century);
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+ acpi_gbl_FADT.century)
+ century = CMOS_READ(acpi_gbl_FADT.century);
#endif
} while (sec != CMOS_READ(RTC_SECONDS));
@@ -530,10 +530,10 @@ static unsigned long get_cmos_time(void)
BCD_TO_BIN(mon);
BCD_TO_BIN(year);
- if (extyear) {
- BCD_TO_BIN(extyear);
- year += extyear;
- printk(KERN_INFO "Extended CMOS year: %d\n", extyear);
+ if (century) {
+ BCD_TO_BIN(century);
+ year += century * 100;
+ printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
} else {
/*
* x86-64 systems only exists since 2002.
@@ -954,7 +954,7 @@ __cpuinit int unsynchronized_tsc(void)
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
#ifdef CONFIG_ACPI
/* But TSC doesn't tick in C3 so don't use it there */
- if (acpi_fadt.length > 0 && acpi_fadt.plvl3_lat < 1000)
+ if (acpi_gbl_FADT.header.length > 0 && acpi_gbl_FADT.C3latency < 1000)
return 1;
#endif
return 0;
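
The get_cmos_time() hunk above is more than a rename: the old code treated the CMOS century register value as a complete "extended year" and added it to the two-digit year directly, while the fixed code scales it by 100. A small standalone sketch of the corrected arithmetic, using made-up BCD register values:

#include <stdio.h>

#define BCD_TO_BIN(val) ((val) = ((val) & 0x0f) + ((val) >> 4) * 10)

int main(void)
{
	unsigned year = 0x07;		/* hypothetical RTC year register (BCD) */
	unsigned century = 0x20;	/* hypothetical FADT-designated century register (BCD) */

	BCD_TO_BIN(year);		/* 0x07 -> 7 */
	BCD_TO_BIN(century);		/* 0x20 -> 20 */
	year += century * 100;		/* 7 + 20 * 100 = 2007 */

	printf("year = %u\n", year);
	return 0;
}
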
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 1087e150a218..2efe215fc76a 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -101,7 +101,7 @@ static __init inline int srat_disabled(void)
static __init int slit_valid(struct acpi_table_slit *slit)
{
int i, j;
- int d = slit->localities;
+ int d = slit->locality_count;
for (i = 0; i < d; i++) {
for (j = 0; j < d; j++) {
u8 val = slit->entry[d*i + j];
@@ -127,18 +127,18 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
/* Callback for Proximity Domain -> LAPIC mapping */
void __init
-acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
+acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
int pxm, node;
if (srat_disabled())
return;
- if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
+ if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
bad_srat();
return;
}
- if (pa->flags.enabled == 0)
+ if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
return;
- pxm = pa->proximity_domain;
+ pxm = pa->proximity_domain_lo;
node = setup_node(pxm);
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
@@ -254,21 +254,21 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
/* Looks good */
if (nd->start == nd->end) {
- nd->start = start;
- nd->end = end;
+ nd->start = start;
+ nd->end = end;
changed = 1;
- } else {
- if (nd->start == end) {
- nd->start = start;
+ } else {
+ if (nd->start == end) {
+ nd->start = start;
changed = 1;
}
- if (nd->end == start) {
- nd->end = end;
+ if (nd->end == start) {
+ nd->end = end;
changed = 1;
}
if (!changed)
printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
- }
+ }
ret = update_end_of_memory(nd->end);
@@ -279,7 +279,7 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
void __init
-acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
+acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
struct bootnode *nd, oldnode;
unsigned long start, end;
@@ -288,16 +288,17 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
if (srat_disabled())
return;
- if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
+ if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
bad_srat();
return;
}
- if (ma->flags.enabled == 0)
+ if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
return;
- if (ma->flags.hot_pluggable && !save_add_info())
+
+ if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
return;
- start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
- end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
+ start = ma->base_address;
+ end = start + ma->length;
pxm = ma->proximity_domain;
node = setup_node(pxm);
if (node < 0) {
@@ -337,7 +338,8 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
push_node_boundaries(node, nd->start >> PAGE_SHIFT,
nd->end >> PAGE_SHIFT);
- if (ma->flags.hot_pluggable && (reserve_hotadd(node, start, end) < 0)) {
+ if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) &&
+ (reserve_hotadd(node, start, end) < 0)) {
/* Ignore hotadd region. Undo damage */
printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
*nd = oldnode;
@@ -394,7 +396,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
/* First clean up the node list */
for (i = 0; i < MAX_NUMNODES; i++) {
- cutoff_node(i, start, end);
+ cutoff_node(i, start, end);
if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
unparse_node(i);
node_set_offline(i);
@@ -426,7 +428,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
if (!node_online(i))
setup_node_bootmem(i, nodes[i].start, nodes[i].end);
- for (i = 0; i < NR_CPUS; i++) {
+ for (i = 0; i < NR_CPUS; i++) {
if (cpu_to_node[i] == NUMA_NO_NODE)
continue;
if (!node_isset(cpu_to_node[i], nodes_parsed))
@@ -461,7 +463,7 @@ int __node_distance(int a, int b)
if (!acpi_slit)
return a == b ? 10 : 20;
- index = acpi_slit->localities * node_to_pxm(a);
+ index = acpi_slit->locality_count * node_to_pxm(a);
return acpi_slit->entry[index + node_to_pxm(b)];
}
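
For reference, the renamed SLIT fields above describe a flat locality_count x locality_count matrix of relative distances, indexed row-major by proximity domain exactly as __node_distance() does. A standalone sketch with a made-up 3x3 matrix:

#include <stdio.h>

int main(void)
{
	/* Hypothetical SLIT entries: distance from domain i to domain j, row-major. */
	unsigned char entry[] = {
		10, 20, 30,
		20, 10, 20,
		30, 20, 10,
	};
	unsigned locality_count = 3;
	unsigned a = 0, b = 2;	/* proximity domains to compare */
	unsigned index = locality_count * a;

	printf("distance(%u, %u) = %u\n", a, b, entry[index + b]);
	return 0;
}
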
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c
index f8b6b2800a62..faabb6e87f12 100644
--- a/arch/x86_64/pci/mmconfig.c
+++ b/arch/x86_64/pci/mmconfig.c
@@ -1,6 +1,6 @@
/*
* mmconfig.c - Low-level direct PCI config space access via MMCONFIG
- *
+ *
* This is an 64bit optimized version that always keeps the full mmconfig
* space mapped. This allows lockless config space operation.
*/
@@ -25,7 +25,7 @@ static DECLARE_BITMAP(fallback_slots, 32*MAX_CHECK_BUS);
/* Static virtual mapping of the MMCONFIG aperture */
struct mmcfg_virt {
- struct acpi_table_mcfg_config *cfg;
+ struct acpi_mcfg_allocation *cfg;
char __iomem *virt;
};
static struct mmcfg_virt *pci_mmcfg_virt;
@@ -33,14 +33,14 @@ static struct mmcfg_virt *pci_mmcfg_virt;
static char __iomem *get_virt(unsigned int seg, unsigned bus)
{
int cfg_num = -1;
- struct acpi_table_mcfg_config *cfg;
+ struct acpi_mcfg_allocation *cfg;
while (1) {
++cfg_num;
if (cfg_num >= pci_mmcfg_config_num)
break;
cfg = pci_mmcfg_virt[cfg_num].cfg;
- if (cfg->pci_segment_group_number != seg)
+ if (cfg->pci_segment != seg)
continue;
if ((cfg->start_bus_number <= bus) &&
(cfg->end_bus_number >= bus))
@@ -52,7 +52,7 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
this applies to all busses. */
cfg = &pci_mmcfg_config[0];
if (pci_mmcfg_config_num == 1 &&
- cfg->pci_segment_group_number == 0 &&
+ cfg->pci_segment == 0 &&
(cfg->start_bus_number | cfg->end_bus_number) == 0)
return pci_mmcfg_virt[0].virt;
@@ -170,19 +170,19 @@ void __init pci_mmcfg_init(int type)
if ((pci_probe & PCI_PROBE_MMCONF) == 0)
return;
- acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
+ acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
if ((pci_mmcfg_config_num == 0) ||
(pci_mmcfg_config == NULL) ||
- (pci_mmcfg_config[0].base_address == 0))
+ (pci_mmcfg_config[0].address == 0))
return;
/* Only do this check when type 1 works. If it doesn't work
assume we run on a Mac and always use MCFG */
- if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address,
- pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
+ if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address,
+ pci_mmcfg_config[0].address + MMCONFIG_APER_MIN,
E820_RESERVED)) {
- printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
- pci_mmcfg_config[0].base_address);
+ printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n",
+ (unsigned long)pci_mmcfg_config[0].address);
printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
return;
}
@@ -194,15 +194,16 @@ void __init pci_mmcfg_init(int type)
}
for (i = 0; i < pci_mmcfg_config_num; ++i) {
pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
- pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address,
+ pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].address,
MMCONFIG_APER_MAX);
if (!pci_mmcfg_virt[i].virt) {
printk(KERN_ERR "PCI: Cannot map mmconfig aperture for "
"segment %d\n",
- pci_mmcfg_config[i].pci_segment_group_number);
+ pci_mmcfg_config[i].pci_segment);
return;
}
- printk(KERN_INFO "PCI: Using MMCONFIG at %x\n", pci_mmcfg_config[i].base_address);
+ printk(KERN_INFO "PCI: Using MMCONFIG at %lx\n",
+ (unsigned long)pci_mmcfg_config[i].address);
}
unreachable_devices();
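
The mmconfig.c changes are field renames on struct acpi_mcfg_allocation (pci_segment, address) plus printk format fixes for the now 64-bit address; the aperture lookup logic itself is unchanged: match the PCI segment and bus range, with a fallback to entry 0 when a single all-zero-bus entry covers everything. A standalone sketch of that selection, using a simplified stand-in structure and made-up data:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for struct acpi_mcfg_allocation. */
struct mcfg_entry {
	uint64_t address;		/* base of the MMCONFIG aperture */
	uint16_t pci_segment;
	uint8_t start_bus_number;
	uint8_t end_bus_number;
};

static const struct mcfg_entry cfg[] = {
	{ 0xe0000000ULL, 0, 0, 255 },	/* hypothetical single aperture */
};

static const struct mcfg_entry *find_aperture(unsigned seg, unsigned bus)
{
	unsigned i;

	for (i = 0; i < sizeof(cfg) / sizeof(cfg[0]); i++) {
		if (cfg[i].pci_segment == seg &&
		    cfg[i].start_bus_number <= bus &&
		    cfg[i].end_bus_number >= bus)
			return &cfg[i];
	}
	return NULL;
}

int main(void)
{
	const struct mcfg_entry *e = find_aperture(0, 3);

	if (e)
		printf("MMCONFIG aperture at %#llx\n",
		       (unsigned long long)e->address);
	return 0;
}
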