Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                |   1
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c  |  24
-rw-r--r--  arch/ia64/hp/sim/simscsi.c       |   4
-rw-r--r--  arch/ia64/include/asm/mutex.h    |  10
-rw-r--r--  arch/ia64/include/asm/pci.h      |  10
-rw-r--r--  arch/ia64/include/asm/pgtable.h  |   3
-rw-r--r--  arch/ia64/kernel/acpi.c          |   4
-rw-r--r--  arch/ia64/kernel/efi.c           |   5
-rw-r--r--  arch/ia64/kernel/err_inject.c    |   8
-rw-r--r--  arch/ia64/kernel/head.S          |   2
-rw-r--r--  arch/ia64/kernel/mca.c           |  12
-rw-r--r--  arch/ia64/kernel/numa.c          |   4
-rw-r--r--  arch/ia64/kernel/palinfo.c       |   4
-rw-r--r--  arch/ia64/kernel/pci-dma.c       |   9
-rw-r--r--  arch/ia64/kernel/perfmon.c       |  20
-rw-r--r--  arch/ia64/kernel/salinfo.c       |   4
-rw-r--r--  arch/ia64/kernel/setup.c         |  10
-rw-r--r--  arch/ia64/kernel/smpboot.c       |   8
-rw-r--r--  arch/ia64/kernel/topology.c      |  18
-rw-r--r--  arch/ia64/kernel/traps.c         |   2
-rw-r--r--  arch/ia64/kvm/Makefile           |   7
-rw-r--r--  arch/ia64/mm/contig.c            |  14
-rw-r--r--  arch/ia64/mm/discontig.c         |   5
-rw-r--r--  arch/ia64/mm/init.c              |  41
-rw-r--r--  arch/ia64/mm/numa.c              |   2
-rw-r--r--  arch/ia64/pci/pci.c              | 239
-rw-r--r--  arch/ia64/sn/kernel/io_init.c    | 122
-rw-r--r--  arch/ia64/sn/kernel/setup.c      |   8
-rw-r--r--  arch/ia64/xen/hypervisor.c       |   2
29 files changed, 277 insertions(+), 325 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1a2b7749b047..5a768ad8e893 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -376,7 +376,6 @@ config NR_CPUS
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP
- select HOTPLUG
default n
---help---
Say Y here to experiment with turning CPUs off and on. CPUs
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index bcda5b2d121a..d43daf192b21 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2042,7 +2042,8 @@ sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
#endif
static int __init
-acpi_sba_ioc_add(struct acpi_device *device)
+acpi_sba_ioc_add(struct acpi_device *device,
+ const struct acpi_device_id *not_used)
{
struct ioc *ioc;
acpi_status status;
@@ -2090,14 +2091,18 @@ static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
{"HWP0004", 0},
{"", 0},
};
-static struct acpi_driver acpi_sba_ioc_driver = {
- .name = "IOC IOMMU Driver",
- .ids = hp_ioc_iommu_device_ids,
- .ops = {
- .add = acpi_sba_ioc_add,
- },
+static struct acpi_scan_handler acpi_sba_ioc_handler = {
+ .ids = hp_ioc_iommu_device_ids,
+ .attach = acpi_sba_ioc_add,
};
+static int __init acpi_sba_ioc_init_acpi(void)
+{
+ return acpi_scan_add_handler(&acpi_sba_ioc_handler);
+}
+/* This has to run before acpi_scan_init(). */
+arch_initcall(acpi_sba_ioc_init_acpi);
+
extern struct dma_map_ops swiotlb_dma_ops;
static int __init
@@ -2122,7 +2127,10 @@ sba_init(void)
}
#endif
- acpi_bus_register_driver(&acpi_sba_ioc_driver);
+ /*
+ * ioc_list should be populated by the acpi_sba_ioc_handler's .attach()
+ * routine, but that only happens if acpi_scan_init() has already run.
+ */
if (!ioc_list) {
#ifdef CONFIG_IA64_GENERIC
/*
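
The hunk above drops the old struct acpi_driver/.add registration in favour of an acpi_scan_handler whose .attach() callback is invoked from acpi_scan_init() while the ACPI namespace is enumerated, which is why the handler has to be registered from an arch_initcall. A minimal sketch of that registration pattern, with illustrative names (example_*) rather than the ones in the patch:

#include <linux/acpi.h>
#include <linux/init.h>

static const struct acpi_device_id example_ids[] = {
        {"HWP0001", 0},
        {"", 0},
};

/*
 * Called by the ACPI scan core for every device matching example_ids;
 * a positive return value tells the core the handler claimed the device.
 */
static int example_attach(struct acpi_device *adev,
                          const struct acpi_device_id *id)
{
        /* set up the hardware described by adev here */
        return 1;
}

static struct acpi_scan_handler example_handler = {
        .ids    = example_ids,
        .attach = example_attach,
};

static int __init example_handler_init(void)
{
        /* must be registered before acpi_scan_init() runs */
        return acpi_scan_add_handler(&example_handler);
}
arch_initcall(example_handler_init);
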
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 331de723c676..3a428f19a001 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -88,8 +88,8 @@ simscsi_setup (char *s)
if (strlen(s) > MAX_ROOT_LEN) {
printk(KERN_ERR "simscsi_setup: prefix too long---using default %s\n",
simscsi_root);
- }
- simscsi_root = s;
+ } else
+ simscsi_root = s;
return 1;
}
diff --git a/arch/ia64/include/asm/mutex.h b/arch/ia64/include/asm/mutex.h
index bed73a643a56..f41e66d65e31 100644
--- a/arch/ia64/include/asm/mutex.h
+++ b/arch/ia64/include/asm/mutex.h
@@ -29,17 +29,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
*
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
*/
static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
{
if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
- return fail_fn(count);
+ return -1;
return 0;
}
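
Dropping the fail_fn argument works because the generic mutex core now branches on the helper's return value and calls the slow path itself. Roughly how the caller side looks after this interface change (a sketch of kernel/mutex.c; mutex_set_owner() and __mutex_lock_interruptible_slowpath() are that file's internal helpers, not part of the arch interface):

#include <linux/mutex.h>

int __sched mutex_lock_interruptible(struct mutex *lock)
{
        int ret;

        might_sleep();
        /* 0 if the 1 -> 0 fastpath transition succeeded, -1 otherwise */
        ret = __mutex_fastpath_lock_retval(&lock->count);
        if (likely(!ret)) {
                mutex_set_owner(lock);
                return 0;
        }
        /* contended: fall back to the sleeping slow path */
        return __mutex_lock_interruptible_slowpath(lock);
}
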
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index 5e04b591e423..80775f55f03f 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -89,9 +89,9 @@ extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
#define pci_legacy_read platform_pci_legacy_read
#define pci_legacy_write platform_pci_legacy_write
-struct pci_window {
- struct resource resource;
- u64 offset;
+struct iospace_resource {
+ struct list_head list;
+ struct resource res;
};
struct pci_controller {
@@ -100,12 +100,10 @@ struct pci_controller {
int segment;
int node; /* nearest node with memory or -1 for global allocation */
- unsigned int windows;
- struct pci_window *window;
-
void *platform_data;
};
+
#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
#define pci_domain_nr(busdev) (PCI_CONTROLLER(busdev)->segment)
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 815810cbbedc..7935115398a6 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -493,9 +493,6 @@ extern void paging_init (void);
#define pte_to_pgoff(pte) ((pte_val(pte) << 1) >> 3)
#define pgoff_to_pte(off) ((pte_t) { ((off) << 2) | _PAGE_FILE })
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
-
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 335eb07480fe..5eb71d22c3d5 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -807,7 +807,7 @@ int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
* ACPI based hotplug CPU support
*/
#ifdef CONFIG_ACPI_HOTPLUG_CPU
-static __cpuinit
+static
int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
@@ -882,7 +882,7 @@ __init void prefill_possible_map(void)
set_cpu_possible(i, true);
}
-static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index f034563aeae5..51bce594eb83 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -1116,11 +1116,6 @@ efi_memmap_init(u64 *s, u64 *e)
if (!is_memory_available(md))
continue;
-#ifdef CONFIG_CRASH_DUMP
- /* saved_max_pfn should ignore max_addr= command line arg */
- if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT))
- saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT);
-#endif
/*
* Round ends inward to granule boundaries
* Give trimmings to uncached allocator
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 2d67317a1ec2..f59c0b844e88 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -225,17 +225,17 @@ static struct attribute_group err_inject_attr_group = {
.name = "err_inject"
};
/* Add/Remove err_inject interface for CPU device */
-static int __cpuinit err_inject_add_dev(struct device * sys_dev)
+static int err_inject_add_dev(struct device *sys_dev)
{
return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group);
}
-static int __cpuinit err_inject_remove_dev(struct device * sys_dev)
+static int err_inject_remove_dev(struct device *sys_dev)
{
sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
return 0;
}
-static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
+static int err_inject_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
+static struct notifier_block err_inject_cpu_notifier =
{
.notifier_call = err_inject_cpu_callback,
};
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 9be4e497f3d3..991ca336b8a2 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1035,7 +1035,7 @@ END(ia64_delay_loop)
* Return a CPU-local timestamp in nano-seconds. This timestamp is
* NOT synchronized across CPUs its return value must never be
* compared against the values returned on another CPU. The usage in
- * kernel/sched.c ensures that.
+ * kernel/sched/core.c ensures that.
*
* The return-value of sched_clock() is NOT supposed to wrap-around.
* If it did, it would cause some scheduling hiccups (at the worst).
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index d7396dbb07bb..b8edfa75a83f 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -631,7 +631,7 @@ ia64_mca_register_cpev (int cpev)
* Outputs
* None
*/
-void __cpuinit
+void
ia64_mca_cmc_vector_setup (void)
{
cmcv_reg_t cmcv;
@@ -1814,7 +1814,7 @@ static struct irqaction mca_cpep_irqaction = {
* format most of the fields.
*/
-static void __cpuinit
+static void
format_mca_init_stack(void *mca_data, unsigned long offset,
const char *type, int cpu)
{
@@ -1844,7 +1844,7 @@ static void * __init_refok mca_bootmem(void)
}
/* Do per-CPU MCA-related initialization. */
-void __cpuinit
+void
ia64_mca_cpu_init(void *cpu_data)
{
void *pal_vaddr;
@@ -1896,7 +1896,7 @@ ia64_mca_cpu_init(void *cpu_data)
PAGE_KERNEL));
}
-static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy)
+static void ia64_mca_cmc_vector_adjust(void *dummy)
{
unsigned long flags;
@@ -1906,7 +1906,7 @@ static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy)
local_irq_restore(flags);
}
-static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
+static int mca_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
+static struct notifier_block mca_cpu_notifier = {
.notifier_call = mca_cpu_callback
};
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
index c93420c97409..d288cde93606 100644
--- a/arch/ia64/kernel/numa.c
+++ b/arch/ia64/kernel/numa.c
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(cpu_to_node_map);
cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
EXPORT_SYMBOL(node_to_cpu_mask);
-void __cpuinit map_cpu_to_node(int cpu, int nid)
+void map_cpu_to_node(int cpu, int nid)
{
int oldnid;
if (nid < 0) { /* just initialize by zero */
@@ -51,7 +51,7 @@ void __cpuinit map_cpu_to_node(int cpu, int nid)
return;
}
-void __cpuinit unmap_cpu_from_node(int cpu, int nid)
+void unmap_cpu_from_node(int cpu, int nid)
{
WARN_ON(!cpu_isset(cpu, node_to_cpu_mask[nid]));
WARN_ON(cpu_to_node_map[cpu] != nid);
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 2b3c2d79256f..ab333284f4b2 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -932,7 +932,7 @@ static const struct file_operations proc_palinfo_fops = {
.release = single_release,
};
-static void __cpuinit
+static void
create_palinfo_proc_entries(unsigned int cpu)
{
pal_func_cpu_u_t f;
@@ -962,7 +962,7 @@ remove_palinfo_proc_entries(unsigned int hcpu)
remove_proc_subtree(cpustr, palinfo_dir);
}
-static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
+static int palinfo_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int hotcpu = (unsigned long)hcpu;
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 1ddcfe5ef353..992c1098c522 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -33,15 +33,6 @@ int force_iommu __read_mostly;
int iommu_pass_through;
-/* Dummy device used for NULL arguments (normally ISA). Better would
- be probably a smaller DMA mask, but this is bug-to-bug compatible
- to i386. */
-struct device fallback_dev = {
- .init_name = "fallback device",
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .dma_mask = &fallback_dev.coherent_dma_mask,
-};
-
extern struct dma_map_ops intel_dma_ops;
static int __init pci_iommu_init(void)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 9ea25fce06d5..5a9ff1c3c3e9 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -5647,24 +5647,8 @@ pfm_proc_show_header(struct seq_file *m)
list_for_each(pos, &pfm_buffer_fmt_list) {
entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
- seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
- entry->fmt_uuid[0],
- entry->fmt_uuid[1],
- entry->fmt_uuid[2],
- entry->fmt_uuid[3],
- entry->fmt_uuid[4],
- entry->fmt_uuid[5],
- entry->fmt_uuid[6],
- entry->fmt_uuid[7],
- entry->fmt_uuid[8],
- entry->fmt_uuid[9],
- entry->fmt_uuid[10],
- entry->fmt_uuid[11],
- entry->fmt_uuid[12],
- entry->fmt_uuid[13],
- entry->fmt_uuid[14],
- entry->fmt_uuid[15],
- entry->fmt_name);
+ seq_printf(m, "format : %16phD %s\n",
+ entry->fmt_uuid, entry->fmt_name);
}
spin_unlock(&pfm_buffer_fmt_lock);
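
The sixteen seq_printf() arguments collapse into one because the kernel's vsprintf extension %*ph dumps a small byte buffer directly: the field width (16 here) gives the number of bytes and the D suffix selects '-' as the separator. A tiny illustration with a made-up buffer:

#include <linux/printk.h>
#include <linux/types.h>

static const u8 sample_uuid[16] = {
        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
        0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
};

static void show_sample_uuid(void)
{
        /* prints: format : 00-01-02-...-0f sample */
        pr_info("format : %16phD %s\n", sample_uuid, "sample");
}
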
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 4bc580af67b3..960a396f5929 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -568,7 +568,7 @@ static const struct file_operations salinfo_data_fops = {
.llseek = default_llseek,
};
-static int __cpuinit
+static int
salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
{
unsigned int i, cpu = (unsigned long)hcpu;
@@ -609,7 +609,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
return NOTIFY_OK;
}
-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
+static struct notifier_block salinfo_cpu_notifier =
{
.notifier_call = salinfo_cpu_callback,
.priority = 0,
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 13bfdd22afc8..4fc2e9569bb2 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -748,7 +748,7 @@ const struct seq_operations cpuinfo_op = {
#define MAX_BRANDS 8
static char brandname[MAX_BRANDS][128];
-static char * __cpuinit
+static char *
get_model_name(__u8 family, __u8 model)
{
static int overflow;
@@ -778,7 +778,7 @@ get_model_name(__u8 family, __u8 model)
return "Unknown";
}
-static void __cpuinit
+static void
identify_cpu (struct cpuinfo_ia64 *c)
{
union {
@@ -850,7 +850,7 @@ identify_cpu (struct cpuinfo_ia64 *c)
* 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
* 3. the minimum of the cache stride sizes for "clflush_cache_range()".
*/
-static void __cpuinit
+static void
get_cache_info(void)
{
unsigned long line_size, max = 1;
@@ -915,10 +915,10 @@ get_cache_info(void)
* cpu_init() initializes state that is per-CPU. This function acts
* as a 'CPU state barrier', nothing should get across.
*/
-void __cpuinit
+void
cpu_init (void)
{
- extern void __cpuinit ia64_mmu_init (void *);
+ extern void ia64_mmu_init(void *);
static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
unsigned long num_phys_stacked;
pal_vm_info_2_u_t vmi;
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 8d87168d218d..547a48d78bd7 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -351,7 +351,7 @@ static inline void smp_setup_percpu_timer(void)
{
}
-static void __cpuinit
+static void
smp_callin (void)
{
int cpuid, phys_id, itc_master;
@@ -442,7 +442,7 @@ smp_callin (void)
/*
* Activate a secondary processor. head.S calls this.
*/
-int __cpuinit
+int
start_secondary (void *unused)
{
/* Early console may use I/O ports */
@@ -459,7 +459,7 @@ start_secondary (void *unused)
return 0;
}
-static int __cpuinit
+static int
do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
{
int timeout;
@@ -728,7 +728,7 @@ static inline void set_cpu_sibling_map(int cpu)
}
}
-int __cpuinit
+int
__cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int ret;
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index dc00b2c1b42a..ca69a5a96dcc 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -135,11 +135,11 @@ struct cpu_cache_info {
struct kobject kobj;
};
-static struct cpu_cache_info all_cpu_cache_info[NR_CPUS] __cpuinitdata;
+static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
#define LEAF_KOBJECT_PTR(x,y) (&all_cpu_cache_info[x].cache_leaves[y])
#ifdef CONFIG_SMP
-static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu,
+static void cache_shared_cpu_map_setup(unsigned int cpu,
struct cache_info * this_leaf)
{
pal_cache_shared_info_t csi;
@@ -174,7 +174,7 @@ static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu,
&csi) == PAL_STATUS_SUCCESS);
}
#else
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu,
+static void cache_shared_cpu_map_setup(unsigned int cpu,
struct cache_info * this_leaf)
{
cpu_set(cpu, this_leaf->shared_cpu_map);
@@ -298,7 +298,7 @@ static struct kobj_type cache_ktype_percpu_entry = {
.sysfs_ops = &cache_sysfs_ops,
};
-static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
+static void cpu_cache_sysfs_exit(unsigned int cpu)
{
kfree(all_cpu_cache_info[cpu].cache_leaves);
all_cpu_cache_info[cpu].cache_leaves = NULL;
@@ -307,7 +307,7 @@ static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
return;
}
-static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
+static int cpu_cache_sysfs_init(unsigned int cpu)
{
unsigned long i, levels, unique_caches;
pal_cache_config_info_t cci;
@@ -351,7 +351,7 @@ static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
}
/* Add cache interface for CPU device */
-static int __cpuinit cache_add_dev(struct device * sys_dev)
+static int cache_add_dev(struct device *sys_dev)
{
unsigned int cpu = sys_dev->id;
unsigned long i, j;
@@ -401,7 +401,7 @@ static int __cpuinit cache_add_dev(struct device * sys_dev)
}
/* Remove cache interface for CPU device */
-static int __cpuinit cache_remove_dev(struct device * sys_dev)
+static int cache_remove_dev(struct device *sys_dev)
{
unsigned int cpu = sys_dev->id;
unsigned long i;
@@ -425,7 +425,7 @@ static int __cpuinit cache_remove_dev(struct device * sys_dev)
* When a cpu is hot-plugged, do a check and initiate
* cache kobject if necessary
*/
-static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
+static int cache_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata cache_cpu_notifier =
+static struct notifier_block cache_cpu_notifier =
{
.notifier_call = cache_cpu_callback
};
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index f7f9f9c6caf0..d3636e67a98e 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -630,7 +630,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
iip, ifa, isr);
force_sig(SIGSEGV, current);
- break;
+ return;
case 46:
printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index 1a4053789d01..18e45ec49bbf 100644
--- a/arch/ia64/kvm/Makefile
+++ b/arch/ia64/kvm/Makefile
@@ -47,12 +47,13 @@ FORCE : $(obj)/$(offsets-file)
ccflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
asflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
+KVM := ../../../virt/kvm
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
- coalesced_mmio.o irq_comm.o)
+common-objs = $(KVM)/kvm_main.o $(KVM)/ioapic.o \
+ $(KVM)/coalesced_mmio.o $(KVM)/irq_comm.o
ifeq ($(CONFIG_KVM_DEVICE_ASSIGNMENT),y)
-common-objs += $(addprefix ../../../virt/kvm/, assigned-dev.o iommu.o)
+common-objs += $(KVM)/assigned-dev.o $(KVM)/iommu.o
endif
kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 67c59ebec899..da5237d636d6 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -156,8 +156,7 @@ static void *cpu_data;
*
* Allocate and setup per-cpu data areas.
*/
-void * __cpuinit
-per_cpu_init (void)
+void *per_cpu_init(void)
{
static bool first_time = true;
void *cpu0_data = __cpu0_per_cpu;
@@ -295,14 +294,6 @@ find_memory (void)
alloc_per_cpu_data();
}
-static int count_pages(u64 start, u64 end, void *arg)
-{
- unsigned long *count = arg;
-
- *count += (end - start) >> PAGE_SHIFT;
- return 0;
-}
-
/*
* Set up the page tables.
*/
@@ -313,9 +304,6 @@ paging_init (void)
unsigned long max_dma;
unsigned long max_zone_pfns[MAX_NR_ZONES];
- num_physpages = 0;
- efi_memmap_walk(count_pages, &num_physpages);
-
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index ae4db4bd6d97..2de08f4d9930 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -37,7 +37,6 @@ struct early_node_data {
struct ia64_node_data *node_data;
unsigned long pernode_addr;
unsigned long pernode_size;
- unsigned long num_physpages;
#ifdef CONFIG_ZONE_DMA
unsigned long num_dma_physpages;
#endif
@@ -593,7 +592,7 @@ void __init find_memory(void)
* find_pernode_space() does most of this already, we just need to set
* local_per_cpu_offset
*/
-void __cpuinit *per_cpu_init(void)
+void *per_cpu_init(void)
{
int cpu;
static int first_time = 1;
@@ -732,7 +731,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
{
unsigned long end = start + len;
- mem_data[node].num_physpages += len >> PAGE_SHIFT;
#ifdef CONFIG_ZONE_DMA
if (start <= __pa(MAX_DMA_ADDRESS))
mem_data[node].num_dma_physpages +=
@@ -778,7 +776,6 @@ void __init paging_init(void)
#endif
for_each_online_node(node) {
- num_physpages += mem_data[node].num_physpages;
pfn_offset = mem_data[node].min_pfn;
#ifdef CONFIG_VIRTUAL_MEM_MAP
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index d1fe4b402601..b6f7f43424ec 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -154,9 +154,8 @@ ia64_init_addr_space (void)
void
free_initmem (void)
{
- free_reserved_area((unsigned long)ia64_imva(__init_begin),
- (unsigned long)ia64_imva(__init_end),
- 0, "unused kernel");
+ free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
+ -1, "unused kernel");
}
void __init
@@ -546,19 +545,6 @@ int __init register_active_ranges(u64 start, u64 len, int nid)
return 0;
}
-static int __init
-count_reserved_pages(u64 start, u64 end, void *arg)
-{
- unsigned long num_reserved = 0;
- unsigned long *count = arg;
-
- for (; start < end; start += PAGE_SIZE)
- if (PageReserved(virt_to_page(start)))
- ++num_reserved;
- *count += num_reserved;
- return 0;
-}
-
int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
@@ -597,8 +583,6 @@ __setup("nolwsys", nolwsys_setup);
void __init
mem_init (void)
{
- long reserved_pages, codesize, datasize, initsize;
- pg_data_t *pgdat;
int i;
BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
@@ -616,27 +600,12 @@ mem_init (void)
#ifdef CONFIG_FLATMEM
BUG_ON(!mem_map);
- max_mapnr = max_low_pfn;
#endif
+ set_max_mapnr(max_low_pfn);
high_memory = __va(max_low_pfn * PAGE_SIZE);
-
- for_each_online_pgdat(pgdat)
- if (pgdat->bdata->node_bootmem_map)
- totalram_pages += free_all_bootmem_node(pgdat);
-
- reserved_pages = 0;
- efi_memmap_walk(count_reserved_pages, &reserved_pages);
-
- codesize = (unsigned long) _etext - (unsigned long) _stext;
- datasize = (unsigned long) _edata - (unsigned long) _etext;
- initsize = (unsigned long) __init_end - (unsigned long) __init_begin;
-
- printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
- "%luk data, %luk init)\n", nr_free_pages() << (PAGE_SHIFT - 10),
- num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
- reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
-
+ free_all_bootmem();
+ mem_init_print_info(NULL);
/*
* For fsyscall entrpoints with no light-weight handler, use the ordinary
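
The free_initmem() hunk above follows the reworked generic helper, which now takes virtual start/end pointers plus a poison byte: a negative poison value (the -1 used here, replacing the old 0) skips overwriting the freed pages, and the string only names the region in the boot-time "Freeing ..." message. A minimal sketch of the generic call pattern, assuming the usual __init_begin/__init_end section markers (the ia64 version above additionally maps them through ia64_imva()):

#include <linux/mm.h>

extern char __init_begin[], __init_end[];

void free_initmem(void)
{
        /*
         * unsigned long free_reserved_area(void *start, void *end,
         *                                  int poison, char *s);
         * poison < 0 means: do not poison the pages before freeing them.
         */
        free_reserved_area(__init_begin, __init_end, -1, "unused kernel");
}
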
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 4248492b9321..ea21d4cad540 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -86,7 +86,7 @@ int __meminit __early_pfn_to_nid(unsigned long pfn)
return -1;
}
-void __cpuinit numa_clear_node(int cpu)
+void numa_clear_node(int cpu)
{
unmap_cpu_from_node(cpu, NUMA_NO_NODE);
}
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index de1474ff0bc5..2326790b7d8b 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -134,6 +134,10 @@ struct pci_root_info {
struct acpi_device *bridge;
struct pci_controller *controller;
struct list_head resources;
+ struct resource *res;
+ resource_size_t *res_offset;
+ unsigned int res_num;
+ struct list_head io_resources;
char *name;
};
@@ -153,7 +157,7 @@ new_space (u64 phys_base, int sparse)
return i;
if (num_io_spaces == MAX_IO_SPACES) {
- printk(KERN_ERR "PCI: Too many IO port spaces "
+ pr_err("PCI: Too many IO port spaces "
"(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES);
return ~0;
}
@@ -168,25 +172,22 @@ new_space (u64 phys_base, int sparse)
static u64 add_io_space(struct pci_root_info *info,
struct acpi_resource_address64 *addr)
{
+ struct iospace_resource *iospace;
struct resource *resource;
char *name;
unsigned long base, min, max, base_port;
unsigned int sparse = 0, space_nr, len;
- resource = kzalloc(sizeof(*resource), GFP_KERNEL);
- if (!resource) {
- printk(KERN_ERR "PCI: No memory for %s I/O port space\n",
- info->name);
+ len = strlen(info->name) + 32;
+ iospace = kzalloc(sizeof(*iospace) + len, GFP_KERNEL);
+ if (!iospace) {
+ dev_err(&info->bridge->dev,
+ "PCI: No memory for %s I/O port space\n",
+ info->name);
goto out;
}
- len = strlen(info->name) + 32;
- name = kzalloc(len, GFP_KERNEL);
- if (!name) {
- printk(KERN_ERR "PCI: No memory for %s I/O port space name\n",
- info->name);
- goto free_resource;
- }
+ name = (char *)(iospace + 1);
min = addr->minimum;
max = min + addr->address_length - 1;
@@ -195,7 +196,7 @@ static u64 add_io_space(struct pci_root_info *info,
space_nr = new_space(addr->translation_offset, sparse);
if (space_nr == ~0)
- goto free_name;
+ goto free_resource;
base = __pa(io_space[space_nr].mmio_base);
base_port = IO_SPACE_BASE(space_nr);
@@ -210,18 +211,23 @@ static u64 add_io_space(struct pci_root_info *info,
if (space_nr == 0)
sparse = 1;
+ resource = &iospace->res;
resource->name = name;
resource->flags = IORESOURCE_MEM;
resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
resource->end = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
- insert_resource(&iomem_resource, resource);
+ if (insert_resource(&iomem_resource, resource)) {
+ dev_err(&info->bridge->dev,
+ "can't allocate host bridge io space resource %pR\n",
+ resource);
+ goto free_resource;
+ }
+ list_add_tail(&iospace->list, &info->io_resources);
return base_port;
-free_name:
- kfree(name);
free_resource:
- kfree(resource);
+ kfree(iospace);
out:
return ~0;
}
@@ -265,7 +271,7 @@ static acpi_status count_window(struct acpi_resource *resource, void *data)
static acpi_status add_window(struct acpi_resource *res, void *data)
{
struct pci_root_info *info = data;
- struct pci_window *window;
+ struct resource *resource;
struct acpi_resource_address64 addr;
acpi_status status;
unsigned long flags, offset = 0;
@@ -289,55 +295,146 @@ static acpi_status add_window(struct acpi_resource *res, void *data)
} else
return AE_OK;
- window = &info->controller->window[info->controller->windows++];
- window->resource.name = info->name;
- window->resource.flags = flags;
- window->resource.start = addr.minimum + offset;
- window->resource.end = window->resource.start + addr.address_length - 1;
- window->offset = offset;
+ resource = &info->res[info->res_num];
+ resource->name = info->name;
+ resource->flags = flags;
+ resource->start = addr.minimum + offset;
+ resource->end = resource->start + addr.address_length - 1;
+ info->res_offset[info->res_num] = offset;
- if (insert_resource(root, &window->resource)) {
+ if (insert_resource(root, resource)) {
dev_err(&info->bridge->dev,
"can't allocate host bridge window %pR\n",
- &window->resource);
+ resource);
} else {
if (offset)
dev_info(&info->bridge->dev, "host bridge window %pR "
"(PCI address [%#llx-%#llx])\n",
- &window->resource,
- window->resource.start - offset,
- window->resource.end - offset);
+ resource,
+ resource->start - offset,
+ resource->end - offset);
else
dev_info(&info->bridge->dev,
- "host bridge window %pR\n",
- &window->resource);
+ "host bridge window %pR\n", resource);
}
-
/* HP's firmware has a hack to work around a Windows bug.
* Ignore these tiny memory ranges */
- if (!((window->resource.flags & IORESOURCE_MEM) &&
- (window->resource.end - window->resource.start < 16)))
- pci_add_resource_offset(&info->resources, &window->resource,
- window->offset);
+ if (!((resource->flags & IORESOURCE_MEM) &&
+ (resource->end - resource->start < 16)))
+ pci_add_resource_offset(&info->resources, resource,
+ info->res_offset[info->res_num]);
+ info->res_num++;
return AE_OK;
}
+static void free_pci_root_info_res(struct pci_root_info *info)
+{
+ struct iospace_resource *iospace, *tmp;
+
+ list_for_each_entry_safe(iospace, tmp, &info->io_resources, list)
+ kfree(iospace);
+
+ kfree(info->name);
+ kfree(info->res);
+ info->res = NULL;
+ kfree(info->res_offset);
+ info->res_offset = NULL;
+ info->res_num = 0;
+ kfree(info->controller);
+ info->controller = NULL;
+}
+
+static void __release_pci_root_info(struct pci_root_info *info)
+{
+ int i;
+ struct resource *res;
+ struct iospace_resource *iospace;
+
+ list_for_each_entry(iospace, &info->io_resources, list)
+ release_resource(&iospace->res);
+
+ for (i = 0; i < info->res_num; i++) {
+ res = &info->res[i];
+
+ if (!res->parent)
+ continue;
+
+ if (!(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
+ continue;
+
+ release_resource(res);
+ }
+
+ free_pci_root_info_res(info);
+ kfree(info);
+}
+
+static void release_pci_root_info(struct pci_host_bridge *bridge)
+{
+ struct pci_root_info *info = bridge->release_data;
+
+ __release_pci_root_info(info);
+}
+
+static int
+probe_pci_root_info(struct pci_root_info *info, struct acpi_device *device,
+ int busnum, int domain)
+{
+ char *name;
+
+ name = kmalloc(16, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ sprintf(name, "PCI Bus %04x:%02x", domain, busnum);
+ info->bridge = device;
+ info->name = name;
+
+ acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
+ &info->res_num);
+ if (info->res_num) {
+ info->res =
+ kzalloc_node(sizeof(*info->res) * info->res_num,
+ GFP_KERNEL, info->controller->node);
+ if (!info->res) {
+ kfree(name);
+ return -ENOMEM;
+ }
+
+ info->res_offset =
+ kzalloc_node(sizeof(*info->res_offset) * info->res_num,
+ GFP_KERNEL, info->controller->node);
+ if (!info->res_offset) {
+ kfree(name);
+ kfree(info->res);
+ info->res = NULL;
+ return -ENOMEM;
+ }
+
+ info->res_num = 0;
+ acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+ add_window, info);
+ } else
+ kfree(name);
+
+ return 0;
+}
+
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
struct acpi_device *device = root->device;
int domain = root->segment;
int bus = root->secondary.start;
struct pci_controller *controller;
- unsigned int windows = 0;
- struct pci_root_info info;
+ struct pci_root_info *info = NULL;
+ int busnum = root->secondary.start;
struct pci_bus *pbus;
- char *name;
- int pxm;
+ int pxm, ret;
controller = alloc_pci_controller(domain);
if (!controller)
- goto out1;
+ return NULL;
controller->acpi_handle = device->handle;
@@ -347,29 +444,27 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
controller->node = pxm_to_node(pxm);
#endif
- INIT_LIST_HEAD(&info.resources);
- /* insert busn resource at first */
- pci_add_resource(&info.resources, &root->secondary);
- acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
- &windows);
- if (windows) {
- controller->window =
- kzalloc_node(sizeof(*controller->window) * windows,
- GFP_KERNEL, controller->node);
- if (!controller->window)
- goto out2;
-
- name = kmalloc(16, GFP_KERNEL);
- if (!name)
- goto out3;
-
- sprintf(name, "PCI Bus %04x:%02x", domain, bus);
- info.bridge = device;
- info.controller = controller;
- info.name = name;
- acpi_walk_resources(device->handle, METHOD_NAME__CRS,
- add_window, &info);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&device->dev,
+ "pci_bus %04x:%02x: ignored (out of memory)\n",
+ domain, busnum);
+ kfree(controller);
+ return NULL;
}
+
+ info->controller = controller;
+ INIT_LIST_HEAD(&info->io_resources);
+ INIT_LIST_HEAD(&info->resources);
+
+ ret = probe_pci_root_info(info, device, busnum, domain);
+ if (ret) {
+ kfree(info->controller);
+ kfree(info);
+ return NULL;
+ }
+ /* insert busn resource at first */
+ pci_add_resource(&info->resources, &root->secondary);
/*
* See arch/x86/pci/acpi.c.
* The desired pci bus might already be scanned in a quirk. We
@@ -377,21 +472,17 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
* such quirk. So we just ignore the case now.
*/
pbus = pci_create_root_bus(NULL, bus, &pci_root_ops, controller,
- &info.resources);
+ &info->resources);
if (!pbus) {
- pci_free_resource_list(&info.resources);
+ pci_free_resource_list(&info->resources);
+ __release_pci_root_info(info);
return NULL;
}
+ pci_set_host_bridge_release(to_pci_host_bridge(pbus->bridge),
+ release_pci_root_info, info);
pci_scan_child_bus(pbus);
return pbus;
-
-out3:
- kfree(controller->window);
-out2:
- kfree(controller);
-out1:
- return NULL;
}
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
@@ -691,7 +782,7 @@ static void __init set_pci_dfl_cacheline_size(void)
status = ia64_pal_cache_summary(&levels, &unique_caches);
if (status != 0) {
- printk(KERN_ERR "%s: ia64_pal_cache_summary() failed "
+ pr_err("%s: ia64_pal_cache_summary() failed "
"(status=%ld)\n", __func__, status);
return;
}
@@ -699,7 +790,7 @@ static void __init set_pci_dfl_cacheline_size(void)
status = ia64_pal_cache_config_info(levels - 1,
/* cache_type (data_or_unified)= */ 2, &cci);
if (status != 0) {
- printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed "
+ pr_err("%s: ia64_pal_cache_config_info() failed "
"(status=%ld)\n", __func__, status);
return;
}
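
Because the window bookkeeping is now allocated per root bridge instead of living in a static array, its lifetime has to follow the host bridge device: pci_set_host_bridge_release() registers a callback that the PCI core runs when the host bridge is released, and release_pci_root_info() above frees the windows and the info structure from there. A condensed sketch of that hook-up, with placeholder names (my_*) standing in for the structures in the patch:

#include <linux/pci.h>
#include <linux/slab.h>

struct my_root_info {
        struct resource *res;   /* windows inserted while parsing _CRS */
};

static void my_root_release(struct pci_host_bridge *bridge)
{
        struct my_root_info *info = bridge->release_data;

        /* undo insert_resource() for each window, then drop the bookkeeping */
        kfree(info->res);
        kfree(info);
}

static void my_root_attach_release(struct pci_bus *pbus,
                                   struct my_root_info *info)
{
        /* tie info's lifetime to the host bridge created for pbus */
        pci_set_host_bridge_release(to_pci_host_bridge(pbus->bridge),
                                    my_root_release, info);
}
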
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 238e2c511d94..0b5ce82d203d 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -118,76 +118,26 @@ static void __init sn_fixup_ionodes(void)
}
/*
- * sn_pci_legacy_window_fixup - Create PCI controller windows for
+ * sn_pci_legacy_window_fixup - Setup PCI resources for
* legacy IO and MEM space. This needs to
* be done here, as the PROM does not have
* ACPI support defining the root buses
* and their resources (_CRS),
*/
static void
-sn_legacy_pci_window_fixup(struct pci_controller *controller,
- u64 legacy_io, u64 legacy_mem)
+sn_legacy_pci_window_fixup(struct resource *res,
+ u64 legacy_io, u64 legacy_mem)
{
- controller->window = kcalloc(2, sizeof(struct pci_window),
- GFP_KERNEL);
- BUG_ON(controller->window == NULL);
- controller->window[0].offset = legacy_io;
- controller->window[0].resource.name = "legacy_io";
- controller->window[0].resource.flags = IORESOURCE_IO;
- controller->window[0].resource.start = legacy_io;
- controller->window[0].resource.end =
- controller->window[0].resource.start + 0xffff;
- controller->window[0].resource.parent = &ioport_resource;
- controller->window[1].offset = legacy_mem;
- controller->window[1].resource.name = "legacy_mem";
- controller->window[1].resource.flags = IORESOURCE_MEM;
- controller->window[1].resource.start = legacy_mem;
- controller->window[1].resource.end =
- controller->window[1].resource.start + (1024 * 1024) - 1;
- controller->window[1].resource.parent = &iomem_resource;
- controller->windows = 2;
-}
-
-/*
- * sn_pci_window_fixup() - Create a pci_window for each device resource.
- * It will setup pci_windows for use by
- * pcibios_bus_to_resource(), pcibios_resource_to_bus(),
- * etc.
- */
-static void
-sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
- s64 * pci_addrs)
-{
- struct pci_controller *controller = PCI_CONTROLLER(dev->bus);
- unsigned int i;
- unsigned int idx;
- unsigned int new_count;
- struct pci_window *new_window;
-
- if (count == 0)
- return;
- idx = controller->windows;
- new_count = controller->windows + count;
- new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL);
- BUG_ON(new_window == NULL);
- if (controller->window) {
- memcpy(new_window, controller->window,
- sizeof(struct pci_window) * controller->windows);
- kfree(controller->window);
- }
-
- /* Setup a pci_window for each device resource. */
- for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
- if (pci_addrs[i] == -1)
- continue;
-
- new_window[idx].offset = dev->resource[i].start - pci_addrs[i];
- new_window[idx].resource = dev->resource[i];
- idx++;
- }
-
- controller->windows = new_count;
- controller->window = new_window;
+ res[0].name = "legacy_io";
+ res[0].flags = IORESOURCE_IO;
+ res[0].start = legacy_io;
+ res[0].end = res[0].start + 0xffff;
+ res[0].parent = &ioport_resource;
+ res[1].name = "legacy_mem";
+ res[1].flags = IORESOURCE_MEM;
+ res[1].start = legacy_mem;
+ res[1].end = res[1].start + (1024 * 1024) - 1;
+ res[1].parent = &iomem_resource;
}
/*
@@ -199,9 +149,7 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
void
sn_io_slot_fixup(struct pci_dev *dev)
{
- unsigned int count = 0;
int idx;
- s64 pci_addrs[PCI_ROM_RESOURCE + 1];
unsigned long addr, end, size, start;
struct pcidev_info *pcidev_info;
struct sn_irq_info *sn_irq_info;
@@ -229,7 +177,6 @@ sn_io_slot_fixup(struct pci_dev *dev)
for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
if (!pcidev_info->pdi_pio_mapped_addr[idx]) {
- pci_addrs[idx] = -1;
continue;
}
@@ -237,11 +184,8 @@ sn_io_slot_fixup(struct pci_dev *dev)
end = dev->resource[idx].end;
size = end - start;
if (size == 0) {
- pci_addrs[idx] = -1;
continue;
}
- pci_addrs[idx] = start;
- count++;
addr = pcidev_info->pdi_pio_mapped_addr[idx];
addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET;
dev->resource[idx].start = addr;
@@ -276,11 +220,6 @@ sn_io_slot_fixup(struct pci_dev *dev)
IORESOURCE_ROM_BIOS_COPY;
}
}
- /* Create a pci_window in the pci_controller struct for
- * each device resource.
- */
- if (count > 0)
- sn_pci_window_fixup(dev, count, pci_addrs);
sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
}
@@ -297,8 +236,8 @@ sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
s64 status = 0;
struct pci_controller *controller;
struct pcibus_bussoft *prom_bussoft_ptr;
+ struct resource *res;
LIST_HEAD(resources);
- int i;
status = sal_get_pcibus_info((u64) segment, (u64) busnum,
(u64) ia64_tpa(&prom_bussoft_ptr));
@@ -310,32 +249,29 @@ sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
BUG_ON(!controller);
controller->segment = segment;
+ res = kcalloc(2, sizeof(struct resource), GFP_KERNEL);
+ BUG_ON(!res);
+
/*
* Temporarily save the prom_bussoft_ptr for use by sn_bus_fixup().
* (platform_data will be overwritten later in sn_common_bus_fixup())
*/
controller->platform_data = prom_bussoft_ptr;
- sn_legacy_pci_window_fixup(controller,
- prom_bussoft_ptr->bs_legacy_io,
- prom_bussoft_ptr->bs_legacy_mem);
- for (i = 0; i < controller->windows; i++)
- pci_add_resource_offset(&resources,
- &controller->window[i].resource,
- controller->window[i].offset);
+ sn_legacy_pci_window_fixup(res,
+ prom_bussoft_ptr->bs_legacy_io,
+ prom_bussoft_ptr->bs_legacy_mem);
+ pci_add_resource_offset(&resources, &res[0],
+ prom_bussoft_ptr->bs_legacy_io);
+ pci_add_resource_offset(&resources, &res[1],
+ prom_bussoft_ptr->bs_legacy_mem);
+
bus = pci_scan_root_bus(NULL, busnum, &pci_root_ops, controller,
&resources);
- if (bus == NULL)
- goto error_return; /* error, or bus already scanned */
-
- bus->sysdata = controller;
-
- return;
-
-error_return:
-
- kfree(controller);
- return;
+ if (bus == NULL) {
+ kfree(res);
+ kfree(controller);
+ }
}
/*
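
With the pci_window array gone, the SN code hands its two legacy windows to the PCI core as a plain resource list: pci_add_resource_offset() records, per window, the offset between the CPU-side resource address and the PCI bus address, and pci_scan_root_bus() consumes the list when it creates the root bus. A hedged sketch of that sequence, with placeholder names and the SN-specific setup omitted:

#include <linux/pci.h>
#include <linux/slab.h>

static struct pci_bus *scan_legacy_root(int busnum, struct pci_ops *ops,
                                        void *sysdata,
                                        u64 io_offset, u64 mem_offset)
{
        struct resource *res;
        struct pci_bus *bus;
        LIST_HEAD(resources);

        res = kcalloc(2, sizeof(*res), GFP_KERNEL);
        if (!res)
                return NULL;

        /* res[0]/res[1] would be filled in as the legacy I/O and MEM windows */

        /* offset = CPU (resource) address minus PCI bus address */
        pci_add_resource_offset(&resources, &res[0], io_offset);
        pci_add_resource_offset(&resources, &res[1], mem_offset);

        bus = pci_scan_root_bus(NULL, busnum, ops, sysdata, &resources);
        if (!bus)
                kfree(res);
        return bus;
}
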
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index f82e7b462b7b..53b01b8e2f19 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -192,7 +192,7 @@ void __init early_sn_setup(void)
}
extern int platform_intr_list[];
-static int __cpuinitdata shub_1_1_found;
+static int shub_1_1_found;
/*
* sn_check_for_wars
@@ -200,7 +200,7 @@ static int __cpuinitdata shub_1_1_found;
* Set flag for enabling shub specific wars
*/
-static inline int __cpuinit is_shub_1_1(int nasid)
+static inline int is_shub_1_1(int nasid)
{
unsigned long id;
int rev;
@@ -212,7 +212,7 @@ static inline int __cpuinit is_shub_1_1(int nasid)
return rev <= 2;
}
-static void __cpuinit sn_check_for_wars(void)
+static void sn_check_for_wars(void)
{
int cnode;
@@ -558,7 +558,7 @@ static void __init sn_init_pdas(char **cmdline_p)
* Also sets up a few fields in the nodepda. Also known as
* platform_cpu_init() by the ia64 machvec code.
*/
-void __cpuinit sn_cpu_init(void)
+void sn_cpu_init(void)
{
int cpuid;
int cpuphyid;
diff --git a/arch/ia64/xen/hypervisor.c b/arch/ia64/xen/hypervisor.c
index 52172eee8591..fab62528a80b 100644
--- a/arch/ia64/xen/hypervisor.c
+++ b/arch/ia64/xen/hypervisor.c
@@ -74,7 +74,7 @@ void __init xen_setup_vcpu_info_placement(void)
xen_vcpu_setup(cpu);
}
-void __cpuinit
+void
xen_cpu_init(void)
{
xen_smp_intr_init();