Diffstat (limited to 'arch/ia64')
-rw-r--r-- | arch/ia64/Kconfig                   |   1
-rw-r--r-- | arch/ia64/Makefile                  |   1
-rw-r--r-- | arch/ia64/kernel/acpi.c             |   9
-rw-r--r-- | arch/ia64/kernel/iosapic.c          |   2
-rw-r--r-- | arch/ia64/kernel/mca.c              |   6
-rw-r--r-- | arch/ia64/kernel/palinfo.c          |   2
-rw-r--r-- | arch/ia64/kernel/perfmon.c          |  16
-rw-r--r-- | arch/ia64/kernel/process.c          |   6
-rw-r--r-- | arch/ia64/kernel/sal.c              |   7
-rw-r--r-- | arch/ia64/kernel/setup.c            |   6
-rw-r--r-- | arch/ia64/kernel/smp.c              | 254
-rw-r--r-- | arch/ia64/kernel/smpboot.c          |   6
-rw-r--r-- | arch/ia64/kernel/time.c             |   1
-rw-r--r-- | arch/ia64/kernel/uncached.c         |   5
-rw-r--r-- | arch/ia64/kvm/kvm-ia64.c            |   4
-rw-r--r-- | arch/ia64/kvm/kvm_fw.c              |   2
-rw-r--r-- | arch/ia64/kvm/mmio.c                |   3
-rw-r--r-- | arch/ia64/sn/kernel/irq.c           |   1
-rw-r--r-- | arch/ia64/sn/kernel/sn2/sn2_smp.c   |   2
-rw-r--r-- | arch/ia64/sn/kernel/sn2/sn_hwperf.c |   2
-rw-r--r-- | arch/ia64/uv/kernel/setup.c         |  12
21 files changed, 74 insertions(+), 274 deletions(-)
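The dominant change in this range is the switch to the kernel's generic SMP helpers: the hand-rolled call_data_struct machinery in arch/ia64/kernel/smp.c is deleted, handle_IPI() dispatches to generic_smp_call_function_interrupt() and generic_smp_call_function_single_interrupt(), and every call site drops the unused "nonatomic" argument from smp_call_function(), smp_call_function_single(), and on_each_cpu(). A minimal sketch of the call-site conversion follows; the callback name is illustrative only, not taken from the patch:

	#include <linux/smp.h>

	/* Runs on each target CPU in interrupt context, so it must be
	 * fast and non-blocking. */
	static void example_func(void *info)
	{
	}

	static void example_callers(void)
	{
		/* before: smp_call_function(example_func, NULL, 0, 1);
		 * the third ("nonatomic") argument was never used, so the
		 * generic helpers drop it and keep only "wait": */
		smp_call_function(example_func, NULL, 1);

		/* before: smp_call_function_single(1, example_func, NULL, 0, 1); */
		smp_call_function_single(1, example_func, NULL, 1);
	}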
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 16be41446b5b..18bcc10903b4 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -303,6 +303,7 @@ config VIRT_CPU_ACCOUNTING
 
 config SMP
 	bool "Symmetric multi-processing support"
+	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, say N.  If you have a system with more
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 3b9c8cadfd34..905d25b13d5a 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -57,6 +57,7 @@ core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
 core-$(CONFIG_IA64_HP_ZX1)	+= arch/ia64/dig/
 core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
 core-$(CONFIG_IA64_SGI_SN2)	+= arch/ia64/sn/
+core-$(CONFIG_IA64_SGI_UV)	+= arch/ia64/uv/
 core-$(CONFIG_KVM)		+= arch/ia64/kvm/
 
 drivers-$(CONFIG_PCI)		+= arch/ia64/pci/
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 853d1f11be00..43687cc60dfb 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -465,7 +465,6 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 		printk(KERN_ERR
 		       "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
 		       len, slit->header.length);
-		memset(numa_slit, 10, sizeof(numa_slit));
 		return;
 	}
 	slit_table = slit;
@@ -574,8 +573,14 @@ void __init acpi_numa_arch_fixup(void)
 	printk(KERN_INFO "Number of memory chunks in system = %d\n",
 	       num_node_memblks);
 
-	if (!slit_table)
+	if (!slit_table) {
+		for (i = 0; i < MAX_NUMNODES; i++)
+			for (j = 0; j < MAX_NUMNODES; j++)
+				node_distance(i, j) = i == j ? LOCAL_DISTANCE :
+							REMOTE_DISTANCE;
 		return;
+	}
+
 	memset(numa_slit, -1, sizeof(numa_slit));
 	for (i = 0; i < slit_table->locality_count; i++) {
 		if (!pxm_bit_test(i))
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 587196dd84fd..3bc2fa64f87f 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -558,8 +558,6 @@ static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
 	if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
 		rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
 				    NR_PREALLOCATE_RTE_ENTRIES);
-		if (!rte)
-			return NULL;
 		for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
 			list_add(&rte->rte_list, &free_rte_list);
 	}
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 705176b434b3..7dd96c127177 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -707,7 +707,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
 static void
 ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 {
-	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
+	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
 }
 
 /*
@@ -719,7 +719,7 @@ ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 static void
 ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 {
-	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
+	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
 }
 
 /*
@@ -1881,7 +1881,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust,
-					 NULL, 1, 0);
+					 NULL, 0);
 		break;
 	}
 	return NOTIFY_OK;
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 9dc00f7fe10e..e5c57f413ca2 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -921,7 +921,7 @@ int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
 
 	/* will send IPI to other CPU and wait for completion of remote call */
-	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 0, 1))) {
+	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) {
 		printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
 		       "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
 		return 0;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 71d05133f556..19d4493c6193 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1820,7 +1820,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
 	int ret;
 
 	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
-	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
+	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
 	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
 }
 #endif /* CONFIG_SMP */
@@ -1864,11 +1864,6 @@ pfm_flush(struct file *filp, fl_owner_t id)
 	 * invoked after, it will find an empty queue and no
 	 * signal will be sent. In both case, we are safe
 	 */
-	if (filp->f_flags & FASYNC) {
-		DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
-		pfm_do_fasync (-1, filp, ctx, 0);
-	}
-
 	PROTECT_CTX(ctx, flags);
 
 	state = ctx->ctx_state;
@@ -1999,6 +1994,11 @@ pfm_close(struct inode *inode, struct file *filp)
 		return -EBADF;
 	}
 
+	if (filp->f_flags & FASYNC) {
+		DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
+		pfm_do_fasync(-1, filp, ctx, 0);
+	}
+
 	PROTECT_CTX(ctx, flags);
 
 	state = ctx->ctx_state;
@@ -6508,7 +6508,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 	}
 
 	/* save the current system wide pmu states */
-	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
 	if (ret) {
 		DPRINT(("on_each_cpu() failed: %d\n", ret));
 		goto cleanup_reserve;
@@ -6553,7 +6553,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 
 	pfm_alt_intr_handler = NULL;
 
-	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
 	if (ret) {
 		DPRINT(("on_each_cpu() failed: %d\n", ret));
 	}
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index a3a34b4eb038..3ab8373103ec 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -55,6 +55,10 @@ void (*ia64_mark_idle)(int);
 
 unsigned long boot_option_idle_override = 0;
 EXPORT_SYMBOL(boot_option_idle_override);
+unsigned long idle_halt;
+EXPORT_SYMBOL(idle_halt);
+unsigned long idle_nomwait;
+EXPORT_SYMBOL(idle_nomwait);
 
 void
 ia64_do_show_stack (struct unw_frame_info *info, void *arg)
@@ -286,7 +290,7 @@ void cpu_idle_wait(void)
 {
 	smp_mb();
 	/* kick all the CPUs so that they exit out of pm_idle */
-	smp_call_function(do_nothing, NULL, 0, 1);
+	smp_call_function(do_nothing, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index 7e0259709c04..0464173ea568 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -252,11 +252,10 @@ check_sal_cache_flush (void)
 	local_irq_save(flags);
 
 	/*
-	 * Schedule a timer interrupt, wait until it's reported, and see if
-	 * SAL_CACHE_FLUSH drops it.
+	 * Send ourselves a timer interrupt, wait until it's reported, and see
+	 * if SAL_CACHE_FLUSH drops it.
 	 */
-	ia64_set_itv(IA64_TIMER_VECTOR);
-	ia64_set_itm(ia64_get_itc() + 1000);
+	platform_send_ipi(cpu, IA64_TIMER_VECTOR, IA64_IPI_DM_INT, 0);
 
 	while (!ia64_get_irr(IA64_TIMER_VECTOR))
 		cpu_relax();
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 750749551e86..e5c2de9b29a5 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -552,7 +552,8 @@ setup_arch (char **cmdline_p)
 # ifdef CONFIG_ACPI_NUMA
 	acpi_numa_init();
 	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
-		32 : cpus_weight(early_cpu_possible_map)), additional_cpus);
+		32 : cpus_weight(early_cpu_possible_map)),
+		additional_cpus > 0 ? additional_cpus : 0);
 # endif
 #else
 # ifdef CONFIG_SMP
@@ -583,8 +584,6 @@ setup_arch (char **cmdline_p)
 	cpu_init();	/* initialize the bootstrap CPU */
 	mmu_context_init();	/* initialize context_id bitmap */
 
-	check_sal_cache_flush();
-
 #ifdef CONFIG_ACPI
 	acpi_boot_init();
 #endif
@@ -617,6 +616,7 @@
 	ia64_mca_init();
 
 	platform_setup(cmdline_p);
+	check_sal_cache_flush();
 	paging_init();
 }
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 983296f1c813..3676468612b6 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -60,25 +60,9 @@ static struct local_tlb_flush_counts {
 
 static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
 
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise static memory
- * requirements. It also looks cleaner.
- */
-static __cacheline_aligned DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	long wait;
-	atomic_t started;
-	atomic_t finished;
-};
-
-static volatile struct call_data_struct *call_data;
-
 #define IPI_CALL_FUNC		0
 #define IPI_CPU_STOP		1
+#define IPI_CALL_FUNC_SINGLE	2
 #define IPI_KDUMP_CPU_STOP	3
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
@@ -86,43 +70,6 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
 
 extern void cpu_halt (void);
 
-void
-lock_ipi_calllock(void)
-{
-	spin_lock_irq(&call_lock);
-}
-
-void
-unlock_ipi_calllock(void)
-{
-	spin_unlock_irq(&call_lock);
-}
-
-static inline void
-handle_call_data(void)
-{
-	struct call_data_struct *data;
-	void (*func)(void *info);
-	void *info;
-	int wait;
-
-	/* release the 'pointer lock' */
-	data = (struct call_data_struct *)call_data;
-	func = data->func;
-	info = data->info;
-	wait = data->wait;
-
-	mb();
-	atomic_inc(&data->started);
-	/* At this point the structure may be gone unless wait is true. */
-	(*func)(info);
-
-	/* Notify the sending CPU that the task is done. */
-	mb();
-	if (wait)
-		atomic_inc(&data->finished);
-}
-
 static void
 stop_this_cpu(void)
 {
@@ -163,13 +110,15 @@ handle_IPI (int irq, void *dev_id)
 			ops &= ~(1 << which);
 
 			switch (which) {
-			case IPI_CALL_FUNC:
-				handle_call_data();
-				break;
-
 			case IPI_CPU_STOP:
 				stop_this_cpu();
 				break;
+			case IPI_CALL_FUNC:
+				generic_smp_call_function_interrupt();
+				break;
+			case IPI_CALL_FUNC_SINGLE:
+				generic_smp_call_function_single_interrupt();
+				break;
 #ifdef CONFIG_KEXEC
 			case IPI_KDUMP_CPU_STOP:
 				unw_init_running(kdump_cpu_freeze, NULL);
@@ -187,6 +136,8 @@ handle_IPI (int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+
+
 /*
  * Called with preemption disabled.
 */
@@ -334,7 +285,7 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask)
 void
 smp_flush_tlb_all (void)
 {
-	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
 }
 
 void
@@ -357,193 +308,18 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
 	 * rather trivial.
 	 */
-	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 }
 
-/*
- * Run a function on a specific CPU
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <nonatomic>	Currently unused.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-
-int
-smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
-			  int wait)
-{
-	struct call_data_struct data;
-	int cpus = 1;
-	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
-
-	if (cpuid == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	spin_lock_bh(&call_lock);
-
-	call_data = &data;
-	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-	send_IPI_single(cpuid, IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock_bh(&call_lock);
-	put_cpu();
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- *  <mask>	The set of cpus to run on.  Must not include the current cpu.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <wait>	If true, wait (atomically) until function
- *		has completed on other CPUs.
- *
- *  Returns 0 on success, else a negative status code.
- *
- *  If @wait is true, then returns once @func has returned; otherwise
- *  it returns just before the target cpu calls @func.
- *
- *  You must not call this function with disabled interrupts or from a
- *  hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function_mask(cpumask_t mask,
-			   void (*func)(void *), void *info,
-			   int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	struct call_data_struct data;
-	cpumask_t allbutself;
-	int cpus;
-
-	spin_lock(&call_lock);
-	allbutself = cpu_online_map;
-	cpu_clear(smp_processor_id(), allbutself);
-
-	cpus_and(mask, mask, allbutself);
-	cpus = cpus_weight(mask);
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC*/
-
-	/* Send a message to other CPUs */
-	if (cpus_equal(mask, allbutself))
-		send_IPI_allbutself(IPI_CALL_FUNC);
-	else
-		send_IPI_mask(mask, IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock(&call_lock);
-	return 0;
-
+	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL(smp_call_function_mask);
 
-/*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
-
-/*
- *  [SUMMARY]	Run a function on all other CPUs.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <nonatomic>	currently unused.
- *  <wait>	If true, wait (atomically) until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func> or are or have
- * executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int
-smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
-	int cpus;
-
-	spin_lock(&call_lock);
-	cpus = num_online_cpus() - 1;
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-	send_IPI_allbutself(IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock(&call_lock);
-	return 0;
+	send_IPI_mask(mask, IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL(smp_call_function);
 
 /*
  * this function calls the 'stop' function on all other CPUs in the system.
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 933f38811528..03f1a9908afc 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -318,7 +318,7 @@ ia64_sync_itc (unsigned int master)
 
 	go[MASTER] = 1;
 
-	if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
+	if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
 		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
 		return;
 	}
@@ -396,14 +396,14 @@ smp_callin (void)
 
 	fix_b0_for_bsp();
 
-	lock_ipi_calllock();
+	ipi_call_lock_irq();
 	spin_lock(&vector_lock);
 	/* Setup the per cpu irq handling data structures */
 	__setup_vector_irq(cpuid);
 	cpu_set(cpuid, cpu_online_map);
 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
 	spin_unlock(&vector_lock);
-	unlock_ipi_calllock();
+	ipi_call_unlock_irq();
 
 	smp_setup_percpu_timer();
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 046ca89efc05..65c10a42c88f 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -130,6 +130,7 @@ void account_system_vtime(struct task_struct *tsk)
 
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL_GPL(account_system_vtime);
 
 /*
  * Called from the timer interrupt handler to charge accumulated user time
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index e77995a6e3ed..8eff8c1d40a6 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -123,8 +123,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
 	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
 		atomic_set(&uc_pool->status, 0);
-		status = smp_call_function(uncached_ipi_visibility, uc_pool,
-					   0, 1);
+		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
 		if (status || atomic_read(&uc_pool->status))
 			goto failed;
 	} else if (status != PAL_VISIBILITY_OK)
@@ -146,7 +145,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	if (status != PAL_STATUS_SUCCESS)
 		goto failed;
 	atomic_set(&uc_pool->status, 0);
-	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
 	if (status || atomic_read(&uc_pool->status))
 		goto failed;
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 318b81100623..68c978be9a51 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -395,7 +395,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		if (kvm->vcpus[i]->cpu != -1) {
 			call_data.vcpu = kvm->vcpus[i];
 			smp_call_function_single(kvm->vcpus[i]->cpu,
-					vcpu_global_purge, &call_data, 0, 1);
+					vcpu_global_purge, &call_data, 1);
 		} else
 			printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
 
@@ -1693,7 +1693,7 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 		wake_up_interruptible(&vcpu->wq);
 
 	if (vcpu->guest_mode)
-		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
+		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
 }
 
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index 091f936c4485..0c69d9ec92d4 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -130,7 +130,7 @@ static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu)
 		args.cache_type = gr29;
 		args.operation = gr30;
 		smp_call_function(remote_pal_cache_flush,
-				  (void *)&args, 1, 1);
+				  (void *)&args, 1);
 		if (args.status != 0)
 			printk(KERN_ERR"pal_cache_flush error!,"
 			       "status:0x%lx\n", args.status);
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
index 351bf70da463..7f1a858bc69f 100644
--- a/arch/ia64/kvm/mmio.c
+++ b/arch/ia64/kvm/mmio.c
@@ -159,7 +159,8 @@ static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
 
 	if (p->u.ioreq.state == STATE_IORESP_READY) {
 		if (dir == IOREQ_READ)
-			*dest = p->u.ioreq.data;
+			/* it's necessary to ensure zero extending */
+			*dest = p->u.ioreq.data & (~0UL >> (64-(s*8)));
 	} else
 		panic_vm(vcpu);
 out:
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 53351c3cd7b1..96c31b4180c3 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -11,6 +11,7 @@
 #include <linux/irq.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/rculist.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/intr.h>
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 49d3120415eb..e585f9a2afb9 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -512,6 +512,8 @@ static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, si
 	int cpu;
 	char optstr[64];
 
+	if (count == 0 || count > sizeof(optstr))
+		return -EINVAL;
 	if (copy_from_user(optstr, user, count))
 		return -EFAULT;
 	optstr[count - 1] = '\0';
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 8cc0c4753d89..636588e7e068 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -629,7 +629,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
 		if (use_ipi) {
 			/* use an interprocessor interrupt to call SAL */
 			smp_call_function_single(cpu, sn_hwperf_call_sal,
-						 op_info, 1, 1);
+						 op_info, 1);
 		} else {
 			/* migrate the task before calling SAL */
diff --git a/arch/ia64/uv/kernel/setup.c b/arch/ia64/uv/kernel/setup.c
index 9aa743203c3c..cf5f28ae96c4 100644
--- a/arch/ia64/uv/kernel/setup.c
+++ b/arch/ia64/uv/kernel/setup.c
@@ -17,6 +17,9 @@
 DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
 EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
 
+#ifdef CONFIG_IA64_SGI_UV
+int sn_prom_type;
+#endif
 
 struct redir_addr {
 	unsigned long redirect;
@@ -64,6 +67,15 @@ void __init uv_setup(char **cmdline_p)
 		m_n_config.s.m_skt = 37;
 		m_n_config.s.n_skt = 0;
 		mmr_base = 0;
+#if 0
+		/* Need BIOS calls - TDB */
+		if (!ia64_sn_is_fake_prom())
+			sn_prom_type = 1;
+		else
+#endif
+			sn_prom_type = 2;
+		printk(KERN_INFO "Running on medusa with %s PROM\n",
+		       (sn_prom_type == 1) ? "real" : "fake");
 	} else {
 		get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
 		node_id.v = uv_read_local_mmr(UVH_NODE_ID);
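One hunk worth unpacking is the mmio.c fix: *dest is a full 64-bit slot, but the device may have answered an s-byte read, so the result is masked with ~0UL >> (64-(s*8)) to keep only the low s bytes (zero extension). A standalone illustration with made-up values, not taken from the patch:

	#include <stdio.h>

	int main(void)
	{
		unsigned long data = 0xdeadbeefcafef00dUL;	/* raw ioreq data */
		int s = 2;					/* a 2-byte MMIO read */

		/* same mask as the mmio.c hunk: keep the low s*8 bits */
		unsigned long dest = data & (~0UL >> (64 - (s * 8)));

		printf("%#lx\n", dest);				/* prints 0xf00d */
		return 0;
	}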