Diffstat (limited to 'arch/mips/kernel/smp.c')
-rw-r--r--  arch/mips/kernel/smp.c | 83
1 file changed, 14 insertions(+), 69 deletions(-)
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 9c1cce9de35f..71a95f55a649 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -148,7 +148,7 @@ static void stop_this_cpu(void *dummy)
         /*
          * Remove this CPU:
          */
-        cpu_clear(smp_processor_id(), cpu_online_map);
+        set_cpu_online(smp_processor_id(), false);
         for (;;) {
                 if (cpu_wait)
                         (*cpu_wait)();  /* Wait if available. */
@@ -174,7 +174,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
         mp_ops->prepare_cpus(max_cpus);
         set_cpu_sibling_map(0);
 #ifndef CONFIG_HOTPLUG_CPU
-        init_cpu_present(&cpu_possible_map);
+        init_cpu_present(cpu_possible_mask);
 #endif
 }
 
@@ -186,61 +186,9 @@ void __devinit smp_prepare_boot_cpu(void)
         cpu_set(0, cpu_callin_map);
 }
 
-/*
- * Called once for each "cpu_possible(cpu)". Needs to spin up the cpu
- * and keep control until "cpu_online(cpu)" is set. Note: cpu is
- * physical, not logical.
- */
-static struct task_struct *cpu_idle_thread[NR_CPUS];
-
-struct create_idle {
-        struct work_struct work;
-        struct task_struct *idle;
-        struct completion done;
-        int cpu;
-};
-
-static void __cpuinit do_fork_idle(struct work_struct *work)
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
-        struct create_idle *c_idle =
-                container_of(work, struct create_idle, work);
-
-        c_idle->idle = fork_idle(c_idle->cpu);
-        complete(&c_idle->done);
-}
-
-int __cpuinit __cpu_up(unsigned int cpu)
-{
-        struct task_struct *idle;
-
-        /*
-         * Processor goes to start_secondary(), sets online flag
-         * The following code is purely to make sure
-         * Linux can schedule processes on this slave.
-         */
-        if (!cpu_idle_thread[cpu]) {
-                /*
-                 * Schedule work item to avoid forking user task
-                 * Ported from arch/x86/kernel/smpboot.c
-                 */
-                struct create_idle c_idle = {
-                        .cpu    = cpu,
-                        .done   = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
-                };
-
-                INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
-                schedule_work(&c_idle.work);
-                wait_for_completion(&c_idle.done);
-                idle = cpu_idle_thread[cpu] = c_idle.idle;
-
-                if (IS_ERR(idle))
-                        panic(KERN_ERR "Fork failed for CPU %d", cpu);
-        } else {
-                idle = cpu_idle_thread[cpu];
-                init_idle(idle, cpu);
-        }
-
-        mp_ops->boot_secondary(cpu, idle);
+        mp_ops->boot_secondary(cpu, tidle);
 
         /*
          * Trust is futile. We should really have timeouts ...
@@ -248,7 +196,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
         while (!cpu_isset(cpu, cpu_callin_map))
                 udelay(100);
 
-        cpu_set(cpu, cpu_online_map);
+        set_cpu_online(cpu, true);
 
         return 0;
 }
@@ -320,13 +268,12 @@ void flush_tlb_mm(struct mm_struct *mm)
         if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                 smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
         } else {
-                cpumask_t mask = cpu_online_map;
                 unsigned int cpu;
 
-                cpu_clear(smp_processor_id(), mask);
-                for_each_cpu_mask(cpu, mask)
-                        if (cpu_context(cpu, mm))
+                for_each_online_cpu(cpu) {
+                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                 cpu_context(cpu, mm) = 0;
+                }
         }
         local_flush_tlb_mm(mm);
 
@@ -360,13 +307,12 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
 
                 smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
         } else {
-                cpumask_t mask = cpu_online_map;
                 unsigned int cpu;
 
-                cpu_clear(smp_processor_id(), mask);
-                for_each_cpu_mask(cpu, mask)
-                        if (cpu_context(cpu, mm))
+                for_each_online_cpu(cpu) {
+                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                 cpu_context(cpu, mm) = 0;
+                }
         }
         local_flush_tlb_range(vma, start, end);
         preempt_enable();
@@ -407,13 +353,12 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
                 smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
         } else {
-                cpumask_t mask = cpu_online_map;
                 unsigned int cpu;
 
-                cpu_clear(smp_processor_id(), mask);
-                for_each_cpu_mask(cpu, mask)
-                        if (cpu_context(cpu, vma->vm_mm))
+                for_each_online_cpu(cpu) {
+                        if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
                                 cpu_context(cpu, vma->vm_mm) = 0;
+                }
         }
         local_flush_tlb_page(vma, page);
         preempt_enable();
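The flush_tlb_*() hunks above all make the same conversion: the old code copied cpu_online_map into an on-stack cpumask_t, cleared the local CPU in the copy, and walked the copy with for_each_cpu_mask(); the new code walks the online mask directly with for_each_online_cpu() and skips the local CPU inline. Below is a minimal sketch of the two forms, not taken from the patch, assuming a hypothetical per-CPU array ctx[] standing in for cpu_context(cpu, mm) and a caller that already holds preempt_disable() as the patched functions do:

/*
 * Sketch only: ctx[] is a hypothetical stand-in for cpu_context(cpu, mm),
 * not a symbol from this file.
 */
static unsigned long ctx[NR_CPUS];

static void drop_other_contexts_old(void)
{
        cpumask_t mask = cpu_online_map;        /* on-stack copy of the online map */
        unsigned int cpu;

        cpu_clear(smp_processor_id(), mask);    /* drop the local CPU from the copy */
        for_each_cpu_mask(cpu, mask)
                if (ctx[cpu])
                        ctx[cpu] = 0;
}

static void drop_other_contexts_new(void)
{
        unsigned int cpu;

        /* Walk cpu_online_mask directly; skip the local CPU inline. */
        for_each_online_cpu(cpu) {
                if (cpu != smp_processor_id() && ctx[cpu])
                        ctx[cpu] = 0;
        }
}

Both forms rely on preemption being disabled so smp_processor_id() stays stable; the new form additionally avoids materialising a cpumask copy on the kernel stack, which matters as NR_CPUS grows.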