author     Rusty Russell <rusty@rustcorp.com.au>   2009-03-13 14:49:54 +1030
committer  Rusty Russell <rusty@rustcorp.com.au>   2009-03-13 14:49:54 +1030
commit     4f0628963c86d2f97b8cb9acc024a7fe288a6a57 (patch)
tree       83e7592c0706f96979628802344d886481a98b07 /arch/x86
parent     3f76a183de8ad3aeb7425f3d9685bb6003abd1a5 (diff)
download   linux-4f0628963c86d2f97b8cb9acc024a7fe288a6a57.tar.bz2
cpumask: use new cpumask functions throughout x86
Impact: cleanup
1) &cpu_online_map -> cpu_online_mask
2) first_cpu/next_cpu_nr -> cpumask_first/cpumask_next
3) cpu_*_map manipulation -> init_cpu_* / set_cpu_*
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
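[Editor's note] As a quick illustration of the three conversions in the changelog, here is a minimal, hypothetical helper written against the new API (the function name is made up and is not part of the patch):

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/smp.h>

/* Hypothetical helper, not in this patch: the changelog's three conversions side by side. */
static void example_new_cpumask_api(int cpu, struct cpumask *retmask)
{
        /* 1) &cpu_online_map -> cpu_online_mask (a read-only struct cpumask pointer) */
        const struct cpumask *online = cpu_online_mask;

        /* 2) first_cpu()/next_cpu_nr() -> cpumask_first()/cpumask_next() */
        pr_info("first online cpu %u, next %u\n",
                cpumask_first(online),
                cpumask_next(cpumask_first(online), online));

        /* 3) direct cpu_*_map manipulation -> cpumask_clear()/cpumask_set_cpu() etc. */
        cpumask_clear(retmask);
        cpumask_set_cpu(cpu, retmask);
}

The key difference is that the new accessors take and return const struct cpumask pointers, so callers no longer copy whole cpumask_t values or take the address of the global cpu_*_map variables.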
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/topology.h            |  4
-rw-r--r--  arch/x86/kernel/apic/bigsmp_32.c           | 16
-rw-r--r--  arch/x86/kernel/apic/es7000_32.c           |  6
-rw-r--r--  arch/x86/kernel/apic/summit_32.c           |  2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel_64.c  |  2
-rw-r--r--  arch/x86/kernel/cpu/proc.c                 |  4
-rw-r--r--  arch/x86/kernel/process.c                  |  2
-rw-r--r--  arch/x86/kernel/smpboot.c                  | 11
-rw-r--r--  arch/x86/xen/smp.c                         |  6
9 files changed, 26 insertions(+), 27 deletions(-)
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index f8b833e1257f..1ce1e1afa801 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -174,11 +174,11 @@ static inline int early_cpu_to_node(int cpu)
 static inline const cpumask_t *cpumask_of_node(int node)
 {
-        return &cpu_online_map;
+        return cpu_online_mask;
 }
 
 static inline int node_to_first_cpu(int node)
 {
-        return first_cpu(cpu_online_map);
+        return cpumask_first(cpu_online_mask);
 }
 
 static inline void setup_node_to_cpumask_map(void) { }
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index d806ecaa948f..676cdac385c0 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -26,12 +26,12 @@ static int bigsmp_apic_id_registered(void)
         return 1;
 }
 
-static const cpumask_t *bigsmp_target_cpus(void)
+static const struct cpumask *bigsmp_target_cpus(void)
 {
 #ifdef CONFIG_SMP
-        return &cpu_online_map;
+        return cpu_online_mask;
 #else
-        return &cpumask_of_cpu(0);
+        return cpumask_of(0);
 #endif
 }
 
@@ -118,9 +118,9 @@ static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
 }
 
 /* As we are using single CPU as destination, pick only one CPU here */
-static unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-        return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
+        return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask));
 }
 
 static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -188,10 +188,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
         { } /* NULL entry stops DMI scanning */
 };
 
-static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-        cpus_clear(*retmask);
-        cpu_set(cpu, *retmask);
+        cpumask_clear(retmask);
+        cpumask_set_cpu(cpu, retmask);
 }
 
 static int probe_bigsmp(void)
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 1322f5409e20..26d3a3eba75b 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -460,9 +460,9 @@ static const cpumask_t *target_cpus_cluster(void)
         return cpu_all_mask;
 }
 
-static const cpumask_t *es7000_target_cpus(void)
+static const struct cpumask *es7000_target_cpus(void)
 {
-        return &cpumask_of_cpu(smp_processor_id());
+        return cpumask_of(smp_processor_id());
 }
 
 static unsigned long
@@ -517,7 +517,7 @@ static void es7000_setup_apic_routing(void)
         "Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
                 (apic_version[apic] == 0x14) ?
                         "Physical Cluster" : "Logical Cluster",
-                nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
+                nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
 }
 
 static int es7000_apicid_to_node(int logical_apicid)
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index aac52fa873ff..7f6bd908da46 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -192,7 +192,7 @@ static const cpumask_t *summit_target_cpus(void)
          * dest_LowestPrio mode logical clustered apic interrupt routing
          * Just start on cpu 0.  IRQ balancing will spread load
          */
-        return &cpumask_of_cpu(0);
+        return cpumask_of(0);
 }
 
 static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid)
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index aaa7d9730938..96b2a85545aa 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -249,7 +249,7 @@ void cmci_rediscover(int dying)
         for_each_online_cpu (cpu) {
                 if (cpu == dying)
                         continue;
-                if (set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)))
+                if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
                         continue;
                 /* Recheck banks in case CPUs don't all have the same */
                 if (cmci_supported(&banks))
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 4dd610e226e0..f93047fed791 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -143,9 +143,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
         if (*pos == 0)  /* just in case, cpu 0 is not the first */
-                *pos = first_cpu(cpu_online_map);
+                *pos = cpumask_first(cpu_online_mask);
         else
-                *pos = next_cpu_nr(*pos - 1, cpu_online_map);
+                *pos = cpumask_next(*pos - 1, cpu_online_mask);
         if ((*pos) < nr_cpu_ids)
                 return &cpu_data(*pos);
         return NULL;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index cad5431951aa..6638294cec8d 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -324,7 +324,7 @@ void stop_this_cpu(void *dummy)
         /*
          * Remove this CPU:
          */
-        cpu_clear(smp_processor_id(), cpu_online_map);
+        set_cpu_online(smp_processor_id(), false);
         disable_local_APIC();
 
         for (;;) {
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index d6427aa56966..58d24ef917d8 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -296,7 +296,7 @@ notrace static void __cpuinit start_secondary(void *unused)
         __flush_tlb_all();
 #endif
 
-        /* This must be done before setting cpu_online_map */
+        /* This must be done before setting cpu_online_mask */
         set_cpu_sibling_map(raw_smp_processor_id());
         wmb();
 
@@ -904,9 +904,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
  */
 static __init void disable_smp(void)
 {
-        /* use the read/write pointers to the present and possible maps */
-        cpumask_copy(&cpu_present_map, cpumask_of(0));
-        cpumask_copy(&cpu_possible_map, cpumask_of(0));
+        init_cpu_present(cpumask_of(0));
+        init_cpu_possible(cpumask_of(0));
         smpboot_clear_io_apic_irqs();
 
         if (smp_found_config)
@@ -1149,11 +1148,11 @@ early_param("possible_cpus", _setup_possible_cpus);
 
 /*
- * cpu_possible_map should be static, it cannot change as cpu's
+ * cpu_possible_mask should be static, it cannot change as cpu's
  * are onlined, or offlined. The reason is per-cpu data-structures
  * are allocated by some modules at init time, and dont expect to
  * do this dynamically on cpu arrival/departure.
- * cpu_present_map on the other hand can change dynamically.
+ * cpu_present_mask on the other hand can change dynamically.
  * In case when cpu_hotplug is not compiled, then we resort to current
  * behaviour, which is cpu_possible == cpu_present.
  * - Ashok Raj
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 8d470562ffc9..585a6e330837 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void)
                 rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                 if (rc >= 0) {
                         num_processors++;
-                        cpu_set(i, cpu_possible_map);
+                        set_cpu_possible(i, true);
                 }
         }
 }
@@ -197,7 +197,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
         while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                 for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                         continue;
-                cpu_clear(cpu, cpu_possible_map);
+                set_cpu_possible(cpu, false);
         }
 
         for_each_possible_cpu (cpu) {
@@ -210,7 +210,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
                 if (IS_ERR(idle))
                         panic("failed fork for CPU %d", cpu);
 
-                cpu_set(cpu, cpu_present_map);
+                set_cpu_present(cpu, true);
         }
 }
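[Editor's note] The smpboot.c comment retained above is the rationale for the init_cpu_* / set_cpu_* split this patch adopts: the possible mask is seeded once at boot and then treated as fixed, while presence (and online state) is toggled per CPU as hotplug events occur. A rough sketch of that pattern, loosely modelled on disable_smp() and xen_fill_possible_map() (the helper below and its argument are illustrative only, not from the patch):

#include <linux/cpumask.h>
#include <linux/init.h>

/* Illustrative boot-time setup, not part of this patch. */
static void __init example_seed_cpu_masks(unsigned int detected_cpus)
{
        unsigned int cpu;

        /* Seed the (conceptually static) possible mask once at boot. */
        init_cpu_possible(cpumask_of(0));
        for (cpu = 1; cpu < detected_cpus; cpu++)
                set_cpu_possible(cpu, true);

        /* Presence may change later with hotplug, so it is toggled per CPU. */
        for_each_possible_cpu(cpu)
                set_cpu_present(cpu, true);
}

The online mask follows the same accessor style: stop_this_cpu() above now calls set_cpu_online(smp_processor_id(), false) instead of clearing a bit in cpu_online_map directly.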