author		Li Zefan <lizf@cn.fujitsu.com>	2009-06-15 14:58:26 +0800
committer	Rusty Russell <rusty@rustcorp.com.au>	2009-09-24 09:34:24 +0930
commit		79f5599772ac2f138d7a75b8f3f06a93f09c75f7 (patch)
tree		092c9f6e3f7c49d8f5bb9f3d39752ae7cfb9415a
parent		a724eada8c2a7b62463b73ccf73fd0bb6e928aeb (diff)
download	linux-79f5599772ac2f138d7a75b8f3f06a93f09c75f7.tar.bz2
cpumask: use zalloc_cpumask_var() where possible
Remove open-coded zalloc_cpumask_var() and zalloc_cpumask_var_node().
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
-rw-r--r--	arch/x86/kernel/apic/io_apic.c		7
-rw-r--r--	arch/x86/kernel/process.c		6
-rw-r--r--	arch/x86/kernel/smpboot.c		9
-rw-r--r--	drivers/acpi/processor_perflib.c	3
-rw-r--r--	drivers/acpi/processor_throttling.c	3
-rw-r--r--	drivers/net/sfc/efx.c			3
-rw-r--r--	drivers/oprofile/buffer_sync.c		3
-rw-r--r--	kernel/trace/trace.c			7
-rw-r--r--	virt/kvm/kvm_main.c			3
9 files changed, 14 insertions, 30 deletions
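
The pattern removed throughout this patch is an alloc_cpumask_var()/alloc_cpumask_var_node() call followed by an explicit cpumask_clear(); zalloc_cpumask_var() and zalloc_cpumask_var_node() hand back a mask that is already zeroed. The following is a minimal sketch of the conversion, not part of the patch itself; example_init() and example_mask are hypothetical names used only for illustration.

	#include <linux/cpumask.h>
	#include <linux/gfp.h>
	#include <linux/errno.h>

	/* Hypothetical mask, for illustration only. */
	static cpumask_var_t example_mask;

	static int example_init(void)
	{
		/*
		 * Open-coded form this patch removes:
		 *
		 *	if (!alloc_cpumask_var(&example_mask, GFP_KERNEL))
		 *		return -ENOMEM;
		 *	cpumask_clear(example_mask);
		 */

		/* Equivalent single call; the mask comes back already zeroed. */
		if (!zalloc_cpumask_var(&example_mask, GFP_KERNEL))
			return -ENOMEM;

		return 0;
	}
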
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 64970b9885f2..dc69f28489f5 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -227,17 +227,14 @@ static struct irq_cfg *get_one_free_irq_cfg(int node)
 
 	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
 	if (cfg) {
-		if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
+		if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
 			kfree(cfg);
 			cfg = NULL;
-		} else if (!alloc_cpumask_var_node(&cfg->old_domain,
+		} else if (!zalloc_cpumask_var_node(&cfg->old_domain,
 							  GFP_ATOMIC, node)) {
 			free_cpumask_var(cfg->domain);
 			kfree(cfg);
 			cfg = NULL;
-		} else {
-			cpumask_clear(cfg->domain);
-			cpumask_clear(cfg->old_domain);
 		}
 	}
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 847ab4160315..5284cd2b5776 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -555,10 +555,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 void __init init_c1e_mask(void)
 {
 	/* If we're using c1e_idle, we need to allocate c1e_mask. */
-	if (pm_idle == c1e_idle) {
-		alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
-		cpumask_clear(c1e_mask);
-	}
+	if (pm_idle == c1e_idle)
+		zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
 }
 
 static int __init idle_setup(char *str)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 09c5e077dff7..565ebc65920e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1059,12 +1059,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 #endif
 	current_thread_info()->cpu = 0;  /* needed? */
 	for_each_possible_cpu(i) {
-		alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
-		alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-		alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
-		cpumask_clear(per_cpu(cpu_core_map, i));
-		cpumask_clear(per_cpu(cpu_sibling_map, i));
-		cpumask_clear(cpu_data(i).llc_shared_map);
+		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
 	}
 	set_cpu_sibling_map(0);
 
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 11088cf10319..8ba0ed0b9ddb 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -511,7 +511,7 @@ int acpi_processor_preregister_performance(
 	struct acpi_processor *match_pr;
 	struct acpi_psd_package *match_pdomain;
 
-	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
 		return -ENOMEM;
 
 	mutex_lock(&performance_mutex);
@@ -558,7 +558,6 @@ int acpi_processor_preregister_performance(
 	 * Now that we have _PSD data from all CPUs, lets setup P-state
 	 * domain info.
 	 */
-	cpumask_clear(covered_cpus);
 	for_each_possible_cpu(i) {
 		pr = per_cpu(processors, i);
 		if (!pr)
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index ce7cf3bc5101..4c6c14c1e307 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -77,7 +77,7 @@ static int acpi_processor_update_tsd_coord(void)
 	struct acpi_tsd_package *pdomain, *match_pdomain;
 	struct acpi_processor_throttling *pthrottling, *match_pthrottling;
 
-	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
 		return -ENOMEM;
 
 	/*
@@ -105,7 +105,6 @@ static int acpi_processor_update_tsd_coord(void)
 	if (retval)
 		goto err_ret;
 
-	cpumask_clear(covered_cpus);
 	for_each_possible_cpu(i) {
 		pr = per_cpu(processors, i);
 		if (!pr)
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 07a7e4b8f8fc..cc4b2f99989d 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -884,13 +884,12 @@ static int efx_wanted_rx_queues(void)
 	int count;
 	int cpu;
 
-	if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL))) {
+	if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
 		printk(KERN_WARNING
 		       "sfc: RSS disabled due to allocation failure\n");
 		return 1;
 	}
 
-	cpumask_clear(core_mask);
 	count = 0;
 	for_each_online_cpu(cpu) {
 		if (!cpumask_test_cpu(cpu, core_mask)) {
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 8574622e36a5..c9e2ae90f195 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -154,9 +154,8 @@ int sync_start(void)
 {
 	int err;
 
-	if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
 		return -ENOMEM;
-	cpumask_clear(marked_cpus);
 
 	start_cpu_work();
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6c0f6a8a22eb..411af37f4be4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1984,11 +1984,9 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (current_trace)
 		*iter->trace = *current_trace;
 
-	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
 		goto fail;
 
-	cpumask_clear(iter->started);
-
 	if (current_trace && current_trace->print_max)
 		iter->tr = &max_tr;
 	else
@@ -4389,7 +4387,7 @@ __init static int tracer_alloc_buffers(void)
 	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
 		goto out_free_buffer_mask;
 
-	if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
 		goto out_free_tracing_cpumask;
 
 	/* To save memory, keep the ring buffer size to its minimum */
@@ -4400,7 +4398,6 @@ __init static int tracer_alloc_buffers(void)
 
 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
 	cpumask_copy(tracing_cpumask, cpu_all_mask);
-	cpumask_clear(tracing_reader_cpumask);
 
 	/* TODO: make the number of buffers hot pluggable with CPUS */
 	global_trace.buffer = ring_buffer_alloc(ring_buf_size,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 897bff3b7df9..034a798b0431 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -738,8 +738,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 	bool called = true;
 	struct kvm_vcpu *vcpu;
 
-	if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
-		cpumask_clear(cpus);
+	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
 	spin_lock(&kvm->requests_lock);
 	me = smp_processor_id();