cpumask: use zalloc_cpumask_var() where possible
author	Li Zefan <lizf@cn.fujitsu.com>
Mon, 15 Jun 2009 06:58:26 +0000 (14:58 +0800)
committer	Rusty Russell <rusty@rustcorp.com.au>
Thu, 24 Sep 2009 00:04:24 +0000 (09:34 +0930)
Remove open-coded zalloc_cpumask_var() and zalloc_cpumask_var_node().
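
For reference, the zalloc variants simply fold the cpumask_clear() into
the allocation. A sketch of the helpers as defined in lib/cpumask.c for
the CONFIG_CPUMASK_OFFSTACK case (with on-stack cpumasks they reduce to
a cpumask_clear() plus "return true"; exact form may differ across
kernel versions):

	bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
	{
		/* __GFP_ZERO makes the allocator hand back an already-zeroed mask */
		return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
	}

	bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
	{
		return alloc_cpumask_var(mask, flags | __GFP_ZERO);
	}

So each call site below collapses the pattern ("mask" here is illustrative)

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_clear(mask);

into a single zalloc_cpumask_var() call (likewise for the _node variant),
and the mask comes back zeroed whether it lives on the stack or was
kmalloc'ed.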

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/process.c
arch/x86/kernel/smpboot.c
drivers/acpi/processor_perflib.c
drivers/acpi/processor_throttling.c
drivers/net/sfc/efx.c
drivers/oprofile/buffer_sync.c
kernel/trace/trace.c
virt/kvm/kvm_main.c

diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 64970b9885f248d7d0388203477611142ba1b918..dc69f28489f5bc9a2873a8d22b084bec313d9912 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -227,17 +227,14 @@ static struct irq_cfg *get_one_free_irq_cfg(int node)
 
        cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
        if (cfg) {
-               if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
+               if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
                        kfree(cfg);
                        cfg = NULL;
-               } else if (!alloc_cpumask_var_node(&cfg->old_domain,
+               } else if (!zalloc_cpumask_var_node(&cfg->old_domain,
                                                          GFP_ATOMIC, node)) {
                        free_cpumask_var(cfg->domain);
                        kfree(cfg);
                        cfg = NULL;
-               } else {
-                       cpumask_clear(cfg->domain);
-                       cpumask_clear(cfg->old_domain);
                }
        }
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 847ab41603155ad493df143b24e6c9055f0f10e8..5284cd2b57769f53e79f520ecc6f8199720497cf 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -555,10 +555,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 void __init init_c1e_mask(void)
 {
        /* If we're using c1e_idle, we need to allocate c1e_mask. */
-       if (pm_idle == c1e_idle) {
-               alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
-               cpumask_clear(c1e_mask);
-       }
+       if (pm_idle == c1e_idle)
+               zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
 }
 
 static int __init idle_setup(char *str)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 09c5e077dff7e0aea7f11cbe3f3c5ca196ef688b..565ebc65920e3e685161758acb03c4f8106c6b40 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1059,12 +1059,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 #endif
        current_thread_info()->cpu = 0;  /* needed? */
        for_each_possible_cpu(i) {
-               alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
-               alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-               alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
-               cpumask_clear(per_cpu(cpu_core_map, i));
-               cpumask_clear(per_cpu(cpu_sibling_map, i));
-               cpumask_clear(cpu_data(i).llc_shared_map);
+               zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+               zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+               zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
        }
        set_cpu_sibling_map(0);
 
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 11088cf103197895e79a94359692d16552c4be73..8ba0ed0b9ddbc912684d398e2eda14357920aed4 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -511,7 +511,7 @@ int acpi_processor_preregister_performance(
        struct acpi_processor *match_pr;
        struct acpi_psd_package *match_pdomain;
 
-       if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
                return -ENOMEM;
 
        mutex_lock(&performance_mutex);
@@ -558,7 +558,6 @@ int acpi_processor_preregister_performance(
         * Now that we have _PSD data from all CPUs, lets setup P-state 
         * domain info.
         */
-       cpumask_clear(covered_cpus);
        for_each_possible_cpu(i) {
                pr = per_cpu(processors, i);
                if (!pr)
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index ce7cf3bc5101fb37743740f9c9b34def03ac82ae..4c6c14c1e30788d6b025aaaabd511350dc9b678e 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -77,7 +77,7 @@ static int acpi_processor_update_tsd_coord(void)
        struct acpi_tsd_package *pdomain, *match_pdomain;
        struct acpi_processor_throttling *pthrottling, *match_pthrottling;
 
-       if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
                return -ENOMEM;
 
        /*
@@ -105,7 +105,6 @@ static int acpi_processor_update_tsd_coord(void)
        if (retval)
                goto err_ret;
 
-       cpumask_clear(covered_cpus);
        for_each_possible_cpu(i) {
                pr = per_cpu(processors, i);
                if (!pr)
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 07a7e4b8f8fc67a06d1ed8c3ee8a153d923822c1..cc4b2f99989dc5433327ae1b47b184c846da151f 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -884,13 +884,12 @@ static int efx_wanted_rx_queues(void)
        int count;
        int cpu;
 
-       if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL))) {
+       if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
                printk(KERN_WARNING
                       "sfc: RSS disabled due to allocation failure\n");
                return 1;
        }
 
-       cpumask_clear(core_mask);
        count = 0;
        for_each_online_cpu(cpu) {
                if (!cpumask_test_cpu(cpu, core_mask)) {
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 8574622e36a51abec01aaa05d9f509b00b8ad9f0..c9e2ae90f19508db8f74f91440636689d50321d8 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -154,9 +154,8 @@ int sync_start(void)
 {
        int err;
 
-       if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
                return -ENOMEM;
-       cpumask_clear(marked_cpus);
 
        start_cpu_work();
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6c0f6a8a22ebd5eafce5adf55423d4302734cd18..411af37f4be4755b2bb1670d7cef18d16fc39c94 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1984,11 +1984,9 @@ __tracing_open(struct inode *inode, struct file *file)
        if (current_trace)
                *iter->trace = *current_trace;
 
-       if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
                goto fail;
 
-       cpumask_clear(iter->started);
-
        if (current_trace && current_trace->print_max)
                iter->tr = &max_tr;
        else
@@ -4389,7 +4387,7 @@ __init static int tracer_alloc_buffers(void)
        if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
                goto out_free_buffer_mask;
 
-       if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
                goto out_free_tracing_cpumask;
 
        /* To save memory, keep the ring buffer size to its minimum */
@@ -4400,7 +4398,6 @@ __init static int tracer_alloc_buffers(void)
 
        cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
        cpumask_copy(tracing_cpumask, cpu_all_mask);
-       cpumask_clear(tracing_reader_cpumask);
 
        /* TODO: make the number of buffers hot pluggable with CPUS */
        global_trace.buffer = ring_buffer_alloc(ring_buf_size,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 897bff3b7df914a32f309de7e6b687fa7633cfe2..034a798b0431e98209489f793b904bfda9cf463c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -738,8 +738,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
        bool called = true;
        struct kvm_vcpu *vcpu;
 
-       if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
-               cpumask_clear(cpus);
+       zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
        spin_lock(&kvm->requests_lock);
        me = smp_processor_id();