cpumask: use mm_cpumask() wrapper: arm
author	Rusty Russell <rusty@rustcorp.com.au>
Thu, 24 Sep 2009 15:34:49 +0000 (09:34 -0600)
committer	Rusty Russell <rusty@rustcorp.com.au>
Thu, 24 Sep 2009 00:04:49 +0000 (09:34 +0930)
Makes the code future-proof against the impending change to mm->cpu_vm_mask.

It's also a chance to use the new cpumask_* ops, which take a pointer
(the older ones are deprecated, but there's no hurry for arch code).
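
For reference, mm_cpumask() is at this point just a thin accessor over
the existing field; the sketch below shows the assumed shape of the
wrapper (a sketch of the generic-header definition of the era, not a
quote from the tree) and how each old value-taking op used in this
patch maps onto its new pointer-taking equivalent:

    /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
    #define mm_cpumask(mm) (&(mm)->cpu_vm_mask)

    /*
     * Old op on a cpumask_t value          New op on a struct cpumask *
     * ---------------------------          ----------------------------
     * cpu_isset(cpu, mm->cpu_vm_mask)      cpumask_test_cpu(cpu, mm_cpumask(mm))
     * cpu_set(cpu, mm->cpu_vm_mask)        cpumask_set_cpu(cpu, mm_cpumask(mm))
     * cpu_clear(cpu, mm->cpu_vm_mask)      cpumask_clear_cpu(cpu, mm_cpumask(mm))
     * cpus_empty(mm->cpu_vm_mask)          cpumask_empty(mm_cpumask(mm))
     * cpu_test_and_set(cpu, ...)           cpumask_test_and_set_cpu(cpu, mm_cpumask(mm))
     * mm->cpu_vm_mask = cpumask_of_cpu(c)  cpumask_copy(mm_cpumask(mm), cpumask_of(c))
     */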

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
arch/arm/include/asm/cacheflush.h
arch/arm/include/asm/mmu_context.h
arch/arm/include/asm/tlbflush.h
arch/arm/kernel/smp.c
arch/arm/mm/context.c
arch/arm/mm/flush.c

index 1a711ea8418b6045c581a576caa3f85496ee2673..fd03fb63a33222ca6ff67a25469414371d68431d 100644 (file)
@@ -334,14 +334,14 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
 #ifndef CONFIG_CPU_CACHE_VIPT
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
-       if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+       if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
                __cpuc_flush_user_all();
 }
 
 static inline void
 flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-       if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+       if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
                __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
                                        vma->vm_flags);
 }
@@ -349,7 +349,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
 static inline void
 flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
-       if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+       if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                unsigned long addr = user_addr & PAGE_MASK;
                __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
        }
@@ -360,7 +360,7 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                         unsigned long uaddr, void *kaddr,
                         unsigned long len, int write)
 {
-       if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+       if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                unsigned long addr = (unsigned long)kaddr;
                __cpuc_coherent_kern_range(addr, addr + len);
        }
index bcdb9291ef0c3636d18442d9e8ea6fd36cebc425..de6cefb329dd4aa24cd42731730915938a33aaf4 100644 (file)
@@ -103,14 +103,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 #ifdef CONFIG_SMP
        /* check for possible thread migration */
-       if (!cpus_empty(next->cpu_vm_mask) && !cpu_isset(cpu, next->cpu_vm_mask))
+       if (!cpumask_empty(mm_cpumask(next)) &&
+           !cpumask_test_cpu(cpu, mm_cpumask(next)))
                __flush_icache_all();
 #endif
-       if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
+       if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
                check_context(next);
                cpu_switch_mm(next->pgd, next);
                if (cache_is_vivt())
-                       cpu_clear(cpu, prev->cpu_vm_mask);
+                       cpumask_clear_cpu(cpu, mm_cpumask(prev));
        }
 #endif
 }
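
The one subtle conversion in switch_mm() above is cpu_test_and_set()
-> cpumask_test_and_set_cpu(): both are atomic test-and-set operations
on the per-mm mask, so the first time a CPU switches to `next' it
still atomically marks itself and takes the cpu_switch_mm() path. A
minimal sketch of the new op, assuming the generic definition of the
time:

    static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
    {
            /* Atomically set bit `cpu' and return its previous value. */
            return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
    }
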
index c964f3fc3bc57e906c452cbbe059fed506c19ee8..a45ab5dd82559cc7d21e887f24135193bc2fea6b 100644 (file)
@@ -350,7 +350,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
        if (tlb_flag(TLB_WB))
                dsb();
 
-       if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
+       if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
                if (tlb_flag(TLB_V3_FULL))
                        asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
                if (tlb_flag(TLB_V4_U_FULL))
@@ -388,7 +388,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
        if (tlb_flag(TLB_WB))
                dsb();
 
-       if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+       if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                if (tlb_flag(TLB_V3_PAGE))
                        asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
                if (tlb_flag(TLB_V4_U_PAGE))
index de885fd256c519b220e6d48d86996e1f7027cd5d..e0d32770bb3d51c061554ba348c9c69beffd25ea 100644 (file)
@@ -189,7 +189,7 @@ int __cpuexit __cpu_disable(void)
        read_lock(&tasklist_lock);
        for_each_process(p) {
                if (p->mm)
-                       cpu_clear(cpu, p->mm->cpu_vm_mask);
+                       cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
        }
        read_unlock(&tasklist_lock);
 
@@ -257,7 +257,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        atomic_inc(&mm->mm_users);
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
-       cpu_set(cpu, mm->cpu_vm_mask);
+       cpumask_set_cpu(cpu, mm_cpumask(mm));
        cpu_switch_mm(mm->pgd, mm);
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();
@@ -643,7 +643,7 @@ void flush_tlb_all(void)
 void flush_tlb_mm(struct mm_struct *mm)
 {
        if (tlb_ops_need_broadcast())
-               on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+               on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
        else
                local_flush_tlb_mm(mm);
 }
@@ -654,7 +654,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = uaddr;
-               on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+               on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
        } else
                local_flush_tlb_page(vma, uaddr);
 }
@@ -677,7 +677,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
                ta.ta_vma = vma;
                ta.ta_start = start;
                ta.ta_end = end;
-               on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+               on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
        } else
                local_flush_tlb_range(vma, start, end);
 }
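
on_each_cpu_mask() above is ARM's local IPI helper in
arch/arm/kernel/smp.c; since mm_cpumask() already yields a pointer,
the explicit `&' simply disappears at each call site. Assumed shape of
the helper's signature (a sketch for orientation, not a quote):

    /* Run func(info) on every CPU set in mask, via IPI for remote CPUs. */
    static void on_each_cpu_mask(void (*func)(void *), void *info, int wait,
                                 const struct cpumask *mask);
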
index fc84fcc743804d16241a99ee5cb1c86f650dd048..6bda76a431991287cc86e6995b3769bfde2ebf43 100644 (file)
@@ -59,6 +59,6 @@ void __new_context(struct mm_struct *mm)
        }
        spin_unlock(&cpu_asid_lock);
 
-       mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
+       cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
        mm->context.id = asid;
 }
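
The context.c hunk is the one place a whole-struct assignment is
replaced rather than a single-bit op; once cpu_vm_mask stops being an
embedded cpumask_t, `mm->cpu_vm_mask = ...' no longer compiles, while
cpumask_copy() keeps working for any representation. A minimal sketch
of the new pair, assuming the generic definitions of the time:

    /* cpumask_of(cpu): const pointer to a single-bit mask for `cpu'. */
    static inline void cpumask_copy(struct cpumask *dstp,
                                    const struct cpumask *srcp)
    {
            /* Copy nr_cpumask_bits bits from src to dst. */
            bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
    }
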
index 575f3ad722e773fabffaac361fa9e72ecfaea62a..b27942909b239e1c7d8dac35ad10cf5fe89d3353 100644 (file)
@@ -50,7 +50,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 void flush_cache_mm(struct mm_struct *mm)
 {
        if (cache_is_vivt()) {
-               if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+               if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
                        __cpuc_flush_user_all();
                return;
        }
@@ -73,7 +73,7 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
        if (cache_is_vivt()) {
-               if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+               if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
                        __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
                                                vma->vm_flags);
                return;
@@ -97,7 +97,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
        if (cache_is_vivt()) {
-               if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+               if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                        unsigned long addr = user_addr & PAGE_MASK;
                        __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
                }
@@ -113,7 +113,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                         unsigned long len, int write)
 {
        if (cache_is_vivt()) {
-               if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+               if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                        unsigned long addr = (unsigned long)kaddr;
                        __cpuc_coherent_kern_range(addr, addr + len);
                }
@@ -126,7 +126,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
        }
 
        /* VIPT non-aliasing cache */
-       if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
+       if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
            vma->vm_flags & VM_EXEC) {
                unsigned long addr = (unsigned long)kaddr;
                /* only flushing the kernel mapping on non-aliasing VIPT */