/*
 *  linux/arch/arm/kernel/smp_tlb.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/preempt.h>
#include <linux/smp.h>

#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
        struct vm_area_struct *ta_vma;
        unsigned long ta_start;
        unsigned long ta_end;
};

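/*
 * smp_call_function() and friends deliver a single void * to the remote
 * CPU, so flushes that take more than one argument marshal their
 * parameters through a tlb_args on the caller's stack (safe because all
 * the calls below wait for completion). Each ipi_flush_*() handler runs
 * on the target CPU, unpacks the arguments and performs the purely
 * local flush.
 */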
static inline void ipi_flush_tlb_all(void *ignored)
{
        local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
        struct mm_struct *mm = (struct mm_struct *)arg;

        local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_bp_all(void *ignored)
{
        local_flush_bp_all();
}

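/*
 * Cortex-A15 erratum 798181: on affected revisions (r0p0..r3p2) the
 * TLBI*IS/DSB operations are not adequately shooting down all use of
 * the old entries on other CPUs. The workaround below issues a dummy
 * inner-shareable TLB flush plus DSB on the CPU performing the
 * invalidation, then IPIs the other relevant CPUs so that they execute
 * a DMB.
 */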
#ifdef CONFIG_ARM_ERRATA_798181
static int erratum_a15_798181(void)
{
        unsigned int midr = read_cpuid_id();

        /* Cortex-A15 r0p0..r3p2 affected */
        if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
                return 0;
        return 1;
}
#else
static int erratum_a15_798181(void)
{
        return 0;
}
#endif

static void ipi_flush_tlb_a15_erratum(void *arg)
{
        dmb();
}

static void broadcast_tlb_a15_erratum(void)
{
        if (!erratum_a15_798181())
                return;

        dummy_flush_tlb_a15_erratum();
        smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
}

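/*
 * The kernel-space flushes cannot rely on an ASID match, so
 * broadcast_tlb_a15_erratum() above IPIs every other online CPU,
 * whereas broadcast_tlb_mm_a15_erratum() narrows the IPI to the CPUs
 * whose active ASID matches the mm being invalidated.
 */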
static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
{
        int cpu, this_cpu;
        cpumask_t mask = { CPU_BITS_NONE };

        if (!erratum_a15_798181())
                return;

        dummy_flush_tlb_a15_erratum();
        this_cpu = get_cpu();
        for_each_online_cpu(cpu) {
                if (cpu == this_cpu)
                        continue;
                /*
                 * We only need to send an IPI if the other CPUs are running
                 * the same ASID as the one being invalidated. There is no
                 * need for locking around the active_asids check since the
                 * switch_mm() function has at least one dmb() (as required by
                 * this workaround) in case a context switch happens on
                 * another CPU after the condition below.
                 */
                if (atomic64_read(&mm->context.id) ==
                    atomic64_read(&per_cpu(active_asids, cpu)))
                        cpumask_set_cpu(cpu, &mask);
        }
        smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
        put_cpu();
}

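/*
 * Public entry points. On cores whose TLB maintenance operations are
 * broadcast in hardware, the local operation is sufficient;
 * tlb_ops_need_broadcast() identifies the cores (e.g. ARM11MPCore)
 * where it is not, and there the flush is replicated on the relevant
 * CPUs by IPI before returning.
 */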
void flush_tlb_all(void)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_tlb_all, NULL, 1);
        else
                local_flush_tlb_all();
        broadcast_tlb_a15_erratum();
}

void flush_tlb_mm(struct mm_struct *mm)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
        else
                local_flush_tlb_mm(mm);
        broadcast_tlb_mm_a15_erratum(mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = uaddr;
                on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
                                        &ta, 1);
        } else {
                local_flush_tlb_page(vma, uaddr);
        }
        broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = kaddr;
                on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
        } else {
                local_flush_tlb_kernel_page(kaddr);
        }
        broadcast_tlb_a15_erratum();
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,
                                        &ta, 1);
        } else {
                local_flush_tlb_range(vma, start, end);
        }
        broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
        } else {
                local_flush_tlb_kernel_range(start, end);
        }
        broadcast_tlb_a15_erratum();
}

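/*
 * Branch predictor maintenance follows the same pattern: on cores
 * without hardware-broadcast maintenance the predictor must be
 * invalidated on every CPU by IPI.
 */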
void flush_bp_all(void)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_bp_all, NULL, 1);
        else
                local_flush_bp_all();
}