/*
 *
 * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */
#include <linux/bitops.h>

#include <mali_kbase.h>
#include <mali_kbase_mem.h>
#include <mali_kbase_mmu_hw.h>
#include <mali_kbase_tlstream.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <mali_kbase_as_fault_debugfs.h>
static inline u64 lock_region(struct kbase_device *kbdev, u64 pfn,
		u32 num_pages)
{
	u64 region;

	/* can't lock a zero sized range */
	KBASE_DEBUG_ASSERT(num_pages);

	region = pfn << PAGE_SHIFT;
	/*
	 * fls returns (given the ASSERT above):
	 * 1 .. 32
	 *
	 * 10 + fls(num_pages)
	 * results in the range (11 .. 42)
	 */

	/* gracefully handle num_pages being zero */
	if (0 == num_pages) {
		region |= 11;
	} else {
		u8 region_width;

		region_width = 10 + fls(num_pages);
		if (num_pages != (1ul << (region_width - 11))) {
			/* not pow2, so must go up to the next pow2 */
			region_width += 1;
		}
		KBASE_DEBUG_ASSERT(region_width <= KBASE_LOCK_REGION_MAX_SIZE);
		KBASE_DEBUG_ASSERT(region_width >= KBASE_LOCK_REGION_MIN_SIZE);
		region |= region_width;
	}

	return region;
}
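/*
 * Worked example of the lock-region encoding above (illustrative only):
 * for num_pages == 1000, fls(1000) returns 10, so region_width starts at
 * 20; 1000 != (1ul << 9), i.e. it is not a power of two, so region_width
 * is rounded up to 21. The width is then OR'd into the low bits of the
 * page-aligned address, so the returned lock address encodes both the
 * region base and its log2 size.
 */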
static int wait_ready(struct kbase_device *kbdev,
		unsigned int as_nr, struct kbase_context *kctx)
{
	unsigned int max_loops = KBASE_AS_INACTIVE_MAX_LOOPS;
	u32 val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx);

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending. Do not log remaining register accesses. */
	while (--max_loops && (val & AS_STATUS_AS_ACTIVE))
		val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), NULL);

	if (max_loops == 0) {
		dev_err(kbdev->dev, "AS_ACTIVE bit stuck\n");
		return -1;
	}

	/* If the wait loop was entered, log the last value read. */
	if (KBASE_AS_INACTIVE_MAX_LOOPS - 1 > max_loops)
		kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx);

	return 0;
}
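/*
 * Note: the bounded poll in wait_ready() trades an unbounded wait for a
 * detectable failure. A caller that sees a non-zero return (AS_ACTIVE
 * stuck) should not issue further AS commands; write_cmd() below does
 * exactly that by skipping the AS_COMMAND write when wait_ready() fails.
 */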
static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd,
		struct kbase_context *kctx)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(kbdev, as_nr, kctx);
	if (status == 0)
		kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd,
				kctx);

	return status;
}
void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
{
	const int num_as = 16;
	const int busfault_shift = MMU_PAGE_FAULT_FLAGS;
	const int pf_shift = 0;
	const unsigned long as_bit_mask = (1UL << num_as) - 1;
	unsigned long flags;
	u32 new_mask;
	u32 tmp;

	/* bus faults */
	u32 bf_bits = (irq_stat >> busfault_shift) & as_bit_mask;
	/* page faults (note: Ignore ASes with both pf and bf) */
	u32 pf_bits = ((irq_stat >> pf_shift) & as_bit_mask) & ~bf_bits;
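	/*
	 * Illustration (using the layout implied by the shifts above:
	 * page-fault bits in irq_stat[15:0], bus-fault bits in [31:16]):
	 * irq_stat == 0x00020007 gives bf_bits == 0x0002 (bus fault on AS1)
	 * and pf_bits == 0x0007 & ~0x0002 == 0x0005 (page faults on AS0 and
	 * AS2); AS1's page fault is dropped in favour of its bus fault.
	 */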
	KBASE_DEBUG_ASSERT(NULL != kbdev);

	/* remember current mask */
	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
	new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
	/* mask interrupts for now */
	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL);
	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
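	/*
	 * Rationale (assumed from the usual behaviour of the Midgard IRQ
	 * block): with MMU_IRQ_MASK zeroed, new faults still latch in
	 * MMU_IRQ_RAWSTAT but cannot re-enter this handler while the loop
	 * below walks the already-latched bits. The saved mask, minus the
	 * sources handled here, is restored at the end of the function.
	 */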
	while (bf_bits | pf_bits) {
		struct kbase_as *as;
		int as_no;
		struct kbase_context *kctx;

		/*
		 * the while logic ensures we have a bit set, no need to check
		 * for not-found here
		 */
		as_no = ffs(bf_bits | pf_bits) - 1;
		as = &kbdev->as[as_no];

		/*
		 * Refcount the kctx ASAP - it shouldn't disappear anyway, since
		 * Bus/Page faults _should_ only occur whilst jobs are running,
		 * and a job causing the Bus/Page fault shouldn't complete until
		 * the MMU is updated
		 */
		kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_no);

		/* find faulting address */
		as->fault_addr = kbase_reg_read(kbdev,
				MMU_AS_REG(as_no, AS_FAULTADDRESS_HI),
				kctx);
		as->fault_addr <<= 32;
		as->fault_addr |= kbase_reg_read(kbdev,
				MMU_AS_REG(as_no, AS_FAULTADDRESS_LO),
				kctx);

		/* report the fault to debugfs */
		kbase_as_fault_debugfs_new(kbdev, as_no);

		/* record the fault status */
		as->fault_status = kbase_reg_read(kbdev,
				MMU_AS_REG(as_no, AS_FAULTSTATUS),
				kctx);

		/* find the fault type */
		as->fault_type = (bf_bits & (1 << as_no)) ?
				KBASE_MMU_FAULT_TYPE_BUS :
				KBASE_MMU_FAULT_TYPE_PAGE;

#ifdef CONFIG_MALI_GPU_MMU_AARCH64
		as->fault_extra_addr = kbase_reg_read(kbdev,
				MMU_AS_REG(as_no, AS_FAULTEXTRA_HI),
				kctx);
		as->fault_extra_addr <<= 32;
		as->fault_extra_addr |= kbase_reg_read(kbdev,
				MMU_AS_REG(as_no, AS_FAULTEXTRA_LO),
				kctx);
#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */
		if (kbase_as_has_bus_fault(as)) {
			/* Mark bus fault as handled.
			 * Note that a bus fault is processed first in case
			 * where both a bus fault and page fault occur.
			 */
			bf_bits &= ~(1UL << as_no);

			/* remove the queued BF (and PF) from the mask */
			new_mask &= ~(MMU_BUS_ERROR(as_no) |
					MMU_PAGE_FAULT(as_no));
		} else {
			/* Mark page fault as handled */
			pf_bits &= ~(1UL << as_no);

			/* remove the queued PF from the mask */
			new_mask &= ~MMU_PAGE_FAULT(as_no);
		}
		/* Process the interrupt for this address space */
		spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
		kbase_mmu_interrupt_process(kbdev, kctx, as);
		spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock,
				flags);
	}

	/* reenable interrupts */
	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
	tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
	new_mask |= tmp;
	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask, NULL);
	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}
void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as,
		struct kbase_context *kctx)
{
	struct kbase_mmu_setup *current_setup = &as->current_setup;
	u64 transcfg = 0;

#ifdef CONFIG_MALI_GPU_MMU_AARCH64
	transcfg = current_setup->transcfg & 0xFFFFFFFFUL;

	/* Set AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK: clear the PTW_MEMATTR
	 * field, then enable the write-back attribute */
	transcfg &= ~AS_TRANSCFG_PTW_MEMATTR_MASK;
	transcfg |= AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK;

	if (kbdev->system_coherency == COHERENCY_ACE) {
		/* Set AS_TRANSCFG_PTW_SH_OS (outer shareable): clear the
		 * PTW_SH field, then enable the outer-shareable attribute */
		transcfg = (transcfg & ~AS_TRANSCFG_PTW_SH_MASK);
		transcfg = (transcfg | AS_TRANSCFG_PTW_SH_OS);
	}
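	/*
	 * Both updates above follow the usual read-modify-write pattern for
	 * multi-bit register fields:
	 *
	 *	reg = (reg & ~FIELD_MASK) | FIELD_VALUE;
	 *
	 * Clearing the whole field first is what makes the write correct
	 * regardless of the field's previous contents.
	 */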
	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_LO),
			transcfg, kctx);
	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_HI),
			(current_setup->transcfg >> 32) & 0xFFFFFFFFUL, kctx);

#else /* CONFIG_MALI_GPU_MMU_AARCH64 */

	if (kbdev->system_coherency == COHERENCY_ACE)
		current_setup->transtab |= AS_TRANSTAB_LPAE_SHARE_OUTER;

#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */

	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_LO),
			current_setup->transtab & 0xFFFFFFFFUL, kctx);
	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_HI),
			(current_setup->transtab >> 32) & 0xFFFFFFFFUL, kctx);

	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_LO),
			current_setup->memattr & 0xFFFFFFFFUL, kctx);
	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_HI),
			(current_setup->memattr >> 32) & 0xFFFFFFFFUL, kctx);

	kbase_tlstream_tl_attrib_as_config(as,
			current_setup->transtab,
			current_setup->memattr,
			transcfg);

	/* Apply the new setup to the address space */
	write_cmd(kbdev, as->number, AS_COMMAND_UPDATE, kctx);
}
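/*
 * Illustrative usage of kbase_mmu_hw_do_operation() below (hypothetical
 * call site, not part of this file): after updating the page table entries
 * for 'nr' pages starting at virtual PFN 'vpfn', a caller would typically
 * issue
 *
 *	err = kbase_mmu_hw_do_operation(kbdev, as, kctx, vpfn, nr,
 *			AS_COMMAND_FLUSH_PT, 0);
 *
 * which locks the affected region, runs the flush, and waits for the MMU
 * to go idle before returning.
 */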
int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
		struct kbase_context *kctx, u64 vpfn, u32 nr, u32 op,
		unsigned int handling_irq)
{
	int ret;

	if (op == AS_COMMAND_UNLOCK) {
		/* Unlock doesn't require a lock first */
		ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
	} else {
		u64 lock_addr = lock_region(kbdev, vpfn, nr);

		/* Lock the region that needs to be updated */
		kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_LO),
				lock_addr & 0xFFFFFFFFUL, kctx);
		kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_HI),
				(lock_addr >> 32) & 0xFFFFFFFFUL, kctx);
		write_cmd(kbdev, as->number, AS_COMMAND_LOCK, kctx);

		/* Run the MMU operation */
		write_cmd(kbdev, as->number, op, kctx);

		/* Wait for the flush to complete */
		ret = wait_ready(kbdev, as->number, kctx);
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9630)) {
			/* Issue an UNLOCK command to ensure that valid page
			 * tables are re-read by the GPU after an update.
			 * The FLUSH command should perform all the necessary
			 * actions, but bus logs show that if multiple page
			 * faults occur within an 8-page region, the MMU does
			 * not always re-read the updated page table entries
			 * for later faults, or re-reads them only partially,
			 * and then raises the page fault IRQ again for the
			 * same addresses. The UNLOCK ensures the MMU cache is
			 * flushed so the updates can be re-read. As the
			 * region is now unlocked, two UNLOCK commands are
			 * needed to flush the MMU/uTLB. See PRLAM-9630.
			 */
			write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
			write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
		}
	}

	return ret;
}
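/*
 * Note on the IRQ bit layout used below (as implied by the shifts in
 * kbase_mmu_interrupt() above): MMU_PAGE_FAULT(n) selects bit n and
 * MMU_BUS_ERROR(n) selects bit (n + 16). For example, clearing a bus fault
 * on address space 2 writes MMU_PAGE_FAULT(2) | MMU_BUS_ERROR(2), i.e.
 * 0x4 | 0x40000 == 0x40004, to MMU_IRQ_CLEAR.
 */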
void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
		struct kbase_context *kctx, enum kbase_mmu_fault_type type)
{
	unsigned long flags;
	u32 pf_bf_mask;

	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);

	/*
	 * A reset is in-flight and we're flushing the IRQ + bottom half,
	 * so don't update anything as it could race with the reset code.
	 */
	if (kbdev->irq_reset_flush)
		goto unlock;

	/* Clear the page fault IRQ (and the bus fault IRQ as well in case
	 * one occurred) */
	pf_bf_mask = MMU_PAGE_FAULT(as->number);
	if (type == KBASE_MMU_FAULT_TYPE_BUS ||
			type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
		pf_bf_mask |= MMU_BUS_ERROR(as->number);

	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), pf_bf_mask, kctx);

unlock:
	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}
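/*
 * Sketch of how the two helpers pair up in a fault worker (hypothetical
 * caller, not part of this file): once a fault has been handled, the
 * worker would acknowledge the IRQ and then re-arm it for that address
 * space:
 *
 *	kbase_mmu_hw_clear_fault(kbdev, as, kctx, KBASE_MMU_FAULT_TYPE_PAGE);
 *	kbase_mmu_hw_enable_fault(kbdev, as, kctx, KBASE_MMU_FAULT_TYPE_PAGE);
 */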
void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
		struct kbase_context *kctx, enum kbase_mmu_fault_type type)
{
	unsigned long flags;
	u32 irq_mask;

	/* Enable the page fault IRQ (and the bus fault IRQ as well in case
	 * one occurred) */
	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);

	/*
	 * A reset is in-flight and we're flushing the IRQ + bottom half,
	 * so don't update anything as it could race with the reset code.
	 */
	if (kbdev->irq_reset_flush)
		goto unlock;

	irq_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), kctx) |
			MMU_PAGE_FAULT(as->number);

	if (type == KBASE_MMU_FAULT_TYPE_BUS ||
			type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
		irq_mask |= MMU_BUS_ERROR(as->number);

	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), irq_mask, kctx);

unlock:
	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}