/*
 *
 * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */
#include <linux/bitops.h>

#include <mali_kbase.h>
#include <mali_kbase_mem.h>
#include <mali_kbase_mmu_hw.h>
#include <mali_kbase_tlstream.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <mali_kbase_as_fault_debugfs.h>
static inline u64 lock_region(struct kbase_device *kbdev, u64 pfn,
		u32 num_pages)
{
	u64 region;

	/* can't lock a zero sized range */
	KBASE_DEBUG_ASSERT(num_pages);

	region = pfn << PAGE_SHIFT;
	/*
	 * fls returns (given the ASSERT above):
	 * 1 .. 32
	 *
	 * 10 + fls(num_pages)
	 * results in the range (11 .. 42)
	 */

	/* gracefully handle num_pages being zero, in case the ASSERT above
	 * is compiled out */
	if (num_pages == 0) {
		region |= 11;
	} else {
		u8 region_width;

		region_width = 10 + fls(num_pages);
		if (num_pages != (1ul << (region_width - 11))) {
			/* not pow2, so must go up to the next pow2 */
			region_width += 1;
		}
		KBASE_DEBUG_ASSERT(region_width <= KBASE_LOCK_REGION_MAX_SIZE);
		KBASE_DEBUG_ASSERT(region_width >= KBASE_LOCK_REGION_MIN_SIZE);
		region |= region_width;
	}

	return region;
}
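/*
 * Worked example (illustration only): for num_pages == 9, fls(9) == 4, so
 * region_width starts at 10 + 4 == 14; since 9 != (1ul << (14 - 11)) == 8
 * (not a power of two), it is bumped to 15. The returned value is then
 * (pfn << PAGE_SHIFT) | 15, i.e. the lock address with the size field
 * encoded in the low bits.
 */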
static int wait_ready(struct kbase_device *kbdev,
		unsigned int as_nr, struct kbase_context *kctx)
{
	unsigned int max_loops = KBASE_AS_INACTIVE_MAX_LOOPS;
	u32 val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx);

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending. Do not log remaining register accesses. */
	while (--max_loops && (val & AS_STATUS_AS_ACTIVE))
		val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), NULL);

	if (max_loops == 0) {
		dev_err(kbdev->dev, "AS_ACTIVE bit stuck\n");
		return -1;
	}

	/* If waiting in loop was performed, log last read value. */
	if (KBASE_AS_INACTIVE_MAX_LOOPS - 1 > max_loops)
		kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx);

	return 0;
}
static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd,
		struct kbase_context *kctx)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(kbdev, as_nr, kctx);
	if (status == 0)
		kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd,
				kctx);

	return status;
}
static void validate_protected_page_fault(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	/* GPUs which support (native) protected mode shall not report page
	 * fault addresses unless protected debug mode is available and
	 * turned on */
	u32 protected_debug_mode = 0;

	if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE))
		return;

	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
		protected_debug_mode = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(GPU_STATUS),
				kctx) & GPU_DBGEN;
	}

	if (!protected_debug_mode) {
		/* fault_addr should never be reported in protected mode.
		 * However, we just continue by printing an error message */
		dev_err(kbdev->dev, "Fault address reported in protected mode\n");
	}
}
void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
{
	const int num_as = 16;
	const int busfault_shift = MMU_PAGE_FAULT_FLAGS;
	const int pf_shift = 0;
	const unsigned long as_bit_mask = (1UL << num_as) - 1;
	unsigned long flags;
	u32 new_mask;
	u32 tmp;

	/* bus faults */
	u32 bf_bits = (irq_stat >> busfault_shift) & as_bit_mask;
	/* page faults (note: Ignore ASes with both pf and bf) */
	u32 pf_bits = ((irq_stat >> pf_shift) & as_bit_mask) & ~bf_bits;
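	/*
	 * Worked example (illustration only): with MMU_PAGE_FAULT_FLAGS == 16,
	 * an irq_stat of ((1 << 19) | (1 << 3) | (1 << 2)) decodes to
	 * bf_bits == (1 << 3) (bus fault on AS 3) and pf_bits == (1 << 2)
	 * (page fault on AS 2); the page fault bit for AS 3 is discarded
	 * because the bus fault on that AS takes precedence.
	 */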
	KBASE_DEBUG_ASSERT(NULL != kbdev);

	/* remember current mask */
	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
	new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
	/* mask interrupts for now */
	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL);
	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
	while (bf_bits | pf_bits) {
		struct kbase_as *as;
		int as_no;
		struct kbase_context *kctx;

		/*
		 * the while condition ensures we have a bit set, so no need
		 * to check for a not-found case
		 */
		as_no = ffs(bf_bits | pf_bits) - 1;
		as = &kbdev->as[as_no];

		/*
		 * Refcount the kctx ASAP - it shouldn't disappear anyway,
		 * since Bus/Page faults _should_ only occur whilst jobs are
		 * running, and a job causing the Bus/Page fault shouldn't
		 * complete until the MMU is updated
		 */
		kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_no);
		/* find faulting address */
		as->fault_addr = kbase_reg_read(kbdev,
				MMU_AS_REG(as_no, AS_FAULTADDRESS_HI),
				kctx);
		as->fault_addr <<= 32;
		as->fault_addr |= kbase_reg_read(kbdev,
				MMU_AS_REG(as_no, AS_FAULTADDRESS_LO),
				kctx);
		/* Mark the fault protected or not */
		as->protected_mode = kbdev->protected_mode;

		if (kbdev->protected_mode && as->fault_addr) {
			/* check if address reporting is allowed */
			validate_protected_page_fault(kbdev, kctx);
		}

		/* report the fault to debugfs */
		kbase_as_fault_debugfs_new(kbdev, as_no);
		/* record the fault status */
		as->fault_status = kbase_reg_read(kbdev,
				MMU_AS_REG(as_no, AS_FAULTSTATUS),
				kctx);

		/* find the fault type */
		as->fault_type = (bf_bits & (1 << as_no)) ?
				KBASE_MMU_FAULT_TYPE_BUS :
				KBASE_MMU_FAULT_TYPE_PAGE;
#ifdef CONFIG_MALI_GPU_MMU_AARCH64
		as->fault_extra_addr = kbase_reg_read(kbdev,
				MMU_AS_REG(as_no, AS_FAULTEXTRA_HI),
				kctx);
		as->fault_extra_addr <<= 32;
		as->fault_extra_addr |= kbase_reg_read(kbdev,
				MMU_AS_REG(as_no, AS_FAULTEXTRA_LO),
				kctx);
#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */
		if (kbase_as_has_bus_fault(as)) {
			/* Mark bus fault as handled.
			 * Note that a bus fault is processed first in the
			 * case where both a bus fault and a page fault occur.
			 */
			bf_bits &= ~(1UL << as_no);

			/* remove the queued BF (and PF) from the mask */
			new_mask &= ~(MMU_BUS_ERROR(as_no) |
					MMU_PAGE_FAULT(as_no));
		} else {
			/* Mark page fault as handled */
			pf_bits &= ~(1UL << as_no);

			/* remove the queued PF from the mask */
			new_mask &= ~MMU_PAGE_FAULT(as_no);
		}
		/* Process the interrupt for this address space */
		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
		kbase_mmu_interrupt_process(kbdev, kctx, as);
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	}
	/* reenable interrupts, merging any bits enabled in the meantime */
	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
	tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
	new_mask |= tmp;
	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask, NULL);
	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}
void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as,
		struct kbase_context *kctx)
{
	struct kbase_mmu_setup *current_setup = &as->current_setup;
	u32 transcfg = 0;

#ifdef CONFIG_MALI_GPU_MMU_AARCH64
	transcfg = current_setup->transcfg & 0xFFFFFFFFUL;

	/* Set flag AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK */
	/* Clear PTW_MEMATTR bits */
	transcfg &= ~AS_TRANSCFG_PTW_MEMATTR_MASK;
	/* Enable correct PTW_MEMATTR bits */
	transcfg |= AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK;

	if (kbdev->system_coherency == COHERENCY_ACE) {
		/* Set flag AS_TRANSCFG_PTW_SH_OS (outer shareable) */
		/* Clear PTW_SH bits */
		transcfg = (transcfg & ~AS_TRANSCFG_PTW_SH_MASK);
		/* Enable correct PTW_SH bits */
		transcfg = (transcfg | AS_TRANSCFG_PTW_SH_OS);
	}

	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_LO),
			transcfg, kctx);
	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_HI),
			(current_setup->transcfg >> 32) & 0xFFFFFFFFUL, kctx);
#else /* CONFIG_MALI_GPU_MMU_AARCH64 */

	if (kbdev->system_coherency == COHERENCY_ACE)
		current_setup->transtab |= AS_TRANSTAB_LPAE_SHARE_OUTER;

#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */

	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_LO),
			current_setup->transtab & 0xFFFFFFFFUL, kctx);
	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_HI),
			(current_setup->transtab >> 32) & 0xFFFFFFFFUL, kctx);

	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_LO),
			current_setup->memattr & 0xFFFFFFFFUL, kctx);
	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_HI),
			(current_setup->memattr >> 32) & 0xFFFFFFFFUL, kctx);
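	/*
	 * Worked example of the LO/HI split used above (illustration only):
	 * a 64-bit value 0x0000000812345000 is written as
	 * LO == 0x12345000 (value & 0xFFFFFFFFUL) and
	 * HI == 0x00000008 ((value >> 32) & 0xFFFFFFFFUL).
	 */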
	kbase_tlstream_tl_attrib_as_config(as,
			current_setup->transtab,
			current_setup->memattr,
			transcfg);

	write_cmd(kbdev, as->number, AS_COMMAND_UPDATE, kctx);
}
int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
		struct kbase_context *kctx, u64 vpfn, u32 nr, u32 op,
		unsigned int handling_irq)
{
	int ret;

	lockdep_assert_held(&kbdev->mmu_hw_mutex);

	if (op == AS_COMMAND_UNLOCK) {
		/* Unlock doesn't require a lock first */
		ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
	} else {
		u64 lock_addr = lock_region(kbdev, vpfn, nr);

		/* Lock the region that needs to be updated */
		kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_LO),
				lock_addr & 0xFFFFFFFFUL, kctx);
		kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_HI),
				(lock_addr >> 32) & 0xFFFFFFFFUL, kctx);
		write_cmd(kbdev, as->number, AS_COMMAND_LOCK, kctx);

		/* Run the MMU operation */
		write_cmd(kbdev, as->number, op, kctx);

		/* Wait for the flush to complete */
		ret = wait_ready(kbdev, as->number, kctx);
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9630)) {
			/* Issue an UNLOCK command to ensure that valid page
			 * tables are re-read by the GPU after an update.
			 * The FLUSH command should perform all the necessary
			 * actions; however, the bus logs show that if
			 * multiple page faults occur within an 8-page region
			 * the MMU does not always re-read the updated page
			 * table entries for later faults, or reads them only
			 * partially, and subsequently raises the page fault
			 * IRQ for the same addresses. The UNLOCK ensures that
			 * the MMU cache is flushed so the updates can be
			 * re-read. As the region is now unlocked, we need to
			 * issue 2 UNLOCK commands in order to flush the
			 * MMU/uTLB.
			 */
			write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
			write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
		}
	}

	return ret;
}
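/*
 * Usage sketch (hypothetical caller, for illustration only): after the CPU
 * has updated the page tables for [vpfn, vpfn + nr), the GPU's view might
 * be synchronised with
 *
 *	err = kbase_mmu_hw_do_operation(kbdev, as, kctx, vpfn, nr,
 *			AS_COMMAND_FLUSH_PT, handling_irq);
 *
 * which locks the region, issues the flush, and waits for the MMU to go
 * idle, applying the BASE_HW_ISSUE_9630 double-UNLOCK workaround where
 * needed.
 */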
void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
		struct kbase_context *kctx, enum kbase_mmu_fault_type type)
{
	unsigned long flags;
	u32 pf_bf_mask;

	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);

	/*
	 * If a reset is in-flight and we're flushing the IRQ + bottom half,
	 * don't update anything, as it could race with the reset code.
	 */
	if (kbdev->irq_reset_flush)
		goto unlock;

	/* Clear the page (and bus fault IRQ as well in case one occurred) */
	pf_bf_mask = MMU_PAGE_FAULT(as->number);
	if (type == KBASE_MMU_FAULT_TYPE_BUS ||
			type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
		pf_bf_mask |= MMU_BUS_ERROR(as->number);

	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), pf_bf_mask, kctx);

unlock:
	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}
void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
		struct kbase_context *kctx, enum kbase_mmu_fault_type type)
{
	unsigned long flags;
	u32 irq_mask;

	/* Enable the page fault IRQ (and bus fault IRQ as well in case one
	 * occurred) */
	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);

	/*
	 * If a reset is in-flight and we're flushing the IRQ + bottom half,
	 * don't update anything, as it could race with the reset code.
	 */
	if (kbdev->irq_reset_flush)
		goto unlock;

	irq_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), kctx) |
			MMU_PAGE_FAULT(as->number);

	if (type == KBASE_MMU_FAULT_TYPE_BUS ||
			type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
		irq_mask |= MMU_BUS_ERROR(as->number);

	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), irq_mask, kctx);

unlock:
	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}
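/*
 * Pairing note (illustration only, an assumption about how the fault
 * bottom halves use these helpers): kbase_mmu_hw_clear_fault() acknowledges
 * the fault via MMU_IRQ_CLEAR, while kbase_mmu_hw_enable_fault() re-enables
 * delivery via MMU_IRQ_MASK once the fault has been serviced, e.g.:
 *
 *	kbase_mmu_hw_clear_fault(kbdev, as, kctx, KBASE_MMU_FAULT_TYPE_PAGE);
 *	(service the fault, e.g. grow the faulting region)
 *	kbase_mmu_hw_enable_fault(kbdev, as, kctx, KBASE_MMU_FAULT_TYPE_PAGE);
 */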