3 * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
10 * A copy of the licence is included with the program, and can also be obtained
11 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12 * Boston, MA 02110-1301, USA.
21 * @file mali_kbase_mmu.c
22 * Base kernel MMU management.
26 #include <linux/kernel.h>
27 #include <linux/dma-mapping.h>
28 #include <mali_kbase.h>
29 #include <mali_midg_regmap.h>
30 #if defined(CONFIG_MALI_GATOR_SUPPORT)
31 #include <mali_kbase_gator.h>
33 #if defined(CONFIG_MALI_MIPE_ENABLED)
34 #include <mali_kbase_tlstream.h>
36 #include <mali_kbase_debug.h>
38 #define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
40 #include <mali_kbase_defs.h>
41 #include <mali_kbase_hw.h>
42 #include <mali_kbase_mmu_hw.h>
43 #include <mali_kbase_hwaccess_jm.h>
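/* Each page table level occupies one 4 kB page of 64-bit descriptors, so a
 * level holds 4096 / 8 = 512 entries. */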
45 #define KBASE_MMU_PAGE_ENTRIES 512
48 * kbase_mmu_sync_pgd - sync page directory to memory
49 * @dev: Device pointer.
50 * @handle: Address of DMA region.
51 * @size: Size of the region to sync.
53 * This should be called after each page directory update.
56 static void kbase_mmu_sync_pgd(struct device *dev,
57 dma_addr_t handle, size_t size)
60 dma_sync_single_for_device(dev, handle, size, DMA_TO_DEVICE);
65 * - PGD: Page Directory.
66 * - PTE: Page Table Entry. A 64bit value pointing to the next
67 * level of translation
68 * - ATE: Address Translation Entry. A 64bit value pointing to
69 * a 4kB physical page.
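 * The table walk in this file is four levels deep: level 0 is the
 * top-most (see MIDGARD_MMU_TOPLEVEL) and the level-3 tables hold the
 * ATEs, as assumed by mmu_get_bottom_pgd() below.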
72 static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
73 struct kbase_as *as, const char *reason_str);
76 static size_t make_multiple(size_t minimum, size_t multiple)
78 size_t remainder = minimum % multiple;
83 return minimum + multiple - remainder;
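/* Illustrative example: make_multiple(13, 8) computes remainder 5 and
 * returns 13 + 8 - 5 = 16, i.e. the first multiple of 8 at or above 13. */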
86 void page_fault_worker(struct work_struct *data)
92 struct kbase_as *faulting_as;
94 struct kbase_context *kctx;
95 struct kbase_device *kbdev;
96 struct kbase_va_region *region;
100 faulting_as = container_of(data, struct kbase_as, work_pagefault);
101 fault_pfn = faulting_as->fault_addr >> PAGE_SHIFT;
102 as_no = faulting_as->number;
104 kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);
106 /* Grab the context that was already refcounted in kbase_mmu_interrupt().
107 * Therefore, it cannot be scheduled out of this AS until we explicitly release it
109 kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);
110 if (WARN_ON(!kctx)) {
111 atomic_dec(&kbdev->faults_pending);
115 KBASE_DEBUG_ASSERT(kctx->kbdev == kbdev);
117 fault_status = faulting_as->fault_status;
118 switch (fault_status & AS_FAULTSTATUS_EXCEPTION_CODE_MASK) {
120 case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSLATION_FAULT:
121 /* need to check against the region to handle this one */
124 case AS_FAULTSTATUS_EXCEPTION_CODE_PERMISSION_FAULT:
125 kbase_mmu_report_fault_and_kill(kctx, faulting_as,
126 "Permission failure");
129 case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT:
130 kbase_mmu_report_fault_and_kill(kctx, faulting_as,
131 "Tranlation table bus fault");
134 case AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG:
135 /* nothing to do, but we don't expect this fault currently */
136 dev_warn(kbdev->dev, "Access flag unexpectedly set");
141 kbase_mmu_report_fault_and_kill(kctx, faulting_as,
142 "Unknown fault code");
146 /* so we have a translation fault, let's see if it is for growable
148 kbase_gpu_vm_lock(kctx);
150 region = kbase_region_tracker_find_region_enclosing_address(kctx,
151 faulting_as->fault_addr);
152 if (!region || region->flags & KBASE_REG_FREE) {
153 kbase_gpu_vm_unlock(kctx);
154 kbase_mmu_report_fault_and_kill(kctx, faulting_as,
155 "Memory is not mapped on the GPU");
159 if ((region->flags & GROWABLE_FLAGS_REQUIRED)
160 != GROWABLE_FLAGS_REQUIRED) {
161 kbase_gpu_vm_unlock(kctx);
162 kbase_mmu_report_fault_and_kill(kctx, faulting_as,
163 "Memory is not growable");
167 /* find the size we need to grow it by */
168 /* we know the result fits in a size_t because kbase_region_tracker_find_region_enclosing_address
169 * has validated that fault_address is within a size_t of start_pfn */
170 fault_rel_pfn = fault_pfn - region->start_pfn;
172 if (fault_rel_pfn < kbase_reg_current_backed_size(region)) {
173 dev_dbg(kbdev->dev, "Page fault @ 0x%llx in allocated region 0x%llx-0x%llx of growable TMEM: Ignoring",
174 faulting_as->fault_addr, region->start_pfn,
176 kbase_reg_current_backed_size(region));
178 kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
179 KBASE_MMU_FAULT_TYPE_PAGE);
180 /* [1] in case another page fault occurred while we were
181 * handling the (duplicate) page fault we need to ensure we
182 * don't lose the other page fault as a result of us clearing
183 * the MMU IRQ. Therefore, after we clear the MMU IRQ we send
184 * an UNLOCK command that will retry any stalled memory
185 * transaction (which should cause the other page fault to be
188 kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0,
189 AS_COMMAND_UNLOCK, 1);
190 kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
191 KBASE_MMU_FAULT_TYPE_PAGE);
192 kbase_gpu_vm_unlock(kctx);
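/* Worked example (illustrative; the grow-increment argument to
 * make_multiple() is elided here): with 8 pages currently backed and a
 * fault landing 10 pages into the region, fault_rel_pfn - backed + 1 = 3
 * pages are needed, which make_multiple() then rounds up to the region's
 * grow increment. */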
197 new_pages = make_multiple(fault_rel_pfn -
198 kbase_reg_current_backed_size(region) + 1,
201 /* cap to max vsize */
202 if (new_pages + kbase_reg_current_backed_size(region) >
204 new_pages = region->nr_pages -
205 kbase_reg_current_backed_size(region);
207 if (0 == new_pages) {
208 /* Duplicate of a fault we've already handled, nothing to do */
209 kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
210 KBASE_MMU_FAULT_TYPE_PAGE);
211 /* See comment [1] about UNLOCK usage */
212 kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0,
213 AS_COMMAND_UNLOCK, 1);
214 kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
215 KBASE_MMU_FAULT_TYPE_PAGE);
216 kbase_gpu_vm_unlock(kctx);
220 if (kbase_alloc_phy_pages_helper(region->gpu_alloc, new_pages) == 0) {
221 if (region->gpu_alloc != region->cpu_alloc) {
222 if (kbase_alloc_phy_pages_helper(
223 region->cpu_alloc, new_pages) == 0) {
226 kbase_free_phy_pages_helper(region->gpu_alloc,
239 KBASE_DEBUG_ASSERT(kbase_reg_current_backed_size(region) <= region->nr_pages);
241 /* AS transaction begin */
242 mutex_lock(&faulting_as->transaction_mutex);
244 /* set up the new pages */
245 err = kbase_mmu_insert_pages(kctx, region->start_pfn + kbase_reg_current_backed_size(region) - new_pages, &kbase_get_gpu_phy_pages(region)[kbase_reg_current_backed_size(region) - new_pages], new_pages, region->flags);
247 /* failed to insert pages, handle as a normal PF */
248 mutex_unlock(&faulting_as->transaction_mutex);
249 kbase_free_phy_pages_helper(region->gpu_alloc, new_pages);
250 if (region->gpu_alloc != region->cpu_alloc)
251 kbase_free_phy_pages_helper(region->cpu_alloc,
253 kbase_gpu_vm_unlock(kctx);
254 /* The locked VA region will be unlocked and the cache invalidated in here */
255 kbase_mmu_report_fault_and_kill(kctx, faulting_as,
256 "Page table update failure");
259 #if defined(CONFIG_MALI_GATOR_SUPPORT)
260 kbase_trace_mali_page_fault_insert_pages(as_no, new_pages);
262 #if defined(CONFIG_MALI_MIPE_ENABLED)
263 kbase_tlstream_aux_pagefault(
265 atomic_read(&kctx->used_pages));
268 /* flush L2 and unlock the VA (resumes the MMU) */
269 if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
270 op = AS_COMMAND_FLUSH;
272 op = AS_COMMAND_FLUSH_PT;
274 /* clear MMU interrupt - this needs to be done after updating
275 * the page tables but before issuing a FLUSH command. The
276 * FLUSH cmd has a side effect that it restarts stalled memory
277 * transactions in other address spaces which may cause
278 * another fault to occur. If we didn't clear the interrupt at
279 * this stage a new IRQ might not be raised when the GPU finds
280 * an MMU IRQ is already pending.
282 kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
283 KBASE_MMU_FAULT_TYPE_PAGE);
285 kbase_mmu_hw_do_operation(kbdev, faulting_as, kctx,
286 faulting_as->fault_addr >> PAGE_SHIFT,
290 mutex_unlock(&faulting_as->transaction_mutex);
291 /* AS transaction end */
293 /* reenable this in the mask */
294 kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
295 KBASE_MMU_FAULT_TYPE_PAGE);
296 kbase_gpu_vm_unlock(kctx);
298 /* failed to extend, handle as a normal PF */
299 kbase_gpu_vm_unlock(kctx);
300 kbase_mmu_report_fault_and_kill(kctx, faulting_as,
301 "Page allocation failure");
306 * By this point, the fault was handled in some way,
307 * so release the ctx refcount
309 kbasep_js_runpool_release_ctx(kbdev, kctx);
311 atomic_dec(&kbdev->faults_pending);
314 phys_addr_t kbase_mmu_alloc_pgd(struct kbase_context *kctx)
320 KBASE_DEBUG_ASSERT(NULL != kctx);
321 kbase_atomic_add_pages(1, &kctx->used_pages);
322 kbase_atomic_add_pages(1, &kctx->kbdev->memdev.used_pages);
324 p = kbase_mem_pool_alloc(&kctx->mem_pool);
332 kbase_process_page_usage_inc(kctx, 1);
334 for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++)
335 kctx->kbdev->mmu_mode->entry_invalidate(&page[i]);
337 kbase_mmu_sync_pgd(kctx->kbdev->dev, kbase_dma_addr(p), PAGE_SIZE);
340 return page_to_phys(p);
343 kbase_mem_pool_free(&kctx->mem_pool, p, false);
345 kbase_atomic_sub_pages(1, &kctx->used_pages);
346 kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages);
351 KBASE_EXPORT_TEST_API(kbase_mmu_alloc_pgd);
353 /* Given PGD PFN for level N, return PGD PFN for level N+1 */
354 static phys_addr_t mmu_get_next_pgd(struct kbase_context *kctx, phys_addr_t pgd, u64 vpfn, int level)
357 phys_addr_t target_pgd;
360 KBASE_DEBUG_ASSERT(pgd);
361 KBASE_DEBUG_ASSERT(NULL != kctx);
363 lockdep_assert_held(&kctx->reg_lock);
366 * Architecture spec defines level-0 as being the top-most.
367 * This is a bit unfortunate here, but we keep the same convention.
369 vpfn >>= (3 - level) * 9;
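/* Each level resolves 9 bits of the virtual PFN (512 = 2^9 entries per
 * table), so this shift leaves the index for the requested level in the
 * low bits of vpfn. */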
372 p = pfn_to_page(PFN_DOWN(pgd));
375 dev_warn(kctx->kbdev->dev, "mmu_get_next_pgd: kmap failure\n");
379 target_pgd = kctx->kbdev->mmu_mode->pte_to_phy_addr(page[vpfn]);
382 target_pgd = kbase_mmu_alloc_pgd(kctx);
384 dev_warn(kctx->kbdev->dev, "mmu_get_next_pgd: kbase_mmu_alloc_pgd failure\n");
389 kctx->kbdev->mmu_mode->entry_set_pte(&page[vpfn], target_pgd);
391 kbase_mmu_sync_pgd(kctx->kbdev->dev,
392 kbase_dma_addr(p), PAGE_SIZE);
393 /* Rely on the caller to update the address space flags. */
400 static phys_addr_t mmu_get_bottom_pgd(struct kbase_context *kctx, u64 vpfn)
407 for (l = MIDGARD_MMU_TOPLEVEL; l < 3; l++) {
408 pgd = mmu_get_next_pgd(kctx, pgd, vpfn, l);
409 /* Handle failure condition */
411 dev_warn(kctx->kbdev->dev, "mmu_get_bottom_pgd: mmu_get_next_pgd failure\n");
419 static phys_addr_t mmu_insert_pages_recover_get_next_pgd(struct kbase_context *kctx, phys_addr_t pgd, u64 vpfn, int level)
422 phys_addr_t target_pgd;
424 KBASE_DEBUG_ASSERT(pgd);
425 KBASE_DEBUG_ASSERT(NULL != kctx);
427 lockdep_assert_held(&kctx->reg_lock);
430 * Architecture spec defines level-0 as being the top-most.
431 * This is a bit unfortunate here, but we keep the same convention.
433 vpfn >>= (3 - level) * 9;
436 page = kmap_atomic(pfn_to_page(PFN_DOWN(pgd)));
437 /* kmap_atomic should NEVER fail */
438 KBASE_DEBUG_ASSERT(NULL != page);
440 target_pgd = kctx->kbdev->mmu_mode->pte_to_phy_addr(page[vpfn]);
441 /* As we are recovering from what has already been set up, we should have a target_pgd */
442 KBASE_DEBUG_ASSERT(0 != target_pgd);
447 static phys_addr_t mmu_insert_pages_recover_get_bottom_pgd(struct kbase_context *kctx, u64 vpfn)
454 for (l = MIDGARD_MMU_TOPLEVEL; l < 3; l++) {
455 pgd = mmu_insert_pages_recover_get_next_pgd(kctx, pgd, vpfn, l);
456 /* Should never fail */
457 KBASE_DEBUG_ASSERT(0 != pgd);
463 static void mmu_insert_pages_failure_recovery(struct kbase_context *kctx, u64 vpfn,
468 struct kbase_mmu_mode const *mmu_mode;
470 KBASE_DEBUG_ASSERT(NULL != kctx);
471 KBASE_DEBUG_ASSERT(0 != vpfn);
472 /* 64-bit address range is the max */
473 KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
475 lockdep_assert_held(&kctx->reg_lock);
477 mmu_mode = kctx->kbdev->mmu_mode;
481 unsigned int index = vpfn & 0x1FF;
482 unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
488 pgd = mmu_insert_pages_recover_get_bottom_pgd(kctx, vpfn);
489 KBASE_DEBUG_ASSERT(0 != pgd);
491 p = pfn_to_page(PFN_DOWN(pgd));
493 pgd_page = kmap_atomic(p);
494 KBASE_DEBUG_ASSERT(NULL != pgd_page);
496 /* Invalidate the entries we added */
497 for (i = 0; i < count; i++)
498 mmu_mode->entry_invalidate(&pgd_page[index + i]);
503 kbase_mmu_sync_pgd(kctx->kbdev->dev,
507 kunmap_atomic(pgd_page);
512 * Map the single page 'phys' 'nr' times, starting at GPU PFN 'vpfn'
514 int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
515 phys_addr_t phys, size_t nr,
520 /* In case the insert_single_page only partially completes we need to be
522 bool recover_required = false;
523 u64 recover_vpfn = vpfn;
524 size_t recover_count = 0;
526 KBASE_DEBUG_ASSERT(NULL != kctx);
527 KBASE_DEBUG_ASSERT(0 != vpfn);
528 /* 64-bit address range is the max */
529 KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
531 lockdep_assert_held(&kctx->reg_lock);
535 unsigned int index = vpfn & 0x1FF;
536 unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
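/* index is the slot within the bottom-level table for this vpfn; count
 * limits one pass to the end of that 512-entry table (and is presumably
 * clamped to the remaining 'nr' in the elided lines), with the outer loop
 * advancing vpfn by count. */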
543 * Repeatedly calling mmu_get_bottom_pgd() is clearly
544 * suboptimal. We don't have to re-parse the whole tree
545 * each time (just cache the l0-l2 sequence).
546 * On the other hand, it's only a gain when we map more than
547 * 256 pages at once (on average). Do we really care?
549 pgd = mmu_get_bottom_pgd(kctx, vpfn);
551 dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: mmu_get_bottom_pgd failure\n");
552 if (recover_required) {
553 /* Invalidate the pages we have partially
555 mmu_insert_pages_failure_recovery(kctx,
562 p = pfn_to_page(PFN_DOWN(pgd));
565 dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: kmap failure\n");
566 if (recover_required) {
567 /* Invalidate the pages we have partially
569 mmu_insert_pages_failure_recovery(kctx,
576 for (i = 0; i < count; i++) {
577 unsigned int ofs = index + i;
579 KBASE_DEBUG_ASSERT(0 == (pgd_page[ofs] & 1UL));
580 kctx->kbdev->mmu_mode->entry_set_ate(&pgd_page[ofs],
587 kbase_mmu_sync_pgd(kctx->kbdev->dev,
589 (index * sizeof(u64)),
590 count * sizeof(u64));
593 /* We have started modifying the page table.
594 * If inserting further pages fails, we need to undo what
595 * has already taken place */
596 recover_required = true;
597 recover_count += count;
603 * Map 'nr' pages pointed to by 'phys' at GPU PFN 'vpfn'
605 int kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn,
606 phys_addr_t *phys, size_t nr,
611 /* In case the insert_pages only partially completes we need to be able
613 bool recover_required = false;
614 u64 recover_vpfn = vpfn;
615 size_t recover_count = 0;
617 KBASE_DEBUG_ASSERT(NULL != kctx);
618 KBASE_DEBUG_ASSERT(0 != vpfn);
619 /* 64-bit address range is the max */
620 KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
622 lockdep_assert_held(&kctx->reg_lock);
626 unsigned int index = vpfn & 0x1FF;
627 unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
634 * Repeatedly calling mmu_get_bottom_pgd() is clearly
635 * suboptimal. We don't have to re-parse the whole tree
636 * each time (just cache the l0-l2 sequence).
637 * On the other hand, it's only a gain when we map more than
638 * 256 pages at once (on average). Do we really care?
640 pgd = mmu_get_bottom_pgd(kctx, vpfn);
642 dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: mmu_get_bottom_pgd failure\n");
643 if (recover_required) {
644 /* Invalidate the pages we have partially
646 mmu_insert_pages_failure_recovery(kctx,
653 p = pfn_to_page(PFN_DOWN(pgd));
656 dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: kmap failure\n");
657 if (recover_required) {
658 /* Invalidate the pages we have partially
660 mmu_insert_pages_failure_recovery(kctx,
667 for (i = 0; i < count; i++) {
668 unsigned int ofs = index + i;
670 KBASE_DEBUG_ASSERT(0 == (pgd_page[ofs] & 1UL));
671 kctx->kbdev->mmu_mode->entry_set_ate(&pgd_page[ofs],
679 kbase_mmu_sync_pgd(kctx->kbdev->dev,
681 (index * sizeof(u64)),
682 count * sizeof(u64));
685 /* We have started modifying the page table. If inserting further
686 * pages fails, we need to undo what has already
688 recover_required = true;
689 recover_count += count;
694 KBASE_EXPORT_TEST_API(kbase_mmu_insert_pages);
697 * This function is responsible for validating the MMU PTs and
698 * triggering the required flushes.
700 * IMPORTANT: This uses kbasep_js_runpool_release_ctx() when the context is
701 * currently scheduled into the runpool, and so potentially uses a lot of locks.
702 * These locks must be taken in the correct order with respect to others
703 * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more
706 static void kbase_mmu_flush(struct kbase_context *kctx, u64 vpfn, size_t nr)
708 struct kbase_device *kbdev;
709 bool ctx_is_in_runpool;
711 KBASE_DEBUG_ASSERT(NULL != kctx);
715 /* We must flush if we're currently running jobs. At the very least, we need to retain the
716 * context to ensure it doesn't schedule out whilst we're trying to flush it */
717 ctx_is_in_runpool = kbasep_js_runpool_retain_ctx(kbdev, kctx);
719 if (ctx_is_in_runpool) {
720 KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
722 /* The second-level check tries to limit this to when jobs are actually
723 * running; the refcount is a heuristic for this. */
724 if (kbdev->js_data.runpool_irq.per_as_data[kctx->as_nr].as_busy_refcount >= 2) {
725 if (!kbase_pm_context_active_handle_suspend(kbdev,
726 KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
730 /* AS transaction begin */
731 mutex_lock(&kbdev->as[
732 kctx->as_nr].transaction_mutex);
734 if (kbase_hw_has_issue(kbdev,
736 op = AS_COMMAND_FLUSH;
738 op = AS_COMMAND_FLUSH_MEM;
740 ret = kbase_mmu_hw_do_operation(kbdev,
741 &kbdev->as[kctx->as_nr],
744 #if KBASE_GPU_RESET_EN
746 /* Flush failed to complete, assume the
747 * GPU has hung and perform a reset to
749 dev_err(kbdev->dev, "Flush for GPU page table update did not complete. Issuing GPU soft-reset to recover\n");
750 if (kbase_prepare_to_reset_gpu(kbdev))
751 kbase_reset_gpu(kbdev);
753 #endif /* KBASE_GPU_RESET_EN */
755 mutex_unlock(&kbdev->as[
756 kctx->as_nr].transaction_mutex);
757 /* AS transaction end */
759 kbase_pm_context_idle(kbdev);
762 kbasep_js_runpool_release_ctx(kbdev, kctx);
767 * We actually only discard the ATE, and not the page table
768 * pages. There is a potential DoS here, as we'll leak memory by
769 * having PTEs that are potentially unused. Fixing this will require physical
770 * page accounting, so that MMU pages are part of the process allocation.
772 * IMPORTANT: This uses kbasep_js_runpool_release_ctx() when the context is
773 * currently scheduled into the runpool, and so potentially uses a lot of locks.
774 * These locks must be taken in the correct order with respect to others
775 * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more
778 int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr)
782 struct kbase_device *kbdev;
783 size_t requested_nr = nr;
784 struct kbase_mmu_mode const *mmu_mode;
786 KBASE_DEBUG_ASSERT(NULL != kctx);
787 beenthere(kctx, "kctx %p vpfn %lx nr %zd", (void *)kctx, (unsigned long)vpfn, nr);
789 lockdep_assert_held(&kctx->reg_lock);
792 /* early out if nothing to do */
797 mmu_mode = kbdev->mmu_mode;
801 unsigned int index = vpfn & 0x1FF;
802 unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
808 pgd = mmu_get_bottom_pgd(kctx, vpfn);
810 dev_warn(kbdev->dev, "kbase_mmu_teardown_pages: mmu_get_bottom_pgd failure\n");
814 p = pfn_to_page(PFN_DOWN(pgd));
817 dev_warn(kbdev->dev, "kbase_mmu_teardown_pages: kmap failure\n");
821 for (i = 0; i < count; i++)
822 mmu_mode->entry_invalidate(&pgd_page[index + i]);
827 kbase_mmu_sync_pgd(kctx->kbdev->dev,
829 (index * sizeof(u64)),
830 count * sizeof(u64));
835 kbase_mmu_flush(kctx, vpfn, requested_nr);
839 KBASE_EXPORT_TEST_API(kbase_mmu_teardown_pages);
842 * Update the entries for the specified number of pages pointed to by 'phys' at GPU PFN 'vpfn'.
843 * This call is triggered in response to a change of the memory attributes.
845 * @pre : The caller is responsible for validating the memory attributes
847 * IMPORTANT: This uses kbasep_js_runpool_release_ctx() when the context is
848 * currently scheduled into the runpool, and so potentially uses a lot of locks.
849 * These locks must be taken in the correct order with respect to others
850 * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more
853 int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr, unsigned long flags)
857 size_t requested_nr = nr;
858 struct kbase_mmu_mode const *mmu_mode;
860 KBASE_DEBUG_ASSERT(NULL != kctx);
861 KBASE_DEBUG_ASSERT(0 != vpfn);
862 KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
864 lockdep_assert_held(&kctx->reg_lock);
866 mmu_mode = kctx->kbdev->mmu_mode;
868 dev_warn(kctx->kbdev->dev, "kbase_mmu_update_pages(): updating page share flags on GPU PFN 0x%llx from phys %p, %zu pages",
873 unsigned int index = vpfn & 0x1FF;
874 size_t count = KBASE_MMU_PAGE_ENTRIES - index;
880 pgd = mmu_get_bottom_pgd(kctx, vpfn);
882 dev_warn(kctx->kbdev->dev, "mmu_get_bottom_pgd failure\n");
886 p = pfn_to_page(PFN_DOWN(pgd));
889 dev_warn(kctx->kbdev->dev, "kmap failure\n");
893 for (i = 0; i < count; i++)
894 mmu_mode->entry_set_ate(&pgd_page[index + i], phys[i],
901 kbase_mmu_sync_pgd(kctx->kbdev->dev,
903 (index * sizeof(u64)),
904 count * sizeof(u64));
906 kunmap(pfn_to_page(PFN_DOWN(pgd)));
909 kbase_mmu_flush(kctx, vpfn, requested_nr);
914 /* This is a debug feature only */
915 static void mmu_check_unused(struct kbase_context *kctx, phys_addr_t pgd)
920 page = kmap_atomic(pfn_to_page(PFN_DOWN(pgd)));
921 /* kmap_atomic should NEVER fail. */
922 KBASE_DEBUG_ASSERT(NULL != page);
924 for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) {
925 if (kctx->kbdev->mmu_mode->ate_is_valid(page[i]))
926 beenthere(kctx, "live pte %016lx", (unsigned long)page[i]);
931 static void mmu_teardown_level(struct kbase_context *kctx, phys_addr_t pgd, int level, int zap, u64 *pgd_page_buffer)
933 phys_addr_t target_pgd;
936 struct kbase_mmu_mode const *mmu_mode;
938 KBASE_DEBUG_ASSERT(NULL != kctx);
939 lockdep_assert_held(&kctx->reg_lock);
941 pgd_page = kmap_atomic(pfn_to_page(PFN_DOWN(pgd)));
942 /* kmap_atomic should NEVER fail. */
943 KBASE_DEBUG_ASSERT(NULL != pgd_page);
944 /* Copy the page to our preallocated buffer so that we can minimize kmap_atomic usage */
945 memcpy(pgd_page_buffer, pgd_page, PAGE_SIZE);
946 kunmap_atomic(pgd_page);
947 pgd_page = pgd_page_buffer;
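/* Recursive calls below pass the next PAGE_SIZE-sized slice of the
 * preallocated buffer, one slice per MMU level (four pages are allocated in
 * kbase_mmu_init()). */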
949 mmu_mode = kctx->kbdev->mmu_mode;
951 for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) {
952 target_pgd = mmu_mode->pte_to_phy_addr(pgd_page[i]);
956 mmu_teardown_level(kctx, target_pgd, level + 1, zap, pgd_page_buffer + (PAGE_SIZE / sizeof(u64)));
959 * So target_pgd is a level-3 page.
960 * As a leaf, it is safe to free it.
961 * Unless we have live pages attached to it!
963 mmu_check_unused(kctx, target_pgd);
966 beenthere(kctx, "pte %lx level %d", (unsigned long)target_pgd, level + 1);
968 struct page *p = phys_to_page(target_pgd);
970 kbase_mem_pool_free(&kctx->mem_pool, p, true);
971 kbase_process_page_usage_dec(kctx, 1);
972 kbase_atomic_sub_pages(1, &kctx->used_pages);
973 kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages);
979 int kbase_mmu_init(struct kbase_context *kctx)
981 KBASE_DEBUG_ASSERT(NULL != kctx);
982 KBASE_DEBUG_ASSERT(NULL == kctx->mmu_teardown_pages);
984 /* Preallocate one page per MMU level (four pages) for mmu_teardown_level to use */
985 kctx->mmu_teardown_pages = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
987 if (NULL == kctx->mmu_teardown_pages)
993 void kbase_mmu_term(struct kbase_context *kctx)
995 KBASE_DEBUG_ASSERT(NULL != kctx);
996 KBASE_DEBUG_ASSERT(NULL != kctx->mmu_teardown_pages);
998 kfree(kctx->mmu_teardown_pages);
999 kctx->mmu_teardown_pages = NULL;
1002 void kbase_mmu_free_pgd(struct kbase_context *kctx)
1004 KBASE_DEBUG_ASSERT(NULL != kctx);
1005 KBASE_DEBUG_ASSERT(NULL != kctx->mmu_teardown_pages);
1007 lockdep_assert_held(&kctx->reg_lock);
1009 mmu_teardown_level(kctx, kctx->pgd, MIDGARD_MMU_TOPLEVEL, 1, kctx->mmu_teardown_pages);
1011 beenthere(kctx, "pgd %lx", (unsigned long)kctx->pgd);
1012 kbase_mem_pool_free(&kctx->mem_pool, phys_to_page(kctx->pgd), true);
1013 kbase_process_page_usage_dec(kctx, 1);
1014 kbase_atomic_sub_pages(1, &kctx->used_pages);
1015 kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages);
1018 KBASE_EXPORT_TEST_API(kbase_mmu_free_pgd);
1020 static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd, int level, char ** const buffer, size_t *size_left)
1022 phys_addr_t target_pgd;
1025 size_t size = KBASE_MMU_PAGE_ENTRIES * sizeof(u64) + sizeof(u64);
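/* Each level contributes an 8-byte header word (the PGD physical address
 * with the level number or'd into its low bits, see m_pgd below) followed
 * by the 512 eight-byte entries of that table. */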
1027 struct kbase_mmu_mode const *mmu_mode;
1029 KBASE_DEBUG_ASSERT(NULL != kctx);
1030 lockdep_assert_held(&kctx->reg_lock);
1032 mmu_mode = kctx->kbdev->mmu_mode;
1034 pgd_page = kmap(pfn_to_page(PFN_DOWN(pgd)));
1036 dev_warn(kctx->kbdev->dev, "kbasep_mmu_dump_level: kmap failure\n");
1040 if (*size_left >= size) {
1041 /* A modified physical address that contains the page table level */
1042 u64 m_pgd = pgd | level;
1044 /* Put the modified physical address in the output buffer */
1045 memcpy(*buffer, &m_pgd, sizeof(m_pgd));
1046 *buffer += sizeof(m_pgd);
1048 /* Followed by the page table itself */
1049 memcpy(*buffer, pgd_page, sizeof(u64) * KBASE_MMU_PAGE_ENTRIES);
1050 *buffer += sizeof(u64) * KBASE_MMU_PAGE_ENTRIES;
1055 for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) {
1056 if (mmu_mode->pte_is_valid(pgd_page[i])) {
1057 target_pgd = mmu_mode->pte_to_phy_addr(pgd_page[i]);
1059 dump_size = kbasep_mmu_dump_level(kctx, target_pgd, level + 1, buffer, size_left);
1061 kunmap(pfn_to_page(PFN_DOWN(pgd)));
1068 kunmap(pfn_to_page(PFN_DOWN(pgd)));
1073 void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
1078 KBASE_DEBUG_ASSERT(kctx);
1080 lockdep_assert_held(&kctx->reg_lock);
1082 if (0 == nr_pages) {
1083 /* can't dump in a 0 sized buffer, early out */
1087 size_left = nr_pages * PAGE_SIZE;
1089 KBASE_DEBUG_ASSERT(0 != size_left);
1090 kaddr = vmalloc_user(size_left);
1093 u64 end_marker = 0xFFULL;
1095 char *mmu_dump_buffer;
1099 buffer = (char *)kaddr;
1100 mmu_dump_buffer = buffer;
1102 if (kctx->api_version >= KBASE_API_VERSION(8, 4)) {
1103 struct kbase_mmu_setup as_setup;
1105 kctx->kbdev->mmu_mode->get_as_setup(kctx, &as_setup);
1106 config[0] = as_setup.transtab;
1107 config[1] = as_setup.memattr;
1109 memcpy(buffer, &config, sizeof(config));
1110 mmu_dump_buffer += sizeof(config);
1111 size_left -= sizeof(config);
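/* Sketch of the resulting buffer layout (assuming the >= 8.4 API path
 * above is taken):
 *   [ config words ][ level-0 header + entries ][ deeper levels ... ][ 0xFF end marker ]
 */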
1116 size = kbasep_mmu_dump_level(kctx,
1118 MIDGARD_MMU_TOPLEVEL,
1127 /* Add on the size for the end marker */
1128 size += sizeof(u64);
1129 /* Add on the size for the config */
1130 if (kctx->api_version >= KBASE_API_VERSION(8, 4))
1131 size += sizeof(config);
1134 if (size > nr_pages * PAGE_SIZE || size_left < sizeof(u64)) {
1135 /* The buffer isn't big enough - free the memory and return failure */
1140 /* Add the end marker */
1141 memcpy(mmu_dump_buffer, &end_marker, sizeof(u64));
1146 KBASE_EXPORT_TEST_API(kbase_mmu_dump);
1148 void bus_fault_worker(struct work_struct *data)
1150 struct kbase_as *faulting_as;
1152 struct kbase_context *kctx;
1153 struct kbase_device *kbdev;
1154 #if KBASE_GPU_RESET_EN
1155 bool reset_status = false;
1156 #endif /* KBASE_GPU_RESET_EN */
1158 faulting_as = container_of(data, struct kbase_as, work_busfault);
1160 as_no = faulting_as->number;
1162 kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);
1164 /* Grab the context that was already refcounted in kbase_mmu_interrupt().
1165 * Therefore, it cannot be scheduled out of this AS until we explicitly release it
1167 kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);
1168 if (WARN_ON(!kctx)) {
1169 atomic_dec(&kbdev->faults_pending);
1173 #if KBASE_GPU_RESET_EN
1174 if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
1175 /* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode.
1176 * We start the reset before switching to UNMAPPED to ensure that unrelated jobs
1177 * are evicted from the GPU before the switch.
1179 dev_err(kbdev->dev, "GPU bus error occurred. For this GPU version we now soft-reset as part of bus error recovery\n");
1180 reset_status = kbase_prepare_to_reset_gpu(kbdev);
1182 #endif /* KBASE_GPU_RESET_EN */
1183 /* NOTE: If GPU already powered off for suspend, we don't need to switch to unmapped */
1184 if (!kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
1186 /* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */
1187 /* AS transaction begin */
1188 mutex_lock(&kbdev->as[as_no].transaction_mutex);
1190 /* Set the MMU into unmapped mode */
1191 kbase_mmu_disable_as(kbdev, as_no);
1193 mutex_unlock(&kbdev->as[as_no].transaction_mutex);
1194 /* AS transaction end */
1196 kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
1197 KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
1198 kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
1199 KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
1201 kbase_pm_context_idle(kbdev);
1204 #if KBASE_GPU_RESET_EN
1205 if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245) && reset_status)
1206 kbase_reset_gpu(kbdev);
1207 #endif /* KBASE_GPU_RESET_EN */
1209 kbasep_js_runpool_release_ctx(kbdev, kctx);
1211 atomic_dec(&kbdev->faults_pending);
1214 const char *kbase_exception_name(struct kbase_device *kbdev, u32 exception_code)
1218 switch (exception_code) {
1219 /* Non-Fault Status code */
1221 e = "NOT_STARTED/IDLE/OK";
1238 /* Job exceptions */
1240 e = "JOB_CONFIG_FAULT";
1243 e = "JOB_POWER_FAULT";
1246 e = "JOB_READ_FAULT";
1249 e = "JOB_WRITE_FAULT";
1252 e = "JOB_AFFINITY_FAULT";
1255 e = "JOB_BUS_FAULT";
1258 e = "INSTR_INVALID_PC";
1261 e = "INSTR_INVALID_ENC";
1264 e = "INSTR_TYPE_MISMATCH";
1267 e = "INSTR_OPERAND_FAULT";
1270 e = "INSTR_TLS_FAULT";
1273 e = "INSTR_BARRIER_FAULT";
1276 e = "INSTR_ALIGN_FAULT";
1279 e = "DATA_INVALID_FAULT";
1282 e = "TILE_RANGE_FAULT";
1285 e = "ADDR_RANGE_FAULT";
1288 e = "OUT_OF_MEMORY";
1290 /* GPU exceptions */
1292 e = "DELAYED_BUS_FAULT";
1295 e = "SHAREABILITY_FAULT";
1297 /* MMU exceptions */
1306 e = "TRANSLATION_FAULT";
1309 e = "PERMISSION_FAULT";
1319 e = "TRANSTAB_BUS_FAULT";
1333 static const char *access_type_name(struct kbase_device *kbdev,
1336 switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
1338 case AS_FAULTSTATUS_ACCESS_TYPE_READ:
1340 case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
1342 case AS_FAULTSTATUS_ACCESS_TYPE_EX:
1351 * The caller must ensure it has retained the ctx to prevent it from being scheduled out whilst it's being worked on.
1353 static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
1354 struct kbase_as *as, const char *reason_str)
1356 unsigned long flags;
1361 struct kbase_device *kbdev;
1362 struct kbasep_js_device_data *js_devdata;
1364 #if KBASE_GPU_RESET_EN
1365 bool reset_status = false;
1369 kbdev = kctx->kbdev;
1370 js_devdata = &kbdev->js_data;
1372 /* ASSERT that the context won't leave the runpool */
1373 KBASE_DEBUG_ASSERT(kbasep_js_debug_check_ctx_refcount(kbdev, kctx) > 0);
1375 /* decode the fault status */
1376 exception_type = as->fault_status & 0xFF;
1377 access_type = (as->fault_status >> 8) & 0x3;
1378 source_id = (as->fault_status >> 16);
1380 /* terminal fault, print info about the fault */
1382 "Unhandled Page fault in AS%d at VA 0x%016llX\n"
1384 "raw fault status 0x%X\n"
1385 "decoded fault status: %s\n"
1386 "exception type 0x%X: %s\n"
1387 "access type 0x%X: %s\n"
1390 as_no, as->fault_addr,
1393 (as->fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
1394 exception_type, kbase_exception_name(kbdev, exception_type),
1395 access_type, access_type_name(kbdev, as->fault_status),
1399 /* hardware counters dump fault handling */
1400 if ((kbdev->hwcnt.kctx) && (kbdev->hwcnt.kctx->as_nr == as_no) &&
1401 (kbdev->hwcnt.backend.state ==
1402 KBASE_INSTR_STATE_DUMPING)) {
1403 unsigned int num_core_groups = kbdev->gpu_props.num_core_groups;
1405 if ((as->fault_addr >= kbdev->hwcnt.addr) &&
1406 (as->fault_addr < (kbdev->hwcnt.addr +
1407 (num_core_groups * 2048))))
1408 kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_FAULT;
1411 /* Stop the kctx from submitting more jobs and cause it to be scheduled
1412 * out/rescheduled - this will occur on releasing the context's refcount */
1413 spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
1414 kbasep_js_clear_submit_allowed(js_devdata, kctx);
1415 spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
1417 /* Kill any running jobs from the context. Submit is disallowed, so no more jobs from this
1418 * context can appear in the job slots from this point on */
1419 kbase_backend_jm_kill_jobs_from_kctx(kctx);
1420 /* AS transaction begin */
1421 mutex_lock(&as->transaction_mutex);
1422 #if KBASE_GPU_RESET_EN
1423 if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
1424 /* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode.
1425 * We start the reset before switching to UNMAPPED to ensure that unrelated jobs
1426 * are evicted from the GPU before the switch.
1428 dev_err(kbdev->dev, "Unhandled page fault. For this GPU version we now soft-reset the GPU as part of page fault recovery.");
1429 reset_status = kbase_prepare_to_reset_gpu(kbdev);
1431 #endif /* KBASE_GPU_RESET_EN */
1432 /* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */
1433 kbase_mmu_disable_as(kbdev, as_no);
1435 mutex_unlock(&as->transaction_mutex);
1436 /* AS transaction end */
1437 /* Clear down the fault */
1438 kbase_mmu_hw_clear_fault(kbdev, as, kctx,
1439 KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
1440 kbase_mmu_hw_enable_fault(kbdev, as, kctx,
1441 KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
1443 #if KBASE_GPU_RESET_EN
1444 if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245) && reset_status)
1445 kbase_reset_gpu(kbdev);
1446 #endif /* KBASE_GPU_RESET_EN */
1449 void kbasep_as_do_poke(struct work_struct *work)
1451 struct kbase_as *as;
1452 struct kbase_device *kbdev;
1453 struct kbase_context *kctx;
1454 unsigned long flags;
1456 KBASE_DEBUG_ASSERT(work);
1457 as = container_of(work, struct kbase_as, poke_work);
1458 kbdev = container_of(as, struct kbase_device, as[as->number]);
1459 KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
1461 /* GPU power will already be active by virtue of the caller holding a JS
1462 * reference on the address space, and will not release it until this worker
1465 /* Further to the comment above, we know that while this function is running
1466 * the AS will not be released, because this workqueue is flushed (in
1467 * kbase_as_poking_timer_release_atom) before the atom is released
1469 kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as->number);
1471 /* AS transaction begin */
1472 mutex_lock(&as->transaction_mutex);
1473 /* Force a uTLB invalidate */
1474 kbase_mmu_hw_do_operation(kbdev, as, kctx, 0, 0,
1475 AS_COMMAND_UNLOCK, 0);
1476 mutex_unlock(&as->transaction_mutex);
1477 /* AS transaction end */
1479 spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
1480 if (as->poke_refcount &&
1481 !(as->poke_state & KBASE_AS_POKE_STATE_KILLING_POKE)) {
1482 /* Only queue up the timer if we need it, and we're not trying to kill it */
1483 hrtimer_start(&as->poke_timer, HR_TIMER_DELAY_MSEC(5), HRTIMER_MODE_REL);
1485 spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
1488 enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer)
1490 struct kbase_as *as;
1493 KBASE_DEBUG_ASSERT(NULL != timer);
1494 as = container_of(timer, struct kbase_as, poke_timer);
1495 KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
1497 queue_work_ret = queue_work(as->poke_wq, &as->poke_work);
1498 KBASE_DEBUG_ASSERT(queue_work_ret);
1499 return HRTIMER_NORESTART;
1503 * Retain the poking timer on an atom's context (if the atom hasn't already
1504 * done so), and start the timer (if it's not already started).
1506 * This must only be called on a context that's scheduled in, and an atom
1507 * that's running on the GPU.
1509 * The caller must hold kbasep_js_device_data::runpool_irq::lock
1511 * This can be called safely from atomic context
1513 void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom)
1515 struct kbase_as *as;
1517 KBASE_DEBUG_ASSERT(kbdev);
1518 KBASE_DEBUG_ASSERT(kctx);
1519 KBASE_DEBUG_ASSERT(katom);
1520 KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
1521 lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
1528 /* It's safe to work on the as/as_nr without an explicit reference,
1529 * because the caller holds the runpool_irq lock, and the atom itself
1530 * was also running and had already taken a reference */
1531 as = &kbdev->as[kctx->as_nr];
1533 if (++(as->poke_refcount) == 1) {
1534 /* First refcount for poke needed: check if not already in flight */
1535 if (!as->poke_state) {
1536 /* need to start poking */
1537 as->poke_state |= KBASE_AS_POKE_STATE_IN_FLIGHT;
1538 queue_work(as->poke_wq, &as->poke_work);
1544 * If an atom holds a poking timer, release it and wait for it to finish
1546 * This must only be called on a context that's scheduled in, and an atom
1547 * that still has a JS reference on the context
1549 * This must \b not be called from atomic context, since it can sleep.
1551 void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom)
1553 struct kbase_as *as;
1554 unsigned long flags;
1556 KBASE_DEBUG_ASSERT(kbdev);
1557 KBASE_DEBUG_ASSERT(kctx);
1558 KBASE_DEBUG_ASSERT(katom);
1559 KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
1564 as = &kbdev->as[kctx->as_nr];
1566 spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
1567 KBASE_DEBUG_ASSERT(as->poke_refcount > 0);
1568 KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
1570 if (--(as->poke_refcount) == 0) {
1571 as->poke_state |= KBASE_AS_POKE_STATE_KILLING_POKE;
1572 spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
1574 hrtimer_cancel(&as->poke_timer);
1575 flush_workqueue(as->poke_wq);
1577 spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
1579 /* Re-check whether it's still needed */
1580 if (as->poke_refcount) {
1582 /* Poking still needed:
1583 * - Another retain will not be starting the timer or queueing work,
1584 * because it's still marked as in-flight
1585 * - The hrtimer has finished, and has not started a new timer or
1586 * queued work because it's been marked as killing
1588 * So whatever happens now, just queue the work again */
1589 as->poke_state &= ~((kbase_as_poke_state)KBASE_AS_POKE_STATE_KILLING_POKE);
1590 queue_work_ret = queue_work(as->poke_wq, &as->poke_work);
1591 KBASE_DEBUG_ASSERT(queue_work_ret);
1593 /* It isn't - so mark it as not in flight, and not killing */
1594 as->poke_state = 0u;
1596 /* The poke associated with the atom has now finished. If this is
1597 * also the last atom on the context, then we can guarantee no more
1598 * pokes (and thus no more poking register accesses) will occur on
1599 * the context until new atoms are run */
1602 spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
1607 void kbase_mmu_interrupt_process(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_as *as)
1609 struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
1611 lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
1614 dev_warn(kbdev->dev, "%s in AS%d at 0x%016llx with no context present! Spurious IRQ or SW design error?\n",
1615 kbase_as_has_bus_fault(as) ? "Bus error" : "Page fault",
1616 as->number, as->fault_addr);
1618 /* Since no ctx was found, the MMU must be disabled. */
1619 WARN_ON(as->current_setup.transtab);
1621 if (kbase_as_has_bus_fault(as)) {
1622 kbase_mmu_hw_clear_fault(kbdev, as, kctx,
1623 KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
1624 kbase_mmu_hw_enable_fault(kbdev, as, kctx,
1625 KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
1626 } else if (kbase_as_has_page_fault(as)) {
1627 kbase_mmu_hw_clear_fault(kbdev, as, kctx,
1628 KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
1629 kbase_mmu_hw_enable_fault(kbdev, as, kctx,
1630 KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
1633 #if KBASE_GPU_RESET_EN
1634 if (kbase_as_has_bus_fault(as) &&
1635 kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
1638 * Reset the GPU, like in bus_fault_worker, in case an
1639 * earlier error hasn't been properly cleared by this
1642 dev_err(kbdev->dev, "GPU bus error occurred. For this GPU version we now soft-reset as part of bus error recovery\n");
1643 reset_status = kbase_prepare_to_reset_gpu_locked(kbdev);
1645 kbase_reset_gpu_locked(kbdev);
1647 #endif /* KBASE_GPU_RESET_EN */
1652 if (kbase_as_has_bus_fault(as)) {
1654 * hw counters dumping in progress, signal the
1655 * other thread that it failed
1657 if ((kbdev->hwcnt.kctx == kctx) &&
1658 (kbdev->hwcnt.backend.state ==
1659 KBASE_INSTR_STATE_DUMPING))
1660 kbdev->hwcnt.backend.state =
1661 KBASE_INSTR_STATE_FAULT;
1664 * Stop the kctx from submitting more jobs and cause it
1665 * to be scheduled out/rescheduled when all references
1666 * to it are released
1668 kbasep_js_clear_submit_allowed(js_devdata, kctx);
1670 dev_warn(kbdev->dev, "Bus error in AS%d at 0x%016llx\n",
1671 as->number, as->fault_addr);
1674 * We need to switch to UNMAPPED mode - but we do this in a
1675 * worker so that we can sleep
1677 kbdev->kbase_group_error++;
1678 KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&as->work_busfault));
1679 WARN_ON(work_pending(&as->work_busfault));
1680 queue_work(as->pf_wq, &as->work_busfault);
1681 atomic_inc(&kbdev->faults_pending);
1683 kbdev->kbase_group_error++;
1684 KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&as->work_pagefault));
1685 WARN_ON(work_pending(&as->work_pagefault));
1686 queue_work(as->pf_wq, &as->work_pagefault);
1687 atomic_inc(&kbdev->faults_pending);
1691 void kbase_flush_mmu_wqs(struct kbase_device *kbdev)
1695 for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
1696 struct kbase_as *as = &kbdev->as[i];
1698 flush_workqueue(as->pf_wq);