Revert "MALI: rockchip: upgrade midgard DDK to r14p0-01rel0"
[firefly-linux-kernel-4.4.55.git] / drivers/gpu/arm/midgard/mali_kbase_mmu.c
index 5c1b9c3b84c424e70b6c5c8fb0ea6cabe20763fe..48d53723a9b40f9802c351a4aedf205bc0f544c8 100644
@@ -41,7 +41,6 @@
 #include <mali_kbase_mmu_hw.h>
 #include <mali_kbase_hwaccess_jm.h>
 #include <mali_kbase_time.h>
-#include <mali_kbase_mem.h>
 
 #define KBASE_MMU_PAGE_ENTRIES 512
 
@@ -140,16 +139,6 @@ void page_fault_worker(struct work_struct *data)
 
        KBASE_DEBUG_ASSERT(kctx->kbdev == kbdev);
 
-       if (unlikely(faulting_as->protected_mode))
-       {
-               kbase_mmu_report_fault_and_kill(kctx, faulting_as,
-                               "Protected mode fault");
-               kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
-                               KBASE_MMU_FAULT_TYPE_PAGE);
-
-               goto fault_done;
-       }
-
        fault_status = faulting_as->fault_status;
        switch (fault_status & AS_FAULTSTATUS_EXCEPTION_CODE_MASK) {
 
@@ -164,7 +153,7 @@ void page_fault_worker(struct work_struct *data)
 
        case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT:
                kbase_mmu_report_fault_and_kill(kctx, faulting_as,
-                               "Translation table bus fault");
+                               "Tranlation table bus fault");
                goto fault_done;
 
        case AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG:
@@ -230,8 +219,6 @@ void page_fault_worker(struct work_struct *data)
                                region->start_pfn +
                                kbase_reg_current_backed_size(region));
 
-               mutex_lock(&kbdev->mmu_hw_mutex);
-
                kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
                                KBASE_MMU_FAULT_TYPE_PAGE);
                /* [1] in case another page fault occurred while we were
@@ -244,9 +231,6 @@ void page_fault_worker(struct work_struct *data)
                 */
                kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0,
                                AS_COMMAND_UNLOCK, 1);
-
-               mutex_unlock(&kbdev->mmu_hw_mutex);
-
                kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
                                KBASE_MMU_FAULT_TYPE_PAGE);
                kbase_gpu_vm_unlock(kctx);
@@ -265,17 +249,12 @@ void page_fault_worker(struct work_struct *data)
                                kbase_reg_current_backed_size(region);
 
        if (0 == new_pages) {
-               mutex_lock(&kbdev->mmu_hw_mutex);
-
                /* Duplicate of a fault we've already handled, nothing to do */
                kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
                                KBASE_MMU_FAULT_TYPE_PAGE);
                /* See comment [1] about UNLOCK usage */
                kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0,
                                AS_COMMAND_UNLOCK, 1);
-
-               mutex_unlock(&kbdev->mmu_hw_mutex);
-
                kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
                                KBASE_MMU_FAULT_TYPE_PAGE);
                kbase_gpu_vm_unlock(kctx);
@@ -335,7 +314,7 @@ void page_fault_worker(struct work_struct *data)
                kbase_tlstream_aux_pagefault(kctx->id, (u64)new_pages);
 
                /* AS transaction begin */
-               mutex_lock(&kbdev->mmu_hw_mutex);
+               mutex_lock(&faulting_as->transaction_mutex);
 
                /* flush L2 and unlock the VA (resumes the MMU) */
                if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
@@ -359,7 +338,7 @@ void page_fault_worker(struct work_struct *data)
                                          new_pages,
                                          op, 1);
 
-               mutex_unlock(&kbdev->mmu_hw_mutex);
+               mutex_unlock(&faulting_as->transaction_mutex);
                /* AS transaction end */
 
                /* reenable this in the mask */
@@ -427,17 +406,14 @@ sub_pages:
 
 KBASE_EXPORT_TEST_API(kbase_mmu_alloc_pgd);
 
-/* Given PGD PFN for level N, return PGD PFN for level N+1, allocating the
- * new table from the pool if needed and possible
- */
-static int mmu_get_next_pgd(struct kbase_context *kctx,
-               phys_addr_t *pgd, u64 vpfn, int level)
+/* Given PGD PFN for level N, return PGD PFN for level N+1 */
+static phys_addr_t mmu_get_next_pgd(struct kbase_context *kctx, phys_addr_t pgd, u64 vpfn, int level)
 {
        u64 *page;
        phys_addr_t target_pgd;
        struct page *p;
 
-       KBASE_DEBUG_ASSERT(*pgd);
+       KBASE_DEBUG_ASSERT(pgd);
        KBASE_DEBUG_ASSERT(NULL != kctx);
 
        lockdep_assert_held(&kctx->mmu_lock);
@@ -449,11 +425,11 @@ static int mmu_get_next_pgd(struct kbase_context *kctx,
        vpfn >>= (3 - level) * 9;
        vpfn &= 0x1FF;
 
-       p = pfn_to_page(PFN_DOWN(*pgd));
+       p = pfn_to_page(PFN_DOWN(pgd));
        page = kmap(p);
        if (NULL == page) {
                dev_warn(kctx->kbdev->dev, "mmu_get_next_pgd: kmap failure\n");
-               return -EINVAL;
+               return 0;
        }
 
        target_pgd = kctx->kbdev->mmu_mode->pte_to_phy_addr(page[vpfn]);
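The shift and mask in this hunk follow from KBASE_MMU_PAGE_ENTRIES being 512: assuming 4 KiB table pages holding eight-byte descriptors, each level resolves 9 bits of the virtual page frame number, and the (3 - level) factor implies level 3 is the bottom of the walk. A standalone sketch of that index arithmetic (plain C with hypothetical demo_* names, not the kbase code):

#include <stdint.h>
#include <stdio.h>

/* One table page: 4096 / sizeof(uint64_t) = 512 entries, i.e. 9 bits per level. */
#define DEMO_MMU_PAGE_ENTRIES 512
#define DEMO_MMU_BOTTOMLEVEL 3

/* Mirrors "vpfn >>= (3 - level) * 9; vpfn &= 0x1FF;" from the hunk above. */
static unsigned int demo_level_index(uint64_t vpfn, int level)
{
        return (vpfn >> ((DEMO_MMU_BOTTOMLEVEL - level) * 9)) &
                        (DEMO_MMU_PAGE_ENTRIES - 1);
}

int main(void)
{
        uint64_t vpfn = 0x123456789abcULL >> 12; /* page frame number of an example VA */
        int level;

        for (level = 0; level <= DEMO_MMU_BOTTOMLEVEL; level++)
                printf("level %d index: 0x%03x\n", level, demo_level_index(vpfn, level));
        return 0;
}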
@@ -461,9 +437,9 @@ static int mmu_get_next_pgd(struct kbase_context *kctx,
        if (!target_pgd) {
                target_pgd = kbase_mmu_alloc_pgd(kctx);
                if (!target_pgd) {
-                       dev_dbg(kctx->kbdev->dev, "mmu_get_next_pgd: kbase_mmu_alloc_pgd failure\n");
+                       dev_warn(kctx->kbdev->dev, "mmu_get_next_pgd: kbase_mmu_alloc_pgd failure\n");
                        kunmap(p);
-                       return -ENOMEM;
+                       return 0;
                }
 
                kctx->kbdev->mmu_mode->entry_set_pte(&page[vpfn], target_pgd);
@@ -473,13 +449,10 @@ static int mmu_get_next_pgd(struct kbase_context *kctx,
        }
 
        kunmap(p);
-       *pgd = target_pgd;
-
-       return 0;
+       return target_pgd;
 }
 
-static int mmu_get_bottom_pgd(struct kbase_context *kctx,
-               u64 vpfn, phys_addr_t *out_pgd)
+static phys_addr_t mmu_get_bottom_pgd(struct kbase_context *kctx, u64 vpfn)
 {
        phys_addr_t pgd;
        int l;
@@ -488,17 +461,15 @@ static int mmu_get_bottom_pgd(struct kbase_context *kctx,
 
        pgd = kctx->pgd;
        for (l = MIDGARD_MMU_TOPLEVEL; l < MIDGARD_MMU_BOTTOMLEVEL; l++) {
-               int err = mmu_get_next_pgd(kctx, &pgd, vpfn, l);
+               pgd = mmu_get_next_pgd(kctx, pgd, vpfn, l);
                /* Handle failure condition */
-               if (err) {
-                       dev_dbg(kctx->kbdev->dev, "mmu_get_bottom_pgd: mmu_get_next_pgd failure\n");
-                       return err;
+               if (!pgd) {
+                       dev_warn(kctx->kbdev->dev, "mmu_get_bottom_pgd: mmu_get_next_pgd failure\n");
+                       return 0;
                }
        }
 
-       *out_pgd = pgd;
-
-       return 0;
+       return pgd;
 }
 
 static phys_addr_t mmu_insert_pages_recover_get_next_pgd(struct kbase_context *kctx, phys_addr_t pgd, u64 vpfn, int level)
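The signature changes above define how the rest of this revert handles page walks: the restored mmu_get_next_pgd()/mmu_get_bottom_pgd() return the physical PGD address directly, with 0 meaning any failure, while the r14p0 versions returned an errno and wrote the address through an out-parameter so callers could tell -ENOMEM apart from other errors. A minimal contrast of the two conventions on a toy table (hypothetical demo helpers, not the kbase API):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t demo_phys_addr_t;

/* Toy one-level "page table": entry 1 is populated, the others are empty. */
static demo_phys_addr_t toy_table[4] = { 0, 0x1000, 0, 0 };

/* Convention restored by the revert: return the address, 0 means failure. */
static demo_phys_addr_t next_by_value(unsigned int idx)
{
        return idx < 4 ? toy_table[idx] : 0;
}

/* Convention removed by the revert: errno return plus an out-parameter,
 * which lets the caller distinguish a missing entry from a bad index. */
static int next_by_errcode(unsigned int idx, demo_phys_addr_t *out)
{
        if (idx >= 4)
                return -EINVAL;
        if (!toy_table[idx])
                return -ENOMEM;
        *out = toy_table[idx];
        return 0;
}

int main(void)
{
        demo_phys_addr_t pgd = 0;

        if (!next_by_value(2))
                printf("by-value: walk failed, reason not recoverable\n");
        if (next_by_errcode(2, &pgd) == -ENOMEM)
                printf("by-errcode: entry missing, caller could allocate and retry\n");
        return 0;
}

The by-value form keeps the walk loop shorter, but it folds every failure into 0, which is consistent with the grow-and-retry logic at the call sites disappearing later in this patch.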
@@ -637,19 +608,8 @@ int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
                 * On the other hand, it's only a gain when we map more than
                 * 256 pages at once (on average). Do we really care?
                 */
-               do {
-                       err = mmu_get_bottom_pgd(kctx, vpfn, &pgd);
-                       if (err != -ENOMEM)
-                               break;
-                       /* Fill the memory pool with enough pages for
-                        * the page walk to succeed
-                        */
-                       mutex_unlock(&kctx->mmu_lock);
-                       err = kbase_mem_pool_grow(&kctx->mem_pool,
-                                       MIDGARD_MMU_BOTTOMLEVEL);
-                       mutex_lock(&kctx->mmu_lock);
-               } while (!err);
-               if (err) {
+               pgd = mmu_get_bottom_pgd(kctx, vpfn);
+               if (!pgd) {
                        dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: mmu_get_bottom_pgd failure\n");
                        if (recover_required) {
                                /* Invalidate the pages we have partially
@@ -658,6 +618,7 @@ int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
                                                                  recover_vpfn,
                                                                  recover_count);
                        }
+                       err = -EINVAL;
                        goto fail_unlock;
                }
 
@@ -749,19 +710,8 @@ int kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
                 * On the other hand, it's only a gain when we map more than
                 * 256 pages at once (on average). Do we really care?
                 */
-               do {
-                       err = mmu_get_bottom_pgd(kctx, vpfn, &pgd);
-                       if (err != -ENOMEM)
-                               break;
-                       /* Fill the memory pool with enough pages for
-                        * the page walk to succeed
-                        */
-                       mutex_unlock(&kctx->mmu_lock);
-                       err = kbase_mem_pool_grow(&kctx->mem_pool,
-                                       MIDGARD_MMU_BOTTOMLEVEL);
-                       mutex_lock(&kctx->mmu_lock);
-               } while (!err);
-               if (err) {
+               pgd = mmu_get_bottom_pgd(kctx, vpfn);
+               if (!pgd) {
                        dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: mmu_get_bottom_pgd failure\n");
                        if (recover_required) {
                                /* Invalidate the pages we have partially
@@ -770,6 +720,7 @@ int kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
                                                                  recover_vpfn,
                                                                  recover_count);
                        }
+                       err = -EINVAL;
                        goto fail_unlock;
                }
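At each page-walk call site this patch replaces a grow-and-retry loop with a single mmu_get_bottom_pgd() call that now fails hard with -EINVAL. The removed r14p0 pattern dropped the walk mutex, refilled the memory pool, retook the mutex and retried, so a transient -ENOMEM did not abort the mapping. A self-contained sketch of that idiom with a hypothetical pool (pthread locks standing in for kctx->mmu_lock):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct demo_pool { int free_pages; };

static pthread_mutex_t demo_mmu_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for mmu_get_bottom_pgd(): may need up to 3 fresh table pages. */
static int demo_walk(struct demo_pool *pool)
{
        if (pool->free_pages < 3)
                return -ENOMEM;
        pool->free_pages -= 3;
        return 0;
}

/* Stands in for kbase_mem_pool_grow(): allocation that may sleep. */
static int demo_grow(struct demo_pool *pool, int pages)
{
        pool->free_pages += pages;
        return 0;
}

static int demo_map(struct demo_pool *pool)
{
        int err;

        pthread_mutex_lock(&demo_mmu_lock);
        do {
                err = demo_walk(pool);
                if (err != -ENOMEM)
                        break;
                /* Can't allocate under the walk lock: drop it, grow, retake, retry. */
                pthread_mutex_unlock(&demo_mmu_lock);
                err = demo_grow(pool, 4);
                pthread_mutex_lock(&demo_mmu_lock);
        } while (!err);
        pthread_mutex_unlock(&demo_mmu_lock);
        return err;
}

int main(void)
{
        struct demo_pool pool = { .free_pages = 0 };

        printf("demo_map: %d\n", demo_map(&pool)); /* 0: succeeded after one grow */
        return 0;
}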
 
@@ -918,7 +869,8 @@ static void kbase_mmu_flush_invalidate(struct kbase_context *kctx,
                        u32 op;
 
                        /* AS transaction begin */
-                       mutex_lock(&kbdev->mmu_hw_mutex);
+                       mutex_lock(&kbdev->as[
+                                       kctx->as_nr].transaction_mutex);
 
                        if (sync)
                                op = AS_COMMAND_FLUSH_MEM;
@@ -941,7 +893,8 @@ static void kbase_mmu_flush_invalidate(struct kbase_context *kctx,
                        }
 #endif /* KBASE_GPU_RESET_EN */
 
-                       mutex_unlock(&kbdev->mmu_hw_mutex);
+                       mutex_unlock(&kbdev->as[
+                                       kctx->as_nr].transaction_mutex);
                        /* AS transaction end */
 
 #ifndef CONFIG_MALI_NO_MALI
@@ -968,13 +921,13 @@ static void kbase_mmu_flush_invalidate(struct kbase_context *kctx,
 
 void kbase_mmu_update(struct kbase_context *kctx)
 {
-       lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
-       lockdep_assert_held(&kctx->kbdev->mmu_hw_mutex);
+       lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
        /* ASSERT that the context has a valid as_nr, which is only the case
         * when it's scheduled in.
         *
-        * as_nr won't change because the caller has the hwaccess_lock */
+        * as_nr won't change because the caller has the runpool_irq lock */
        KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+       lockdep_assert_held(&kctx->kbdev->as[kctx->as_nr].transaction_mutex);
 
        kctx->kbdev->mmu_mode->update(kctx);
 }
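The assertions restored in kbase_mmu_update() require both the per-address-space transaction_mutex and the runpool_irq spinlock. Since a mutex may sleep, it can never be taken while a spinlock is held, so a caller that satisfies both asserts has to acquire the mutex first and the spinlock inside it. A hypothetical caller shape (pthread primitives as stand-ins, not the kbase locks):

#include <pthread.h>

static pthread_mutex_t demo_transaction_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_spinlock_t demo_runpool_irq_lock;

/* Stands in for kctx->kbdev->mmu_mode->update(kctx). */
static void demo_mmu_update(void)
{
}

static void demo_caller(void)
{
        pthread_mutex_lock(&demo_transaction_mutex);  /* outer lock: may sleep    */
        pthread_spin_lock(&demo_runpool_irq_lock);    /* inner lock: never sleeps */
        demo_mmu_update();                            /* both asserts would hold  */
        pthread_spin_unlock(&demo_runpool_irq_lock);
        pthread_mutex_unlock(&demo_transaction_mutex);
}

int main(void)
{
        pthread_spin_init(&demo_runpool_irq_lock, PTHREAD_PROCESS_PRIVATE);
        demo_caller();
        pthread_spin_destroy(&demo_runpool_irq_lock);
        return 0;
}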
@@ -982,8 +935,8 @@ KBASE_EXPORT_TEST_API(kbase_mmu_update);
 
 void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr)
 {
-       lockdep_assert_held(&kbdev->hwaccess_lock);
-       lockdep_assert_held(&kbdev->mmu_hw_mutex);
+       lockdep_assert_held(&kbdev->as[as_nr].transaction_mutex);
+       lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
 
        kbdev->mmu_mode->disable_as(kbdev, as_nr);
 }
@@ -993,10 +946,11 @@ void kbase_mmu_disable(struct kbase_context *kctx)
        /* ASSERT that the context has a valid as_nr, which is only the case
         * when it's scheduled in.
         *
-        * as_nr won't change because the caller has the hwaccess_lock */
+        * as_nr won't change because the caller has the runpool_irq lock */
        KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
 
-       lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+       lockdep_assert_held(&kctx->kbdev->as[kctx->as_nr].transaction_mutex);
+       lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
 
        /*
         * The address space is being disabled, drain all knowledge of it out
@@ -1054,8 +1008,8 @@ int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr)
                if (count > nr)
                        count = nr;
 
-               err = mmu_get_bottom_pgd(kctx, vpfn, &pgd);
-               if (err) {
+               pgd = mmu_get_bottom_pgd(kctx, vpfn);
+               if (!pgd) {
                        dev_warn(kbdev->dev, "kbase_mmu_teardown_pages: mmu_get_bottom_pgd failure\n");
                        err = -EINVAL;
                        goto fail_unlock;
@@ -1138,20 +1092,10 @@ int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, phys_addr_t *ph
                if (count > nr)
                        count = nr;
 
-               do {
-                       err = mmu_get_bottom_pgd(kctx, vpfn, &pgd);
-                       if (err != -ENOMEM)
-                               break;
-                       /* Fill the memory pool with enough pages for
-                        * the page walk to succeed
-                        */
-                       mutex_unlock(&kctx->mmu_lock);
-                       err = kbase_mem_pool_grow(&kctx->mem_pool,
-                                       MIDGARD_MMU_BOTTOMLEVEL);
-                       mutex_lock(&kctx->mmu_lock);
-               } while (!err);
-               if (err) {
+               pgd = mmu_get_bottom_pgd(kctx, vpfn);
+               if (!pgd) {
                        dev_warn(kctx->kbdev->dev, "mmu_get_bottom_pgd failure\n");
+                       err = -EINVAL;
                        goto fail_unlock;
                }
 
@@ -1372,13 +1316,13 @@ void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
                return NULL;
        }
 
+       mutex_lock(&kctx->mmu_lock);
+
        size_left = nr_pages * PAGE_SIZE;
 
        KBASE_DEBUG_ASSERT(0 != size_left);
        kaddr = vmalloc_user(size_left);
 
-       mutex_lock(&kctx->mmu_lock);
-
        if (kaddr) {
                u64 end_marker = 0xFFULL;
                char *buffer;
@@ -1463,18 +1407,6 @@ void bus_fault_worker(struct work_struct *data)
                return;
        }
 
-       if (unlikely(faulting_as->protected_mode))
-       {
-               kbase_mmu_report_fault_and_kill(kctx, faulting_as,
-                               "Permission failure");
-               kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
-                               KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
-               kbasep_js_runpool_release_ctx(kbdev, kctx);
-               atomic_dec(&kbdev->faults_pending);
-               return;
-
-       }
-
 #if KBASE_GPU_RESET_EN
        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
                /* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode.
@@ -1491,14 +1423,15 @@ void bus_fault_worker(struct work_struct *data)
 
                /* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */
                /* AS transaction begin */
-               mutex_lock(&kbdev->mmu_hw_mutex);
+               mutex_lock(&kbdev->as[as_no].transaction_mutex);
 
                /* Set the MMU into unmapped mode */
-               spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+               spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
                kbase_mmu_disable(kctx);
-               spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+               spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock,
+                               flags);
 
-               mutex_unlock(&kbdev->mmu_hw_mutex);
+               mutex_unlock(&kbdev->as[as_no].transaction_mutex);
                /* AS transaction end */
 
                kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
@@ -1733,7 +1666,7 @@ static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
        dev_err(kbdev->dev,
                "Unhandled Page fault in AS%d at VA 0x%016llX\n"
                "Reason: %s\n"
-               "raw fault status: 0x%X\n"
+               "raw fault status 0x%X\n"
                "decoded fault status: %s\n"
                "exception type 0x%X: %s\n"
                "access type 0x%X: %s\n"
@@ -1762,15 +1695,15 @@ static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
 
        /* Stop the kctx from submitting more jobs and cause it to be scheduled
         * out/rescheduled - this will occur on releasing the context's refcount */
-       spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+       spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
        kbasep_js_clear_submit_allowed(js_devdata, kctx);
-       spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+       spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
 
        /* Kill any running jobs from the context. Submit is disallowed, so no more jobs from this
         * context can appear in the job slots from this point on */
        kbase_backend_jm_kill_jobs_from_kctx(kctx);
        /* AS transaction begin */
-       mutex_lock(&kbdev->mmu_hw_mutex);
+       mutex_lock(&as->transaction_mutex);
 #if KBASE_GPU_RESET_EN
        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
                /* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode.
@@ -1782,11 +1715,11 @@ static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
        }
 #endif /* KBASE_GPU_RESET_EN */
        /* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */
-       spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+       spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
        kbase_mmu_disable(kctx);
-       spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+       spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
 
-       mutex_unlock(&kbdev->mmu_hw_mutex);
+       mutex_unlock(&as->transaction_mutex);
        /* AS transaction end */
        /* Clear down the fault */
        kbase_mmu_hw_clear_fault(kbdev, as, kctx,
@@ -1823,20 +1756,20 @@ void kbasep_as_do_poke(struct work_struct *work)
        kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as->number);
 
        /* AS transaction begin */
-       mutex_lock(&kbdev->mmu_hw_mutex);
+       mutex_lock(&as->transaction_mutex);
        /* Force a uTLB invalidate */
        kbase_mmu_hw_do_operation(kbdev, as, kctx, 0, 0,
                                  AS_COMMAND_UNLOCK, 0);
-       mutex_unlock(&kbdev->mmu_hw_mutex);
+       mutex_unlock(&as->transaction_mutex);
        /* AS transaction end */
 
-       spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+       spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
        if (as->poke_refcount &&
                !(as->poke_state & KBASE_AS_POKE_STATE_KILLING_POKE)) {
                /* Only queue up the timer if we need it, and we're not trying to kill it */
                hrtimer_start(&as->poke_timer, HR_TIMER_DELAY_MSEC(5), HRTIMER_MODE_REL);
        }
-       spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+       spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
 }
 
 enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer)
@@ -1860,7 +1793,7 @@ enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer)
  * This must only be called on a context that's scheduled in, and an atom
  * that's running on the GPU.
  *
- * The caller must hold hwaccess_lock
+ * The caller must hold kbasep_js_device_data::runpool_irq::lock
  *
  * This can be called safely from atomic context
  */
@@ -1872,7 +1805,7 @@ void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_
        KBASE_DEBUG_ASSERT(kctx);
        KBASE_DEBUG_ASSERT(katom);
        KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
-       lockdep_assert_held(&kbdev->hwaccess_lock);
+       lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
 
        if (katom->poking)
                return;
@@ -1880,7 +1813,7 @@ void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_
        katom->poking = 1;
 
        /* It's safe to work on the as/as_nr without an explicit reference,
-        * because the caller holds the hwaccess_lock, and the atom itself
+        * because the caller holds the runpool_irq lock, and the atom itself
         * was also running and had already taken a reference  */
        as = &kbdev->as[kctx->as_nr];
 
@@ -1917,18 +1850,18 @@ void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase
 
        as = &kbdev->as[kctx->as_nr];
 
-       spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+       spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
        KBASE_DEBUG_ASSERT(as->poke_refcount > 0);
        KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
 
        if (--(as->poke_refcount) == 0) {
                as->poke_state |= KBASE_AS_POKE_STATE_KILLING_POKE;
-               spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+               spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
 
                hrtimer_cancel(&as->poke_timer);
                flush_workqueue(as->poke_wq);
 
-               spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+               spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
 
                /* Re-check whether it's still needed */
                if (as->poke_refcount) {
@@ -1953,7 +1886,7 @@ void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase
                         * the context until new atoms are run */
                }
        }
-       spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+       spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
 
        katom->poking = 0;
 }
@@ -1962,7 +1895,7 @@ void kbase_mmu_interrupt_process(struct kbase_device *kbdev, struct kbase_contex
 {
        struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
 
-       lockdep_assert_held(&kbdev->hwaccess_lock);
+       lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
 
        if (!kctx) {
                dev_warn(kbdev->dev, "%s in AS%d at 0x%016llx with no context present! Spurious IRQ or SW Design Error?\n",