#include <mali_kbase_mmu_hw.h>
#include <mali_kbase_hwaccess_jm.h>
#include <mali_kbase_time.h>
-#include <mali_kbase_mem.h>
#define KBASE_MMU_PAGE_ENTRIES 512
KBASE_DEBUG_ASSERT(kctx->kbdev == kbdev);
- if (unlikely(faulting_as->protected_mode))
- {
- kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Protected mode fault");
- kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
- KBASE_MMU_FAULT_TYPE_PAGE);
-
- goto fault_done;
- }
-
fault_status = faulting_as->fault_status;
switch (fault_status & AS_FAULTSTATUS_EXCEPTION_CODE_MASK) {
case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT:
kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Translation table bus fault");
+ "Tranlation table bus fault");
goto fault_done;
case AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG:
region->start_pfn +
kbase_reg_current_backed_size(region));
- mutex_lock(&kbdev->mmu_hw_mutex);
-
kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
KBASE_MMU_FAULT_TYPE_PAGE);
/* [1] in case another page fault occurred while we were
*/
kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0,
AS_COMMAND_UNLOCK, 1);
-
- mutex_unlock(&kbdev->mmu_hw_mutex);
-
kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
KBASE_MMU_FAULT_TYPE_PAGE);
kbase_gpu_vm_unlock(kctx);
kbase_reg_current_backed_size(region);
if (0 == new_pages) {
- mutex_lock(&kbdev->mmu_hw_mutex);
-
/* Duplicate of a fault we've already handled, nothing to do */
kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
KBASE_MMU_FAULT_TYPE_PAGE);
/* See comment [1] about UNLOCK usage */
kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0,
AS_COMMAND_UNLOCK, 1);
-
- mutex_unlock(&kbdev->mmu_hw_mutex);
-
kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
KBASE_MMU_FAULT_TYPE_PAGE);
kbase_gpu_vm_unlock(kctx);
kbase_tlstream_aux_pagefault(kctx->id, (u64)new_pages);
/* AS transaction begin */
- mutex_lock(&kbdev->mmu_hw_mutex);
+ mutex_lock(&faulting_as->transaction_mutex);
/* flush L2 and unlock the VA (resumes the MMU) */
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
new_pages,
op, 1);
- mutex_unlock(&kbdev->mmu_hw_mutex);
+ mutex_unlock(&faulting_as->transaction_mutex);
/* AS transaction end */
/* reenable this in the mask */
KBASE_EXPORT_TEST_API(kbase_mmu_alloc_pgd);
-/* Given PGD PFN for level N, return PGD PFN for level N+1, allocating the
- * new table from the pool if needed and possible
- */
-static int mmu_get_next_pgd(struct kbase_context *kctx,
- phys_addr_t *pgd, u64 vpfn, int level)
+/* Given PGD PFN for level N, return PGD PFN for level N+1 */
+static phys_addr_t mmu_get_next_pgd(struct kbase_context *kctx, phys_addr_t pgd, u64 vpfn, int level)
{
u64 *page;
phys_addr_t target_pgd;
struct page *p;
- KBASE_DEBUG_ASSERT(*pgd);
+ KBASE_DEBUG_ASSERT(pgd);
KBASE_DEBUG_ASSERT(NULL != kctx);
lockdep_assert_held(&kctx->mmu_lock);
vpfn >>= (3 - level) * 9;
vpfn &= 0x1FF;
- p = pfn_to_page(PFN_DOWN(*pgd));
+ p = pfn_to_page(PFN_DOWN(pgd));
page = kmap(p);
if (NULL == page) {
dev_warn(kctx->kbdev->dev, "mmu_get_next_pgd: kmap failure\n");
- return -EINVAL;
+ return 0;
}
target_pgd = kctx->kbdev->mmu_mode->pte_to_phy_addr(page[vpfn]);
if (!target_pgd) {
target_pgd = kbase_mmu_alloc_pgd(kctx);
if (!target_pgd) {
- dev_dbg(kctx->kbdev->dev, "mmu_get_next_pgd: kbase_mmu_alloc_pgd failure\n");
+ dev_warn(kctx->kbdev->dev, "mmu_get_next_pgd: kbase_mmu_alloc_pgd failure\n");
kunmap(p);
- return -ENOMEM;
+ return 0;
}
kctx->kbdev->mmu_mode->entry_set_pte(&page[vpfn], target_pgd);
}
kunmap(p);
- *pgd = target_pgd;
-
- return 0;
+ return target_pgd;
}
-static int mmu_get_bottom_pgd(struct kbase_context *kctx,
- u64 vpfn, phys_addr_t *out_pgd)
+static phys_addr_t mmu_get_bottom_pgd(struct kbase_context *kctx, u64 vpfn)
{
phys_addr_t pgd;
int l;
pgd = kctx->pgd;
for (l = MIDGARD_MMU_TOPLEVEL; l < MIDGARD_MMU_BOTTOMLEVEL; l++) {
- int err = mmu_get_next_pgd(kctx, &pgd, vpfn, l);
+ pgd = mmu_get_next_pgd(kctx, pgd, vpfn, l);
/* Handle failure condition */
- if (err) {
- dev_dbg(kctx->kbdev->dev, "mmu_get_bottom_pgd: mmu_get_next_pgd failure\n");
- return err;
+ if (!pgd) {
+ dev_warn(kctx->kbdev->dev, "mmu_get_bottom_pgd: mmu_get_next_pgd failure\n");
+ return 0;
}
}
- *out_pgd = pgd;
-
- return 0;
+ return pgd;
}
static phys_addr_t mmu_insert_pages_recover_get_next_pgd(struct kbase_context *kctx, phys_addr_t pgd, u64 vpfn, int level)
* On the other hand, it's only a gain when we map more than
* 256 pages at once (on average). Do we really care?
*/
- do {
- err = mmu_get_bottom_pgd(kctx, vpfn, &pgd);
- if (err != -ENOMEM)
- break;
- /* Fill the memory pool with enough pages for
- * the page walk to succeed
- */
- mutex_unlock(&kctx->mmu_lock);
- err = kbase_mem_pool_grow(&kctx->mem_pool,
- MIDGARD_MMU_BOTTOMLEVEL);
- mutex_lock(&kctx->mmu_lock);
- } while (!err);
- if (err) {
+ pgd = mmu_get_bottom_pgd(kctx, vpfn);
+ if (!pgd) {
dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: mmu_get_bottom_pgd failure\n");
if (recover_required) {
/* Invalidate the pages we have partially
recover_vpfn,
recover_count);
}
+ err = -EINVAL;
goto fail_unlock;
}
* On the other hand, it's only a gain when we map more than
* 256 pages at once (on average). Do we really care?
*/
- do {
- err = mmu_get_bottom_pgd(kctx, vpfn, &pgd);
- if (err != -ENOMEM)
- break;
- /* Fill the memory pool with enough pages for
- * the page walk to succeed
- */
- mutex_unlock(&kctx->mmu_lock);
- err = kbase_mem_pool_grow(&kctx->mem_pool,
- MIDGARD_MMU_BOTTOMLEVEL);
- mutex_lock(&kctx->mmu_lock);
- } while (!err);
- if (err) {
+ pgd = mmu_get_bottom_pgd(kctx, vpfn);
+ if (!pgd) {
dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: mmu_get_bottom_pgd failure\n");
if (recover_required) {
/* Invalidate the pages we have partially
recover_vpfn,
recover_count);
}
+ err = -EINVAL;
goto fail_unlock;
}
u32 op;
/* AS transaction begin */
- mutex_lock(&kbdev->mmu_hw_mutex);
+ mutex_lock(&kbdev->as[
+ kctx->as_nr].transaction_mutex);
if (sync)
op = AS_COMMAND_FLUSH_MEM;
}
#endif /* KBASE_GPU_RESET_EN */
- mutex_unlock(&kbdev->mmu_hw_mutex);
+ mutex_unlock(&kbdev->as[
+ kctx->as_nr].transaction_mutex);
/* AS transaction end */
#ifndef CONFIG_MALI_NO_MALI
/*
 * Update the MMU setup for the context's address space by delegating
 * to the active MMU backend's update() hook.
 *
 * Locking: caller must hold the runpool IRQ lock and the address
 * space's transaction mutex (both asserted below), and the context
 * must currently be scheduled onto a valid address space.
 */
void kbase_mmu_update(struct kbase_context *kctx)
{
	lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
	/* ASSERT that the context has a valid as_nr, which is only the case
	 * when it's scheduled in.
	 *
	 * as_nr won't change because the caller has the runpool_irq lock */
	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
	/* as_nr is validated above before being used to index kbdev->as[] */
	lockdep_assert_held(&kctx->kbdev->as[kctx->as_nr].transaction_mutex);
	kctx->kbdev->mmu_mode->update(kctx);
}
/*
 * Disable address space @as_nr by delegating to the active MMU
 * backend's disable_as() hook.
 *
 * Locking: caller must hold the address space's transaction mutex and
 * the runpool IRQ lock (both asserted below).
 */
void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr)
{
	lockdep_assert_held(&kbdev->as[as_nr].transaction_mutex);
	lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
	kbdev->mmu_mode->disable_as(kbdev, as_nr);
}
/* ASSERT that the context has a valid as_nr, which is only the case
* when it's scheduled in.
*
- * as_nr won't change because the caller has the hwaccess_lock */
+ * as_nr won't change because the caller has the runpool_irq lock */
KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
- lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+ lockdep_assert_held(&kctx->kbdev->as[kctx->as_nr].transaction_mutex);
+ lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
/*
* The address space is being disabled, drain all knowledge of it out
if (count > nr)
count = nr;
- err = mmu_get_bottom_pgd(kctx, vpfn, &pgd);
- if (err) {
+ pgd = mmu_get_bottom_pgd(kctx, vpfn);
+ if (!pgd) {
dev_warn(kbdev->dev, "kbase_mmu_teardown_pages: mmu_get_bottom_pgd failure\n");
err = -EINVAL;
goto fail_unlock;
if (count > nr)
count = nr;
- do {
- err = mmu_get_bottom_pgd(kctx, vpfn, &pgd);
- if (err != -ENOMEM)
- break;
- /* Fill the memory pool with enough pages for
- * the page walk to succeed
- */
- mutex_unlock(&kctx->mmu_lock);
- err = kbase_mem_pool_grow(&kctx->mem_pool,
- MIDGARD_MMU_BOTTOMLEVEL);
- mutex_lock(&kctx->mmu_lock);
- } while (!err);
- if (err) {
+ pgd = mmu_get_bottom_pgd(kctx, vpfn);
+ if (!pgd) {
dev_warn(kctx->kbdev->dev, "mmu_get_bottom_pgd failure\n");
+ err = -EINVAL;
goto fail_unlock;
}
return NULL;
}
+ mutex_lock(&kctx->mmu_lock);
+
size_left = nr_pages * PAGE_SIZE;
KBASE_DEBUG_ASSERT(0 != size_left);
kaddr = vmalloc_user(size_left);
- mutex_lock(&kctx->mmu_lock);
-
if (kaddr) {
u64 end_marker = 0xFFULL;
char *buffer;
return;
}
- if (unlikely(faulting_as->protected_mode))
- {
- kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Permission failure");
- kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
- KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
- kbasep_js_runpool_release_ctx(kbdev, kctx);
- atomic_dec(&kbdev->faults_pending);
- return;
-
- }
-
#if KBASE_GPU_RESET_EN
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
/* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode.
/* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */
/* AS transaction begin */
- mutex_lock(&kbdev->mmu_hw_mutex);
+ mutex_lock(&kbdev->as[as_no].transaction_mutex);
/* Set the MMU into unmapped mode */
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
kbase_mmu_disable(kctx);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock,
+ flags);
- mutex_unlock(&kbdev->mmu_hw_mutex);
+ mutex_unlock(&kbdev->as[as_no].transaction_mutex);
/* AS transaction end */
kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
dev_err(kbdev->dev,
"Unhandled Page fault in AS%d at VA 0x%016llX\n"
"Reason: %s\n"
- "raw fault status: 0x%X\n"
+ "raw fault status 0x%X\n"
"decoded fault status: %s\n"
"exception type 0x%X: %s\n"
"access type 0x%X: %s\n"
/* Stop the kctx from submitting more jobs and cause it to be scheduled
* out/rescheduled - this will occur on releasing the context's refcount */
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
kbasep_js_clear_submit_allowed(js_devdata, kctx);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
/* Kill any running jobs from the context. Submit is disallowed, so no more jobs from this
* context can appear in the job slots from this point on */
kbase_backend_jm_kill_jobs_from_kctx(kctx);
/* AS transaction begin */
- mutex_lock(&kbdev->mmu_hw_mutex);
+ mutex_lock(&as->transaction_mutex);
#if KBASE_GPU_RESET_EN
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
/* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode.
}
#endif /* KBASE_GPU_RESET_EN */
/* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
kbase_mmu_disable(kctx);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
- mutex_unlock(&kbdev->mmu_hw_mutex);
+ mutex_unlock(&as->transaction_mutex);
/* AS transaction end */
/* Clear down the fault */
kbase_mmu_hw_clear_fault(kbdev, as, kctx,
kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as->number);
/* AS transaction begin */
- mutex_lock(&kbdev->mmu_hw_mutex);
+ mutex_lock(&as->transaction_mutex);
/* Force a uTLB invalidate */
kbase_mmu_hw_do_operation(kbdev, as, kctx, 0, 0,
AS_COMMAND_UNLOCK, 0);
- mutex_unlock(&kbdev->mmu_hw_mutex);
+ mutex_unlock(&as->transaction_mutex);
/* AS transaction end */
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
if (as->poke_refcount &&
!(as->poke_state & KBASE_AS_POKE_STATE_KILLING_POKE)) {
/* Only queue up the timer if we need it, and we're not trying to kill it */
hrtimer_start(&as->poke_timer, HR_TIMER_DELAY_MSEC(5), HRTIMER_MODE_REL);
}
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
}
enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer)
* This must only be called on a context that's scheduled in, and an atom
* that's running on the GPU.
*
- * The caller must hold hwaccess_lock
+ * The caller must hold kbasep_js_device_data::runpool_irq::lock
*
* This can be called safely from atomic context
*/
KBASE_DEBUG_ASSERT(kctx);
KBASE_DEBUG_ASSERT(katom);
KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
- lockdep_assert_held(&kbdev->hwaccess_lock);
+ lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
if (katom->poking)
return;
katom->poking = 1;
/* It's safe to work on the as/as_nr without an explicit reference,
- * because the caller holds the hwaccess_lock, and the atom itself
+ * because the caller holds the runpool_irq lock, and the atom itself
* was also running and had already taken a reference */
as = &kbdev->as[kctx->as_nr];
as = &kbdev->as[kctx->as_nr];
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
KBASE_DEBUG_ASSERT(as->poke_refcount > 0);
KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
if (--(as->poke_refcount) == 0) {
as->poke_state |= KBASE_AS_POKE_STATE_KILLING_POKE;
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
hrtimer_cancel(&as->poke_timer);
flush_workqueue(as->poke_wq);
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
/* Re-check whether it's still needed */
if (as->poke_refcount) {
* the context until new atoms are run */
}
}
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
katom->poking = 0;
}
{
struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
- lockdep_assert_held(&kbdev->hwaccess_lock);
+ lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
if (!kctx) {
dev_warn(kbdev->dev, "%s in AS%d at 0x%016llx with no context present! Suprious IRQ or SW Design Error?\n",