	kctx->kbdev = kbdev;
	kctx->as_nr = KBASEP_AS_NR_INVALID;
-	if (is_compat)
-		kbase_ctx_flag_set(kctx, KCTX_COMPAT);
+	kctx->is_compat = is_compat;
#ifdef CONFIG_MALI_TRACE_TIMELINE
	kctx->timeline.owner_tgid = task_tgid_nr(current);
#endif
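/*
 * Aside: the kbase_ctx_flag_*() helpers used by the removed lines are thin
 * wrappers over atomic bit operations on a kctx->flags word, whereas the
 * replacement stores a plain bool field. A minimal sketch of the helper
 * shape, assuming an atomic_t flags field (illustrative, not part of this
 * patch):
 */
static inline void kbase_ctx_flag_set(struct kbase_context *kctx,
				      enum kbase_context_flags flag)
{
	atomic_or(flag, &kctx->flags);		/* set the bit atomically */
}

static inline bool kbase_ctx_flag(struct kbase_context *kctx,
				  enum kbase_context_flags flag)
{
	return atomic_read(&kctx->flags) & flag;	/* test the bit */
}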
	atomic_set(&kctx->setup_complete, 0);
	atomic_set(&kctx->setup_in_progress, 0);
+	kctx->infinite_cache_active = 0;
	spin_lock_init(&kctx->mm_update_lock);
	kctx->process_mm = NULL;
	atomic_set(&kctx->nonmapped_pages, 0);
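/*
 * Aside: setup_complete/setup_in_progress gate the one-shot context setup
 * performed later from the ioctl path. A sketch of how such a pair is
 * typically used so that only one caller performs setup (illustrative; the
 * error handling is assumed):
 */
if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
	return -EINVAL;			/* setup already underway elsewhere */
/* ... perform the one-time setup ... */
atomic_set(&kctx->setup_complete, 1);	/* publish: context is now usable */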
	err = kbase_mmu_init(kctx);
	if (err)
		goto term_dma_fence;

-	do {
-		err = kbase_mem_pool_grow(&kctx->mem_pool,
-				MIDGARD_MMU_BOTTOMLEVEL);
-		if (err)
-			goto pgd_no_mem;
-		kctx->pgd = kbase_mmu_alloc_pgd(kctx);
-	} while (!kctx->pgd);
+	kctx->pgd = kbase_mmu_alloc_pgd(kctx);
+	if (!kctx->pgd)
+		goto free_mmu;
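/*
 * Aside: the dropped loop is a grow-and-retry allocator. It tops the pool
 * up with enough pages for a bottom-level page table, then retries the PGD
 * allocation, because a concurrent allocator can drain the pool between
 * the grow and the alloc. Generic shape of the pattern (illustrative; the
 * helper names are placeholders, not driver API):
 */
do {
	err = pool_grow(pool, pages_per_pgd);	/* refill the backing pool */
	if (err)
		return err;			/* genuinely out of memory */
	pgd = pgd_alloc_from(pool);		/* may still find the pool empty */
} while (!pgd);					/* raced with another user: retry */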
-	kctx->aliasing_sink_page = kbase_mem_alloc_page(kctx->kbdev);
+	kctx->aliasing_sink_page = kbase_mem_pool_alloc(&kctx->mem_pool);
	if (!kctx->aliasing_sink_page)
		goto no_sink_page;
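/*
 * Aside: a per-context memory pool allocation of this kind usually tries a
 * cached page first and falls back to the kernel page allocator when the
 * pool is empty. Rough shape (illustrative; helper names are placeholders):
 */
static struct page *pool_alloc_page(struct kbase_mem_pool *pool)
{
	struct page *p = pool_take_cached(pool);	/* fast path: pooled page */

	if (!p)
		p = alloc_page(GFP_KERNEL);		/* slow path: fresh page */
	return p;
}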
	kbase_gpu_vm_lock(kctx);
	kbase_mmu_free_pgd(kctx);
	kbase_gpu_vm_unlock(kctx);
-pgd_no_mem:
+free_mmu:
	kbase_mmu_term(kctx);
term_dma_fence:
	kbase_dma_fence_term(kctx);
}
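/*
 * Aside: the relabelling keeps the kernel convention of naming an error
 * label after the first cleanup it performs, so the ladder reads as the
 * init sequence in reverse. Schematic of the pattern (illustrative):
 */
	err = init_a();
	if (err)
		goto out;		/* nothing to unwind yet */
	err = init_b();
	if (err)
		goto term_a;		/* unwind only what already succeeded */
	return 0;

term_a:
	term_a_resources();
out:
	return err;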
	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
-	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
+	spin_lock_irqsave(&kctx->kbdev->js_data.runpool_irq.lock, irq_flags);

	/* Translate the flags */
	if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
-		kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);
+		js_kctx_info->ctx.flags &= ~((u32) KBASE_CTX_FLAG_SUBMIT_DISABLED);

	/* Latch the initial attributes into the Job Scheduler */
	kbasep_js_ctx_attr_set_initial_attrs(kctx->kbdev, kctx);

-	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
+	spin_unlock_irqrestore(&kctx->kbdev->js_data.runpool_irq.lock,
+			irq_flags);
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);

out:
	return err;
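/*
 * Aside: the flag translation must hold both locks, in this order: the
 * per-context jsctx_mutex (a sleeping lock) is taken first, then the
 * scheduler's IRQ-safe spinlock. A sketch of the required nesting
 * (illustrative):
 */
	unsigned long irq_flags;

	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);	/* sleeping lock first */
	spin_lock_irqsave(&kctx->kbdev->js_data.runpool_irq.lock, irq_flags);
	/* ... modify state that the scheduler IRQ path also reads ... */
	spin_unlock_irqrestore(&kctx->kbdev->js_data.runpool_irq.lock,
			irq_flags);
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);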