/*
 * (C) COPYRIGHT ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */
/**
 * @file mali_kbase_context.c
 * Base kernel context APIs
 */
25 #include <mali_kbase.h>
26 #include <mali_midg_regmap.h>
28 #define MEMPOOL_PAGES 16384
32 * @brief Create a kernel base context.
34 * Allocate and init a kernel base context.
36 struct kbase_context *
37 kbase_create_context(struct kbase_device *kbdev, bool is_compat)
39 struct kbase_context *kctx;
42 KBASE_DEBUG_ASSERT(kbdev != NULL);
44 /* zero-inited as lot of code assume it's zero'ed out on create */
45 kctx = vzalloc(sizeof(*kctx));
50 /* creating a context is considered a disjoint event */
51 kbase_disjoint_event(kbdev);
54 kctx->as_nr = KBASEP_AS_NR_INVALID;
55 kctx->is_compat = is_compat;
56 #ifdef CONFIG_MALI_TRACE_TIMELINE
57 kctx->timeline.owner_tgid = task_tgid_nr(current);
59 atomic_set(&kctx->setup_complete, 0);
60 atomic_set(&kctx->setup_in_progress, 0);
61 kctx->keep_gpu_powered = MALI_FALSE;
62 spin_lock_init(&kctx->mm_update_lock);
63 kctx->process_mm = NULL;
64 atomic_set(&kctx->nonmapped_pages, 0);
66 if (MALI_ERROR_NONE != kbase_mem_allocator_init(&kctx->osalloc,
71 kctx->pgd_allocator = &kctx->osalloc;
72 atomic_set(&kctx->used_pages, 0);
74 if (kbase_jd_init(kctx))
77 mali_err = kbasep_js_kctx_init(kctx);
78 if (MALI_ERROR_NONE != mali_err)
79 goto free_jd; /* safe to call kbasep_js_kctx_term in this case */
81 mali_err = kbase_event_init(kctx);
82 if (MALI_ERROR_NONE != mali_err)
85 mutex_init(&kctx->reg_lock);
87 INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
89 INIT_LIST_HEAD(&kctx->waiting_kds_resource);
92 mali_err = kbase_mmu_init(kctx);
93 if (MALI_ERROR_NONE != mali_err)
96 kctx->pgd = kbase_mmu_alloc_pgd(kctx);
100 if (MALI_ERROR_NONE != kbase_mem_allocator_alloc(&kctx->osalloc, 1, &kctx->aliasing_sink_page))
103 kctx->tgid = current->tgid;
104 kctx->pid = current->pid;
105 init_waitqueue_head(&kctx->event_queue);
107 kctx->cookies = KBASE_COOKIE_MASK;
109 /* Make sure page 0 is not used... */
110 if (kbase_region_tracker_init(kctx))
111 goto no_region_tracker;
112 #ifdef CONFIG_GPU_TRACEPOINTS
113 atomic_set(&kctx->jctx.work_id, 0);
115 #ifdef CONFIG_MALI_TRACE_TIMELINE
116 atomic_set(&kctx->timeline.jd_atoms_in_flight, 0);
119 kctx->id = atomic_add_return(1, &(kbdev->ctx_num)) - 1;
121 mali_err = kbasep_mem_profile_debugfs_add(kctx);
122 if (MALI_ERROR_NONE != mali_err)
123 goto no_region_tracker;
125 if (kbasep_jd_debugfs_ctx_add(kctx))
126 goto free_mem_profile;
131 kbasep_mem_profile_debugfs_remove(kctx);
134 kbase_mem_allocator_free(&kctx->osalloc, 1, &kctx->aliasing_sink_page, 0);
135 kbase_mmu_free_pgd(kctx);
137 kbase_mmu_term(kctx);
139 kbase_event_cleanup(kctx);
141 /* Safe to call this one even when didn't initialize (assuming kctx was sufficiently zeroed) */
142 kbasep_js_kctx_term(kctx);
145 kbase_mem_allocator_term(&kctx->osalloc);
152 KBASE_EXPORT_SYMBOL(kbase_create_context)
154 static void kbase_reg_pending_dtor(struct kbase_va_region *reg)
156 dev_dbg(reg->kctx->kbdev->dev, "Freeing pending unmapped region\n");
157 kbase_mem_phy_alloc_put(reg->alloc);
162 * @brief Destroy a kernel base context.
164 * Destroy a kernel base context. Calls kbase_destroy_os_context() to
165 * free OS specific structures. Will release all outstanding regions.
167 void kbase_destroy_context(struct kbase_context *kctx)
169 struct kbase_device *kbdev;
171 unsigned long pending_regions_to_clean;
173 KBASE_DEBUG_ASSERT(NULL != kctx);
176 KBASE_DEBUG_ASSERT(NULL != kbdev);
178 KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);
180 kbasep_jd_debugfs_ctx_remove(kctx);
182 kbasep_mem_profile_debugfs_remove(kctx);
184 /* Ensure the core is powered up for the destroy process */
185 /* A suspend won't happen here, because we're in a syscall from a userspace
187 kbase_pm_context_active(kbdev);
189 if (kbdev->hwcnt.kctx == kctx) {
190 /* disable the use of the hw counters if the app didn't use the API correctly or crashed */
191 KBASE_TRACE_ADD(kbdev, CORE_CTX_HWINSTR_TERM, kctx, NULL, 0u, 0u);
192 dev_warn(kbdev->dev, "The privileged process asking for instrumentation forgot to disable it " "before exiting. Will end instrumentation for them");
193 kbase_instr_hwcnt_disable(kctx);
196 kbase_jd_zap_context(kctx);
197 kbase_event_cleanup(kctx);
199 kbase_gpu_vm_lock(kctx);
201 /* MMU is disabled as part of scheduling out the context */
202 kbase_mmu_free_pgd(kctx);
204 /* drop the aliasing sink page now that it can't be mapped anymore */
205 kbase_mem_allocator_free(&kctx->osalloc, 1, &kctx->aliasing_sink_page, 0);
207 /* free pending region setups */
208 pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK;
209 while (pending_regions_to_clean) {
210 unsigned int cookie = __ffs(pending_regions_to_clean);
211 BUG_ON(!kctx->pending_regions[cookie]);
213 kbase_reg_pending_dtor(kctx->pending_regions[cookie]);
215 kctx->pending_regions[cookie] = NULL;
216 pending_regions_to_clean &= ~(1UL << cookie);
219 kbase_region_tracker_term(kctx);
220 kbase_gpu_vm_unlock(kctx);
222 /* Safe to call this one even when didn't initialize (assuming kctx was sufficiently zeroed) */
223 kbasep_js_kctx_term(kctx);
227 kbase_pm_context_idle(kbdev);
229 kbase_mmu_term(kctx);
231 pages = atomic_read(&kctx->used_pages);
233 dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);
235 if (kctx->keep_gpu_powered) {
236 atomic_dec(&kbdev->keep_gpu_powered_count);
237 kbase_pm_context_idle(kbdev);
240 kbase_mem_allocator_term(&kctx->osalloc);
241 WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);
245 KBASE_EXPORT_SYMBOL(kbase_destroy_context)
248 * Set creation flags on a context
250 mali_error kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags)
252 mali_error err = MALI_ERROR_NONE;
253 struct kbasep_js_kctx_info *js_kctx_info;
254 KBASE_DEBUG_ASSERT(NULL != kctx);
256 js_kctx_info = &kctx->jctx.sched_info;
259 if (flags != (flags & BASE_CONTEXT_CREATE_KERNEL_FLAGS)) {
260 err = MALI_ERROR_FUNCTION_FAILED;
264 mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
266 /* Translate the flags */
267 if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
268 js_kctx_info->ctx.flags &= ~((u32) KBASE_CTX_FLAG_SUBMIT_DISABLED);
270 if ((flags & BASE_CONTEXT_HINT_ONLY_COMPUTE) != 0)
271 js_kctx_info->ctx.flags |= (u32) KBASE_CTX_FLAG_HINT_ONLY_COMPUTE;
273 /* Latch the initial attributes into the Job Scheduler */
274 kbasep_js_ctx_attr_set_initial_attrs(kctx->kbdev, kctx);
276 mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
280 KBASE_EXPORT_SYMBOL(kbase_context_set_create_flags)