/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/*
 * Base kernel context APIs
 */

#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_mem_linux.h>
29 * kbase_create_context() - Create a kernel base context.
30 * @kbdev: Kbase device
31 * @is_compat: Force creation of a 32-bit context
33 * Allocate and init a kernel base context.
35 * Return: new kbase context
37 struct kbase_context *
38 kbase_create_context(struct kbase_device *kbdev, bool is_compat)
40 struct kbase_context *kctx;
43 KBASE_DEBUG_ASSERT(kbdev != NULL);
45 /* zero-inited as lot of code assume it's zero'ed out on create */
46 kctx = vzalloc(sizeof(*kctx));
51 /* creating a context is considered a disjoint event */
52 kbase_disjoint_event(kbdev);
55 kctx->as_nr = KBASEP_AS_NR_INVALID;
56 kctx->is_compat = is_compat;
57 #ifdef CONFIG_MALI_TRACE_TIMELINE
58 kctx->timeline.owner_tgid = task_tgid_nr(current);
60 atomic_set(&kctx->setup_complete, 0);
61 atomic_set(&kctx->setup_in_progress, 0);
62 kctx->infinite_cache_active = 0;
63 spin_lock_init(&kctx->mm_update_lock);
64 kctx->process_mm = NULL;
65 atomic_set(&kctx->nonmapped_pages, 0);
66 kctx->slots_pullable = 0;
67 kctx->tgid = current->tgid;
68 kctx->pid = current->pid;
70 err = kbase_mem_pool_init(&kctx->mem_pool,
71 kbdev->mem_pool_max_size_default,
72 kctx->kbdev, &kbdev->mem_pool);
76 err = kbase_mem_evictable_init(kctx);
80 atomic_set(&kctx->used_pages, 0);
82 err = kbase_jd_init(kctx);
84 goto deinit_evictable;
86 err = kbasep_js_kctx_init(kctx);
88 goto free_jd; /* safe to call kbasep_js_kctx_term in this case */
90 err = kbase_event_init(kctx);
94 atomic_set(&kctx->drain_pending, 0);
96 mutex_init(&kctx->reg_lock);
98 INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
99 spin_lock_init(&kctx->waiting_soft_jobs_lock);
101 INIT_LIST_HEAD(&kctx->waiting_kds_resource);
103 err = kbase_dma_fence_init(kctx);
107 err = kbase_mmu_init(kctx);
111 kctx->pgd = kbase_mmu_alloc_pgd(kctx);
115 kctx->aliasing_sink_page = kbase_mem_pool_alloc(&kctx->mem_pool);
116 if (!kctx->aliasing_sink_page)
119 init_waitqueue_head(&kctx->event_queue);
121 kctx->cookies = KBASE_COOKIE_MASK;
123 /* Make sure page 0 is not used... */
124 err = kbase_region_tracker_init(kctx);
126 goto no_region_tracker;
128 err = kbase_sticky_resource_init(kctx);
132 err = kbase_jit_init(kctx);
135 #ifdef CONFIG_GPU_TRACEPOINTS
136 atomic_set(&kctx->jctx.work_id, 0);
138 #ifdef CONFIG_MALI_TRACE_TIMELINE
139 atomic_set(&kctx->timeline.jd_atoms_in_flight, 0);
142 kctx->id = atomic_add_return(1, &(kbdev->ctx_num)) - 1;
144 mutex_init(&kctx->vinstr_cli_lock);
146 setup_timer(&kctx->soft_job_timeout,
147 kbasep_soft_job_timeout_worker,
153 kbase_gpu_vm_lock(kctx);
154 kbase_sticky_resource_term(kctx);
155 kbase_gpu_vm_unlock(kctx);
157 kbase_region_tracker_term(kctx);
159 kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);
161 /* VM lock needed for the call to kbase_mmu_free_pgd */
162 kbase_gpu_vm_lock(kctx);
163 kbase_mmu_free_pgd(kctx);
164 kbase_gpu_vm_unlock(kctx);
166 kbase_mmu_term(kctx);
168 kbase_dma_fence_term(kctx);
170 kbase_event_cleanup(kctx);
172 /* Safe to call this one even when didn't initialize (assuming kctx was sufficiently zeroed) */
173 kbasep_js_kctx_term(kctx);
176 kbase_mem_evictable_deinit(kctx);
178 kbase_mem_pool_term(&kctx->mem_pool);
184 KBASE_EXPORT_SYMBOL(kbase_create_context);
186 static void kbase_reg_pending_dtor(struct kbase_va_region *reg)
188 dev_dbg(reg->kctx->kbdev->dev, "Freeing pending unmapped region\n");
189 kbase_mem_phy_alloc_put(reg->cpu_alloc);
190 kbase_mem_phy_alloc_put(reg->gpu_alloc);
195 * kbase_destroy_context - Destroy a kernel base context.
196 * @kctx: Context to destroy
198 * Calls kbase_destroy_os_context() to free OS specific structures.
199 * Will release all outstanding regions.
201 void kbase_destroy_context(struct kbase_context *kctx)
203 struct kbase_device *kbdev;
205 unsigned long pending_regions_to_clean;
207 KBASE_DEBUG_ASSERT(NULL != kctx);
210 KBASE_DEBUG_ASSERT(NULL != kbdev);
212 KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);
214 /* Ensure the core is powered up for the destroy process */
215 /* A suspend won't happen here, because we're in a syscall from a userspace
217 kbase_pm_context_active(kbdev);
219 kbase_jd_zap_context(kctx);
220 kbase_event_cleanup(kctx);
223 * JIT must be terminated before the code below as it must be called
224 * without the region lock being held.
225 * The code above ensures no new JIT allocations can be made by
226 * by the time we get to this point of context tear down.
228 kbase_jit_term(kctx);
230 kbase_gpu_vm_lock(kctx);
232 kbase_sticky_resource_term(kctx);
234 /* MMU is disabled as part of scheduling out the context */
235 kbase_mmu_free_pgd(kctx);
237 /* drop the aliasing sink page now that it can't be mapped anymore */
238 kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);
240 /* free pending region setups */
241 pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK;
242 while (pending_regions_to_clean) {
243 unsigned int cookie = __ffs(pending_regions_to_clean);
245 BUG_ON(!kctx->pending_regions[cookie]);
247 kbase_reg_pending_dtor(kctx->pending_regions[cookie]);
249 kctx->pending_regions[cookie] = NULL;
250 pending_regions_to_clean &= ~(1UL << cookie);
253 kbase_region_tracker_term(kctx);
254 kbase_gpu_vm_unlock(kctx);
256 /* Safe to call this one even when didn't initialize (assuming kctx was sufficiently zeroed) */
257 kbasep_js_kctx_term(kctx);
261 kbase_pm_context_idle(kbdev);
263 kbase_dma_fence_term(kctx);
265 kbase_mmu_term(kctx);
267 pages = atomic_read(&kctx->used_pages);
269 dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);
271 kbase_mem_evictable_deinit(kctx);
272 kbase_mem_pool_term(&kctx->mem_pool);
273 WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);
277 KBASE_EXPORT_SYMBOL(kbase_destroy_context);
280 * kbase_context_set_create_flags - Set creation flags on a context
281 * @kctx: Kbase context
282 * @flags: Flags to set
284 * Return: 0 on success
286 int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags)
289 struct kbasep_js_kctx_info *js_kctx_info;
290 unsigned long irq_flags;
292 KBASE_DEBUG_ASSERT(NULL != kctx);
294 js_kctx_info = &kctx->jctx.sched_info;
297 if (flags != (flags & BASE_CONTEXT_CREATE_KERNEL_FLAGS)) {
302 mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
303 spin_lock_irqsave(&kctx->kbdev->js_data.runpool_irq.lock, irq_flags);
305 /* Translate the flags */
306 if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
307 js_kctx_info->ctx.flags &= ~((u32) KBASE_CTX_FLAG_SUBMIT_DISABLED);
309 /* Latch the initial attributes into the Job Scheduler */
310 kbasep_js_ctx_attr_set_initial_attrs(kctx->kbdev, kctx);
312 spin_unlock_irqrestore(&kctx->kbdev->js_data.runpool_irq.lock,
314 mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
318 KBASE_EXPORT_SYMBOL(kbase_context_set_create_flags);