/*
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */
#if defined(CONFIG_DMA_SHARED_BUFFER)
#include <linux/dma-buf.h>
#endif				/* defined(CONFIG_DMA_SHARED_BUFFER) */
#include <linux/compat.h>

#include <mali_kbase.h>
#include <mali_kbase_uku.h>
#include <linux/random.h>
#include <linux/version.h>
#include <linux/ratelimit.h>
#include <linux/pagemap.h>

#include <mali_kbase_jm.h>
#include <mali_kbase_hwaccess_jm.h>
#include <mali_kbase_tlstream.h>
#define beenthere(kctx, f, a...)  dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
/* random32 was renamed to prandom_u32 in 3.8 */
#define prandom_u32 random32
#endif
/* Return whether katom will run on the GPU or not. Currently only soft jobs and
 * dependency-only atoms do not run on the GPU */
#define IS_GPU_ATOM(katom) (!((katom->core_req & BASE_JD_REQ_SOFT_JOB) ||  \
			((katom->core_req & BASEP_JD_REQ_ATOM_TYPE) ==    \
				BASE_JD_REQ_DEP)))
/*
 * This is the kernel side of the API. The only entry points are:
 * - kbase_jd_submit(): Called from userspace to submit a single bag
 * - kbase_jd_done(): Called from interrupt context to track the
 *   completion of a job.
 * Callouts:
 * - to the job manager (enqueue a job)
 * - to the event subsystem (signals the completion/failure of bag/job-chains).
 */

static void __user *
get_compat_pointer(struct kbase_context *kctx, const union kbase_pointer *p)
{
#ifdef CONFIG_COMPAT
	if (kctx->is_compat)
		return compat_ptr(p->compat_value);
#endif
	return p->value;
}
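
/* get_compat_pointer() is used below wherever a user-supplied pointer is
 * handed to copy_from_user() (the extres list and the atom array read by
 * kbase_jd_submit()), so 32-bit userspace works on a 64-bit kernel. */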
/*
 * Runs an atom, either by handing to the JS or by immediately running it in the case of soft-jobs
 *
 * Returns whether the JS needs a reschedule.
 *
 * Note that the caller must also check the atom status and
 * if it is KBASE_JD_ATOM_STATE_COMPLETED must call jd_done_nolock
 */
static int jd_run_atom(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;

	KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED);

	if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP) {
		/* Dependency only atom */
		katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
		return 0;
	} else if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
		/* Soft-job */
		if (katom->will_fail_event_code) {
			katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
			return 0;
		}
		if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE)
						  == BASE_JD_REQ_SOFT_REPLAY) {
			if (!kbase_replay_process(katom))
				katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
		} else if (kbase_process_soft_job(katom) == 0) {
			kbase_finish_soft_job(katom);
			katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
		}
		return 0;
	}

	katom->status = KBASE_JD_ATOM_STATE_IN_JS;
	/* Queue an action about whether we should try scheduling a context */
	return kbasep_js_add_job(kctx, katom);
}
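
/*
 * For orientation, a sketch of the atom state transitions driven from this
 * file (only the states referenced here; states used elsewhere in the
 * driver are omitted):
 *
 *	UNUSED -> QUEUED            jd_submit_atom()
 *	QUEUED -> IN_JS             jd_run_atom(), GPU atoms only
 *	QUEUED/IN_JS -> COMPLETED   jd_done_nolock()
 *	COMPLETED -> UNUSED         once the completion event is collected
 */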
#ifdef CONFIG_KDS

/* Add the katom to the kds waiting list.
 * Atoms must be added to the waiting list after a successful call to kds_async_waitall.
 * The caller must hold the kbase_jd_context.lock */
static void kbase_jd_kds_waiters_add(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx;

	KBASE_DEBUG_ASSERT(katom);

	kctx = katom->kctx;

	list_add_tail(&katom->node, &kctx->waiting_kds_resource);
}
/* Remove the katom from the kds waiting list.
 * Atoms must be removed from the waiting list before a call to kds_resource_set_release_sync.
 * The supplied katom must first have been added to the list with a call to kbase_jd_kds_waiters_add.
 * The caller must hold the kbase_jd_context.lock */
static void kbase_jd_kds_waiters_remove(struct kbase_jd_atom *katom)
{
	KBASE_DEBUG_ASSERT(katom);
	list_del(&katom->node);
}
static void kds_dep_clear(void *callback_parameter, void *callback_extra_parameter)
{
	struct kbase_jd_atom *katom;
	struct kbase_jd_context *ctx;
	struct kbase_device *kbdev;

	katom = (struct kbase_jd_atom *)callback_parameter;
	KBASE_DEBUG_ASSERT(katom);

	ctx = &katom->kctx->jctx;
	kbdev = katom->kctx->kbdev;
	KBASE_DEBUG_ASSERT(kbdev);

	mutex_lock(&ctx->lock);

	/* KDS resource has already been satisfied (e.g. due to zapping) */
	if (katom->kds_dep_satisfied)
		goto out;

	/* This atom's KDS dependency has now been met */
	katom->kds_dep_satisfied = true;

	/* Check whether the atom's other dependencies were already met. If
	 * katom is a GPU atom then the job scheduler may be able to represent
	 * the dependencies, hence we may attempt to submit it before they are
	 * met. Other atoms must have had both dependencies resolved */
	if (IS_GPU_ATOM(katom) ||
			(!kbase_jd_katom_dep_atom(&katom->dep[0]) &&
			!kbase_jd_katom_dep_atom(&katom->dep[1]))) {
		/* katom dep complete, attempt to run it */
		bool resched = false;

		resched = jd_run_atom(katom);

		if (katom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
			/* The atom has already finished */
			resched |= jd_done_nolock(katom, NULL);
		}

		if (resched)
			kbase_js_sched_all(kbdev);
	}
 out:
	mutex_unlock(&ctx->lock);
}
static void kbase_cancel_kds_wait_job(struct kbase_jd_atom *katom)
{
	KBASE_DEBUG_ASSERT(katom);

	/* Prevent job_done_nolock from being called twice on an atom when
	 * there is a race between job completion and cancellation */

	if (katom->status == KBASE_JD_ATOM_STATE_QUEUED) {
		/* Wait was cancelled - zap the atom */
		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
		if (jd_done_nolock(katom, NULL))
			kbase_js_sched_all(katom->kctx->kbdev);
	}
}
#endif				/* CONFIG_KDS */
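
/*
 * For reference, a summary of the KDS dependency flow implemented above
 * (this restates the logic in this file, it is not new behaviour):
 *
 *	kbase_jd_pre_external_resources()
 *	  -> kds_async_waitall(&katom->kds_rset, ..., via jctx.kds_cb)
 *	  -> kbase_jd_kds_waiters_add(katom)
 *	...
 *	kds_dep_clear()              resource became available
 *	  -> jd_run_atom(katom), then jd_done_nolock() if already complete
 *	...
 *	kbase_jd_zap_context()       teardown path
 *	  -> kbase_cancel_kds_wait_job(katom)
 */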
void kbase_jd_free_external_resources(struct kbase_jd_atom *katom)
{
#ifdef CONFIG_KDS
	if (katom->kds_rset) {
		struct kbase_jd_context *jctx = &katom->kctx->jctx;

		/*
		 * As the atom is no longer waiting, remove it from
		 * the waiting list.
		 */
		mutex_lock(&jctx->lock);
		kbase_jd_kds_waiters_remove(katom);
		mutex_unlock(&jctx->lock);

		/* Release the kds resource or cancel if zapping */
		kds_resource_set_release_sync(&katom->kds_rset);
	}
#endif				/* CONFIG_KDS */
}
static void kbase_jd_post_external_resources(struct kbase_jd_atom *katom)
{
	KBASE_DEBUG_ASSERT(katom);
	KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);

#ifdef CONFIG_KDS
	/* Prevent the KDS resource from triggering the atom in case of zapping */
	if (katom->kds_rset)
		katom->kds_dep_satisfied = true;
#endif				/* CONFIG_KDS */

	kbase_gpu_vm_lock(katom->kctx);
	/* only roll back if extres is non-NULL */
	if (katom->extres) {
		u32 res_no;

		res_no = katom->nr_extres;
		while (res_no-- > 0) {
			struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
			struct kbase_va_region *reg;

			reg = kbase_region_tracker_find_region_base_address(
					katom->kctx,
					katom->extres[res_no].gpu_address);
			kbase_unmap_external_resource(katom->kctx, reg, alloc);
		}
		kfree(katom->extres);
		katom->extres = NULL;
	}
	kbase_gpu_vm_unlock(katom->kctx);
}
/*
 * Set up external resources needed by this job.
 *
 * jctx.lock must be held when this is called.
 */
static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const struct base_jd_atom_v2 *user_atom)
{
	int err_ret_val = -EINVAL;
	u32 res_no;
#ifdef CONFIG_KDS
	u32 kds_res_count = 0;
	struct kds_resource **kds_resources = NULL;
	unsigned long *kds_access_bitmap = NULL;
#endif				/* CONFIG_KDS */
	struct base_external_resource *input_extres;

	KBASE_DEBUG_ASSERT(katom);
	KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);

	/* no resources encoded, early out */
	if (!katom->nr_extres)
		return -EINVAL;

	katom->extres = kmalloc_array(katom->nr_extres, sizeof(*katom->extres), GFP_KERNEL);
	if (NULL == katom->extres) {
		err_ret_val = -ENOMEM;
		goto early_err_out;
	}

	/* copy user buffer to the end of our real buffer.
	 * Make sure the struct sizes haven't changed in a way
	 * we don't support */
	BUILD_BUG_ON(sizeof(*input_extres) > sizeof(*katom->extres));
	input_extres = (struct base_external_resource *)
			(((unsigned char *)katom->extres) +
			(sizeof(*katom->extres) - sizeof(*input_extres)) *
			katom->nr_extres);

	if (copy_from_user(input_extres,
			get_compat_pointer(katom->kctx, &user_atom->extres_list),
			sizeof(*input_extres) * katom->nr_extres) != 0) {
		err_ret_val = -EINVAL;
		goto early_err_out;
	}
#ifdef CONFIG_KDS
	/* assume we have to wait for all */
	KBASE_DEBUG_ASSERT(0 != katom->nr_extres);
	kds_resources = kmalloc_array(katom->nr_extres, sizeof(struct kds_resource *), GFP_KERNEL);

	if (NULL == kds_resources) {
		err_ret_val = -ENOMEM;
		goto early_err_out;
	}

	KBASE_DEBUG_ASSERT(0 != katom->nr_extres);
	kds_access_bitmap = kzalloc(sizeof(unsigned long) * ((katom->nr_extres + BITS_PER_LONG - 1) / BITS_PER_LONG), GFP_KERNEL);

	if (NULL == kds_access_bitmap) {
		err_ret_val = -ENOMEM;
		goto early_err_out;
	}
#endif				/* CONFIG_KDS */
	/* Take the process's mmap lock */
	down_read(&current->mm->mmap_sem);

	/* need to keep the GPU VM locked while we set up UMM buffers */
	kbase_gpu_vm_lock(katom->kctx);
	for (res_no = 0; res_no < katom->nr_extres; res_no++) {
		struct base_external_resource *res;
		struct kbase_va_region *reg;
		struct kbase_mem_phy_alloc *alloc;
		bool exclusive;

		res = &input_extres[res_no];
		exclusive = (res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE)
				? true : false;
		reg = kbase_region_tracker_find_region_enclosing_address(
				katom->kctx,
				res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
		/* did we find a matching region object? */
		if (NULL == reg || (reg->flags & KBASE_REG_FREE)) {
			/* roll back */
			goto failed_loop;
		}

		if (!(katom->core_req & BASE_JD_REQ_SOFT_JOB) &&
				(reg->flags & KBASE_REG_SECURE)) {
			katom->atom_flags |= KBASE_KATOM_FLAG_SECURE;
		}

		alloc = kbase_map_external_resource(katom->kctx, reg,
				current->mm
#ifdef CONFIG_KDS
				, &kds_res_count, kds_resources,
				kds_access_bitmap, exclusive
#endif
				);
		if (!alloc) {
			err_ret_val = -EINVAL;
			goto failed_loop;
		}

		/* finish with updating out array with the data we found */
		/* NOTE: It is important that this is the last thing we do (or
		 * at least not before the first write) as we overwrite elements
		 * as we loop and could be overwriting ourself, so no writes
		 * until the last read for an element.
		 */
		katom->extres[res_no].gpu_address = reg->start_pfn << PAGE_SHIFT; /* save the start_pfn (as an address, not pfn) to use fast lookup later */
		katom->extres[res_no].alloc = alloc;
	}
	/* successfully parsed the extres array */
	/* drop the vm lock before we call into kds */
	kbase_gpu_vm_unlock(katom->kctx);

	/* Release the process's mmap lock */
	up_read(&current->mm->mmap_sem);
#ifdef CONFIG_KDS
	if (kds_res_count) {
		int wait_failed;

		/* We have resources to wait for with kds */
		katom->kds_dep_satisfied = false;

		wait_failed = kds_async_waitall(&katom->kds_rset,
				&katom->kctx->jctx.kds_cb, katom, NULL,
				kds_res_count, kds_access_bitmap,
				kds_resources);

		if (wait_failed)
			goto failed_kds_setup;
		else
			kbase_jd_kds_waiters_add(katom);
	} else {
		/* Nothing to wait for, so kds dep met */
		katom->kds_dep_satisfied = true;
	}
	kfree(kds_resources);
	kfree(kds_access_bitmap);
#endif				/* CONFIG_KDS */

	/* all done OK */
	return 0;
	/* error handling section */

#ifdef CONFIG_KDS
 failed_kds_setup:
	/* Lock the process's mmap lock */
	down_read(&current->mm->mmap_sem);

	/* lock before we unmap */
	kbase_gpu_vm_lock(katom->kctx);
#endif				/* CONFIG_KDS */

 failed_loop:
	/* undo the loop work */
	while (res_no-- > 0) {
		struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;

		kbase_unmap_external_resource(katom->kctx, NULL, alloc);
	}
	kbase_gpu_vm_unlock(katom->kctx);

	/* Release the process's mmap lock */
	up_read(&current->mm->mmap_sem);

 early_err_out:
	kfree(katom->extres);
	katom->extres = NULL;
#ifdef CONFIG_KDS
	kfree(kds_resources);
	kfree(kds_access_bitmap);
#endif				/* CONFIG_KDS */
	return err_ret_val;
}
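
/*
 * For reference, a sketch of how userspace encodes an entry of the extres
 * list parsed above (an assumption inferred from the masking in the loop:
 * the BASE_EXT_RES_ACCESS_EXCLUSIVE flag rides in the low bits of the GPU
 * virtual address):
 *
 *	struct base_external_resource res;
 *
 *	res.ext_resource = gpu_va | BASE_EXT_RES_ACCESS_EXCLUSIVE;
 */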
static inline void jd_resolve_dep(struct list_head *out_list,
					struct kbase_jd_atom *katom,
					u8 d)
{
	u8 other_d = !d;

	while (!list_empty(&katom->dep_head[d])) {
		struct kbase_jd_atom *dep_atom;
		u8 dep_type;

		dep_atom = list_entry(katom->dep_head[d].next,
				struct kbase_jd_atom, dep_item[d]);

		list_del(katom->dep_head[d].next);

		dep_type = kbase_jd_katom_dep_type(&dep_atom->dep[d]);
		kbase_jd_katom_dep_clear(&dep_atom->dep[d]);

		if (katom->event_code != BASE_JD_EVENT_DONE &&
			(dep_type != BASE_JD_DEP_TYPE_ORDER)) {
#ifdef CONFIG_KDS
			if (!dep_atom->kds_dep_satisfied) {
				/* Just set kds_dep_satisfied to true. If the callback happens after this then it will early out and
				 * do nothing. If the callback doesn't happen then kbase_jd_post_external_resources will clean up
				 */
				dep_atom->kds_dep_satisfied = true;
			}
#endif

			dep_atom->event_code = katom->event_code;
			KBASE_DEBUG_ASSERT(dep_atom->status !=
						KBASE_JD_ATOM_STATE_UNUSED);

			if ((dep_atom->core_req & BASE_JD_REQ_SOFT_REPLAY)
					!= BASE_JD_REQ_SOFT_REPLAY) {
				dep_atom->will_fail_event_code =
					dep_atom->event_code;
			} else {
				dep_atom->status =
					KBASE_JD_ATOM_STATE_COMPLETED;
			}
		}
		if (!kbase_jd_katom_dep_atom(&dep_atom->dep[other_d])) {
#ifdef CONFIG_KDS
			if (dep_atom->kds_dep_satisfied)
#endif
				list_add_tail(&dep_atom->dep_item[0], out_list);
		}
	}
}

KBASE_EXPORT_TEST_API(jd_resolve_dep);
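
/*
 * Dependency semantics resolved above, in brief: when an atom completes
 * with an error, a DATA dependant is failed with the same event code,
 * while an ORDER dependant is still allowed to run. E.g. (a sketch of the
 * userspace view; atom roles hypothetical):
 *
 *	atom 1: render job
 *	atom 2: pre_dep on 1, BASE_JD_DEP_TYPE_DATA  -> fails if 1 fails
 *	atom 3: pre_dep on 1, BASE_JD_DEP_TYPE_ORDER -> runs either way
 */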
#if MALI_CUSTOMER_RELEASE == 0
static void jd_force_failure(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
{
	kbdev->force_replay_count++;

	if (kbdev->force_replay_count >= kbdev->force_replay_limit) {
		kbdev->force_replay_count = 0;
		katom->event_code = BASE_JD_EVENT_FORCE_REPLAY;

		if (kbdev->force_replay_random)
			kbdev->force_replay_limit =
				(prandom_u32() % KBASEP_FORCE_REPLAY_RANDOM_LIMIT) + 1;

		dev_info(kbdev->dev, "force_replay : promoting to error\n");
	}
}
/** Test to see if atom should be forced to fail.
 *
 * This function will check if an atom has a replay job as a dependent. If so
 * then it will be considered for forced failure. */
static void jd_check_force_failure(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;
	struct kbase_device *kbdev = kctx->kbdev;
	int i;

	if ((kbdev->force_replay_limit == KBASEP_FORCE_REPLAY_DISABLED) ||
			(katom->core_req & BASEP_JD_REQ_EVENT_NEVER))
		return;

	for (i = 1; i < BASE_JD_ATOM_COUNT; i++) {
		if (kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[0]) == katom ||
		    kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[1]) == katom) {
			struct kbase_jd_atom *dep_atom = &kctx->jctx.atoms[i];

			if ((dep_atom->core_req & BASEP_JD_REQ_ATOM_TYPE) ==
						BASE_JD_REQ_SOFT_REPLAY &&
			    (dep_atom->core_req & kbdev->force_replay_core_req)
					     == kbdev->force_replay_core_req) {
				jd_force_failure(kbdev, katom);
				return;
			}
		}
	}
}
#endif
/*
 * Perform the necessary handling of an atom that has finished running
 * on the GPU.
 *
 * Note that if this is a soft-job that has had kbase_prepare_soft_job called on it then the caller
 * is responsible for calling kbase_finish_soft_job *before* calling this function.
 *
 * The caller must hold the kbase_jd_context.lock.
 */
bool jd_done_nolock(struct kbase_jd_atom *katom,
		struct list_head *completed_jobs_ctx)
{
	struct kbase_context *kctx = katom->kctx;
	struct kbase_device *kbdev = kctx->kbdev;
	struct list_head completed_jobs;
	struct list_head runnable_jobs;
	bool need_to_try_schedule_context = false;
	int i;

	INIT_LIST_HEAD(&completed_jobs);
	INIT_LIST_HEAD(&runnable_jobs);

	KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED);

#if MALI_CUSTOMER_RELEASE == 0
	jd_check_force_failure(katom);
#endif

	/* This is needed in case an atom is failed due to being invalid, this
	 * can happen *before* the jobs that the atom depends on have completed */
	for (i = 0; i < 2; i++) {
		if (kbase_jd_katom_dep_atom(&katom->dep[i])) {
			list_del(&katom->dep_item[i]);
			kbase_jd_katom_dep_clear(&katom->dep[i]);
		}
	}

	/* With PRLAM-10817 or PRLAM-10959 the last tile of a fragment job being soft-stopped can fail with
	 * BASE_JD_EVENT_TILE_RANGE_FAULT.
	 *
	 * So here if the fragment job failed with TILE_RANGE_FAULT and it has been soft-stopped, then we promote the
	 * error code to BASE_JD_EVENT_DONE
	 */
	if ((kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10817) || kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10959)) &&
		  katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT) {
		if ((katom->core_req & BASE_JD_REQ_FS) && (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED)) {
			/* Promote the failure to job done */
			katom->event_code = BASE_JD_EVENT_DONE;
			katom->atom_flags = katom->atom_flags & (~KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED);
		}
	}

	katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
	list_add_tail(&katom->dep_item[0], &completed_jobs);

	while (!list_empty(&completed_jobs)) {
		katom = list_entry(completed_jobs.prev, struct kbase_jd_atom, dep_item[0]);
		list_del(completed_jobs.prev);

		KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);

		for (i = 0; i < 2; i++)
			jd_resolve_dep(&runnable_jobs, katom, i);

		if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
			kbase_jd_post_external_resources(katom);

		while (!list_empty(&runnable_jobs)) {
			struct kbase_jd_atom *node;

			node = list_entry(runnable_jobs.next,
					struct kbase_jd_atom, dep_item[0]);

			list_del(runnable_jobs.next);

			KBASE_DEBUG_ASSERT(node->status != KBASE_JD_ATOM_STATE_UNUSED);

			if (node->status != KBASE_JD_ATOM_STATE_COMPLETED &&
					!kctx->jctx.sched_info.ctx.is_dying) {
				need_to_try_schedule_context |= jd_run_atom(node);
			} else {
				node->event_code = katom->event_code;

				if ((node->core_req & BASEP_JD_REQ_ATOM_TYPE)
						  == BASE_JD_REQ_SOFT_REPLAY) {
					if (kbase_replay_process(node))
						/* Don't complete this atom */
						continue;
				} else if (node->core_req &
							BASE_JD_REQ_SOFT_JOB) {
					/* If this is a fence wait then remove it from the list of sync waiters. */
					if (BASE_JD_REQ_SOFT_FENCE_WAIT == node->core_req)
						list_del(&node->dep_item[0]);

					kbase_finish_soft_job(node);
				}
				node->status = KBASE_JD_ATOM_STATE_COMPLETED;
			}

			if (node->status == KBASE_JD_ATOM_STATE_COMPLETED)
				list_add_tail(&node->dep_item[0], &completed_jobs);
		}

		/* Completing an atom might have freed up space
		 * in the ringbuffer, but only on that slot. */
		jsctx_ll_flush_to_rb(kctx,
				katom->sched_priority,
				katom->slot_nr);

		/* Register a completed job as a disjoint event when the GPU
		 * is in a disjoint state (ie. being reset or replaying jobs).
		 */
		kbase_disjoint_event_potential(kctx->kbdev);
		if (completed_jobs_ctx)
			list_add_tail(&katom->dep_item[0], completed_jobs_ctx);
		else
			kbase_event_post(kctx, katom);

		/* Decrement and check the TOTAL number of jobs. This includes
		 * those not tracked by the scheduler: 'not ready to run' and
		 * 'dependency-only' jobs. */
		if (--kctx->jctx.job_nr == 0)
			/* All events are safely queued now, and we can signal
			 * any waiter that we've got no more jobs (so we can be
			 * safely terminated) */
			wake_up(&kctx->jctx.zero_jobs_wait);
	}

	return need_to_try_schedule_context;
}
KBASE_EXPORT_TEST_API(jd_done_nolock);
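
/*
 * Note on the structure of jd_done_nolock(): completing one atom may in
 * turn complete its dependants, so instead of recursing, completed atoms
 * are drained through the local completed_jobs/runnable_jobs work lists
 * above.
 */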
#ifdef CONFIG_GPU_TRACEPOINTS
enum {
	CORE_REQ_DEP_ONLY,
	CORE_REQ_SOFT,
	CORE_REQ_COMPUTE,
	CORE_REQ_FRAGMENT,
	CORE_REQ_VERTEX,
	CORE_REQ_TILER,
	CORE_REQ_FRAGMENT_VERTEX,
	CORE_REQ_FRAGMENT_VERTEX_TILER,
	CORE_REQ_FRAGMENT_TILER,
	CORE_REQ_VERTEX_TILER,
	CORE_REQ_UNKNOWN
};
static const char * const core_req_strings[] = {
	"Dependency Only Job",
	"Soft Job",
	"Compute Shader Job",
	"Fragment Shader Job",
	"Vertex/Geometry Shader Job",
	"Tiler Job",
	"Fragment Shader + Vertex/Geometry Shader Job",
	"Fragment Shader + Vertex/Geometry Shader Job + Tiler Job",
	"Fragment Shader + Tiler Job",
	"Vertex/Geometry Shader Job + Tiler Job",
	"Unknown Job"
};
static const char *kbasep_map_core_reqs_to_string(base_jd_core_req core_req)
{
	if (core_req & BASE_JD_REQ_SOFT_JOB)
		return core_req_strings[CORE_REQ_SOFT];
	if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
		return core_req_strings[CORE_REQ_COMPUTE];
	switch (core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) {
	case BASE_JD_REQ_DEP:
		return core_req_strings[CORE_REQ_DEP_ONLY];
	case BASE_JD_REQ_FS:
		return core_req_strings[CORE_REQ_FRAGMENT];
	case BASE_JD_REQ_CS:
		return core_req_strings[CORE_REQ_VERTEX];
	case BASE_JD_REQ_T:
		return core_req_strings[CORE_REQ_TILER];
	case (BASE_JD_REQ_FS | BASE_JD_REQ_CS):
		return core_req_strings[CORE_REQ_FRAGMENT_VERTEX];
	case (BASE_JD_REQ_FS | BASE_JD_REQ_T):
		return core_req_strings[CORE_REQ_FRAGMENT_TILER];
	case (BASE_JD_REQ_CS | BASE_JD_REQ_T):
		return core_req_strings[CORE_REQ_VERTEX_TILER];
	case (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T):
		return core_req_strings[CORE_REQ_FRAGMENT_VERTEX_TILER];
	}
	return core_req_strings[CORE_REQ_UNKNOWN];
}
#endif
bool jd_submit_atom(struct kbase_context *kctx,
			const struct base_jd_atom_v2 *user_atom,
			struct kbase_jd_atom *katom)
{
	struct kbase_jd_context *jctx = &kctx->jctx;
	base_jd_core_req core_req;
	int queued = 0;
	int i;
	int sched_prio;
	bool ret;
	bool will_fail = false;

	/* Update the TOTAL number of jobs. This includes those not tracked by
	 * the scheduler: 'not ready to run' and 'dependency-only' jobs. */
	jctx->job_nr++;

	core_req = user_atom->core_req;

	katom->start_timestamp.tv64 = 0;
	katom->time_spent_us = 0;
	katom->udata = user_atom->udata;
	katom->kctx = kctx;
	katom->nr_extres = user_atom->nr_extres;
	katom->extres = NULL;
	katom->device_nr = user_atom->device_nr;
	katom->affinity = 0;
	katom->jc = user_atom->jc;
	katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
	katom->core_req = core_req;
	katom->atom_flags = 0;
	katom->retry_count = 0;
	katom->need_cache_flush_cores_retained = 0;
	katom->x_pre_dep = NULL;
	katom->x_post_dep = NULL;
	katom->will_fail_event_code = 0;
#ifdef CONFIG_KDS
	/* Start by assuming that the KDS dependencies are satisfied,
	 * kbase_jd_pre_external_resources will correct this if there are dependencies */
	katom->kds_dep_satisfied = true;
	katom->kds_rset = NULL;
#endif				/* CONFIG_KDS */
	/* Don't do anything if there is a mess-up with dependencies.
	 * This is done in a separate pass so that both dependencies are
	 * checked at once; otherwise it would be extra complexity to deal
	 * with the 1st dependency (just added to the list) if only the 2nd
	 * one has an invalid config.
	 */
	for (i = 0; i < 2; i++) {
		int dep_atom_number = user_atom->pre_dep[i].atom_id;
		base_jd_dep_type dep_atom_type = user_atom->pre_dep[i].dependency_type;

		if (dep_atom_number) {
			if (dep_atom_type != BASE_JD_DEP_TYPE_ORDER &&
					dep_atom_type != BASE_JD_DEP_TYPE_DATA) {
				katom->event_code = BASE_JD_EVENT_JOB_CONFIG_FAULT;
				katom->status = KBASE_JD_ATOM_STATE_COMPLETED;

				/* Wrong dependency setup. Atom will be sent
				 * back to user space. Do not record any
				 * dependencies. */
				kbase_tlstream_tl_new_atom(
						katom,
						kbase_jd_atom_id(kctx, katom));
				kbase_tlstream_tl_ret_atom_ctx(
						katom, kctx);

				ret = jd_done_nolock(katom, NULL);
				goto out;
			}
		}
	}
	/* Add dependencies */
	for (i = 0; i < 2; i++) {
		int dep_atom_number = user_atom->pre_dep[i].atom_id;
		base_jd_dep_type dep_atom_type;
		struct kbase_jd_atom *dep_atom = &jctx->atoms[dep_atom_number];

		dep_atom_type = user_atom->pre_dep[i].dependency_type;
		kbase_jd_katom_dep_clear(&katom->dep[i]);

		if (!dep_atom_number)
			continue;

		if (dep_atom->status == KBASE_JD_ATOM_STATE_UNUSED ||
				dep_atom->status == KBASE_JD_ATOM_STATE_COMPLETED) {

			if (dep_atom->event_code == BASE_JD_EVENT_DONE)
				continue;
			/* don't stop this atom if it has an order dependency
			 * only to the failed one, try to submit it through
			 * the normal path
			 */
			if (dep_atom_type == BASE_JD_DEP_TYPE_ORDER &&
					dep_atom->event_code > BASE_JD_EVENT_ACTIVE) {
				continue;
			}

			/* Atom has completed, propagate the error code if any */
			katom->event_code = dep_atom->event_code;
			katom->status = KBASE_JD_ATOM_STATE_QUEUED;

			/* This atom is going through soft replay or
			 * will be sent back to user space. Do not record any
			 * dependencies. */
			kbase_tlstream_tl_new_atom(
					katom,
					kbase_jd_atom_id(kctx, katom));
			kbase_tlstream_tl_ret_atom_ctx(katom, kctx);

			if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE)
					 == BASE_JD_REQ_SOFT_REPLAY) {
				if (kbase_replay_process(katom)) {
					ret = false;
					goto out;
				}
			}
			will_fail = true;

		} else {
			/* Atom is in progress, add this atom to the list */
			list_add_tail(&katom->dep_item[i], &dep_atom->dep_head[i]);
			kbase_jd_katom_dep_set(&katom->dep[i], dep_atom, dep_atom_type);
			queued = 1;
		}
	}

	if (will_fail) {
		if (!queued) {
			ret = jd_done_nolock(katom, NULL);

			goto out;
		} else {
			katom->will_fail_event_code = katom->event_code;
			ret = false;

			goto out;
		}
	}
	/* These must occur after the above loop to ensure that an atom
	 * that depends on a previous atom with the same number behaves
	 * as expected */
	katom->event_code = BASE_JD_EVENT_DONE;
	katom->status = KBASE_JD_ATOM_STATE_QUEUED;

	/* Create a new atom recording all dependencies it was set up with. */
	kbase_tlstream_tl_new_atom(
			katom,
			kbase_jd_atom_id(kctx, katom));
	kbase_tlstream_tl_ret_atom_ctx(katom, kctx);
	for (i = 0; i < 2; i++)
		if (BASE_JD_DEP_TYPE_INVALID != kbase_jd_katom_dep_type(
					&katom->dep[i]))
			kbase_tlstream_tl_dep_atom_atom(
					(void *)kbase_jd_katom_dep_atom(
						&katom->dep[i]),
					(void *)katom);
	/* Reject atoms with job chain = NULL, as these cause issues with soft-stop */
	if (!katom->jc && (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) {
		dev_warn(kctx->kbdev->dev, "Rejecting atom with jc = NULL");
		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
		ret = jd_done_nolock(katom, NULL);
		goto out;
	}
	/* Reject atoms with an invalid device_nr */
	if ((katom->core_req & BASE_JD_REQ_SPECIFIC_COHERENT_GROUP) &&
			(katom->device_nr >= kctx->kbdev->gpu_props.num_core_groups)) {
		dev_warn(kctx->kbdev->dev,
				"Rejecting atom with invalid device_nr %d",
				katom->device_nr);
		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
		ret = jd_done_nolock(katom, NULL);
		goto out;
	}
	/* For invalid priority, be most lenient and choose the default */
	sched_prio = kbasep_js_atom_prio_to_sched_prio(user_atom->prio);
	if (sched_prio == KBASE_JS_ATOM_SCHED_PRIO_INVALID)
		sched_prio = KBASE_JS_ATOM_SCHED_PRIO_DEFAULT;
	katom->sched_priority = sched_prio;
	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
		/* handle what we need to do to access the external resources */
		if (kbase_jd_pre_external_resources(katom, user_atom) != 0) {
			/* setup failed (no access, bad resource, unknown resource types, etc.) */
			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
			ret = jd_done_nolock(katom, NULL);
			goto out;
		}
	}
	/* Validate the atom. Function will return error if the atom is
	 * malformed.
	 *
	 * Soft-jobs never enter the job scheduler but have their own initialize method.
	 *
	 * If either fails then we immediately complete the atom with an error.
	 */
	if ((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0) {
		if (!kbase_js_is_atom_valid(kctx->kbdev, katom)) {
			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
			ret = jd_done_nolock(katom, NULL);
			goto out;
		}
	} else {
		/* Soft-job */
		if (kbase_prepare_soft_job(katom) != 0) {
			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
			ret = jd_done_nolock(katom, NULL);
			goto out;
		}
	}
#ifdef CONFIG_GPU_TRACEPOINTS
	katom->work_id = atomic_inc_return(&jctx->work_id);
	trace_gpu_job_enqueue((u32)kctx->id, katom->work_id,
			kbasep_map_core_reqs_to_string(katom->core_req));
#endif
	if (queued && !IS_GPU_ATOM(katom)) {
		ret = false;
		goto out;
	}
#ifdef CONFIG_KDS
	if (!katom->kds_dep_satisfied) {
		/* Queue atom due to KDS dependency */
		ret = false;
		goto out;
	}
#endif				/* CONFIG_KDS */

	if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE)
						  == BASE_JD_REQ_SOFT_REPLAY) {
		if (kbase_replay_process(katom))
			ret = false;
		else
			ret = jd_done_nolock(katom, NULL);

		goto out;
	} else if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
		if (kbase_process_soft_job(katom) == 0) {
			kbase_finish_soft_job(katom);
			ret = jd_done_nolock(katom, NULL);
			goto out;
		}

		ret = false;
	} else if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) {
		katom->status = KBASE_JD_ATOM_STATE_IN_JS;
		ret = kbasep_js_add_job(kctx, katom);
		/* If job was cancelled then resolve immediately */
		if (katom->event_code == BASE_JD_EVENT_JOB_CANCELLED)
			ret = jd_done_nolock(katom, NULL);
	} else {
		/* This is a pure dependency. Resolve it immediately */
		ret = jd_done_nolock(katom, NULL);
	}

 out:
	return ret;
}
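
/*
 * A sketch of the userspace side of declaring a dependency (field names
 * are those of base_jd_atom_v2 as read above; the values are hypothetical):
 *
 *	struct base_jd_atom_v2 atoms[2];
 *
 *	atoms[0].atom_number = 1;
 *	atoms[1].atom_number = 2;
 *	atoms[1].pre_dep[0].atom_id = 1;	0 means "no dependency"
 *	atoms[1].pre_dep[0].dependency_type = BASE_JD_DEP_TYPE_DATA;
 *
 * jd_submit_atom() then links atom 2 onto atom 1's dep_head list until
 * atom 1 completes.
 */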
#ifdef BASE_LEGACY_UK6_SUPPORT
int kbase_jd_submit(struct kbase_context *kctx,
		const struct kbase_uk_job_submit *submit_data,
		int uk6_atom)
#else
int kbase_jd_submit(struct kbase_context *kctx,
		const struct kbase_uk_job_submit *submit_data)
#endif /* BASE_LEGACY_UK6_SUPPORT */
{
	struct kbase_jd_context *jctx = &kctx->jctx;
	int err = 0;
	int i;
	bool need_to_try_schedule_context = false;
	struct kbase_device *kbdev;
	void __user *user_addr;
	u32 latest_flush;

	/*
	 * kbase_jd_submit isn't expected to fail and so all errors with the jobs
	 * are reported by immediately failing them (through the event system)
	 */
	kbdev = kctx->kbdev;

	beenthere(kctx, "%s", "Enter");

	if ((kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_SUBMIT_DISABLED) != 0) {
		dev_err(kbdev->dev, "Attempt to submit to a context that has SUBMIT_DISABLED set on it");
		return -EINVAL;
	}

#ifdef BASE_LEGACY_UK6_SUPPORT
	if ((uk6_atom && submit_data->stride !=
			sizeof(struct base_jd_atom_v2_uk6)) ||
			(!uk6_atom &&
			submit_data->stride != sizeof(base_jd_atom_v2))) {
#else
	if (submit_data->stride != sizeof(base_jd_atom_v2)) {
#endif /* BASE_LEGACY_UK6_SUPPORT */
		dev_err(kbdev->dev, "Stride passed to job_submit doesn't match kernel");
		return -EINVAL;
	}

	user_addr = get_compat_pointer(kctx, &submit_data->addr);

	KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_add_return(submit_data->nr_atoms, &kctx->timeline.jd_atoms_in_flight));

	/* All atoms submitted in this call have the same flush ID */
	latest_flush = kbase_backend_get_current_flush_id(kbdev);
	for (i = 0; i < submit_data->nr_atoms; i++) {
		struct base_jd_atom_v2 user_atom;
		struct kbase_jd_atom *katom;

#ifdef BASE_LEGACY_UK6_SUPPORT
		if (uk6_atom) {
			struct base_jd_atom_v2_uk6 user_atom_v6;
			base_jd_dep_type dep_types[2] = {BASE_JD_DEP_TYPE_DATA, BASE_JD_DEP_TYPE_DATA};

			if (copy_from_user(&user_atom_v6, user_addr,
					sizeof(user_atom_v6))) {
				err = -EINVAL;
				KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx,
					atomic_sub_return(
					submit_data->nr_atoms - i,
					&kctx->timeline.jd_atoms_in_flight));
				break;
			}

			/* Convert from UK6 atom format to UK7 format */
			user_atom.jc = user_atom_v6.jc;
			user_atom.udata = user_atom_v6.udata;
			user_atom.extres_list = user_atom_v6.extres_list;
			user_atom.nr_extres = user_atom_v6.nr_extres;
			user_atom.core_req = user_atom_v6.core_req;

			/* atom number 0 is used for no dependency atoms */
			if (!user_atom_v6.pre_dep[0])
				dep_types[0] = BASE_JD_DEP_TYPE_INVALID;

			base_jd_atom_dep_set(&user_atom.pre_dep[0],
					user_atom_v6.pre_dep[0],
					dep_types[0]);

			/* atom number 0 is used for no dependency atoms */
			if (!user_atom_v6.pre_dep[1])
				dep_types[1] = BASE_JD_DEP_TYPE_INVALID;

			base_jd_atom_dep_set(&user_atom.pre_dep[1],
					user_atom_v6.pre_dep[1],
					dep_types[1]);

			user_atom.atom_number = user_atom_v6.atom_number;
			user_atom.prio = user_atom_v6.prio;
			user_atom.device_nr = user_atom_v6.device_nr;
		} else {
#endif /* BASE_LEGACY_UK6_SUPPORT */
		if (copy_from_user(&user_atom, user_addr, sizeof(user_atom)) != 0) {
			err = -EINVAL;
			KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_sub_return(submit_data->nr_atoms - i, &kctx->timeline.jd_atoms_in_flight));
			break;
		}
#ifdef BASE_LEGACY_UK6_SUPPORT
		}
#endif /* BASE_LEGACY_UK6_SUPPORT */

		user_addr = (void __user *)((uintptr_t) user_addr + submit_data->stride);
		mutex_lock(&jctx->lock);
#ifndef compiletime_assert
#define compiletime_assert_defined
#define compiletime_assert(x, msg) do { switch (0) { case 0: case (x):; } } \
while (false)
#endif
		compiletime_assert((1 << (8*sizeof(user_atom.atom_number))) ==
					BASE_JD_ATOM_COUNT,
			"BASE_JD_ATOM_COUNT and base_atom_id type out of sync");
		compiletime_assert(sizeof(user_atom.pre_dep[0].atom_id) ==
					sizeof(user_atom.atom_number),
			"BASE_JD_ATOM_COUNT and base_atom_id type out of sync");
#ifdef compiletime_assert_defined
#undef compiletime_assert
#undef compiletime_assert_defined
#endif
		katom = &jctx->atoms[user_atom.atom_number];
		/* Record the flush ID for the cache flush optimisation */
		katom->flush_id = latest_flush;

		while (katom->status != KBASE_JD_ATOM_STATE_UNUSED) {
			/* Atom number is already in use, wait for the atom to
			 * complete
			 */
			mutex_unlock(&jctx->lock);

			/* This thread will wait for the atom to complete. Due
			 * to thread scheduling we are not sure that the other
			 * thread that owns the atom will also schedule the
			 * context, so we force the scheduler to be active and
			 * hence eventually schedule this context at some point
			 * later.
			 */
			kbase_js_sched_all(kbdev);

			if (wait_event_killable(katom->completed,
					katom->status ==
					KBASE_JD_ATOM_STATE_UNUSED) != 0) {
				/* We're being killed so the result code
				 * doesn't really matter
				 */
				return 0;
			}
			mutex_lock(&jctx->lock);
		}
		need_to_try_schedule_context |=
				jd_submit_atom(kctx, &user_atom, katom);

		/* Register a completed job as a disjoint event when the GPU is in a disjoint state
		 * (ie. being reset or replaying jobs).
		 */
		kbase_disjoint_event_potential(kbdev);

		mutex_unlock(&jctx->lock);
	}

	if (need_to_try_schedule_context)
		kbase_js_sched_all(kbdev);

	return err;
}

KBASE_EXPORT_TEST_API(kbase_jd_submit);
void kbase_jd_done_worker(struct work_struct *data)
{
	struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work);
	struct kbase_jd_context *jctx;
	struct kbase_context *kctx;
	struct kbasep_js_kctx_info *js_kctx_info;
	union kbasep_js_policy *js_policy;
	struct kbase_device *kbdev;
	struct kbasep_js_device_data *js_devdata;
	u64 cache_jc = katom->jc;
	struct kbasep_js_atom_retained_state katom_retained_state;
	bool schedule = false;
	bool context_idle;
	base_jd_core_req core_req = katom->core_req;
	u64 affinity = katom->affinity;
	enum kbase_atom_coreref_state coreref_state = katom->coreref_state;

	/* Soft jobs should never reach this function */
	KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);

	kctx = katom->kctx;
	jctx = &kctx->jctx;
	kbdev = kctx->kbdev;
	js_kctx_info = &kctx->jctx.sched_info;
	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;

	KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER, kctx, katom, katom->jc, 0);
	kbase_backend_complete_wq(kbdev, katom);

	/*
	 * Begin transaction on JD context and JS context
	 */
	mutex_lock(&jctx->lock);
	mutex_lock(&js_devdata->queue_mutex);
	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);

	/* This worker only gets called on contexts that are scheduled *in*. This is
	 * because it only happens in response to an IRQ from a job that was
	 * running.
	 */
	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled);

	if (katom->event_code == BASE_JD_EVENT_STOPPED) {
		/* Atom has been promoted to stopped */
		unsigned long flags;

		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
		mutex_unlock(&js_devdata->queue_mutex);

		spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

		katom->status = KBASE_JD_ATOM_STATE_IN_JS;
		kbase_js_unpull(kctx, katom);

		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		mutex_unlock(&jctx->lock);

		return;
	}
	if (katom->event_code != BASE_JD_EVENT_DONE)
		dev_err(kbdev->dev,
			"t6xx: GPU fault 0x%02lx from job slot %d\n",
				(unsigned long)katom->event_code,
				katom->slot_nr);

	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
		kbase_as_poking_timer_release_atom(kbdev, kctx, katom);
	/* Retain state before the katom disappears */
	kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);

	if (!kbasep_js_has_atom_finished(&katom_retained_state)) {
		mutex_lock(&js_devdata->runpool_mutex);
		kbasep_js_clear_job_retry_submit(katom);
		/* An atom that has been hard-stopped might have previously
		 * been soft-stopped and has just finished before the hard-stop
		 * occurred. For this reason, clear the hard-stopped flag */
		katom->atom_flags &= ~(KBASE_KATOM_FLAG_BEEN_HARD_STOPPED);
		mutex_unlock(&js_devdata->runpool_mutex);
	}

	if (kbasep_js_has_atom_finished(&katom_retained_state))
		schedule = true;

	context_idle = kbase_js_complete_atom_wq(kctx, katom);

	KBASE_DEBUG_ASSERT(kbasep_js_has_atom_finished(&katom_retained_state));

	kbasep_js_remove_job(kbdev, kctx, katom);
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
	mutex_unlock(&js_devdata->queue_mutex);
	katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF;
	/* jd_done_nolock() requires the jsctx_mutex lock to be dropped */
	schedule |= jd_done_nolock(katom, &kctx->completed_jobs);

	/* katom may have been freed now, do not use! */
	if (context_idle) {
		unsigned long flags;

		context_idle = false;
		mutex_lock(&js_devdata->queue_mutex);
		spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);

		/* If kbase_sched() has scheduled this context back in then
		 * ctx_active will have been set after we marked it as inactive,
		 * and another pm reference will have been taken, so drop our
		 * reference. But do not call kbase_jm_idle_ctx(), as the
		 * context is active and fast-starting is allowed.
		 *
		 * If an atom has been fast-started then kctx->atoms_pulled will
		 * be non-zero but ctx_active will still be false (as the
		 * previous pm reference has been inherited). Do NOT drop our
		 * reference, as it has been re-used, and leave the context as
		 * active.
		 *
		 * If no new atoms have been started then ctx_active will still
		 * be false and atoms_pulled will be zero, so drop the reference
		 * and call kbase_jm_idle_ctx().
		 *
		 * As the checks are done under both the queue_mutex and
		 * runpool_irq.lock it should be impossible for this to race
		 * with the scheduler code.
		 */
		if (kctx->ctx_active || !atomic_read(&kctx->atoms_pulled)) {
			/* Calling kbase_jm_idle_ctx() here will ensure that
			 * atoms are not fast-started when we drop the
			 * runpool_irq.lock. This is not performed if ctx_active
			 * is set as in that case another pm reference has been
			 * taken and a fast-start would be valid.
			 */
			if (!kctx->ctx_active)
				kbase_jm_idle_ctx(kbdev, kctx);
			spin_unlock_irqrestore(&js_devdata->runpool_irq.lock,
					flags);

			kbase_pm_context_idle(kbdev);
		} else {
			kctx->ctx_active = true;
			spin_unlock_irqrestore(&js_devdata->runpool_irq.lock,
					flags);
		}
		mutex_unlock(&js_devdata->queue_mutex);
	}
	/*
	 * Transaction complete
	 */
	mutex_unlock(&jctx->lock);

	/* Job is now no longer running, so can now safely release the context
	 * reference, and handle any actions that were logged against the atom's retained state */

	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx, &katom_retained_state);

	if (schedule)
		kbase_js_sched_all(kbdev);

	if (!atomic_dec_return(&kctx->work_count)) {
		/* If worker now idle then post all events that jd_done_nolock()
		 * has queued */
		mutex_lock(&jctx->lock);
		while (!list_empty(&kctx->completed_jobs)) {
			struct kbase_jd_atom *atom = list_entry(
					kctx->completed_jobs.next,
					struct kbase_jd_atom, dep_item[0]);
			list_del(kctx->completed_jobs.next);

			kbase_event_post(kctx, atom);
		}
		mutex_unlock(&jctx->lock);
	}

	kbase_backend_complete_wq_post_sched(kbdev, core_req, affinity,
			coreref_state);

	KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER_END, kctx, NULL, cache_jc, 0);
}
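
/*
 * kbase_jd_done_worker() only ever runs via the katom->work item queued by
 * kbase_jd_done() below, so the IRQ-time handling and the mutex-taking
 * completion work stay cleanly split between the two.
 */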
/**
 * jd_cancel_worker - Work queue job cancel function.
 * @data: a &struct work_struct
 *
 * Only called as part of 'Zapping' a context (which occurs on termination).
 * Operates serially with the kbase_jd_done_worker() on the work queue.
 *
 * This can only be called on contexts that aren't scheduled.
 *
 * We don't need to release most of the resources that would occur on
 * kbase_jd_done() or kbase_jd_done_worker(), because the atoms here must not be
 * running (by virtue of only being called on contexts that aren't
 * scheduled).
 */
static void jd_cancel_worker(struct work_struct *data)
{
	struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work);
	struct kbase_jd_context *jctx;
	struct kbase_context *kctx;
	struct kbasep_js_kctx_info *js_kctx_info;
	bool need_to_try_schedule_context;
	bool attr_state_changed;
	struct kbase_device *kbdev;

	/* Soft jobs should never reach this function */
	KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);

	kctx = katom->kctx;
	kbdev = kctx->kbdev;
	jctx = &kctx->jctx;
	js_kctx_info = &kctx->jctx.sched_info;

	KBASE_TRACE_ADD(kbdev, JD_CANCEL_WORKER, kctx, katom, katom->jc, 0);

	/* This only gets called on contexts that are scheduled out. Hence, we must
	 * make sure we don't de-ref the number of running jobs (there aren't
	 * any), nor must we try to schedule out the context (it's already
	 * scheduled out).
	 */
	KBASE_DEBUG_ASSERT(!js_kctx_info->ctx.is_scheduled);

	/* Scheduler: Remove the job from the system */
	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
	attr_state_changed = kbasep_js_remove_cancelled_job(kbdev, kctx, katom);
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);

	mutex_lock(&jctx->lock);

	need_to_try_schedule_context = jd_done_nolock(katom, NULL);
	/* Because we're zapping, we're not adding any more jobs to this ctx, so no need to
	 * schedule the context. There's also no need for the jsctx_mutex to have been taken
	 * around this too. */
	KBASE_DEBUG_ASSERT(!need_to_try_schedule_context);

	/* katom may have been freed now, do not use! */
	mutex_unlock(&jctx->lock);

	if (attr_state_changed)
		kbase_js_sched_all(kbdev);
}
/**
 * kbase_jd_done - Complete a job that has been removed from the Hardware
 * @katom: atom which has been completed
 * @slot_nr: slot the atom was on
 * @end_timestamp: completion time
 * @done_code: completion code
 *
 * This must be used whenever a job has been removed from the Hardware, e.g.:
 * An IRQ indicates that the job finished (for both error and 'done' codes), or
 * the job was evicted from the JS_HEAD_NEXT registers during a Soft/Hard stop.
 *
 * Some work is carried out immediately, and the rest is deferred onto a
 * workqueue.
 *
 * Context:
 *   This can be called safely from atomic context.
 *   The caller must hold kbasep_js_device_data.runpool_irq.lock
 */
void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr,
		ktime_t *end_timestamp, kbasep_js_atom_done_code done_code)
{
	struct kbase_context *kctx;
	struct kbase_device *kbdev;

	KBASE_DEBUG_ASSERT(katom);
	kctx = katom->kctx;
	KBASE_DEBUG_ASSERT(kctx);
	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(kbdev);

	if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT)
		katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT;

	KBASE_TRACE_ADD(kbdev, JD_DONE, kctx, katom, katom->jc, 0);

	kbase_job_check_leave_disjoint(kbdev, katom);

	katom->slot_nr = slot_nr;

	atomic_inc(&kctx->work_count);

#ifdef CONFIG_DEBUG_FS
	/* a failed job happened and is waiting for dumping */
	if (!katom->will_fail_event_code &&
			kbase_debug_job_fault_process(katom, katom->event_code))
		return;
#endif

	WARN_ON(work_pending(&katom->work));
	KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
	INIT_WORK(&katom->work, kbase_jd_done_worker);
	queue_work(kctx->jctx.job_done_wq, &katom->work);
}

KBASE_EXPORT_TEST_API(kbase_jd_done);
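
/*
 * A sketch of a typical call site, per the locking contract documented
 * above (the exact caller lives in the job-slot IRQ handling elsewhere in
 * the driver and varies by version):
 *
 *	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
 *	kbase_jd_done(katom, slot_nr, &end_timestamp, done_code);
 *	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
 */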
void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx;
	struct kbasep_js_kctx_info *js_kctx_info;

	KBASE_DEBUG_ASSERT(NULL != kbdev);
	KBASE_DEBUG_ASSERT(NULL != katom);
	kctx = katom->kctx;
	KBASE_DEBUG_ASSERT(NULL != kctx);

	js_kctx_info = &kctx->jctx.sched_info;

	KBASE_TRACE_ADD(kbdev, JD_CANCEL, kctx, katom, katom->jc, 0);

	/* This should only be done from a context that is not scheduled */
	KBASE_DEBUG_ASSERT(!js_kctx_info->ctx.is_scheduled);

	WARN_ON(work_pending(&katom->work));

	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;

	KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
	INIT_WORK(&katom->work, jd_cancel_worker);
	queue_work(kctx->jctx.job_done_wq, &katom->work);
}
void kbase_jd_zap_context(struct kbase_context *kctx)
{
	struct kbase_jd_atom *katom;
	struct list_head *entry, *tmp;
	struct kbase_device *kbdev;

	KBASE_DEBUG_ASSERT(kctx);

	kbdev = kctx->kbdev;

	KBASE_TRACE_ADD(kbdev, JD_ZAP_CONTEXT, kctx, NULL, 0u, 0u);

	kbase_js_zap_context(kctx);

	mutex_lock(&kctx->jctx.lock);

	/*
	 * While holding the struct kbase_jd_context lock clean up jobs which are known to kbase but are
	 * queued outside the job scheduler.
	 */

	hrtimer_cancel(&kctx->soft_event_timeout);
	list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
		katom = list_entry(entry, struct kbase_jd_atom, dep_item[0]);
		kbase_cancel_soft_job(katom);
	}

#ifdef CONFIG_KDS

	/* For each job waiting on a kds resource, cancel the wait and force the job to
	 * complete early, this is done so that we don't leave jobs outstanding waiting
	 * on kds resources which may never be released when contexts are zapped, resulting
	 * in a hang.
	 *
	 * Note that we can safely iterate over the list as the struct kbase_jd_context lock is held,
	 * this prevents items being removed when calling job_done_nolock in kbase_cancel_kds_wait_job.
	 */

	list_for_each(entry, &kctx->waiting_kds_resource) {
		katom = list_entry(entry, struct kbase_jd_atom, node);

		kbase_cancel_kds_wait_job(katom);
	}
#endif

	mutex_unlock(&kctx->jctx.lock);

	kbase_jm_wait_for_zero_jobs(kctx);
}

KBASE_EXPORT_TEST_API(kbase_jd_zap_context);
int kbase_jd_init(struct kbase_context *kctx)
{
	int i;
	int mali_err = 0;
#ifdef CONFIG_KDS
	int err;
#endif				/* CONFIG_KDS */

	KBASE_DEBUG_ASSERT(kctx);

	kctx->jctx.job_done_wq = alloc_workqueue("mali_jd", 0, 1);
	if (NULL == kctx->jctx.job_done_wq) {
		mali_err = -ENOMEM;
		goto out1;
	}

	for (i = 0; i < BASE_JD_ATOM_COUNT; i++) {
		init_waitqueue_head(&kctx->jctx.atoms[i].completed);

		INIT_LIST_HEAD(&kctx->jctx.atoms[i].dep_head[0]);
		INIT_LIST_HEAD(&kctx->jctx.atoms[i].dep_head[1]);

		/* Catch userspace attempting to use an atom which doesn't exist as a pre-dependency */
		kctx->jctx.atoms[i].event_code = BASE_JD_EVENT_JOB_INVALID;
		kctx->jctx.atoms[i].status = KBASE_JD_ATOM_STATE_UNUSED;
	}

	mutex_init(&kctx->jctx.lock);

	init_waitqueue_head(&kctx->jctx.zero_jobs_wait);

	spin_lock_init(&kctx->jctx.tb_lock);

#ifdef CONFIG_KDS
	err = kds_callback_init(&kctx->jctx.kds_cb, 0, kds_dep_clear);
	if (0 != err) {
		mali_err = -EINVAL;
		goto out2;
	}
#endif				/* CONFIG_KDS */

	kctx->jctx.job_nr = 0;
	INIT_LIST_HEAD(&kctx->completed_jobs);
	atomic_set(&kctx->work_count, 0);

	return 0;

#ifdef CONFIG_KDS
 out2:
	destroy_workqueue(kctx->jctx.job_done_wq);
#endif				/* CONFIG_KDS */
 out1:
	return mali_err;
}

KBASE_EXPORT_TEST_API(kbase_jd_init);
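
/*
 * Lifecycle note (a sketch of the expected pairing, based on how these
 * entry points are used from context setup/teardown elsewhere in the
 * driver):
 *
 *	kbase_jd_init(kctx);         on context creation
 *	...
 *	kbase_jd_zap_context(kctx);  on termination, cancels outstanding jobs
 *	kbase_jd_exit(kctx);         then tears down the workqueue and KDS cb
 */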
void kbase_jd_exit(struct kbase_context *kctx)
{
	KBASE_DEBUG_ASSERT(kctx);

#ifdef CONFIG_KDS
	kds_callback_term(&kctx->jctx.kds_cb);
#endif				/* CONFIG_KDS */
	/* Work queue is emptied by this */
	destroy_workqueue(kctx->jctx.job_done_wq);
}

KBASE_EXPORT_TEST_API(kbase_jd_exit);