MALI: rockchip: upgrade midgard DDK to r14p0-01rel0
[firefly-linux-kernel-4.4.55.git] / drivers/gpu/arm/midgard/mali_kbase_jd.c
1 /*
2  *
3  * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
4  *
5  * This program is free software and is provided to you under the terms of the
6  * GNU General Public License version 2 as published by the Free Software
7  * Foundation, and any use by you of this program is subject to the terms
8  * of such GNU licence.
9  *
10  * A copy of the licence is included with the program, and can also be obtained
11  * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12  * Boston, MA  02110-1301, USA.
13  *
14  */
15
16
17
18
19
20 #if defined(CONFIG_DMA_SHARED_BUFFER)
21 #include <linux/dma-buf.h>
22 #endif                          /* defined(CONFIG_DMA_SHARED_BUFFER) */
23 #ifdef CONFIG_COMPAT
24 #include <linux/compat.h>
25 #endif
26 #include <mali_kbase.h>
27 #include <mali_kbase_uku.h>
28 #include <linux/random.h>
29 #include <linux/version.h>
30 #include <linux/ratelimit.h>
31
32 #include <mali_kbase_jm.h>
33 #include <mali_kbase_hwaccess_jm.h>
34 #include <mali_kbase_tlstream.h>
35
36 #include "mali_kbase_dma_fence.h"
37
38 #define beenthere(kctx, f, a...)  dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
39
40 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
41 /* random32 was renamed to prandom_u32 in 3.8 */
42 #define prandom_u32 random32
43 #endif
44
45 /* Return whether katom will run on the GPU or not. Currently only soft jobs and
46  * dependency-only atoms do not run on the GPU */
47 #define IS_GPU_ATOM(katom) (!((katom->core_req & BASE_JD_REQ_SOFT_JOB) ||  \
48                         ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) ==    \
49                                                         BASE_JD_REQ_DEP)))
50 /*
51  * This is the kernel side of the API. The only entry points are:
52  * - kbase_jd_submit(): Called from userspace to submit a single bag
53  * - kbase_jd_done(): Called from interrupt context to track the
54  *   completion of a job.
55  * Callouts:
56  * - to the job manager (enqueue a job)
57  * - to the event subsystem (signals the completion/failure of bag/job-chains).
58  */
59
60 static void __user *
61 get_compat_pointer(struct kbase_context *kctx, const union kbase_pointer *p)
62 {
63 #ifdef CONFIG_COMPAT
64         if (kbase_ctx_flag(kctx, KCTX_COMPAT))
65                 return compat_ptr(p->compat_value);
66 #endif
67         return p->value;
68 }
69
70 /* Runs an atom, either by handing it to the JS or by immediately running it in the case of soft-jobs
71  *
72  * Returns whether the JS needs a reschedule.
73  *
74  * Note that the caller must also check the atom status and,
75  * if it is KBASE_JD_ATOM_STATE_COMPLETED, must call jd_done_nolock
76  */
77 static int jd_run_atom(struct kbase_jd_atom *katom)
78 {
79         struct kbase_context *kctx = katom->kctx;
80
81         KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED);
82
83         if ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP) {
84                 /* Dependency only atom */
85                 katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
86                 return 0;
87         } else if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
88                 /* Soft-job */
89                 if (katom->will_fail_event_code) {
90                         katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
91                         return 0;
92                 }
93                 if ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
94                                                   == BASE_JD_REQ_SOFT_REPLAY) {
95                         if (!kbase_replay_process(katom))
96                                 katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
97                 } else if (kbase_process_soft_job(katom) == 0) {
98                         kbase_finish_soft_job(katom);
99                         katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
100                 }
101                 return 0;
102         }
103
104         katom->status = KBASE_JD_ATOM_STATE_IN_JS;
105         /* Queue an action about whether we should try scheduling a context */
106         return kbasep_js_add_job(kctx, katom);
107 }
108
109 #if defined(CONFIG_KDS) || defined(CONFIG_MALI_DMA_FENCE)
110 void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom)
111 {
112         struct kbase_device *kbdev;
113
114         KBASE_DEBUG_ASSERT(katom);
115         kbdev = katom->kctx->kbdev;
116         KBASE_DEBUG_ASSERT(kbdev);
117
118         /* Check whether the atom's other dependencies were already met. If
119          * katom is a GPU atom then the job scheduler may be able to represent
120          * the dependencies, hence we may attempt to submit it before they are
121          * met. Other atoms must have had both dependencies resolved.
122          */
123         if (IS_GPU_ATOM(katom) ||
124                         (!kbase_jd_katom_dep_atom(&katom->dep[0]) &&
125                         !kbase_jd_katom_dep_atom(&katom->dep[1]))) {
126                 /* katom dep complete, attempt to run it */
127                 bool resched = false;
128
129                 resched = jd_run_atom(katom);
130
131                 if (katom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
132                         /* The atom has already finished */
133                         resched |= jd_done_nolock(katom, NULL);
134                 }
135
136                 if (resched)
137                         kbase_js_sched_all(kbdev);
138         }
139 }
140 #endif
141
142 #ifdef CONFIG_KDS
143
144 /* Add the katom to the kds waiting list.
145  * Atoms must be added to the waiting list after a successful call to kds_async_waitall.
146  * The caller must hold the kbase_jd_context.lock */
147
148 static void kbase_jd_kds_waiters_add(struct kbase_jd_atom *katom)
149 {
150         struct kbase_context *kctx;
151
152         KBASE_DEBUG_ASSERT(katom);
153
154         kctx = katom->kctx;
155
156         list_add_tail(&katom->node, &kctx->waiting_kds_resource);
157 }
158
159 /* Remove the katom from the kds waiting list.
160  * Atoms must be removed from the waiting list before a call to kds_resource_set_release_sync.
161  * The supplied katom must first have been added to the list with a call to kbase_jd_kds_waiters_add.
162  * The caller must hold the kbase_jd_context.lock */
163
164 static void kbase_jd_kds_waiters_remove(struct kbase_jd_atom *katom)
165 {
166         KBASE_DEBUG_ASSERT(katom);
167         list_del(&katom->node);
168 }
169
170 static void kds_dep_clear(void *callback_parameter, void *callback_extra_parameter)
171 {
172         struct kbase_jd_atom *katom;
173         struct kbase_jd_context *ctx;
174
175         katom = (struct kbase_jd_atom *)callback_parameter;
176         KBASE_DEBUG_ASSERT(katom);
177
178         ctx = &katom->kctx->jctx;
179
180         /* If KDS resource has already been satisfied (e.g. due to zapping)
181          * do nothing.
182          */
183         mutex_lock(&ctx->lock);
184         if (!katom->kds_dep_satisfied) {
185                 katom->kds_dep_satisfied = true;
186                 kbase_jd_dep_clear_locked(katom);
187         }
188         mutex_unlock(&ctx->lock);
189 }
190
191 static void kbase_cancel_kds_wait_job(struct kbase_jd_atom *katom)
192 {
193         KBASE_DEBUG_ASSERT(katom);
194
195         /* Prevent jd_done_nolock from being called twice on an atom when
196          * there is a race between job completion and cancellation */
197
198         if (katom->status == KBASE_JD_ATOM_STATE_QUEUED) {
199                 /* Wait was cancelled - zap the atom */
200                 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
201                 if (jd_done_nolock(katom, NULL))
202                         kbase_js_sched_all(katom->kctx->kbdev);
203         }
204 }
205 #endif                          /* CONFIG_KDS */
206
207 void kbase_jd_free_external_resources(struct kbase_jd_atom *katom)
208 {
209 #ifdef CONFIG_KDS
210         if (katom->kds_rset) {
211                 struct kbase_jd_context *jctx = &katom->kctx->jctx;
212
213                 /*
214                  * As the atom is no longer waiting, remove it from
215                  * the waiting list.
216                  */
217
218                 mutex_lock(&jctx->lock);
219                 kbase_jd_kds_waiters_remove(katom);
220                 mutex_unlock(&jctx->lock);
221
222                 /* Release the kds resource or cancel if zapping */
223                 kds_resource_set_release_sync(&katom->kds_rset);
224         }
225 #endif                          /* CONFIG_KDS */
226
227 #ifdef CONFIG_MALI_DMA_FENCE
228         /* Flush dma-fence workqueue to ensure that any callbacks that may have
229          * been queued are done before continuing.
230          * Any successfully completed atom would have had all its callbacks
231          * completed before the atom was run, so only flush for failed atoms.
232          */
233         if (katom->event_code != BASE_JD_EVENT_DONE)
234                 flush_workqueue(katom->kctx->dma_fence.wq);
235 #endif /* CONFIG_MALI_DMA_FENCE */
236 }
237
238 static void kbase_jd_post_external_resources(struct kbase_jd_atom *katom)
239 {
240         KBASE_DEBUG_ASSERT(katom);
241         KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);
242
243 #ifdef CONFIG_KDS
244         /* Prevent the KDS resource from triggering the atom in case of zapping */
245         if (katom->kds_rset)
246                 katom->kds_dep_satisfied = true;
247 #endif                          /* CONFIG_KDS */
248
249 #ifdef CONFIG_MALI_DMA_FENCE
250         kbase_dma_fence_signal(katom);
251 #endif /* CONFIG_MALI_DMA_FENCE */
252
253         kbase_gpu_vm_lock(katom->kctx);
254         /* only roll back if extres is non-NULL */
255         if (katom->extres) {
256                 u32 res_no;
257
258                 res_no = katom->nr_extres;
259                 while (res_no-- > 0) {
260                         struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
261                         struct kbase_va_region *reg;
262
263                         reg = kbase_region_tracker_find_region_base_address(
264                                         katom->kctx,
265                                         katom->extres[res_no].gpu_address);
266                         kbase_unmap_external_resource(katom->kctx, reg, alloc);
267                 }
268                 kfree(katom->extres);
269                 katom->extres = NULL;
270         }
271         kbase_gpu_vm_unlock(katom->kctx);
272 }
273
274 /*
275  * Set up external resources needed by this job.
276  *
277  * jctx.lock must be held when this is called.
278  */
279
280 static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const struct base_jd_atom_v2 *user_atom)
281 {
282         int err_ret_val = -EINVAL;
283         u32 res_no;
284 #ifdef CONFIG_KDS
285         u32 kds_res_count = 0;
286         struct kds_resource **kds_resources = NULL;
287         unsigned long *kds_access_bitmap = NULL;
288 #endif                          /* CONFIG_KDS */
289 #ifdef CONFIG_MALI_DMA_FENCE
290         struct kbase_dma_fence_resv_info info = {
291                 .dma_fence_resv_count = 0,
292         };
293 #endif
294         struct base_external_resource *input_extres;
295
296         KBASE_DEBUG_ASSERT(katom);
297         KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);
298
299         /* no resources encoded, early out */
300         if (!katom->nr_extres)
301                 return -EINVAL;
302
303         katom->extres = kmalloc_array(katom->nr_extres, sizeof(*katom->extres), GFP_KERNEL);
304         if (NULL == katom->extres) {
305                 err_ret_val = -ENOMEM;
306                 goto early_err_out;
307         }
308
309         /* copy user buffer to the end of our real buffer.
310          * Make sure the struct sizes haven't changed in a way
311          * we don't support */
312         BUILD_BUG_ON(sizeof(*input_extres) > sizeof(*katom->extres));
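            /* Staging the user entries at the tail of katom->extres lets the
             * loop below convert them in place: since an input entry is no
             * larger than an output entry, writing output element i can never
             * clobber an input element that has not yet been read. */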
313         input_extres = (struct base_external_resource *)
314                         (((unsigned char *)katom->extres) +
315                         (sizeof(*katom->extres) - sizeof(*input_extres)) *
316                         katom->nr_extres);
317
318         if (copy_from_user(input_extres,
319                         get_compat_pointer(katom->kctx, &user_atom->extres_list),
320                         sizeof(*input_extres) * katom->nr_extres) != 0) {
321                 err_ret_val = -EINVAL;
322                 goto early_err_out;
323         }
324 #ifdef CONFIG_KDS
325         /* assume we have to wait for all */
326         KBASE_DEBUG_ASSERT(0 != katom->nr_extres);
327         kds_resources = kmalloc_array(katom->nr_extres, sizeof(struct kds_resource *), GFP_KERNEL);
328
329         if (!kds_resources) {
330                 err_ret_val = -ENOMEM;
331                 goto early_err_out;
332         }
333
334         KBASE_DEBUG_ASSERT(0 != katom->nr_extres);
335         kds_access_bitmap = kcalloc(BITS_TO_LONGS(katom->nr_extres),
336                                     sizeof(unsigned long),
337                                     GFP_KERNEL);
338         if (!kds_access_bitmap) {
339                 err_ret_val = -ENOMEM;
340                 goto early_err_out;
341         }
342 #endif                          /* CONFIG_KDS */
343
344 #ifdef CONFIG_MALI_DMA_FENCE
345         info.resv_objs = kmalloc_array(katom->nr_extres,
346                                        sizeof(struct reservation_object *),
347                                        GFP_KERNEL);
348         if (!info.resv_objs) {
349                 err_ret_val = -ENOMEM;
350                 goto early_err_out;
351         }
352
353         info.dma_fence_excl_bitmap = kcalloc(BITS_TO_LONGS(katom->nr_extres),
354                                              sizeof(unsigned long),
355                                              GFP_KERNEL);
356         if (!info.dma_fence_excl_bitmap) {
357                 err_ret_val = -ENOMEM;
358                 goto early_err_out;
359         }
360 #endif /* CONFIG_MALI_DMA_FENCE */
361
362         /* Take the process's mmap lock */
363         down_read(&current->mm->mmap_sem);
364
365         /* need to keep the GPU VM locked while we set up UMM buffers */
366         kbase_gpu_vm_lock(katom->kctx);
367         for (res_no = 0; res_no < katom->nr_extres; res_no++) {
368                 struct base_external_resource *res;
369                 struct kbase_va_region *reg;
370                 struct kbase_mem_phy_alloc *alloc;
371                 bool exclusive;
372
373                 res = &input_extres[res_no];
374                 exclusive = (res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE)
375                                 ? true : false;
376                 reg = kbase_region_tracker_find_region_enclosing_address(
377                                 katom->kctx,
378                                 res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
379                 /* did we find a matching region object? */
380                 if (NULL == reg || (reg->flags & KBASE_REG_FREE)) {
381                         /* roll back */
382                         goto failed_loop;
383                 }
384
385                 if (!(katom->core_req & BASE_JD_REQ_SOFT_JOB) &&
386                                 (reg->flags & KBASE_REG_SECURE)) {
387                         katom->atom_flags |= KBASE_KATOM_FLAG_PROTECTED;
388                 }
389
390                 alloc = kbase_map_external_resource(katom->kctx, reg,
391                                 current->mm
392 #ifdef CONFIG_KDS
393                                 , &kds_res_count, kds_resources,
394                                 kds_access_bitmap, exclusive
395 #endif
396                                 );
397                 if (!alloc) {
398                         err_ret_val = -EINVAL;
399                         goto failed_loop;
400                 }
401
402 #ifdef CONFIG_MALI_DMA_FENCE
403                 if (reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
404                         struct reservation_object *resv;
405
406                         resv = reg->gpu_alloc->imported.umm.dma_buf->resv;
407                         if (resv)
408                                 kbase_dma_fence_add_reservation(resv, &info,
409                                                                 exclusive);
410                 }
411 #endif /* CONFIG_MALI_DMA_FENCE */
412
413                 /* finish by updating our output array with the data we found */
414                 /* NOTE: It is important that this is the last thing we do (or
415                  * at least comes after the last read of the input entry), as we
416                  * overwrite elements of the same buffer as we loop and could
417                  * otherwise be overwriting ourselves: no writes until the last
418                  * read for an element. */
419                 katom->extres[res_no].gpu_address = reg->start_pfn << PAGE_SHIFT; /* save the start_pfn (as an address, not pfn) to use fast lookup later */
420                 katom->extres[res_no].alloc = alloc;
421         }
422         /* successfully parsed the extres array */
423         /* drop the vm lock before we call into kds */
424         kbase_gpu_vm_unlock(katom->kctx);
425
426         /* Release the process's mmap lock */
427         up_read(&current->mm->mmap_sem);
428
429 #ifdef CONFIG_KDS
430         if (kds_res_count) {
431                 int wait_failed;
432
433                 /* We have resources to wait for with kds */
434                 katom->kds_dep_satisfied = false;
435
436                 wait_failed = kds_async_waitall(&katom->kds_rset,
437                                 &katom->kctx->jctx.kds_cb, katom, NULL,
438                                 kds_res_count, kds_access_bitmap,
439                                 kds_resources);
440
441                 if (wait_failed)
442                         goto failed_kds_setup;
443                 else
444                         kbase_jd_kds_waiters_add(katom);
445         } else {
446                 /* Nothing to wait for, so kds dep met */
447                 katom->kds_dep_satisfied = true;
448         }
449         kfree(kds_resources);
450         kfree(kds_access_bitmap);
451 #endif                          /* CONFIG_KDS */
452
453 #ifdef CONFIG_MALI_DMA_FENCE
454         if (info.dma_fence_resv_count) {
455                 int ret;
456
457                 ret = kbase_dma_fence_wait(katom, &info);
458                 if (ret < 0)
459                         goto failed_dma_fence_setup;
460         }
461
462         kfree(info.resv_objs);
463         kfree(info.dma_fence_excl_bitmap);
464 #endif /* CONFIG_MALI_DMA_FENCE */
465
466         /* all done OK */
467         return 0;
468
469 /* error handling section */
470
471 #ifdef CONFIG_MALI_DMA_FENCE
472 failed_dma_fence_setup:
473 #ifdef CONFIG_KDS
474         /* If we are here, dma_fence setup failed but KDS didn't.
475          * Revert KDS setup if any.
476          */
477         if (kds_res_count) {
478                 mutex_unlock(&katom->kctx->jctx.lock);
479                 kds_resource_set_release_sync(&katom->kds_rset);
480                 mutex_lock(&katom->kctx->jctx.lock);
481
482                 kbase_jd_kds_waiters_remove(katom);
483                 katom->kds_dep_satisfied = true;
484         }
485 #endif /* CONFIG_KDS */
486 #endif /* CONFIG_MALI_DMA_FENCE */
487 #ifdef CONFIG_KDS
488 failed_kds_setup:
489 #endif
490 #if defined(CONFIG_KDS) || defined(CONFIG_MALI_DMA_FENCE)
491         /* Take the process's mmap lock */
492         down_read(&current->mm->mmap_sem);
493
494         /* lock before we unmap */
495         kbase_gpu_vm_lock(katom->kctx);
496 #endif
497
498  failed_loop:
499         /* undo the loop work */
500         while (res_no-- > 0) {
501                 struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
502
503                 kbase_unmap_external_resource(katom->kctx, NULL, alloc);
504         }
505         kbase_gpu_vm_unlock(katom->kctx);
506
507         /* Release the process's mmap lock */
508         up_read(&current->mm->mmap_sem);
509
510  early_err_out:
511         kfree(katom->extres);
512         katom->extres = NULL;
513 #ifdef CONFIG_KDS
514         kfree(kds_resources);
515         kfree(kds_access_bitmap);
516 #endif                          /* CONFIG_KDS */
517 #ifdef CONFIG_MALI_DMA_FENCE
518         kfree(info.resv_objs);
519         kfree(info.dma_fence_excl_bitmap);
520 #endif
521         return err_ret_val;
522 }
523
524 static inline void jd_resolve_dep(struct list_head *out_list,
525                                         struct kbase_jd_atom *katom,
526                                         u8 d, bool ctx_is_dying)
527 {
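        /* katom->dep_head[d] holds the atoms that depend on katom through
         * their dependency slot d; other_d indexes each dependent atom's
         * remaining slot. */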
528         u8 other_d = !d;
529
530         while (!list_empty(&katom->dep_head[d])) {
531                 struct kbase_jd_atom *dep_atom;
532                 struct kbase_jd_atom *other_dep_atom;
533                 u8 dep_type;
534
535                 dep_atom = list_entry(katom->dep_head[d].next,
536                                 struct kbase_jd_atom, dep_item[d]);
537                 list_del(katom->dep_head[d].next);
538
539                 dep_type = kbase_jd_katom_dep_type(&dep_atom->dep[d]);
540                 kbase_jd_katom_dep_clear(&dep_atom->dep[d]);
541
542                 if (katom->event_code != BASE_JD_EVENT_DONE &&
543                         (dep_type != BASE_JD_DEP_TYPE_ORDER)) {
544 #ifdef CONFIG_KDS
545                         if (!dep_atom->kds_dep_satisfied) {
546                                 /* Just set kds_dep_satisfied to true. If the callback happens after this then it will early out and
547                                  * do nothing. If the callback doesn't happen then kbase_jd_post_external_resources will clean up
548                                  */
549                                 dep_atom->kds_dep_satisfied = true;
550                         }
551 #endif
552
553 #ifdef CONFIG_MALI_DMA_FENCE
554                         kbase_dma_fence_cancel_callbacks(dep_atom);
555 #endif
556
557                         dep_atom->event_code = katom->event_code;
558                         KBASE_DEBUG_ASSERT(dep_atom->status !=
559                                                 KBASE_JD_ATOM_STATE_UNUSED);
560
561                         if ((dep_atom->core_req & BASE_JD_REQ_SOFT_REPLAY)
562                                         != BASE_JD_REQ_SOFT_REPLAY) {
563                                 dep_atom->will_fail_event_code =
564                                         dep_atom->event_code;
565                         } else {
566                                 dep_atom->status =
567                                         KBASE_JD_ATOM_STATE_COMPLETED;
568                         }
569                 }
570                 other_dep_atom = (struct kbase_jd_atom *)
571                         kbase_jd_katom_dep_atom(&dep_atom->dep[other_d]);
572
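                /* The dependent atom may be queued for processing if it is not
                 * already on a JD list and either its other dependency is also
                 * resolved, or it is a GPU atom (whose remaining dependency the
                 * job scheduler can represent) in a live context with neither
                 * atom marked to fail; KDS and dma-fence state is checked below
                 * before it is actually added. */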
573                 if (!dep_atom->in_jd_list && (!other_dep_atom ||
574                                 (IS_GPU_ATOM(dep_atom) && !ctx_is_dying &&
575                                 !dep_atom->will_fail_event_code &&
576                                 !other_dep_atom->will_fail_event_code))) {
577                         bool dep_satisfied = true;
578 #ifdef CONFIG_MALI_DMA_FENCE
579                         int dep_count;
580
581                         dep_count = atomic_read(&dep_atom->dma_fence.dep_count);
582                         if (likely(dep_count == -1)) {
583                                 dep_satisfied = true;
584                         } else {
585                                 /*
586                                  * There are either still active callbacks, or
587                                  * all fences for this @dep_atom have signaled,
588                                  * but the worker that will queue the atom has
589                                  * not yet run.
590                                  *
591                                  * Wait for the fences to signal and the fence
592                                  * worker to run and handle @dep_atom. If
593                                  * @dep_atom was completed due to error on
594                                  * @katom, then the fence worker will pick up
595                                  * the complete status and error code set on
596                                  * @dep_atom above.
597                                  */
598                                 dep_satisfied = false;
599                         }
600 #endif /* CONFIG_MALI_DMA_FENCE */
601
602 #ifdef CONFIG_KDS
603                         dep_satisfied = dep_satisfied && dep_atom->kds_dep_satisfied;
604 #endif
605
606                         if (dep_satisfied) {
607                                 dep_atom->in_jd_list = true;
608                                 list_add_tail(&dep_atom->jd_item, out_list);
609                         }
610                 }
611         }
612 }
613
614 KBASE_EXPORT_TEST_API(jd_resolve_dep);
615
616 #if MALI_CUSTOMER_RELEASE == 0
617 static void jd_force_failure(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
618 {
619         kbdev->force_replay_count++;
620
621         if (kbdev->force_replay_count >= kbdev->force_replay_limit) {
622                 kbdev->force_replay_count = 0;
623                 katom->event_code = BASE_JD_EVENT_FORCE_REPLAY;
624
625                 if (kbdev->force_replay_random)
626                         kbdev->force_replay_limit =
627                            (prandom_u32() % KBASEP_FORCE_REPLAY_RANDOM_LIMIT) + 1;
628
629                 dev_info(kbdev->dev, "force_replay : promoting to error\n");
630         }
631 }
632
633 /** Test to see if atom should be forced to fail.
634  *
635  * This function will check if an atom has a replay job as a dependent. If so
636  * then it will be considered for forced failure. */
637 static void jd_check_force_failure(struct kbase_jd_atom *katom)
638 {
639         struct kbase_context *kctx = katom->kctx;
640         struct kbase_device *kbdev = kctx->kbdev;
641         int i;
642
643         if ((kbdev->force_replay_limit == KBASEP_FORCE_REPLAY_DISABLED) ||
644             (katom->core_req & BASEP_JD_REQ_EVENT_NEVER))
645                 return;
646
647         for (i = 1; i < BASE_JD_ATOM_COUNT; i++) {
648                 if (kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[0]) == katom ||
649                     kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[1]) == katom) {
650                         struct kbase_jd_atom *dep_atom = &kctx->jctx.atoms[i];
651
652                         if ((dep_atom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) ==
653                                                      BASE_JD_REQ_SOFT_REPLAY &&
654                             (dep_atom->core_req & kbdev->force_replay_core_req)
655                                              == kbdev->force_replay_core_req) {
656                                 jd_force_failure(kbdev, katom);
657                                 return;
658                         }
659                 }
660         }
661 }
662 #endif
663
664 /**
665  * is_dep_valid - Validate that a dependency is valid for early dependency
666  *                submission
667  * @katom: Dependency atom to validate
668  *
669  * A dependency is valid if any of the following are true :
670  * - It does not exist (a non-existent dependency does not block submission)
671  * - It is in the job scheduler
672  * - It has completed, does not have a failure event code, and has not been
673  *   marked to fail in the future
674  *
675  * Return: true if valid, false otherwise
676  */
677 static bool is_dep_valid(struct kbase_jd_atom *katom)
678 {
679         /* If there's no dependency then this is 'valid' from the perspective of
680          * early dependency submission */
681         if (!katom)
682                 return true;
683
684         /* Dependency must have reached the job scheduler */
685         if (katom->status < KBASE_JD_ATOM_STATE_IN_JS)
686                 return false;
687
688         /* If dependency has completed and has failed or will fail then it is
689          * not valid */
690         if (katom->status >= KBASE_JD_ATOM_STATE_HW_COMPLETED &&
691                         (katom->event_code != BASE_JD_EVENT_DONE ||
692                         katom->will_fail_event_code))
693                 return false;
694
695         return true;
696 }
697
698 static void jd_try_submitting_deps(struct list_head *out_list,
699                 struct kbase_jd_atom *node)
700 {
701         int i;
702
703         for (i = 0; i < 2; i++) {
704                 struct list_head *pos;
705
706                 list_for_each(pos, &node->dep_head[i]) {
707                         struct kbase_jd_atom *dep_atom = list_entry(pos,
708                                         struct kbase_jd_atom, dep_item[i]);
709
710                         if (IS_GPU_ATOM(dep_atom) && !dep_atom->in_jd_list) {
711                                 /* Check if atom deps look sane */
712                                 bool dep0_valid = is_dep_valid(
713                                                 dep_atom->dep[0].atom);
714                                 bool dep1_valid = is_dep_valid(
715                                                 dep_atom->dep[1].atom);
716                                 bool dep_satisfied = true;
717 #ifdef CONFIG_MALI_DMA_FENCE
718                                 int dep_count;
719
720                                 dep_count = atomic_read(
721                                                 &dep_atom->dma_fence.dep_count);
722                                 if (likely(dep_count == -1)) {
723                                         dep_satisfied = true;
724                                 } else {
725                                 /*
726                                  * There are either still active callbacks, or
727                                  * all fences for this @dep_atom have signaled,
728                                  * but the worker that will queue the atom has
729                                  * not yet run.
730                                  *
731                                  * Wait for the fences to signal and the fence
732                                  * worker to run and handle @dep_atom. If
733                                  * @dep_atom was completed due to error on
734                                  * @katom, then the fence worker will pick up
735                                  * the complete status and error code set on
736                                  * @dep_atom above.
737                                  */
738                                         dep_satisfied = false;
739                                 }
740 #endif /* CONFIG_MALI_DMA_FENCE */
741 #ifdef CONFIG_KDS
742                                 dep_satisfied = dep_satisfied &&
743                                                 dep_atom->kds_dep_satisfied;
744 #endif
745
746                                 if (dep0_valid && dep1_valid && dep_satisfied) {
747                                         dep_atom->in_jd_list = true;
748                                         list_add(&dep_atom->jd_item, out_list);
749                                 }
750                         }
751                 }
752         }
753 }
754
755 /*
756  * Perform the necessary handling of an atom that has finished running
757  * on the GPU.
758  *
759  * Note that if this is a soft-job that has had kbase_prepare_soft_job called on it then the caller
760  * is responsible for calling kbase_finish_soft_job *before* calling this function.
761  *
762  * The caller must hold the kbase_jd_context.lock.
763  */
764 bool jd_done_nolock(struct kbase_jd_atom *katom,
765                 struct list_head *completed_jobs_ctx)
766 {
767         struct kbase_context *kctx = katom->kctx;
768         struct kbase_device *kbdev = kctx->kbdev;
769         struct list_head completed_jobs;
770         struct list_head runnable_jobs;
771         bool need_to_try_schedule_context = false;
772         int i;
773
774         INIT_LIST_HEAD(&completed_jobs);
775         INIT_LIST_HEAD(&runnable_jobs);
776
777         KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED);
778
779 #if MALI_CUSTOMER_RELEASE == 0
780         jd_check_force_failure(katom);
781 #endif
782
783         /* This is needed in case an atom is failed due to being invalid; this
784          * can happen *before* the jobs that the atom depends on have completed */
785         for (i = 0; i < 2; i++) {
786                 if (kbase_jd_katom_dep_atom(&katom->dep[i])) {
787                         list_del(&katom->dep_item[i]);
788                         kbase_jd_katom_dep_clear(&katom->dep[i]);
789                 }
790         }
791
792         /* With PRLAM-10817 or PRLAM-10959 the last tile of a fragment job being soft-stopped can fail with
793          * BASE_JD_EVENT_TILE_RANGE_FAULT.
794          *
795          * So here, if the fragment job failed with TILE_RANGE_FAULT and it has been soft-stopped, then we promote the
796          * error code to BASE_JD_EVENT_DONE
797          */
798
799         if ((kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10817) || kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10959)) &&
800                   katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT) {
801                 if ((katom->core_req & BASE_JD_REQ_FS) && (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED)) {
802                         /* Promote the failure to job done */
803                         katom->event_code = BASE_JD_EVENT_DONE;
804                         katom->atom_flags = katom->atom_flags & (~KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED);
805                 }
806         }
807
808         katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
809         list_add_tail(&katom->jd_item, &completed_jobs);
810
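        /* Drain the list of completed atoms: each completed atom may release
         * dependents into runnable_jobs. Dependents that complete immediately
         * (soft jobs, dependency-only atoms, or atoms inheriting a failure)
         * are fed back into completed_jobs, while the rest are handed to the
         * job scheduler. Each completed atom is finally posted to the event
         * system (or to completed_jobs_ctx). */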
811         while (!list_empty(&completed_jobs)) {
812                 katom = list_entry(completed_jobs.prev, struct kbase_jd_atom, jd_item);
813                 list_del(completed_jobs.prev);
814                 KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);
815
816                 for (i = 0; i < 2; i++)
817                         jd_resolve_dep(&runnable_jobs, katom, i,
818                                         kbase_ctx_flag(kctx, KCTX_DYING));
819
820                 if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
821                         kbase_jd_post_external_resources(katom);
822
823                 while (!list_empty(&runnable_jobs)) {
824                         struct kbase_jd_atom *node;
825
826                         node = list_entry(runnable_jobs.next,
827                                         struct kbase_jd_atom, jd_item);
828                         list_del(runnable_jobs.next);
829                         node->in_jd_list = false;
830
831                         KBASE_DEBUG_ASSERT(node->status != KBASE_JD_ATOM_STATE_UNUSED);
832
833                         if (node->status != KBASE_JD_ATOM_STATE_COMPLETED &&
834                                         !kbase_ctx_flag(kctx, KCTX_DYING)) {
835                                 need_to_try_schedule_context |= jd_run_atom(node);
836                         } else {
837                                 node->event_code = katom->event_code;
838
839                                 if ((node->core_req &
840                                         BASE_JD_REQ_SOFT_JOB_TYPE) ==
841                                         BASE_JD_REQ_SOFT_REPLAY) {
842                                         if (kbase_replay_process(node))
843                                                 /* Don't complete this atom */
844                                                 continue;
845                                 } else if (node->core_req &
846                                                         BASE_JD_REQ_SOFT_JOB) {
847                                         /* If this is a fence wait soft job
848                                          * then remove it from the list of sync
849                                          * waiters.
850                                          */
851                                         if (BASE_JD_REQ_SOFT_FENCE_WAIT == node->core_req)
852                                                 kbasep_remove_waiting_soft_job(node);
853
854                                         kbase_finish_soft_job(node);
855                                 }
856                                 node->status = KBASE_JD_ATOM_STATE_COMPLETED;
857                         }
858
859                         if (node->status == KBASE_JD_ATOM_STATE_COMPLETED) {
860                                 list_add_tail(&node->jd_item, &completed_jobs);
861                         } else if (node->status == KBASE_JD_ATOM_STATE_IN_JS &&
862                                         !node->will_fail_event_code) {
863                                 /* Node successfully submitted, try submitting
864                                  * dependencies as they may now be representable
865                                  * in JS */
866                                 jd_try_submitting_deps(&runnable_jobs, node);
867                         }
868                 }
869
870                 /* Register a completed job as a disjoint event when the GPU
871                  * is in a disjoint state (i.e. being reset or replaying jobs).
872                  */
873                 kbase_disjoint_event_potential(kctx->kbdev);
874                 if (completed_jobs_ctx)
875                         list_add_tail(&katom->jd_item, completed_jobs_ctx);
876                 else
877                         kbase_event_post(kctx, katom);
878
879                 /* Decrement and check the TOTAL number of jobs. This includes
880                  * those not tracked by the scheduler: 'not ready to run' and
881                  * 'dependency-only' jobs. */
882                 if (--kctx->jctx.job_nr == 0)
883                         wake_up(&kctx->jctx.zero_jobs_wait);    /* All events are safely queued now, and we can signal any waiter
884                                                                  * that we've got no more jobs (so we can be safely terminated) */
885         }
886
887         return need_to_try_schedule_context;
888 }
889
890 KBASE_EXPORT_TEST_API(jd_done_nolock);
891
892 #ifdef CONFIG_GPU_TRACEPOINTS
893 enum {
894         CORE_REQ_DEP_ONLY,
895         CORE_REQ_SOFT,
896         CORE_REQ_COMPUTE,
897         CORE_REQ_FRAGMENT,
898         CORE_REQ_VERTEX,
899         CORE_REQ_TILER,
900         CORE_REQ_FRAGMENT_VERTEX,
901         CORE_REQ_FRAGMENT_VERTEX_TILER,
902         CORE_REQ_FRAGMENT_TILER,
903         CORE_REQ_VERTEX_TILER,
904         CORE_REQ_UNKNOWN
905 };
906 static const char * const core_req_strings[] = {
907         "Dependency Only Job",
908         "Soft Job",
909         "Compute Shader Job",
910         "Fragment Shader Job",
911         "Vertex/Geometry Shader Job",
912         "Tiler Job",
913         "Fragment Shader + Vertex/Geometry Shader Job",
914         "Fragment Shader + Vertex/Geometry Shader Job + Tiler Job",
915         "Fragment Shader + Tiler Job",
916         "Vertex/Geometry Shader Job + Tiler Job",
917         "Unknown Job"
918 };
919 static const char *kbasep_map_core_reqs_to_string(base_jd_core_req core_req)
920 {
921         if (core_req & BASE_JD_REQ_SOFT_JOB)
922                 return core_req_strings[CORE_REQ_SOFT];
923         if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
924                 return core_req_strings[CORE_REQ_COMPUTE];
925         switch (core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) {
926         case BASE_JD_REQ_DEP:
927                 return core_req_strings[CORE_REQ_DEP_ONLY];
928         case BASE_JD_REQ_FS:
929                 return core_req_strings[CORE_REQ_FRAGMENT];
930         case BASE_JD_REQ_CS:
931                 return core_req_strings[CORE_REQ_VERTEX];
932         case BASE_JD_REQ_T:
933                 return core_req_strings[CORE_REQ_TILER];
934         case (BASE_JD_REQ_FS | BASE_JD_REQ_CS):
935                 return core_req_strings[CORE_REQ_FRAGMENT_VERTEX];
936         case (BASE_JD_REQ_FS | BASE_JD_REQ_T):
937                 return core_req_strings[CORE_REQ_FRAGMENT_TILER];
938         case (BASE_JD_REQ_CS | BASE_JD_REQ_T):
939                 return core_req_strings[CORE_REQ_VERTEX_TILER];
940         case (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T):
941                 return core_req_strings[CORE_REQ_FRAGMENT_VERTEX_TILER];
942         }
943         return core_req_strings[CORE_REQ_UNKNOWN];
944 }
945 #endif
946
947 bool jd_submit_atom(struct kbase_context *kctx, const struct base_jd_atom_v2 *user_atom, struct kbase_jd_atom *katom)
948 {
949         struct kbase_jd_context *jctx = &kctx->jctx;
950         int queued = 0;
951         int i;
952         int sched_prio;
953         bool ret;
954         bool will_fail = false;
955
956         /* Update the TOTAL number of jobs. This includes those not tracked by
957          * the scheduler: 'not ready to run' and 'dependency-only' jobs. */
958         jctx->job_nr++;
959
960         katom->start_timestamp.tv64 = 0;
961         katom->time_spent_us = 0;
962         katom->udata = user_atom->udata;
963         katom->kctx = kctx;
964         katom->nr_extres = user_atom->nr_extres;
965         katom->extres = NULL;
966         katom->device_nr = user_atom->device_nr;
967         katom->affinity = 0;
968         katom->jc = user_atom->jc;
969         katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
970         katom->core_req = user_atom->core_req;
971         katom->atom_flags = 0;
972         katom->retry_count = 0;
973         katom->need_cache_flush_cores_retained = 0;
974         katom->pre_dep = NULL;
975         katom->post_dep = NULL;
976         katom->x_pre_dep = NULL;
977         katom->x_post_dep = NULL;
978         katom->will_fail_event_code = BASE_JD_EVENT_NOT_STARTED;
979
980         /* Implicitly sets katom->protected_state.enter as well. */
981         katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
982
983         katom->age = kctx->age_count++;
984
985         INIT_LIST_HEAD(&katom->jd_item);
986 #ifdef CONFIG_KDS
987         /* Start by assuming that the KDS dependencies are satisfied;
988          * kbase_jd_pre_external_resources will correct this if there are dependencies */
989         katom->kds_dep_satisfied = true;
990         katom->kds_rset = NULL;
991 #endif                          /* CONFIG_KDS */
992 #ifdef CONFIG_MALI_DMA_FENCE
993         atomic_set(&katom->dma_fence.dep_count, -1);
994 #endif
995
996         kbase_tlstream_tl_attrib_atom_state(katom, TL_ATOM_STATE_IDLE);
997
998         /* Don't do anything if the dependency setup is messed up.
999            This is done in a separate pass so that both dependencies are checked at once; otherwise
1000            it would add extra complexity to deal with the 1st dependency (just added to the list)
1001            if only the 2nd one has an invalid config.
1002          */
1003         for (i = 0; i < 2; i++) {
1004                 int dep_atom_number = user_atom->pre_dep[i].atom_id;
1005                 base_jd_dep_type dep_atom_type = user_atom->pre_dep[i].dependency_type;
1006
1007                 if (dep_atom_number) {
1008                         if (dep_atom_type != BASE_JD_DEP_TYPE_ORDER &&
1009                                         dep_atom_type != BASE_JD_DEP_TYPE_DATA) {
1010                                 katom->event_code = BASE_JD_EVENT_JOB_CONFIG_FAULT;
1011                                 katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
1012
1013                                 /* Wrong dependency setup. Atom will be sent
1014                                  * back to user space. Do not record any
1015                                  * dependencies. */
1016                                 kbase_tlstream_tl_new_atom(
1017                                                 katom,
1018                                                 kbase_jd_atom_id(kctx, katom));
1019                                 kbase_tlstream_tl_ret_atom_ctx(
1020                                                 katom, kctx);
1021
1022                                 ret = jd_done_nolock(katom, NULL);
1023                                 goto out;
1024                         }
1025                 }
1026         }
1027
1028         /* Add dependencies */
1029         for (i = 0; i < 2; i++) {
1030                 int dep_atom_number = user_atom->pre_dep[i].atom_id;
1031                 base_jd_dep_type dep_atom_type;
1032                 struct kbase_jd_atom *dep_atom = &jctx->atoms[dep_atom_number];
1033
1034                 dep_atom_type = user_atom->pre_dep[i].dependency_type;
1035                 kbase_jd_katom_dep_clear(&katom->dep[i]);
1036
1037                 if (!dep_atom_number)
1038                         continue;
1039
1040                 if (dep_atom->status == KBASE_JD_ATOM_STATE_UNUSED ||
1041                                 dep_atom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
1042
1043                         if (dep_atom->event_code == BASE_JD_EVENT_DONE)
1044                                 continue;
1045                         /* don't stop this atom if it has an order dependency
1046                          * only to the failed one; try to submit it through
1047                          * the normal path
1048                          */
1049                         if (dep_atom_type == BASE_JD_DEP_TYPE_ORDER &&
1050                                         dep_atom->event_code > BASE_JD_EVENT_ACTIVE) {
1051                                 continue;
1052                         }
1053
1054                         /* Atom has completed; propagate the error code, if any */
1055                         katom->event_code = dep_atom->event_code;
1056                         katom->status = KBASE_JD_ATOM_STATE_QUEUED;
1057
1058                         /* This atom is going through soft replay or
1059                          * will be sent back to user space. Do not record any
1060                          * dependencies. */
1061                         kbase_tlstream_tl_new_atom(
1062                                         katom,
1063                                         kbase_jd_atom_id(kctx, katom));
1064                         kbase_tlstream_tl_ret_atom_ctx(katom, kctx);
1065
1066                         if ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
1067                                          == BASE_JD_REQ_SOFT_REPLAY) {
1068                                 if (kbase_replay_process(katom)) {
1069                                         ret = false;
1070                                         goto out;
1071                                 }
1072                         }
1073                         will_fail = true;
1074
1075                 } else {
1076                         /* Atom is in progress, add this atom to the list */
1077                         list_add_tail(&katom->dep_item[i], &dep_atom->dep_head[i]);
1078                         kbase_jd_katom_dep_set(&katom->dep[i], dep_atom, dep_atom_type);
1079                         queued = 1;
1080                 }
1081         }
1082
1083         if (will_fail) {
1084                 if (!queued) {
1085                         ret = jd_done_nolock(katom, NULL);
1086
1087                         goto out;
1088                 } else {
1089                         katom->will_fail_event_code = katom->event_code;
1090                         ret = false;
1091
1092                         goto out;
1093                 }
1094         } else {
1095                 /* These must occur after the above loop to ensure that an atom
1096                  * that depends on a previous atom with the same number behaves
1097                  * as expected */
1098                 katom->event_code = BASE_JD_EVENT_DONE;
1099                 katom->status = KBASE_JD_ATOM_STATE_QUEUED;
1100         }
1101
1102         /* For invalid priority, be most lenient and choose the default */
1103         sched_prio = kbasep_js_atom_prio_to_sched_prio(user_atom->prio);
1104         if (sched_prio == KBASE_JS_ATOM_SCHED_PRIO_INVALID)
1105                 sched_prio = KBASE_JS_ATOM_SCHED_PRIO_DEFAULT;
1106         katom->sched_priority = sched_prio;
1107
1108         /* Create a new atom recording all dependencies it was set up with. */
1109         kbase_tlstream_tl_new_atom(
1110                         katom,
1111                         kbase_jd_atom_id(kctx, katom));
1112         kbase_tlstream_tl_attrib_atom_priority(katom, katom->sched_priority);
1113         kbase_tlstream_tl_ret_atom_ctx(katom, kctx);
1114         for (i = 0; i < 2; i++)
1115                 if (BASE_JD_DEP_TYPE_INVALID != kbase_jd_katom_dep_type(
1116                                         &katom->dep[i])) {
1117                         kbase_tlstream_tl_dep_atom_atom(
1118                                         (void *)kbase_jd_katom_dep_atom(
1119                                                 &katom->dep[i]),
1120                                         (void *)katom);
1121                 } else if (BASE_JD_DEP_TYPE_INVALID !=
1122                                 user_atom->pre_dep[i].dependency_type) {
1123                         /* Resolved dependency. */
1124                         int dep_atom_number =
1125                                 user_atom->pre_dep[i].atom_id;
1126                         struct kbase_jd_atom *dep_atom =
1127                                 &jctx->atoms[dep_atom_number];
1128
1129                         kbase_tlstream_tl_rdep_atom_atom(
1130                                         (void *)dep_atom,
1131                                         (void *)katom);
1132                 }
1133
1134         /* Reject atoms with job chain = NULL, as these cause issues with soft-stop */
1135         if (!katom->jc && (katom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) {
1136                 dev_warn(kctx->kbdev->dev, "Rejecting atom with jc = NULL");
1137                 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1138                 ret = jd_done_nolock(katom, NULL);
1139                 goto out;
1140         }
1141
1142         /* Reject atoms with an invalid device_nr */
1143         if ((katom->core_req & BASE_JD_REQ_SPECIFIC_COHERENT_GROUP) &&
1144             (katom->device_nr >= kctx->kbdev->gpu_props.num_core_groups)) {
1145                 dev_warn(kctx->kbdev->dev,
1146                                 "Rejecting atom with invalid device_nr %d",
1147                                 katom->device_nr);
1148                 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1149                 ret = jd_done_nolock(katom, NULL);
1150                 goto out;
1151         }
1152
1153         /* Reject atoms with invalid core requirements */
1154         if ((katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) &&
1155                         (katom->core_req & BASE_JD_REQ_EVENT_COALESCE)) {
1156                 dev_warn(kctx->kbdev->dev,
1157                                 "Rejecting atom with invalid core requirements");
1158                 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1159                 katom->core_req &= ~BASE_JD_REQ_EVENT_COALESCE;
1160                 ret = jd_done_nolock(katom, NULL);
1161                 goto out;
1162         }
1163
1164         if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
1165                 /* handle what we need to do to access the external resources */
1166                 if (kbase_jd_pre_external_resources(katom, user_atom) != 0) {
1167                         /* setup failed (no access, bad resource, unknown resource types, etc.) */
1168                         katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1169                         ret = jd_done_nolock(katom, NULL);
1170                         goto out;
1171                 }
1172         }
1173
1174         /* Validate the atom. The function will return an error if the atom is
1175          * malformed.
1176          *
1177          * Soft-jobs never enter the job scheduler but have their own initialize method.
1178          *
1179          * If either fails then we immediately complete the atom with an error.
1180          */
1181         if ((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0) {
1182                 if (!kbase_js_is_atom_valid(kctx->kbdev, katom)) {
1183                         katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1184                         ret = jd_done_nolock(katom, NULL);
1185                         goto out;
1186                 }
1187         } else {
1188                 /* Soft-job */
1189                 if (kbase_prepare_soft_job(katom) != 0) {
1190                         katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1191                         ret = jd_done_nolock(katom, NULL);
1192                         goto out;
1193                 }
1194         }
1195
1196 #ifdef CONFIG_GPU_TRACEPOINTS
1197         katom->work_id = atomic_inc_return(&jctx->work_id);
1198         trace_gpu_job_enqueue((u32)kctx->id, katom->work_id,
1199                         kbasep_map_core_reqs_to_string(katom->core_req));
1200 #endif
1201
1202         if (queued && !IS_GPU_ATOM(katom)) {
1203                 ret = false;
1204                 goto out;
1205         }
1206 #ifdef CONFIG_KDS
1207         if (!katom->kds_dep_satisfied) {
1208                 /* Queue atom due to KDS dependency */
1209                 ret = false;
1210                 goto out;
1211         }
1212 #endif                          /* CONFIG_KDS */
1213
1214
1215 #ifdef CONFIG_MALI_DMA_FENCE
1216         if (atomic_read(&katom->dma_fence.dep_count) != -1) {
1217                 ret = false;
1218                 goto out;
1219         }
1220 #endif /* CONFIG_MALI_DMA_FENCE */
1221
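        /* Nothing is blocking the atom any more (any KDS and dma-fence waits
         * are satisfied): dispatch it. Soft-replay and other soft jobs run
         * immediately, GPU atoms are handed to the job scheduler, and pure
         * dependency atoms complete at once. */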
1222         if ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
1223                                                   == BASE_JD_REQ_SOFT_REPLAY) {
1224                 if (kbase_replay_process(katom))
1225                         ret = false;
1226                 else
1227                         ret = jd_done_nolock(katom, NULL);
1228
1229                 goto out;
1230         } else if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
1231                 if (kbase_process_soft_job(katom) == 0) {
1232                         kbase_finish_soft_job(katom);
1233                         ret = jd_done_nolock(katom, NULL);
1234                         goto out;
1235                 }
1236
1237                 ret = false;
1238         } else if ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) {
1239                 katom->status = KBASE_JD_ATOM_STATE_IN_JS;
1240                 ret = kbasep_js_add_job(kctx, katom);
1241                 /* If job was cancelled then resolve immediately */
1242                 if (katom->event_code == BASE_JD_EVENT_JOB_CANCELLED)
1243                         ret = jd_done_nolock(katom, NULL);
1244         } else {
1245                 /* This is a pure dependency. Resolve it immediately */
1246                 ret = jd_done_nolock(katom, NULL);
1247         }
1248
1249  out:
1250         return ret;
1251 }
1252
1253 #ifdef BASE_LEGACY_UK6_SUPPORT
1254 int kbase_jd_submit(struct kbase_context *kctx,
1255                 const struct kbase_uk_job_submit *submit_data,
1256                 int uk6_atom)
1257 #else
1258 int kbase_jd_submit(struct kbase_context *kctx,
1259                 const struct kbase_uk_job_submit *submit_data)
1260 #endif /* BASE_LEGACY_UK6_SUPPORT */
1261 {
1262         struct kbase_jd_context *jctx = &kctx->jctx;
1263         int err = 0;
1264         int i;
1265         bool need_to_try_schedule_context = false;
1266         struct kbase_device *kbdev;
1267         void __user *user_addr;
1268         u32 latest_flush;
1269
1270         /*
1271          * kbase_jd_submit isn't expected to fail, so all errors with the jobs
1272          * are reported by immediately failing them (through the event system)
1273          */
1274         kbdev = kctx->kbdev;
1275
1276         beenthere(kctx, "%s", "Enter");
1277
1278         if (kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
1279                 dev_err(kbdev->dev, "Attempt to submit to a context that has SUBMIT_DISABLED set on it");
1280                 return -EINVAL;
1281         }
1282
1283 #ifdef BASE_LEGACY_UK6_SUPPORT
1284         if ((uk6_atom && submit_data->stride !=
1285                         sizeof(struct base_jd_atom_v2_uk6)) ||
1286                         (!uk6_atom && submit_data->stride != sizeof(base_jd_atom_v2))) {
1287 #else
1288         if (submit_data->stride != sizeof(base_jd_atom_v2)) {
1289 #endif /* BASE_LEGACY_UK6_SUPPORT */
1290                 dev_err(kbdev->dev, "Stride passed to job_submit doesn't match kernel");
1291                 return -EINVAL;
1292         }
1293
1294         user_addr = get_compat_pointer(kctx, &submit_data->addr);
1295
1296         KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_add_return(submit_data->nr_atoms, &kctx->timeline.jd_atoms_in_flight));
1297
1298         /* All atoms submitted in this call have the same flush ID */
1299         latest_flush = kbase_backend_get_current_flush_id(kbdev);
1300
1301         for (i = 0; i < submit_data->nr_atoms; i++) {
1302                 struct base_jd_atom_v2 user_atom;
1303                 struct kbase_jd_atom *katom;
1304
1305 #ifdef BASE_LEGACY_UK6_SUPPORT
1306                 if (uk6_atom) {
1307                         struct base_jd_atom_v2_uk6 user_atom_v6;
1308                         base_jd_dep_type dep_types[2] = {BASE_JD_DEP_TYPE_DATA, BASE_JD_DEP_TYPE_DATA};
1309
1310                         if (copy_from_user(&user_atom_v6, user_addr,
1311                                         sizeof(user_atom_v6))) {
1312                                 err = -EINVAL;
1313                                 KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx,
1314                                         atomic_sub_return(
1315                                         submit_data->nr_atoms - i,
1316                                         &kctx->timeline.jd_atoms_in_flight));
1317                                 break;
1318                         }
1319                         /* Convert from UK6 atom format to UK7 format */
1320                         user_atom.jc = user_atom_v6.jc;
1321                         user_atom.udata = user_atom_v6.udata;
1322                         user_atom.extres_list = user_atom_v6.extres_list;
1323                         user_atom.nr_extres = user_atom_v6.nr_extres;
1324                         user_atom.core_req = (u32)(user_atom_v6.core_req & 0x7fff);
1325
1326                         /* Atom number 0 denotes "no dependency" */
1327                         if (!user_atom_v6.pre_dep[0])
1328                                 dep_types[0] = BASE_JD_DEP_TYPE_INVALID;
1329
1330                         base_jd_atom_dep_set(&user_atom.pre_dep[0],
1331                                         user_atom_v6.pre_dep[0],
1332                                         dep_types[0]);
1333
1334                         /* Atom number 0 denotes "no dependency" */
1335                         if (!user_atom_v6.pre_dep[1])
1336                                 dep_types[1] = BASE_JD_DEP_TYPE_INVALID;
1337
1338                         base_jd_atom_dep_set(&user_atom.pre_dep[1],
1339                                         user_atom_v6.pre_dep[1],
1340                                         dep_types[1]);
1341
1342                         user_atom.atom_number = user_atom_v6.atom_number;
1343                         user_atom.prio = user_atom_v6.prio;
1344                         user_atom.device_nr = user_atom_v6.device_nr;
1345                 } else {
1346 #endif /* BASE_LEGACY_UK6_SUPPORT */
1347                 if (copy_from_user(&user_atom, user_addr, sizeof(user_atom)) != 0) {
1348                         err = -EINVAL;
1349                         KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_sub_return(submit_data->nr_atoms - i, &kctx->timeline.jd_atoms_in_flight));
1350                         break;
1351                 }
1352 #ifdef BASE_LEGACY_UK6_SUPPORT
1353                 }
1354 #endif /* BASE_LEGACY_UK6_SUPPORT */
1355
1356 #ifdef BASE_LEGACY_UK10_2_SUPPORT
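                     /* Clients built against an API older than 10.3 pass their core
                      * requirements in the 16-bit compat_core_req field; widen them
                      * into the 32-bit core_req used by this kernel.
                      */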
1357                 if (KBASE_API_VERSION(10, 3) > kctx->api_version)
1358                         user_atom.core_req = (u32)(user_atom.compat_core_req
1359                                               & 0x7fff);
1360 #endif /* BASE_LEGACY_UK10_2_SUPPORT */
1361
1362                 user_addr = (void __user *)((uintptr_t) user_addr + submit_data->stride);
1363
1364                 mutex_lock(&jctx->lock);
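                     /* Kernels that do not provide compiletime_assert() get a minimal
                      * local fallback here; it is #undef'd again after the checks
                      * below.
                      */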
1365 #ifndef compiletime_assert
1366 #define compiletime_assert_defined
1367 #define compiletime_assert(x, msg) do { switch (0) { case 0: case (x):; } } \
1368 while (false)
1369 #endif
1370                 compiletime_assert((1 << (8*sizeof(user_atom.atom_number))) ==
1371                                         BASE_JD_ATOM_COUNT,
1372                         "BASE_JD_ATOM_COUNT and base_atom_id type out of sync");
1373                 compiletime_assert(sizeof(user_atom.pre_dep[0].atom_id) ==
1374                                         sizeof(user_atom.atom_number),
1375                         "BASE_JD_ATOM_COUNT and base_atom_id type out of sync");
1376 #ifdef compiletime_assert_defined
1377 #undef compiletime_assert
1378 #undef compiletime_assert_defined
1379 #endif
1380                 katom = &jctx->atoms[user_atom.atom_number];
1381
1382                 /* Record the flush ID for the cache flush optimisation */
1383                 katom->flush_id = latest_flush;
1384
1385                 while (katom->status != KBASE_JD_ATOM_STATE_UNUSED) {
1386                         /* Atom number is already in use, wait for the atom to
1387                          * complete
1388                          */
1389                         mutex_unlock(&jctx->lock);
1390
1391                         /* This thread will wait for the atom to complete. Due
1392                          * to thread scheduling we are not sure that the other
1393                          * thread that owns the atom will also schedule the
1394                          * context, so we force the scheduler to be active and
1395                          * hence eventually schedule this context at some point
1396                          * later.
1397                          */
1398                         kbase_js_sched_all(kbdev);
1399
1400                         if (wait_event_killable(katom->completed,
1401                                         katom->status ==
1402                                         KBASE_JD_ATOM_STATE_UNUSED) != 0) {
1403                                 /* We're being killed so the result code
1404                                  * doesn't really matter
1405                                  */
1406                                 return 0;
1407                         }
1408                         mutex_lock(&jctx->lock);
1409                 }
1410
1411                 need_to_try_schedule_context |=
1412                                        jd_submit_atom(kctx, &user_atom, katom);
1413
1414                 /* Register a completed job as a disjoint event when the GPU is in
1415                  * a disjoint state (i.e. being reset or replaying jobs).
1416                  */
1417                 kbase_disjoint_event_potential(kbdev);
1418
1419                 mutex_unlock(&jctx->lock);
1420         }
1421
1422         if (need_to_try_schedule_context)
1423                 kbase_js_sched_all(kbdev);
1424
1425         return err;
1426 }
1427
1428 KBASE_EXPORT_TEST_API(kbase_jd_submit);
1429
1430 void kbase_jd_done_worker(struct work_struct *data)
1431 {
1432         struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work);
1433         struct kbase_jd_context *jctx;
1434         struct kbase_context *kctx;
1435         struct kbasep_js_kctx_info *js_kctx_info;
1436         struct kbase_device *kbdev;
1437         struct kbasep_js_device_data *js_devdata;
1438         u64 cache_jc = katom->jc;
1439         struct kbasep_js_atom_retained_state katom_retained_state;
1440         bool context_idle;
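             /* Snapshot fields that are still needed after jd_done_nolock() may have
              * freed the atom (used by kbase_backend_complete_wq_post_sched() below).
              */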
1441         base_jd_core_req core_req = katom->core_req;
1442         u64 affinity = katom->affinity;
1443         enum kbase_atom_coreref_state coreref_state = katom->coreref_state;
1444
1445         /* Soft jobs should never reach this function */
1446         KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);
1447
1448         kctx = katom->kctx;
1449         jctx = &kctx->jctx;
1450         kbdev = kctx->kbdev;
1451         js_kctx_info = &kctx->jctx.sched_info;
1452         js_devdata = &kbdev->js_data;
1453
1454         KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER, kctx, katom, katom->jc, 0);
1455
1456         kbase_backend_complete_wq(kbdev, katom);
1457
1458         /*
1459          * Begin transaction on JD context and JS context
1460          */
1461         mutex_lock(&jctx->lock);
1462         kbase_tlstream_tl_attrib_atom_state(katom, TL_ATOM_STATE_DONE);
1463         mutex_lock(&js_devdata->queue_mutex);
1464         mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
1465
1466         /* This worker only gets called on contexts that are scheduled *in*. This is
1467          * because it only happens in response to an IRQ from a job that was
1468          * running.
1469          */
1470         KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
1471
1472         if (katom->event_code == BASE_JD_EVENT_STOPPED) {
1473                 /* Atom has been promoted to stopped */
1474                 unsigned long flags;
1475
1476                 mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
1477                 mutex_unlock(&js_devdata->queue_mutex);
1478
1479                 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1480
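                     /* Hand the atom back to the job scheduler so it can be
                      * re-submitted, rather than retiring it.
                      */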
1481                 katom->status = KBASE_JD_ATOM_STATE_IN_JS;
1482                 kbase_js_unpull(kctx, katom);
1483
1484                 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1485                 mutex_unlock(&jctx->lock);
1486
1487                 return;
1488         }
1489
1490         if (katom->event_code != BASE_JD_EVENT_DONE)
1491                 dev_err(kbdev->dev,
1492                         "t6xx: GPU fault 0x%02lx from job slot %d\n",
1493                                         (unsigned long)katom->event_code,
1494                                                                 katom->slot_nr);
1495
1496         if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
1497                 kbase_as_poking_timer_release_atom(kbdev, kctx, katom);
1498
1499         /* Retain state before the katom disappears */
1500         kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
1501
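             /* The job scheduler reports whether this context has now run out of
              * pulled atoms; if so, the power management reference is dropped at the
              * end of this function.
              */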
1502         context_idle = kbase_js_complete_atom_wq(kctx, katom);
1503
1504         KBASE_DEBUG_ASSERT(kbasep_js_has_atom_finished(&katom_retained_state));
1505
1506         kbasep_js_remove_job(kbdev, kctx, katom);
1507         mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
1508         mutex_unlock(&js_devdata->queue_mutex);
1509         katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF;
1510         /* jd_done_nolock() requires the jsctx_mutex lock to be dropped */
1511         jd_done_nolock(katom, &kctx->completed_jobs);
1512
1513         /* katom may have been freed now, do not use! */
1514
1515         if (context_idle) {
1516                 unsigned long flags;
1517
1518                 context_idle = false;
1519                 mutex_lock(&js_devdata->queue_mutex);
1520                 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1521
1522                 /* If kbase_sched() has scheduled this context back in then
1523                  * KCTX_ACTIVE will have been set after we marked it as
1524                  * inactive, and another pm reference will have been taken, so
1525                  * drop our reference. But do not call kbase_jm_idle_ctx(), as
1526                  * the context is active and fast-starting is allowed.
1527                  *
1528                  * If an atom has been fast-started then kctx->atoms_pulled will
1529                  * be non-zero but KCTX_ACTIVE will still be false (as the
1530                  * previous pm reference has been inherited). Do NOT drop our
1531                  * reference, as it has been re-used, and leave the context as
1532                  * active.
1533                  *
1534                  * If no new atoms have been started then KCTX_ACTIVE will still
1535                  * be false and atoms_pulled will be zero, so drop the reference
1536                  * and call kbase_jm_idle_ctx().
1537                  *
1538                  * As the checks are done under both the queue_mutex and
1539                  * hwaccess_lock, it should be impossible for this to race
1540                  * with the scheduler code.
1541                  */
1542                 if (kbase_ctx_flag(kctx, KCTX_ACTIVE) ||
1543                     !atomic_read(&kctx->atoms_pulled)) {
1544                         /* Calling kbase_jm_idle_ctx() here will ensure that
1545                          * atoms are not fast-started when we drop the
1546                          * hwaccess_lock. This is not performed if
1547                          * KCTX_ACTIVE is set as in that case another pm
1548                          * reference has been taken and a fast-start would be
1549                          * valid.
1550                          */
1551                         if (!kbase_ctx_flag(kctx, KCTX_ACTIVE))
1552                                 kbase_jm_idle_ctx(kbdev, kctx);
1553                         context_idle = true;
1554                 } else {
1555                         kbase_ctx_flag_set(kctx, KCTX_ACTIVE);
1556                 }
1557                 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1558                 mutex_unlock(&js_devdata->queue_mutex);
1559         }
1560
1561         /*
1562          * Transaction complete
1563          */
1564         mutex_unlock(&jctx->lock);
1565
1566         /* Job is now no longer running, so can now safely release the context
1567          * reference, and handle any actions that were logged against the atom's retained state */
1568
1569         kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx, &katom_retained_state);
1570
1571         kbase_js_sched_all(kbdev);
1572
1573         if (!atomic_dec_return(&kctx->work_count)) {
1574                 /* If this was the last outstanding completion worker for the
1575                  * context then post all events that jd_done_nolock() has queued */
1576                 mutex_lock(&jctx->lock);
1577                 while (!list_empty(&kctx->completed_jobs)) {
1578                         struct kbase_jd_atom *atom = list_entry(
1579                                         kctx->completed_jobs.next,
1580                                         struct kbase_jd_atom, jd_item);
1581                         list_del(kctx->completed_jobs.next);
1582
1583                         kbase_event_post(kctx, atom);
1584                 }
1585                 mutex_unlock(&jctx->lock);
1586         }
1587
1588         kbase_backend_complete_wq_post_sched(kbdev, core_req, affinity,
1589                         coreref_state);
1590
1591         if (context_idle)
1592                 kbase_pm_context_idle(kbdev);
1593
1594         KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER_END, kctx, NULL, cache_jc, 0);
1595 }
1596
1597 /**
1598  * jd_cancel_worker - Work queue job cancel function.
1599  * @data: a &struct work_struct
1600  *
1601  * Only called as part of 'Zapping' a context (which occurs on termination).
1602  * Operates serially with the kbase_jd_done_worker() on the work queue.
1603  *
1604  * This can only be called on contexts that aren't scheduled.
1605  *
1606  * Most of the resource release that happens in kbase_jd_done() or
1607  * kbase_jd_done_worker() is not needed here, because the atoms cannot be
1608  * running (by virtue of only being called on contexts that aren't
1609  * scheduled).
1610  */
1611 static void jd_cancel_worker(struct work_struct *data)
1612 {
1613         struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work);
1614         struct kbase_jd_context *jctx;
1615         struct kbase_context *kctx;
1616         struct kbasep_js_kctx_info *js_kctx_info;
1617         bool need_to_try_schedule_context;
1618         bool attr_state_changed;
1619         struct kbase_device *kbdev;
1620
1621         /* Soft jobs should never reach this function */
1622         KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);
1623
1624         kctx = katom->kctx;
1625         kbdev = kctx->kbdev;
1626         jctx = &kctx->jctx;
1627         js_kctx_info = &kctx->jctx.sched_info;
1628
1629         KBASE_TRACE_ADD(kbdev, JD_CANCEL_WORKER, kctx, katom, katom->jc, 0);
1630
1631         /* This only gets called on contexts that are scheduled out. Hence, we must
1632          * make sure we don't decrement the count of running jobs (there aren't
1633          * any), nor must we try to schedule out the context (it's already
1634          * scheduled out).
1635          */
1636         KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
1637
1638         /* Scheduler: Remove the job from the system */
1639         mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
1640         attr_state_changed = kbasep_js_remove_cancelled_job(kbdev, kctx, katom);
1641         mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
1642
1643         mutex_lock(&jctx->lock);
1644
1645         need_to_try_schedule_context = jd_done_nolock(katom, NULL);
1646         /* Because we're zapping, no more jobs are being added to this ctx, so
1647          * there is no need to schedule the context. There is also no need for the
1648          * jsctx_mutex to have been taken around this. */
1649         KBASE_DEBUG_ASSERT(!need_to_try_schedule_context);
1650
1651         /* katom may have been freed now, do not use! */
1652         mutex_unlock(&jctx->lock);
1653
1654         if (attr_state_changed)
1655                 kbase_js_sched_all(kbdev);
1656 }
1657
1658 /**
1659  * kbase_jd_done - Complete a job that has been removed from the Hardware
1660  * @katom: atom which has been completed
1661  * @slot_nr: slot the atom was on
1662  * @end_timestamp: completion time
1663  * @done_code: completion code
1664  *
1665  * This must be used whenever a job has been removed from the Hardware, e.g.:
1666  * An IRQ indicates that the job finished (for both error and 'done' codes), or
1667  * the job was evicted from the JS_HEAD_NEXT registers during a Soft/Hard stop.
1668  *
1669  * Some work is carried out immediately, and the rest is deferred onto a
1670  * workqueue
1671  *
1672  * Context:
1673  *   This can be called safely from atomic context.
1674  *   The caller must hold kbdev->hwaccess_lock
1675  */
1676 void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr,
1677                 ktime_t *end_timestamp, kbasep_js_atom_done_code done_code)
1678 {
1679         struct kbase_context *kctx;
1680         struct kbase_device *kbdev;
1681
1682         KBASE_DEBUG_ASSERT(katom);
1683         kctx = katom->kctx;
1684         KBASE_DEBUG_ASSERT(kctx);
1685         kbdev = kctx->kbdev;
1686         KBASE_DEBUG_ASSERT(kbdev);
1687
1688         if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT)
1689                 katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT;
1690
1691         KBASE_TRACE_ADD(kbdev, JD_DONE, kctx, katom, katom->jc, 0);
1692
1693         kbase_job_check_leave_disjoint(kbdev, katom);
1694
1695         katom->slot_nr = slot_nr;
1696
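             /* Track another outstanding completion worker; kbase_jd_done_worker()
              * decrements this and posts any deferred events once it reaches zero.
              */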
1697         atomic_inc(&kctx->work_count);
1698
1699 #ifdef CONFIG_DEBUG_FS
1700         /* If a failed job has been taken for fault dumping, don't queue the worker yet */
1701         if (!katom->will_fail_event_code &&
1702                         kbase_debug_job_fault_process(katom, katom->event_code))
1703                 return;
1704 #endif
1705
1706         WARN_ON(work_pending(&katom->work));
1707         KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
1708         INIT_WORK(&katom->work, kbase_jd_done_worker);
1709         queue_work(kctx->jctx.job_done_wq, &katom->work);
1710 }
1711
1712 KBASE_EXPORT_TEST_API(kbase_jd_done);
1713
1714 void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
1715 {
1716         struct kbase_context *kctx;
1717         struct kbasep_js_kctx_info *js_kctx_info;
1718
1719         KBASE_DEBUG_ASSERT(NULL != kbdev);
1720         KBASE_DEBUG_ASSERT(NULL != katom);
1721         kctx = katom->kctx;
1722         KBASE_DEBUG_ASSERT(NULL != kctx);
1723
1724         js_kctx_info = &kctx->jctx.sched_info;
1725
1726         KBASE_TRACE_ADD(kbdev, JD_CANCEL, kctx, katom, katom->jc, 0);
1727
1728         /* This should only be done from a context that is not scheduled */
1729         KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
1730
1731         WARN_ON(work_pending(&katom->work));
1732
1733         katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
1734
1735         KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
1736         INIT_WORK(&katom->work, jd_cancel_worker);
1737         queue_work(kctx->jctx.job_done_wq, &katom->work);
1738 }
1739
1740
1741 void kbase_jd_zap_context(struct kbase_context *kctx)
1742 {
1743         struct kbase_jd_atom *katom;
1744         struct list_head *entry, *tmp;
1745         struct kbase_device *kbdev;
1746
1747         KBASE_DEBUG_ASSERT(kctx);
1748
1749         kbdev = kctx->kbdev;
1750
1751         KBASE_TRACE_ADD(kbdev, JD_ZAP_CONTEXT, kctx, NULL, 0u, 0u);
1752
1753         kbase_js_zap_context(kctx);
1754
1755         mutex_lock(&kctx->jctx.lock);
1756
1757         /*
1758          * While holding the struct kbase_jd_context lock, clean up jobs that are
1759          * known to kbase but are queued outside the job scheduler.
1760          */
1761
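             /* Stop the soft-job timeout timer and force-complete any soft jobs still
              * on the waiting list.
              */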
1762         del_timer_sync(&kctx->soft_job_timeout);
1763         list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
1764                 katom = list_entry(entry, struct kbase_jd_atom, queue);
1765                 kbase_cancel_soft_job(katom);
1766         }
1767
1768
1769 #ifdef CONFIG_KDS
1770
1771         /* For each job waiting on a kds resource, cancel the wait and force the
1772          * job to complete early, so that we don't leave jobs waiting on kds
1773          * resources that may never be released once the context is zapped, which
1774          * would result in a hang.
1775          *
1776          * We can safely iterate over the list as the struct kbase_jd_context lock
1777          * is held; this prevents items being removed when jd_done_nolock() is
1778          * called from kbase_cancel_kds_wait_job. */
1779
1780         list_for_each(entry, &kctx->waiting_kds_resource) {
1781                 katom = list_entry(entry, struct kbase_jd_atom, node);
1782
1783                 kbase_cancel_kds_wait_job(katom);
1784         }
1785 #endif
1786
1787 #ifdef CONFIG_MALI_DMA_FENCE
1788         kbase_dma_fence_cancel_all_atoms(kctx);
1789 #endif
1790
1791         mutex_unlock(&kctx->jctx.lock);
1792
1793 #ifdef CONFIG_MALI_DMA_FENCE
1794         /* Flush dma-fence workqueue to ensure that any callbacks that may have
1795          * been queued are done before continuing.
1796          */
1797         flush_workqueue(kctx->dma_fence.wq);
1798 #endif
1799
1800         kbase_jm_wait_for_zero_jobs(kctx);
1801 }
1802
1803 KBASE_EXPORT_TEST_API(kbase_jd_zap_context);
1804
1805 int kbase_jd_init(struct kbase_context *kctx)
1806 {
1807         int i;
1808         int mali_err = 0;
1809 #ifdef CONFIG_KDS
1810         int err;
1811 #endif                          /* CONFIG_KDS */
1812
1813         KBASE_DEBUG_ASSERT(kctx);
1814
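             /* Single-threaded (max_active = 1) high-priority workqueue: completion
              * and cancel workers for this context execute serially.
              */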
1815         kctx->jctx.job_done_wq = alloc_workqueue("mali_jd",
1816                         WQ_HIGHPRI | WQ_UNBOUND, 1);
1817         if (NULL == kctx->jctx.job_done_wq) {
1818                 mali_err = -ENOMEM;
1819                 goto out1;
1820         }
1821
1822         for (i = 0; i < BASE_JD_ATOM_COUNT; i++) {
1823                 init_waitqueue_head(&kctx->jctx.atoms[i].completed);
1824
1825                 INIT_LIST_HEAD(&kctx->jctx.atoms[i].dep_head[0]);
1826                 INIT_LIST_HEAD(&kctx->jctx.atoms[i].dep_head[1]);
1827
1828                 /* Catch userspace attempting to use an atom which doesn't exist as a pre-dependency */
1829                 kctx->jctx.atoms[i].event_code = BASE_JD_EVENT_JOB_INVALID;
1830                 kctx->jctx.atoms[i].status = KBASE_JD_ATOM_STATE_UNUSED;
1831
1832 #ifdef CONFIG_MALI_DMA_FENCE
1833                 kctx->jctx.atoms[i].dma_fence.context = fence_context_alloc(1);
1834                 atomic_set(&kctx->jctx.atoms[i].dma_fence.seqno, 0);
1835                 INIT_LIST_HEAD(&kctx->jctx.atoms[i].dma_fence.callbacks);
1836 #endif
1837         }
1838
1839         mutex_init(&kctx->jctx.lock);
1840
1841         init_waitqueue_head(&kctx->jctx.zero_jobs_wait);
1842
1843         spin_lock_init(&kctx->jctx.tb_lock);
1844
1845 #ifdef CONFIG_KDS
1846         err = kds_callback_init(&kctx->jctx.kds_cb, 0, kds_dep_clear);
1847         if (0 != err) {
1848                 mali_err = -EINVAL;
1849                 goto out2;
1850         }
1851 #endif                          /* CONFIG_KDS */
1852
1853         kctx->jctx.job_nr = 0;
1854         INIT_LIST_HEAD(&kctx->completed_jobs);
1855         atomic_set(&kctx->work_count, 0);
1856
1857         return 0;
1858
1859 #ifdef CONFIG_KDS
1860  out2:
1861         destroy_workqueue(kctx->jctx.job_done_wq);
1862 #endif                          /* CONFIG_KDS */
1863  out1:
1864         return mali_err;
1865 }
1866
1867 KBASE_EXPORT_TEST_API(kbase_jd_init);
1868
1869 void kbase_jd_exit(struct kbase_context *kctx)
1870 {
1871         KBASE_DEBUG_ASSERT(kctx);
1872
1873 #ifdef CONFIG_KDS
1874         kds_callback_term(&kctx->jctx.kds_cb);
1875 #endif                          /* CONFIG_KDS */
1876         /* Work queue is emptied by this */
1877         destroy_workqueue(kctx->jctx.job_done_wq);
1878 }
1879
1880 KBASE_EXPORT_TEST_API(kbase_jd_exit);