MALI: rockchip: upgrade midgard DDK to r14p0-01rel0
[firefly-linux-kernel-4.4.55.git] drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c
1 /*
2  *
3  * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
4  *
5  * This program is free software and is provided to you under the terms of the
6  * GNU General Public License version 2 as published by the Free Software
7  * Foundation, and any use by you of this program is subject to the terms
8  * of such GNU licence.
9  *
10  * A copy of the licence is included with the program, and can also be obtained
11  * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12  * Boston, MA  02110-1301, USA.
13  *
14  */
15
16
17
18
19 /*
20  * Register-based HW access backend specific APIs
21  */
22
23 #include <mali_kbase.h>
24 #include <mali_kbase_hwaccess_jm.h>
25 #include <mali_kbase_jm.h>
26 #include <mali_kbase_js.h>
27 #include <mali_kbase_tlstream.h>
28 #include <mali_kbase_10969_workaround.h>
29 #include <backend/gpu/mali_kbase_cache_policy_backend.h>
30 #include <backend/gpu/mali_kbase_device_internal.h>
31 #include <backend/gpu/mali_kbase_jm_internal.h>
32 #include <backend/gpu/mali_kbase_js_affinity.h>
33 #include <backend/gpu/mali_kbase_pm_internal.h>
34
35 /* Return whether the specified ringbuffer is empty. HW access lock must be
36  * held */
37 #define SLOT_RB_EMPTY(rb)   (rb->write_idx == rb->read_idx)
38 /* Return number of atoms currently in the specified ringbuffer. HW access lock
39  * must be held */
40 #define SLOT_RB_ENTRIES(rb) (int)(s8)(rb->write_idx - rb->read_idx)
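/* Note: the read and write indices are free-running counters that are only
 * masked with SLOT_RB_MASK when the entries array is accessed, so the (s8)
 * cast of their difference above still yields the ringbuffer occupancy once
 * the counters have wrapped (this assumes 8-bit indices, as the cast implies).
 */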
41
42 static void kbase_gpu_release_atom(struct kbase_device *kbdev,
43                                         struct kbase_jd_atom *katom,
44                                         ktime_t *end_timestamp);
45
46 /**
47  * kbase_gpu_enqueue_atom - Enqueue an atom in the HW access ringbuffer
48  * @kbdev: Device pointer
49  * @katom: Atom to enqueue
50  *
51  * Context: Caller must hold the HW access lock
52  */
53 static void kbase_gpu_enqueue_atom(struct kbase_device *kbdev,
54                                         struct kbase_jd_atom *katom)
55 {
56         struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[katom->slot_nr];
57
58         WARN_ON(SLOT_RB_ENTRIES(rb) >= SLOT_RB_SIZE);
59
60         lockdep_assert_held(&kbdev->hwaccess_lock);
61
62         rb->entries[rb->write_idx & SLOT_RB_MASK].katom = katom;
63         rb->write_idx++;
64
65         katom->gpu_rb_state = KBASE_ATOM_GPU_RB_WAITING_BLOCKED;
66 }
67
68 /**
69  * kbase_gpu_dequeue_atom - Remove an atom from the HW access ringbuffer, once
70  * it has been completed
71  * @kbdev:         Device pointer
72  * @js:            Job slot to remove atom from
73  * @end_timestamp: Pointer to timestamp of atom completion. May be NULL, in
74  *                 which case current time will be used.
75  *
76  * Context: Caller must hold the HW access lock
77  *
78  * Return: Atom removed from ringbuffer
79  */
80 static struct kbase_jd_atom *kbase_gpu_dequeue_atom(struct kbase_device *kbdev,
81                                                 int js,
82                                                 ktime_t *end_timestamp)
83 {
84         struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js];
85         struct kbase_jd_atom *katom;
86
87         if (SLOT_RB_EMPTY(rb)) {
88                 WARN(1, "GPU ringbuffer unexpectedly empty\n");
89                 return NULL;
90         }
91
92         lockdep_assert_held(&kbdev->hwaccess_lock);
93
94         katom = rb->entries[rb->read_idx & SLOT_RB_MASK].katom;
95
96         kbase_gpu_release_atom(kbdev, katom, end_timestamp);
97
98         rb->read_idx++;
99
100         katom->gpu_rb_state = KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB;
101
102         kbase_js_debug_log_current_affinities(kbdev);
103
104         return katom;
105 }
106
107 struct kbase_jd_atom *kbase_gpu_inspect(struct kbase_device *kbdev, int js,
108                                         int idx)
109 {
110         struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js];
111
112         lockdep_assert_held(&kbdev->hwaccess_lock);
113
114         if ((SLOT_RB_ENTRIES(rb) - 1) < idx)
115                 return NULL; /* idx out of range */
116
117         return rb->entries[(rb->read_idx + idx) & SLOT_RB_MASK].katom;
118 }
119
120 struct kbase_jd_atom *kbase_backend_inspect_head(struct kbase_device *kbdev,
121                                         int js)
122 {
123         return kbase_gpu_inspect(kbdev, js, 0);
124 }
125
126 struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev,
127                                         int js)
128 {
129         struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js];
130
131         if (SLOT_RB_EMPTY(rb))
132                 return NULL;
133
134         return rb->entries[(rb->write_idx - 1) & SLOT_RB_MASK].katom;
135 }
136
137 /**
138  * kbase_gpu_atoms_submitted - Inspect whether a slot has any atoms currently
139  * on the GPU
140  * @kbdev:  Device pointer
141  * @js:     Job slot to inspect
142  *
143  * Return: true if there are atoms on the GPU for slot js,
144  *         false otherwise
145  */
146 static bool kbase_gpu_atoms_submitted(struct kbase_device *kbdev, int js)
147 {
148         int i;
149
150         lockdep_assert_held(&kbdev->hwaccess_lock);
151
152         for (i = 0; i < SLOT_RB_SIZE; i++) {
153                 struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
154
155                 if (!katom)
156                         return false;
157                 if (katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED ||
158                                 katom->gpu_rb_state == KBASE_ATOM_GPU_RB_READY)
159                         return true;
160         }
161
162         return false;
163 }
164
165 /**
166  * kbase_gpu_atoms_submitted_any() - Inspect whether there are any atoms
167  * currently on the GPU
168  * @kbdev:  Device pointer
169  *
170  * Return: true if there are any atoms on the GPU, false otherwise
171  */
172 static bool kbase_gpu_atoms_submitted_any(struct kbase_device *kbdev)
173 {
174         int js;
175         int i;
176
177         lockdep_assert_held(&kbdev->hwaccess_lock);
178
179         for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
180                 for (i = 0; i < SLOT_RB_SIZE; i++) {
181                         struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
182
183                         if (katom && katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED)
184                                 return true;
185                 }
186         }
187         return false;
188 }
189
190 int kbase_backend_nr_atoms_submitted(struct kbase_device *kbdev, int js)
191 {
192         int nr = 0;
193         int i;
194
195         lockdep_assert_held(&kbdev->hwaccess_lock);
196
197         for (i = 0; i < SLOT_RB_SIZE; i++) {
198                 struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
199
200                 if (katom && (katom->gpu_rb_state ==
201                                                 KBASE_ATOM_GPU_RB_SUBMITTED))
202                         nr++;
203         }
204
205         return nr;
206 }
207
208 int kbase_backend_nr_atoms_on_slot(struct kbase_device *kbdev, int js)
209 {
210         int nr = 0;
211         int i;
212
213         lockdep_assert_held(&kbdev->hwaccess_lock);
214
215         for (i = 0; i < SLOT_RB_SIZE; i++) {
216                 if (kbase_gpu_inspect(kbdev, js, i))
217                         nr++;
218         }
219
220         return nr;
221 }
222
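/**
 * kbase_gpu_nr_atoms_on_slot_min - Count the atoms on a slot that have reached
 *                                  at least the given ringbuffer state
 * @kbdev:        Device pointer
 * @js:           Job slot to inspect
 * @min_rb_state: Minimum ringbuffer state to count
 *
 * Context: Caller must hold the HW access lock
 *
 * Return: Number of atoms on slot @js whose gpu_rb_state is at least
 *         @min_rb_state
 */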
223 static int kbase_gpu_nr_atoms_on_slot_min(struct kbase_device *kbdev, int js,
224                                 enum kbase_atom_gpu_rb_state min_rb_state)
225 {
226         int nr = 0;
227         int i;
228
229         lockdep_assert_held(&kbdev->hwaccess_lock);
230
231         for (i = 0; i < SLOT_RB_SIZE; i++) {
232                 struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
233
234                 if (katom && (katom->gpu_rb_state >= min_rb_state))
235                         nr++;
236         }
237
238         return nr;
239 }
240
241 /**
242  * check_secure_atom - Check if the given atom is in the given secure state and
243  *                     has a ringbuffer state of at least
244  *                     KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION
245  * @katom:  Atom pointer
246  * @secure: Desired secure state
247  *
248  * Return: true if atom is in the given state, false otherwise
249  */
250 static bool check_secure_atom(struct kbase_jd_atom *katom, bool secure)
251 {
252         if (katom->gpu_rb_state >=
253                         KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION &&
254                         ((kbase_jd_katom_is_protected(katom) && secure) ||
255                         (!kbase_jd_katom_is_protected(katom) && !secure)))
256                 return true;
257
258         return false;
259 }
260
261 /**
262  * kbase_gpu_check_secure_atoms - Check if there are any atoms in the given
263  *                                secure state in the ringbuffers of at least
264  *                                state
265  *                                KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE
266  * @kbdev:  Device pointer
267  * @secure: Desired secure state
268  *
269  * Return: true if any atoms are in the given state, false otherwise
270  */
271 static bool kbase_gpu_check_secure_atoms(struct kbase_device *kbdev,
272                 bool secure)
273 {
274         int js, i;
275
276         for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
277                 for (i = 0; i < SLOT_RB_SIZE; i++) {
278                         struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev,
279                                         js, i);
280
281                         if (katom) {
282                                 if (check_secure_atom(katom, secure))
283                                         return true;
284                         }
285                 }
286         }
287
288         return false;
289 }
290
291 int kbase_backend_slot_free(struct kbase_device *kbdev, int js)
292 {
293         if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) !=
294                                                 KBASE_RESET_GPU_NOT_PENDING) {
295                 /* The GPU is being reset - so prevent submission */
296                 return 0;
297         }
298
299         return SLOT_RB_SIZE - kbase_backend_nr_atoms_on_slot(kbdev, js);
300 }
301
302
303 static void kbasep_js_job_check_deref_cores(struct kbase_device *kbdev,
304                                                 struct kbase_jd_atom *katom);
305
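/**
 * kbasep_js_job_check_ref_cores - Take references on the cores required by an
 *                                 atom before it can be submitted
 * @kbdev: Device pointer
 * @js:    Job slot the atom is to be submitted on
 * @katom: Atom to check
 *
 * Drives katom->coreref_state through the core reference state machine:
 * choosing an affinity, requesting and registering the cores, and checking
 * for affinity violations.
 *
 * Return: true if the atom has reached KBASE_ATOM_COREREF_STATE_READY,
 *         false if the caller should retry later
 */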
306 static bool kbasep_js_job_check_ref_cores(struct kbase_device *kbdev,
307                                                 int js,
308                                                 struct kbase_jd_atom *katom)
309 {
310         /* The most recently checked affinity. Having this at this scope allows
311          * us to guarantee that we've checked the affinity in this function
312          * call.
313          */
314         u64 recently_chosen_affinity = 0;
315         bool chosen_affinity = false;
316         bool retry;
317
318         do {
319                 retry = false;
320
321                 /* NOTE: The following uses a number of FALLTHROUGHs to optimize
322                  * the calls to this function. Ending of the function is
323                  * indicated by BREAK OUT */
324                 switch (katom->coreref_state) {
325                         /* State when job is first attempted to be run */
326                 case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
327                         KBASE_DEBUG_ASSERT(katom->affinity == 0);
328
329                         /* Compute affinity */
330                         if (false == kbase_js_choose_affinity(
331                                         &recently_chosen_affinity, kbdev, katom,
332                                                                         js)) {
333                                 /* No cores are currently available */
334                                 /* *** BREAK OUT: No state transition *** */
335                                 break;
336                         }
337
338                         chosen_affinity = true;
339
340                         /* Request the cores */
341                         kbase_pm_request_cores(kbdev,
342                                         katom->core_req & BASE_JD_REQ_T,
343                                                 recently_chosen_affinity);
344
345                         katom->affinity = recently_chosen_affinity;
346
347                         /* Proceed to next state */
348                         katom->coreref_state =
349                         KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
350
351                         /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
352
353                 case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
354                         {
355                                 enum kbase_pm_cores_ready cores_ready;
356
357                                 KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
358                                         (katom->core_req & BASE_JD_REQ_T));
359
360                                 cores_ready = kbase_pm_register_inuse_cores(
361                                                 kbdev,
362                                                 katom->core_req & BASE_JD_REQ_T,
363                                                 katom->affinity);
364                                 if (cores_ready == KBASE_NEW_AFFINITY) {
365                                         /* Affinity no longer valid - return to
366                                          * previous state */
367                                         kbasep_js_job_check_deref_cores(kbdev,
368                                                                         katom);
369                                         KBASE_TRACE_ADD_SLOT_INFO(kbdev,
370                                         JS_CORE_REF_REGISTER_INUSE_FAILED,
371                                                         katom->kctx, katom,
372                                                         katom->jc, js,
373                                                         (u32) katom->affinity);
374                                         /* *** BREAK OUT: Return to previous
375                                          * state, retry *** */
376                                         retry = true;
377                                         break;
378                                 }
379                                 if (cores_ready == KBASE_CORES_NOT_READY) {
380                                         /* Stay in this state and return, to
381                                          * retry at this state later */
382                                         KBASE_TRACE_ADD_SLOT_INFO(kbdev,
383                                         JS_CORE_REF_REGISTER_INUSE_FAILED,
384                                                         katom->kctx, katom,
385                                                         katom->jc, js,
386                                                         (u32) katom->affinity);
387                                         /* *** BREAK OUT: No state transition
388                                          * *** */
389                                         break;
390                                 }
391                                 /* Proceed to next state */
392                                 katom->coreref_state =
393                                 KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
394                         }
395
396                         /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
397
398                 case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
399                         KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
400                                         (katom->core_req & BASE_JD_REQ_T));
401
402                         /* Optimize out choosing the affinity twice in the same
403                          * function call */
404                         if (chosen_affinity == false) {
405                                 /* See if the affinity changed since a previous
406                                  * call. */
407                                 if (false == kbase_js_choose_affinity(
408                                                 &recently_chosen_affinity,
409                                                         kbdev, katom, js)) {
410                                         /* No cores are currently available */
411                                         kbasep_js_job_check_deref_cores(kbdev,
412                                                                         katom);
413                                         KBASE_TRACE_ADD_SLOT_INFO(kbdev,
414                                         JS_CORE_REF_REQUEST_ON_RECHECK_FAILED,
415                                                 katom->kctx, katom,
416                                                 katom->jc, js,
417                                                 (u32) recently_chosen_affinity);
418                                         /* *** BREAK OUT: Transition to lower
419                                          * state *** */
420                                         break;
421                                 }
422                                 chosen_affinity = true;
423                         }
424
425                         /* Now see if this requires a different set of cores */
426                         if (recently_chosen_affinity != katom->affinity) {
427                                 enum kbase_pm_cores_ready cores_ready;
428
429                                 kbase_pm_request_cores(kbdev,
430                                                 katom->core_req & BASE_JD_REQ_T,
431                                                 recently_chosen_affinity);
432
433                                 /* Register new cores whilst we still hold the
434                                  * old ones, to minimize power transitions */
435                                 cores_ready =
436                                         kbase_pm_register_inuse_cores(kbdev,
437                                                 katom->core_req & BASE_JD_REQ_T,
438                                                 recently_chosen_affinity);
439                                 kbasep_js_job_check_deref_cores(kbdev, katom);
440
441                                 /* Fixup the state that was reduced by
442                                  * deref_cores: */
443                                 katom->coreref_state =
444                                 KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
445                                 katom->affinity = recently_chosen_affinity;
446                                 if (cores_ready == KBASE_NEW_AFFINITY) {
447                                         /* Affinity no longer valid - return to
448                                          * previous state */
449                                         katom->coreref_state =
450                                         KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
451
452                                         kbasep_js_job_check_deref_cores(kbdev,
453                                                                         katom);
454
455                                         KBASE_TRACE_ADD_SLOT_INFO(kbdev,
456                                         JS_CORE_REF_REGISTER_INUSE_FAILED,
457                                                         katom->kctx, katom,
458                                                         katom->jc, js,
459                                                         (u32) katom->affinity);
460                                         /* *** BREAK OUT: Return to previous
461                                          * state, retry *** */
462                                         retry = true;
463                                         break;
464                                 }
465                                 /* Now might be waiting for powerup again, with
466                                  * a new affinity */
467                                 if (cores_ready == KBASE_CORES_NOT_READY) {
468                                         /* Return to previous state */
469                                         katom->coreref_state =
470                                         KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
471                                         KBASE_TRACE_ADD_SLOT_INFO(kbdev,
472                                         JS_CORE_REF_REGISTER_ON_RECHECK_FAILED,
473                                                         katom->kctx, katom,
474                                                         katom->jc, js,
475                                                         (u32) katom->affinity);
476                                         /* *** BREAK OUT: Transition to lower
477                                          * state *** */
478                                         break;
479                                 }
480                         }
481                         /* Proceed to next state */
482                         katom->coreref_state =
483                         KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS;
484
485                         /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
486                 case KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS:
487                         KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
488                                         (katom->core_req & BASE_JD_REQ_T));
489                         KBASE_DEBUG_ASSERT(katom->affinity ==
490                                                 recently_chosen_affinity);
491
492                         /* Note: this is where the caller must've taken the
493                          * hwaccess_lock */
494
495                         /* Check for affinity violations - if there are any,
496                          * then we just ask the caller to requeue and try again
497                          * later */
498                         if (kbase_js_affinity_would_violate(kbdev, js,
499                                         katom->affinity) != false) {
500                                 /* Return to previous state */
501                                 katom->coreref_state =
502                                 KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
503                                 /* *** BREAK OUT: Transition to lower state ***
504                                  */
505                                 KBASE_TRACE_ADD_SLOT_INFO(kbdev,
506                                         JS_CORE_REF_AFFINITY_WOULD_VIOLATE,
507                                         katom->kctx, katom, katom->jc, js,
508                                         (u32) katom->affinity);
509                                 break;
510                         }
511
512                         /* No affinity violations would result, so the cores are
513                          * ready */
514                         katom->coreref_state = KBASE_ATOM_COREREF_STATE_READY;
515                         /* *** BREAK OUT: Cores Ready *** */
516                         break;
517
518                 default:
519                         KBASE_DEBUG_ASSERT_MSG(false,
520                                         "Unhandled kbase_atom_coreref_state %d",
521                                                         katom->coreref_state);
522                         break;
523                 }
524         } while (retry != false);
525
526         return (katom->coreref_state == KBASE_ATOM_COREREF_STATE_READY);
527 }
528
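/**
 * kbasep_js_job_check_deref_cores - Release the core references held by an
 *                                   atom
 * @kbdev: Device pointer
 * @katom: Atom whose core references should be dropped
 *
 * Releases or unrequests the atom's cores according to its current
 * coreref_state, then resets katom->affinity and returns the atom to
 * KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED.
 */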
529 static void kbasep_js_job_check_deref_cores(struct kbase_device *kbdev,
530                                                 struct kbase_jd_atom *katom)
531 {
532         KBASE_DEBUG_ASSERT(kbdev != NULL);
533         KBASE_DEBUG_ASSERT(katom != NULL);
534
535         switch (katom->coreref_state) {
536         case KBASE_ATOM_COREREF_STATE_READY:
537                 /* State where atom was submitted to the HW - just proceed to
538                  * power-down */
539                 KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
540                                         (katom->core_req & BASE_JD_REQ_T));
541
542                 /* *** FALLTHROUGH *** */
543
544         case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
545                 /* State where cores were registered */
546                 KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
547                                         (katom->core_req & BASE_JD_REQ_T));
548                 kbase_pm_release_cores(kbdev, katom->core_req & BASE_JD_REQ_T,
549                                                         katom->affinity);
550
551                 break;
552
553         case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
554                 /* State where cores were requested, but not registered */
555                 KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
556                                         (katom->core_req & BASE_JD_REQ_T));
557                 kbase_pm_unrequest_cores(kbdev, katom->core_req & BASE_JD_REQ_T,
558                                                         katom->affinity);
559                 break;
560
561         case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
562                 /* Initial state - nothing required */
563                 KBASE_DEBUG_ASSERT(katom->affinity == 0);
564                 break;
565
566         default:
567                 KBASE_DEBUG_ASSERT_MSG(false,
568                                                 "Unhandled coreref_state: %d",
569                                                         katom->coreref_state);
570                 break;
571         }
572
573         katom->affinity = 0;
574         katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
575 }
576
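/**
 * kbasep_js_job_check_deref_cores_nokatom - Variant of
 * kbasep_js_job_check_deref_cores() that takes the core requirements, affinity
 * and core reference state directly rather than reading them from an atom
 * @kbdev:         Device pointer
 * @core_req:      Core requirements of the job
 * @affinity:      Affinity of the job
 * @coreref_state: Core reference state of the job
 */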
577 static void kbasep_js_job_check_deref_cores_nokatom(struct kbase_device *kbdev,
578                 base_jd_core_req core_req, u64 affinity,
579                 enum kbase_atom_coreref_state coreref_state)
580 {
581         KBASE_DEBUG_ASSERT(kbdev != NULL);
582
583         switch (coreref_state) {
584         case KBASE_ATOM_COREREF_STATE_READY:
585                 /* State where atom was submitted to the HW - just proceed to
586                  * power-down */
587                 KBASE_DEBUG_ASSERT(affinity != 0 ||
588                                         (core_req & BASE_JD_REQ_T));
589
590                 /* *** FALLTHROUGH *** */
591
592         case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
593                 /* State where cores were registered */
594                 KBASE_DEBUG_ASSERT(affinity != 0 ||
595                                         (core_req & BASE_JD_REQ_T));
596                 kbase_pm_release_cores(kbdev, core_req & BASE_JD_REQ_T,
597                                                         affinity);
598
599                 break;
600
601         case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
602                 /* State where cores were requested, but not registered */
603                 KBASE_DEBUG_ASSERT(affinity != 0 ||
604                                         (core_req & BASE_JD_REQ_T));
605                 kbase_pm_unrequest_cores(kbdev, core_req & BASE_JD_REQ_T,
606                                                         affinity);
607                 break;
608
609         case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
610                 /* Initial state - nothing required */
611                 KBASE_DEBUG_ASSERT(affinity == 0);
612                 break;
613
614         default:
615                 KBASE_DEBUG_ASSERT_MSG(false,
616                                                 "Unhandled coreref_state: %d",
617                                                         coreref_state);
618                 break;
619         }
620 }
621
622 static void kbase_gpu_release_atom(struct kbase_device *kbdev,
623                                         struct kbase_jd_atom *katom,
624                                         ktime_t *end_timestamp)
625 {
626         switch (katom->gpu_rb_state) {
627         case KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB:
628                 /* Should be impossible */
629                 WARN(1, "Attempting to release atom not in ringbuffer\n");
630                 break;
631
632         case KBASE_ATOM_GPU_RB_SUBMITTED:
633                 /* Inform power management at start/finish of atom so it can
634                  * update its GPU utilisation metrics. Mark atom as not
635                  * submitted beforehand. */
636                 katom->gpu_rb_state = KBASE_ATOM_GPU_RB_READY;
637                 kbase_pm_metrics_update(kbdev, end_timestamp);
638
639                 if (katom->core_req & BASE_JD_REQ_PERMON)
640                         kbase_pm_release_gpu_cycle_counter_nolock(kbdev);
641                 /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
642
643         case KBASE_ATOM_GPU_RB_READY:
644                 /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
645
646         case KBASE_ATOM_GPU_RB_WAITING_AFFINITY:
647                 kbase_js_affinity_release_slot_cores(kbdev, katom->slot_nr,
648                                                         katom->affinity);
649                 /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
650
651         case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE:
652                 break;
653
654         case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION:
655                 if (katom->protected_state.enter !=
656                                 KBASE_ATOM_ENTER_PROTECTED_CHECK ||
657                                 katom->protected_state.exit !=
658                                 KBASE_ATOM_EXIT_PROTECTED_CHECK)
659                         kbdev->protected_mode_transition = false;
660
661                 if (kbase_jd_katom_is_protected(katom) &&
662                                 (katom->protected_state.enter ==
663                                 KBASE_ATOM_ENTER_PROTECTED_IDLE_L2))
664                         kbase_vinstr_resume(kbdev->vinstr_ctx);
665
666                 /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
667
668         case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV:
669                 /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
670
671         case KBASE_ATOM_GPU_RB_WAITING_BLOCKED:
672                 /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
673
674         case KBASE_ATOM_GPU_RB_RETURN_TO_JS:
675                 break;
676         }
677
678         katom->gpu_rb_state = KBASE_ATOM_GPU_RB_WAITING_BLOCKED;
679 }
680
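/**
 * kbase_gpu_mark_atom_for_return - Prepare an atom to be returned to the job
 *                                  scheduler
 * @kbdev: Device pointer
 * @katom: Atom to mark for return
 *
 * Releases any resources held by the atom and moves it to the
 * KBASE_ATOM_GPU_RB_RETURN_TO_JS state, so that it is handed back to the job
 * scheduler on the next ringbuffer update.
 */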
681 static void kbase_gpu_mark_atom_for_return(struct kbase_device *kbdev,
682                                                 struct kbase_jd_atom *katom)
683 {
684         kbase_gpu_release_atom(kbdev, katom, NULL);
685         katom->gpu_rb_state = KBASE_ATOM_GPU_RB_RETURN_TO_JS;
686 }
687
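/**
 * kbase_gpu_rmu_workaround - Check whether an atom may be submitted on a slot
 *                            under the BASE_HW_ISSUE_8987 (PRLAM-8987)
 *                            workaround
 * @kbdev: Device pointer
 * @js:    Job slot the atom would be submitted on
 *
 * The workaround prevents slot 2 atoms from being submitted while slots 0/1
 * have atoms on the GPU and vice versa, alternating between the two groups by
 * toggling backend->rmu_workaround_flag.
 *
 * Return: true if submission on @js is currently allowed, false otherwise
 */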
688 static inline bool kbase_gpu_rmu_workaround(struct kbase_device *kbdev, int js)
689 {
690         struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
691         bool slot_busy[3];
692
693         if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
694                 return true;
695         slot_busy[0] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 0,
696                                         KBASE_ATOM_GPU_RB_WAITING_AFFINITY);
697         slot_busy[1] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 1,
698                                         KBASE_ATOM_GPU_RB_WAITING_AFFINITY);
699         slot_busy[2] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 2,
700                                         KBASE_ATOM_GPU_RB_WAITING_AFFINITY);
701
702         if ((js == 2 && !(slot_busy[0] || slot_busy[1])) ||
703                 (js != 2 && !slot_busy[2]))
704                 return true;
705
706         /* Don't submit slot 2 atom while GPU has jobs on slots 0/1 */
707         if (js == 2 && (kbase_gpu_atoms_submitted(kbdev, 0) ||
708                         kbase_gpu_atoms_submitted(kbdev, 1) ||
709                         backend->rmu_workaround_flag))
710                 return false;
711
712         /* Don't submit slot 0/1 atom while GPU has jobs on slot 2 */
713         if (js != 2 && (kbase_gpu_atoms_submitted(kbdev, 2) ||
714                         !backend->rmu_workaround_flag))
715                 return false;
716
717         backend->rmu_workaround_flag = !backend->rmu_workaround_flag;
718
719         return true;
720 }
721
722 static inline bool kbase_gpu_in_protected_mode(struct kbase_device *kbdev)
723 {
724         return kbdev->protected_mode;
725 }
726
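/**
 * kbase_gpu_protected_mode_enter - Switch the GPU into protected mode
 * @kbdev: Device pointer
 *
 * Drops the GPU out of full ACE coherency if necessary, then asks the
 * platform's protected_ops to perform the actual switch.
 *
 * Context: Caller must hold the HW access lock
 *
 * Return: 0 on success, otherwise an error code (-EINVAL if no protected_ops
 *         are registered)
 */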
727 static int kbase_gpu_protected_mode_enter(struct kbase_device *kbdev)
728 {
729         int err = -EINVAL;
730
731         lockdep_assert_held(&kbdev->hwaccess_lock);
732
733         WARN_ONCE(!kbdev->protected_ops,
734                         "Cannot enter protected mode: protected callbacks not specified.\n");
735
736         /*
737          * When entering into protected mode, we must ensure that the
738          * GPU is not operating in coherent mode as well. This is to
739          * ensure that no protected memory can be leaked.
740          */
741         if (kbdev->system_coherency == COHERENCY_ACE)
742                 kbase_cache_set_coherency_mode(kbdev, COHERENCY_ACE_LITE);
743
744         if (kbdev->protected_ops) {
745                 /* Switch GPU to protected mode */
746                 err = kbdev->protected_ops->protected_mode_enter(kbdev);
747
748                 if (err)
749                         dev_warn(kbdev->dev, "Failed to enable protected mode: %d\n",
750                                         err);
751                 else
752                         kbdev->protected_mode = true;
753         }
754
755         return err;
756 }
757
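/**
 * kbase_gpu_protected_mode_reset - Exit protected mode by resetting the GPU
 * @kbdev: Device pointer
 *
 * Context: Caller must hold the HW access lock
 *
 * Return: 0 if a silent GPU reset was issued, -EINVAL if no protected_ops are
 *         registered
 */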
758 static int kbase_gpu_protected_mode_reset(struct kbase_device *kbdev)
759 {
760         lockdep_assert_held(&kbdev->hwaccess_lock);
761
762         WARN_ONCE(!kbdev->protected_ops,
763                         "Cannot exit protected mode: protected callbacks not specified.\n");
764
765         if (!kbdev->protected_ops)
766                 return -EINVAL;
767
768         kbase_reset_gpu_silent(kbdev);
769
770         return 0;
771 }
772
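/**
 * kbase_jm_enter_protected_mode - Step an atom through the protected mode
 *                                 entry state machine
 * @kbdev: Device pointer
 * @katom: Slot ringbuffer contents, with katom[idx] being the atom that is
 *         transitioning
 * @idx:   Index of the transitioning atom within the ringbuffer
 * @js:    Job slot
 *
 * Return: 0 if the transition has completed (or is still progressing),
 *         -EAGAIN if it must be retried later, or -EINVAL if entering
 *         protected mode failed and the atom has been marked for return to
 *         the scheduler
 */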
773 static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
774                 struct kbase_jd_atom **katom, int idx, int js)
775 {
776         int err = 0;
777
778         switch (katom[idx]->protected_state.enter) {
779         case KBASE_ATOM_ENTER_PROTECTED_CHECK:
780                 /* The checks in KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV
781                  * should ensure that we are not already transitioning, and that
782                  * there are no atoms currently on the GPU. */
783                 WARN_ON(kbdev->protected_mode_transition);
784                 WARN_ON(kbase_gpu_atoms_submitted_any(kbdev));
785
786                 kbdev->protected_mode_transition = true;
787                 katom[idx]->protected_state.enter =
788                         KBASE_ATOM_ENTER_PROTECTED_VINSTR;
789
790                 /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
791
792         case KBASE_ATOM_ENTER_PROTECTED_VINSTR:
793                 if (kbase_vinstr_try_suspend(kbdev->vinstr_ctx) < 0) {
794                         /*
795                          * We can't switch now because
796                          * the vinstr core state switch
797                          * is not done yet.
798                          */
799                         return -EAGAIN;
800                 }
801
802                 /* Once this point is reached, the GPU must either be
803                  * switched to protected mode or vinstr must be
804                  * re-enabled. */
805
806                 /*
807                  * Not in correct mode, begin protected mode switch.
808                  * Entering protected mode requires us to power down the L2,
809                  * and drop out of fully coherent mode.
810                  */
811                 katom[idx]->protected_state.enter =
812                         KBASE_ATOM_ENTER_PROTECTED_IDLE_L2;
813
814                 kbase_pm_update_cores_state_nolock(kbdev);
815
816                 /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
817
818         case KBASE_ATOM_ENTER_PROTECTED_IDLE_L2:
819                 /* Avoid unnecessary waiting on non-ACE platforms. */
820                 if (kbdev->current_gpu_coherency_mode == COHERENCY_ACE) {
821                         if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2) ||
822                                 kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_L2)) {
823                                 /*
824                                  * The L2 is still powered, wait for all the users to
825                                  * finish with it before doing the actual reset.
826                                  */
827                                 return -EAGAIN;
828                         }
829                 }
830
831                 katom[idx]->protected_state.enter =
832                         KBASE_ATOM_ENTER_PROTECTED_FINISHED;
833
834                 /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
835
836         case KBASE_ATOM_ENTER_PROTECTED_FINISHED:
837
838                 /* No jobs running, so we can switch GPU mode right now. */
839                 err = kbase_gpu_protected_mode_enter(kbdev);
840
841                 /*
842                  * Regardless of result, we are no longer transitioning
843                  * the GPU.
844                  */
845                 kbdev->protected_mode_transition = false;
846
847                 if (err) {
848                         /*
849                          * Failed to switch into protected mode, resume
850                          * vinstr core and fail atom.
851                          */
852                         kbase_vinstr_resume(kbdev->vinstr_ctx);
853                         katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
854                         kbase_gpu_mark_atom_for_return(kbdev, katom[idx]);
855                         /* Only return if head atom or previous atom
856                          * already removed - as atoms must be returned
857                          * in order. */
858                         if (idx == 0 || katom[0]->gpu_rb_state ==
859                                         KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
860                                 kbase_gpu_dequeue_atom(kbdev, js, NULL);
861                                 kbase_jm_return_atom_to_js(kbdev, katom[idx]);
862                         }
863                         return -EINVAL;
864                 }
865
866                 /* Protected mode sanity checks. */
867                 KBASE_DEBUG_ASSERT_MSG(
868                         kbase_jd_katom_is_protected(katom[idx]) ==
869                         kbase_gpu_in_protected_mode(kbdev),
870                         "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
871                         kbase_jd_katom_is_protected(katom[idx]),
872                         kbase_gpu_in_protected_mode(kbdev));
873                 katom[idx]->gpu_rb_state =
874                         KBASE_ATOM_GPU_RB_READY;
875         }
876
877         return 0;
878 }
879
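/**
 * kbase_jm_exit_protected_mode - Step an atom through the protected mode exit
 *                                state machine
 * @kbdev: Device pointer
 * @katom: Slot ringbuffer contents, with katom[idx] being the atom that is
 *         transitioning
 * @idx:   Index of the transitioning atom within the ringbuffer
 * @js:    Job slot
 *
 * Return: 0 if the transition has completed (or is still progressing),
 *         -EAGAIN if it must be retried later, or -EINVAL if the reset needed
 *         to exit protected mode could not be issued and the atom has been
 *         marked for return to the scheduler
 */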
880 static int kbase_jm_exit_protected_mode(struct kbase_device *kbdev,
881                 struct kbase_jd_atom **katom, int idx, int js)
882 {
883         int err = 0;
884
885
886         switch (katom[idx]->protected_state.exit) {
887         case KBASE_ATOM_EXIT_PROTECTED_CHECK:
888                 /* The checks in KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV
889                  * should ensure that we are not already transitioning, and that
890                  * there are no atoms currently on the GPU. */
891                 WARN_ON(kbdev->protected_mode_transition);
892                 WARN_ON(kbase_gpu_atoms_submitted_any(kbdev));
893
894                 /*
895                  * Exiting protected mode requires a reset, but first the L2
896                  * needs to be powered down to ensure it's not active when the
897                  * reset is issued.
898                  */
899                 katom[idx]->protected_state.exit =
900                                 KBASE_ATOM_EXIT_PROTECTED_IDLE_L2;
901
902                 kbdev->protected_mode_transition = true;
903                 kbase_pm_update_cores_state_nolock(kbdev);
904
905                 /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
906         case KBASE_ATOM_EXIT_PROTECTED_IDLE_L2:
907                 if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2) ||
908                                 kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_L2)) {
909                         /*
910                          * The L2 is still powered, wait for all the users to
911                          * finish with it before doing the actual reset.
912                          */
913                         return -EAGAIN;
914                 }
915                 katom[idx]->protected_state.exit =
916                                 KBASE_ATOM_EXIT_PROTECTED_RESET;
917
918                 /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
919
920         case KBASE_ATOM_EXIT_PROTECTED_RESET:
921                 /* Issue the reset to the GPU */
922                 err = kbase_gpu_protected_mode_reset(kbdev);
923
924                 if (err) {
925                         kbdev->protected_mode_transition = false;
926
927                         /* Failed to exit protected mode, fail atom */
928                         katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
929                         kbase_gpu_mark_atom_for_return(kbdev, katom[idx]);
930                         /* Only return if head atom or previous atom
931                          * already removed - as atoms must be returned
932                          * in order */
933                         if (idx == 0 || katom[0]->gpu_rb_state ==
934                                         KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
935                                 kbase_gpu_dequeue_atom(kbdev, js, NULL);
936                                 kbase_jm_return_atom_to_js(kbdev, katom[idx]);
937                         }
938
939                         kbase_vinstr_resume(kbdev->vinstr_ctx);
940
941                         return -EINVAL;
942                 }
943
944                 katom[idx]->protected_state.exit =
945                                 KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT;
946
947                 /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
948
949         case KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT:
950                 if (kbase_reset_gpu_active(kbdev))
951                         return -EAGAIN;
952
953                 kbdev->protected_mode_transition = false;
954                 kbdev->protected_mode = false;
955
956                 /* protected mode sanity checks */
957                 KBASE_DEBUG_ASSERT_MSG(
958                         kbase_jd_katom_is_protected(katom[idx]) == kbase_gpu_in_protected_mode(kbdev),
959                         "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
960                         kbase_jd_katom_is_protected(katom[idx]), kbase_gpu_in_protected_mode(kbdev));
961                 KBASE_DEBUG_ASSERT_MSG(
962                         (kbase_jd_katom_is_protected(katom[idx]) && js == 0) ||
963                         !kbase_jd_katom_is_protected(katom[idx]),
964                         "Protected atom on JS%d not supported", js);
965         }
966
967         return 0;
968 }
969
970 void kbase_backend_slot_update(struct kbase_device *kbdev)
971 {
972         int js;
973
974         lockdep_assert_held(&kbdev->hwaccess_lock);
975
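        /* Walk every job slot and advance each atom in its ringbuffer as far
         * through the gpu_rb_state machine as it can currently go, submitting
         * it to the hardware once it reaches the ready state. */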
976         for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
977                 struct kbase_jd_atom *katom[2];
978                 int idx;
979
980                 katom[0] = kbase_gpu_inspect(kbdev, js, 0);
981                 katom[1] = kbase_gpu_inspect(kbdev, js, 1);
982                 WARN_ON(katom[1] && !katom[0]);
983
984                 for (idx = 0; idx < SLOT_RB_SIZE; idx++) {
985                         bool cores_ready;
986                         int ret;
987
988                         if (!katom[idx])
989                                 continue;
990
991                         switch (katom[idx]->gpu_rb_state) {
992                         case KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB:
993                                 /* Should be impossible */
994                                 WARN(1, "Attempting to update atom not in ringbuffer\n");
995                                 break;
996
997                         case KBASE_ATOM_GPU_RB_WAITING_BLOCKED:
998                                 if (katom[idx]->atom_flags &
999                                                 KBASE_KATOM_FLAG_X_DEP_BLOCKED)
1000                                         break;
1001
1002                                 katom[idx]->gpu_rb_state =
1003                                 KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV;
1004
1005                         /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
1006
1007                         case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV:
1008                                 if (kbase_gpu_check_secure_atoms(kbdev,
1009                                                 !kbase_jd_katom_is_protected(
1010                                                 katom[idx])))
1011                                         break;
1012
1013                                 if (kbdev->protected_mode_transition)
1014                                         break;
1015
1016                                 katom[idx]->gpu_rb_state =
1017                                         KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION;
1018
1019                         /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
1020
1021                         case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION:
1022
1023                                 /*
1024                                  * Exiting protected mode must be done before
1025                                  * the references on the cores are taken, as
1026                                  * a power-down of the L2 is required, which
1027                                  * can't happen after the references for this
1028                                  * atom are taken.
1029                                  */
1030
1031                                 if (!kbase_gpu_in_protected_mode(kbdev) &&
1032                                         kbase_jd_katom_is_protected(katom[idx])) {
1033                                         /* Atom needs to transition into protected mode. */
1034                                         ret = kbase_jm_enter_protected_mode(kbdev,
1035                                                         katom, idx, js);
1036                                         if (ret)
1037                                                 break;
1038                                 } else if (kbase_gpu_in_protected_mode(kbdev) &&
1039                                         !kbase_jd_katom_is_protected(katom[idx])) {
1040                                         /* Atom needs to transition out of protected mode. */
1041                                         ret = kbase_jm_exit_protected_mode(kbdev,
1042                                                         katom, idx, js);
1043                                         if (ret)
1044                                                 break;
1045                                 }
1046                                 katom[idx]->protected_state.exit =
1047                                                 KBASE_ATOM_EXIT_PROTECTED_CHECK;
1048
1049                                 /* Atom needs no protected mode transition. */
1050
1051                                 katom[idx]->gpu_rb_state =
1052                                         KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE;
1053
1054                         /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
1055
1056                         case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE:
1057                                 if (katom[idx]->will_fail_event_code) {
1058                                         kbase_gpu_mark_atom_for_return(kbdev,
1059                                                         katom[idx]);
1060                                         /* Set EVENT_DONE so this atom will be
1061                                          * completed, not unpulled. */
1062                                         katom[idx]->event_code =
1063                                                 BASE_JD_EVENT_DONE;
1064                                         /* Only return if head atom or previous
1065                                          * atom already removed - as atoms must
1066                                          * be returned in order. */
1067                                         if (idx == 0 || katom[0]->gpu_rb_state ==
1068                                                         KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
1069                                                 kbase_gpu_dequeue_atom(kbdev, js, NULL);
1070                                                 kbase_jm_return_atom_to_js(kbdev, katom[idx]);
1071                                         }
1072                                         break;
1073                                 }
1074
1075                                 cores_ready =
1076                                         kbasep_js_job_check_ref_cores(kbdev, js,
1077                                                                 katom[idx]);
1078
1079                                 if (katom[idx]->event_code ==
1080                                                 BASE_JD_EVENT_PM_EVENT) {
1081                                         katom[idx]->gpu_rb_state =
1082                                                 KBASE_ATOM_GPU_RB_RETURN_TO_JS;
1083                                         break;
1084                                 }
1085
1086                                 if (!cores_ready)
1087                                         break;
1088
1089                                 kbase_js_affinity_retain_slot_cores(kbdev, js,
1090                                                         katom[idx]->affinity);
1091                                 katom[idx]->gpu_rb_state =
1092                                         KBASE_ATOM_GPU_RB_WAITING_AFFINITY;
1093
1094                         /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
1095
1096                         case KBASE_ATOM_GPU_RB_WAITING_AFFINITY:
1097                                 if (!kbase_gpu_rmu_workaround(kbdev, js))
1098                                         break;
1099
1100                                 katom[idx]->gpu_rb_state =
1101                                         KBASE_ATOM_GPU_RB_READY;
1102
1103                         /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
1104
1105                         case KBASE_ATOM_GPU_RB_READY:
1106
1107                                 /* Only submit if head atom or previous atom
1108                                  * already submitted */
1109                                 if (idx == 1 &&
1110                                         (katom[0]->gpu_rb_state !=
1111                                                 KBASE_ATOM_GPU_RB_SUBMITTED &&
1112                                         katom[0]->gpu_rb_state !=
1113                                         KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB))
1114                                         break;
1115
1116                                 /* Check if this job needs the cycle counter
1117                                  * enabled before submission */
1118                                 if (katom[idx]->core_req & BASE_JD_REQ_PERMON)
1119                                         kbase_pm_request_gpu_cycle_counter_l2_is_on(
1120                                                                         kbdev);
1121
1122                                 kbase_job_hw_submit(kbdev, katom[idx], js);
1123                                 katom[idx]->gpu_rb_state =
1124                                                 KBASE_ATOM_GPU_RB_SUBMITTED;
1125
1126                                 /* Inform power management at start/finish of
1127                                  * atom so it can update its GPU utilisation
1128                                  * metrics. */
1129                                 kbase_pm_metrics_update(kbdev,
1130                                                 &katom[idx]->start_timestamp);
1131
1132                         /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
1133
1134                         case KBASE_ATOM_GPU_RB_SUBMITTED:
1135                                 /* Atom submitted to HW, nothing else to do */
1136                                 break;
1137
1138                         case KBASE_ATOM_GPU_RB_RETURN_TO_JS:
1139                                 /* Only return if head atom or previous atom
1140                                  * already removed - as atoms must be returned
1141                                  * in order */
1142                                 if (idx == 0 || katom[0]->gpu_rb_state ==
1143                                         KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
1144                                         kbase_gpu_dequeue_atom(kbdev, js, NULL);
1145                                         kbase_jm_return_atom_to_js(kbdev,
1146                                                                 katom[idx]);
1147                                 }
1148                                 break;
1149                         }
1150                 }
1151         }
1152
1153         /* Warn if PRLAM-8987 affinity restrictions are violated */
1154         if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
1155                 WARN_ON((kbase_gpu_atoms_submitted(kbdev, 0) ||
1156                         kbase_gpu_atoms_submitted(kbdev, 1)) &&
1157                         kbase_gpu_atoms_submitted(kbdev, 2));
1158 }
1159
1160
1161 void kbase_backend_run_atom(struct kbase_device *kbdev,
1162                                 struct kbase_jd_atom *katom)
1163 {
1164         lockdep_assert_held(&kbdev->hwaccess_lock);
1165         kbase_gpu_enqueue_atom(kbdev, katom);
1166         kbase_backend_slot_update(kbdev);
1167 }
1168
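/* Check whether the atom at the head of slot js is followed by an atom from
 * the same context that has been written to the NEXT registers but has not
 * yet started; if so, evict it by writing a NOP to JS_COMMAND_NEXT and roll
 * it back to KBASE_ATOM_GPU_RB_READY. Returns true if an atom was evicted. */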
1169 bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js)
1170 {
1171         struct kbase_jd_atom *katom;
1172         struct kbase_jd_atom *next_katom;
1173
1174         lockdep_assert_held(&kbdev->hwaccess_lock);
1175
1176         katom = kbase_gpu_inspect(kbdev, js, 0);
1177         next_katom = kbase_gpu_inspect(kbdev, js, 1);
1178
1179         if (next_katom && katom->kctx == next_katom->kctx &&
1180                 next_katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED &&
1181                 (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO), NULL)
1182                                                                         != 0 ||
1183                 kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI), NULL)
1184                                                                         != 0)) {
1185                 kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT),
1186                                 JS_COMMAND_NOP, NULL);
1187                 next_katom->gpu_rb_state = KBASE_ATOM_GPU_RB_READY;
1188                 return true;
1189         }
1190
1191         return false;
1192 }
1193
1194 void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
1195                                 u32 completion_code,
1196                                 u64 job_tail,
1197                                 ktime_t *end_timestamp)
1198 {
1199         struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, 0);
1200         struct kbase_context *kctx = katom->kctx;
1201
1202         lockdep_assert_held(&kbdev->hwaccess_lock);
1203
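        /* Hardware workarounds: BASE_HW_ISSUE_6787 requires a cache flush
         * after a job that did not complete successfully, and
         * BASE_HW_ISSUE_10676 after a job that ran only on the second core
         * group. In both cases record the affected cores in
         * need_cache_flush_cores_retained and keep them requested until the
         * flush has been performed. */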
1204         if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6787) &&
1205                         completion_code != BASE_JD_EVENT_DONE &&
1206                         !(completion_code & BASE_JD_SW_EVENT)) {
1207                 katom->need_cache_flush_cores_retained = katom->affinity;
1208                 kbase_pm_request_cores(kbdev, false, katom->affinity);
1209         } else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10676)) {
1210                 if (kbdev->gpu_props.num_core_groups > 1 &&
1211                         !(katom->affinity &
1212                         kbdev->gpu_props.props.coherency_info.group[0].core_mask
1213                                                                         ) &&
1214                         (katom->affinity &
1215                         kbdev->gpu_props.props.coherency_info.group[1].core_mask
1216                                                                         )) {
1217                         dev_info(kbdev->dev, "JD: Flushing cache due to PRLAM-10676\n");
1218                         katom->need_cache_flush_cores_retained =
1219                                                                 katom->affinity;
1220                         kbase_pm_request_cores(kbdev, false,
1221                                                         katom->affinity);
1222                 }
1223         }
1224
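        /* The atom has now left the hardware: pop it from the ringbuffer and
         * release its timeline, LPU and address space associations. */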
1225         katom = kbase_gpu_dequeue_atom(kbdev, js, end_timestamp);
1226         kbase_timeline_job_slot_done(kbdev, katom->kctx, katom, js, 0);
1227         kbase_tlstream_tl_nret_atom_lpu(
1228                         katom,
1229                         &kbdev->gpu_props.props.raw_props.js_features[
1230                                 katom->slot_nr]);
1231         kbase_tlstream_tl_nret_atom_as(katom, &kbdev->as[kctx->as_nr]);
1232         kbase_tlstream_tl_nret_ctx_lpu(
1233                         kctx,
1234                         &kbdev->gpu_props.props.raw_props.js_features[
1235                                 katom->slot_nr]);
1236
1237         if (completion_code == BASE_JD_EVENT_STOPPED) {
1238                 struct kbase_jd_atom *next_katom = kbase_gpu_inspect(kbdev, js,
1239                                                                         0);
1240
1241                 /*
1242                  * Dequeue next atom from ringbuffers on same slot if required.
1243                  * This atom will already have been removed from the NEXT
1244                  * registers by kbase_gpu_soft_hard_stop_slot(), to ensure that
1245                  * the atoms on this slot are returned in the correct order.
1246                  */
1247                 if (next_katom && katom->kctx == next_katom->kctx) {
1248                         kbase_gpu_dequeue_atom(kbdev, js, end_timestamp);
1249                         kbase_jm_return_atom_to_js(kbdev, next_katom);
1250                 }
1251         } else if (completion_code != BASE_JD_EVENT_DONE) {
1252                 struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
1253                 int i;
1254
1255 #if KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR != 0
1256                 KBASE_TRACE_DUMP(kbdev);
1257 #endif
1258                 kbasep_js_clear_submit_allowed(js_devdata, katom->kctx);
1259
1260                 /*
1261                  * Remove all atoms on the same context from ringbuffers. This
1262                  * will not remove atoms that are already on the GPU, as these
1263                  * are guaranteed not to have fail dependencies on the failed
1264                  * atom.
1265                  */
1266                 for (i = 0; i < kbdev->gpu_props.num_job_slots; i++) {
1267                         struct kbase_jd_atom *katom_idx0 =
1268                                                 kbase_gpu_inspect(kbdev, i, 0);
1269                         struct kbase_jd_atom *katom_idx1 =
1270                                                 kbase_gpu_inspect(kbdev, i, 1);
1271
1272                         if (katom_idx0 && katom_idx0->kctx == katom->kctx &&
1273                                 katom_idx0->gpu_rb_state !=
1274                                 KBASE_ATOM_GPU_RB_SUBMITTED) {
1275                                 /* Dequeue katom_idx0 from ringbuffer */
1276                                 kbase_gpu_dequeue_atom(kbdev, i, end_timestamp);
1277
1278                                 if (katom_idx1 &&
1279                                         katom_idx1->kctx == katom->kctx &&
1280                                         katom_idx1->gpu_rb_state !=
1281                                                 KBASE_ATOM_GPU_RB_SUBMITTED) {
1282                                         /* Dequeue katom_idx1 from ringbuffer */
1283                                         kbase_gpu_dequeue_atom(kbdev, i,
1284                                                         end_timestamp);
1285
1286                                         katom_idx1->event_code =
1287                                                         BASE_JD_EVENT_STOPPED;
1288                                         kbase_jm_return_atom_to_js(kbdev,
1289                                                                 katom_idx1);
1290                                 }
1291                                 katom_idx0->event_code = BASE_JD_EVENT_STOPPED;
1292                                 kbase_jm_return_atom_to_js(kbdev, katom_idx0);
1293
1294                         } else if (katom_idx1 &&
1295                                         katom_idx1->kctx == katom->kctx &&
1296                                         katom_idx1->gpu_rb_state !=
1297                                                 KBASE_ATOM_GPU_RB_SUBMITTED) {
1298                                 /* Cannot dequeue this atom yet - will be
1299                                  * dequeued when atom at idx0 completes */
1300                                 katom_idx1->event_code = BASE_JD_EVENT_STOPPED;
1301                                 kbase_gpu_mark_atom_for_return(kbdev,
1302                                                                 katom_idx1);
1303                         }
1304                 }
1305         }
1306
1307         KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_JOB_DONE, kctx, katom, katom->jc,
1308                                         js, completion_code);
1309
1310         if (job_tail != 0 && job_tail != katom->jc) {
1311                 bool was_updated = (job_tail != katom->jc);
1312
1313                 /* Some of the job has been executed, so we update the job chain
1314                  * address to where we should resume from */
1315                 katom->jc = job_tail;
1316                 if (was_updated)
1317                         KBASE_TRACE_ADD_SLOT(kbdev, JM_UPDATE_HEAD, katom->kctx,
1318                                                 katom, job_tail, js);
1319         }
1320
1321         /* Only update the event code for jobs that weren't cancelled */
1322         if (katom->event_code != BASE_JD_EVENT_JOB_CANCELLED)
1323                 katom->event_code = (base_jd_event_code)completion_code;
1324
1325         kbase_device_trace_register_access(kctx, REG_WRITE,
1326                                                 JOB_CONTROL_REG(JOB_IRQ_CLEAR),
1327                                                 1 << js);
1328
1329         /* Complete the job, and start new ones
1330          *
1331          * Also defer remaining work onto the workqueue:
1332          * - Re-queue Soft-stopped jobs
1333          * - For any other jobs, queue the job back into the dependency system
1334          * - Schedule out the parent context if necessary, and schedule a new
1335          *   one in.
1336          */
1337 #ifdef CONFIG_GPU_TRACEPOINTS
1338         {
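                /* Report the new owner of the slot for the gpu_sched_switch
                 * tracepoint: the next submitted atom's context if there is
                 * one, otherwise the slot is reported as idle. */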
1339                 /* The atom in the HEAD */
1340                 struct kbase_jd_atom *next_katom = kbase_gpu_inspect(kbdev, js,
1341                                                                         0);
1342
1343                 if (next_katom && next_katom->gpu_rb_state ==
1344                                                 KBASE_ATOM_GPU_RB_SUBMITTED) {
1345                         char js_string[16];
1346
1347                         trace_gpu_sched_switch(kbasep_make_job_slot_string(js,
1348                                                                 js_string),
1349                                                 ktime_to_ns(*end_timestamp),
1350                                                 (u32)next_katom->kctx->id, 0,
1351                                                 next_katom->work_id);
1352                         kbdev->hwaccess.backend.slot_rb[js].last_context =
1353                                                         next_katom->kctx;
1354                 } else {
1355                         char js_string[16];
1356
1357                         trace_gpu_sched_switch(kbasep_make_job_slot_string(js,
1358                                                                 js_string),
1359                                                 ktime_to_ns(ktime_get()), 0, 0,
1360                                                 0);
1361                         kbdev->hwaccess.backend.slot_rb[js].last_context = NULL;
1362                 }
1363         }
1364 #endif
1365
1366         if (completion_code == BASE_JD_EVENT_STOPPED)
1367                 katom = kbase_jm_return_atom_to_js(kbdev, katom);
1368         else
1369                 katom = kbase_jm_complete(kbdev, katom, end_timestamp);
1370
1371         if (katom) {
1372                 /* Cross-slot dependency has now become runnable. Try to submit
1373                  * it. */
1374
1375                 /* Check if there are lower priority jobs to soft stop */
1376                 kbase_job_slot_ctx_priority_check_locked(kctx, katom);
1377
1378                 kbase_jm_try_kick(kbdev, 1 << katom->slot_nr);
1379         }
1380
1381         /* Job completion may have unblocked other atoms. Try to update all job
1382          * slots */
1383         kbase_backend_slot_update(kbdev);
1384 }
1385
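/**
 * kbase_backend_reset - Flush the slot ringbuffers following a GPU reset
 * @kbdev:         Device pointer
 * @end_timestamp: Timestamp of the reset
 *
 * Atoms that had not reached the hardware are kept in the ringbuffer with
 * their core and protected-mode state rewound so they are processed again
 * from their starting state. Atoms that were already on the hardware cannot
 * be recovered and are completed with BASE_JD_EVENT_JOB_CANCELLED.
 *
 * Context: Caller must hold the HW access lock
 */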
1386 void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp)
1387 {
1388         int js;
1389
1390         lockdep_assert_held(&kbdev->hwaccess_lock);
1391
1392         for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
1393                 int atom_idx = 0;
1394                 int idx;
1395
1396                 for (idx = 0; idx < SLOT_RB_SIZE; idx++) {
1397                         struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev,
1398                                         js, atom_idx);
1399                         bool keep_in_jm_rb = false;
1400
1401                         if (!katom)
1402                                 break;
1403
1404                         if (katom->gpu_rb_state < KBASE_ATOM_GPU_RB_SUBMITTED)
1405                                 keep_in_jm_rb = true;
1406
1407                         kbase_gpu_release_atom(kbdev, katom, NULL);
1408
1409                         /*
1410                          * If the atom wasn't on HW when the reset was issued
1411                          * then leave it in the RB and next time we're kicked
1412                          * it will be processed again from the starting state.
1413                          */
1414                         if (keep_in_jm_rb) {
1415                                 katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
1416                                 katom->affinity = 0;
1417                                 katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
1418                                 /* As the atom was not removed, increment the
1419                                  * index so that we read the correct atom in the
1420                                  * next iteration. */
1421                                 atom_idx++;
1422                                 continue;
1423                         }
1424
1425                         /*
1426                          * The atom was on the HW when the reset was issued,
1427                          * so all we can do is fail the atom.
1428                          */
1429                         kbase_gpu_dequeue_atom(kbdev, js, NULL);
1430                         katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
1431                         kbase_jm_complete(kbdev, katom, end_timestamp);
1432                 }
1433         }
1434
1435         kbdev->protected_mode_transition = false;
1436         kbdev->protected_mode = false;
1437 }
1438
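/* Issue a soft/hard stop for an atom that is currently on the hardware,
 * entering the disjoint state if required, and block further submission from
 * its context. */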
1439 static inline void kbase_gpu_stop_atom(struct kbase_device *kbdev,
1440                                         int js,
1441                                         struct kbase_jd_atom *katom,
1442                                         u32 action)
1443 {
1444         struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
1445         u32 hw_action = action & JS_COMMAND_MASK;
1446
1447         kbase_job_check_enter_disjoint(kbdev, action, katom->core_req, katom);
1448         kbasep_job_slot_soft_or_hard_stop_do_action(kbdev, js, hw_action,
1449                                                         katom->core_req, katom);
1450         kbasep_js_clear_submit_allowed(js_devdata, katom->kctx);
1451 }
1452
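/* Mark an atom that never started executing for return to the job scheduler
 * with BASE_JD_EVENT_REMOVED_FROM_NEXT, block further submission from its
 * context and, if requested, enter the disjoint state. */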
1453 static inline void kbase_gpu_remove_atom(struct kbase_device *kbdev,
1454                                                 struct kbase_jd_atom *katom,
1455                                                 u32 action,
1456                                                 bool disjoint)
1457 {
1458         struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
1459
1460         katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT;
1461         kbase_gpu_mark_atom_for_return(kbdev, katom);
1462         kbasep_js_clear_submit_allowed(js_devdata, katom->kctx);
1463
1464         if (disjoint)
1465                 kbase_job_check_enter_disjoint(kbdev, action, katom->core_req,
1466                                                                         katom);
1467 }
1468
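/* If the atom has a cross-slot dependent that is still present in a slot
 * ringbuffer, return the slot of that dependent so it can be stopped too;
 * otherwise return -1. */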
1469 static int should_stop_x_dep_slot(struct kbase_jd_atom *katom)
1470 {
1471         if (katom->x_post_dep) {
1472                 struct kbase_jd_atom *dep_atom = katom->x_post_dep;
1473
1474                 if (dep_atom->gpu_rb_state !=
1475                                         KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB &&
1476                         dep_atom->gpu_rb_state !=
1477                                         KBASE_ATOM_GPU_RB_RETURN_TO_JS)
1478                         return dep_atom->slot_nr;
1479         }
1480         return -1;
1481 }
1482
1483 static void kbase_job_evicted(struct kbase_jd_atom *katom)
1484 {
1485         kbase_timeline_job_slot_done(katom->kctx->kbdev, katom->kctx, katom,
1486                         katom->slot_nr, KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT);
1487 }
1488
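/**
 * kbase_backend_soft_hard_stop_slot - Soft or hard stop atoms on a job slot
 * @kbdev:  Device pointer
 * @kctx:   Context to stop atoms of, or NULL to stop atoms of all contexts
 * @js:     Job slot to stop atoms on
 * @katom:  Specific atom to stop, or NULL to stop all matching atoms
 * @action: Stop action (soft or hard stop)
 *
 * Atoms that have not reached the hardware are removed from the ringbuffer
 * and returned to the job scheduler; an atom sitting in the NEXT registers is
 * evicted with a NOP where possible; atoms already executing are stopped with
 * the requested action. When a specific atom is targeted, an idx1 atom from
 * the same context is also removed so that atoms are returned in order.
 * Cross-slot dependents are stopped recursively on their own slots.
 *
 * Context: Caller must hold the HW access lock
 *
 * Return: true if a stop command was issued to the hardware
 */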
1489 bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
1490                                         struct kbase_context *kctx,
1491                                         int js,
1492                                         struct kbase_jd_atom *katom,
1493                                         u32 action)
1494 {
1495         struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
1496
1497         struct kbase_jd_atom *katom_idx0;
1498         struct kbase_jd_atom *katom_idx1;
1499
1500         bool katom_idx0_valid, katom_idx1_valid;
1501
1502         bool ret = false;
1503
1504         int stop_x_dep_idx0 = -1, stop_x_dep_idx1 = -1;
1505
1506         lockdep_assert_held(&kbdev->hwaccess_lock);
1507
1508         katom_idx0 = kbase_gpu_inspect(kbdev, js, 0);
1509         katom_idx1 = kbase_gpu_inspect(kbdev, js, 1);
1510
1511         if (katom) {
1512                 katom_idx0_valid = (katom_idx0 == katom);
1513                 /* If idx0 is to be removed and idx1 is on the same context,
1514                  * then idx1 must also be removed otherwise the atoms might be
1515                  * returned out of order */
1516                 if (katom_idx1)
1517                         katom_idx1_valid = (katom_idx1 == katom) ||
1518                                                 (katom_idx0_valid &&
1519                                                         (katom_idx0->kctx ==
1520                                                         katom_idx1->kctx));
1521                 else
1522                         katom_idx1_valid = false;
1523         } else {
1524                 katom_idx0_valid = (katom_idx0 &&
1525                                         (!kctx || katom_idx0->kctx == kctx));
1526                 katom_idx1_valid = (katom_idx1 &&
1527                                         (!kctx || katom_idx1->kctx == kctx));
1528         }
1529
1530         if (katom_idx0_valid)
1531                 stop_x_dep_idx0 = should_stop_x_dep_slot(katom_idx0);
1532         if (katom_idx1_valid)
1533                 stop_x_dep_idx1 = should_stop_x_dep_slot(katom_idx1);
1534
1535         if (katom_idx0_valid) {
1536                 if (katom_idx0->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED) {
1537                         /* Simple case - just dequeue and return */
1538                         kbase_gpu_dequeue_atom(kbdev, js, NULL);
1539                         if (katom_idx1_valid) {
1540                                 kbase_gpu_dequeue_atom(kbdev, js, NULL);
1541                                 katom_idx1->event_code =
1542                                                 BASE_JD_EVENT_REMOVED_FROM_NEXT;
1543                                 kbase_jm_return_atom_to_js(kbdev, katom_idx1);
1544                                 kbasep_js_clear_submit_allowed(js_devdata,
1545                                                         katom_idx1->kctx);
1546                         }
1547
1548                         katom_idx0->event_code =
1549                                                 BASE_JD_EVENT_REMOVED_FROM_NEXT;
1550                         kbase_jm_return_atom_to_js(kbdev, katom_idx0);
1551                         kbasep_js_clear_submit_allowed(js_devdata,
1552                                                         katom_idx0->kctx);
1553                 } else {
1554                         /* katom_idx0 is on GPU */
1555                         if (katom_idx1 && katom_idx1->gpu_rb_state ==
1556                                                 KBASE_ATOM_GPU_RB_SUBMITTED) {
1557                                 /* katom_idx0 and katom_idx1 are on GPU */
1558
1559                                 if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
1560                                                 JS_COMMAND_NEXT), NULL) == 0) {
1561                                         /* idx0 has already completed - stop
1562                                          * idx1 if needed */
1563                                         if (katom_idx1_valid) {
1564                                                 kbase_gpu_stop_atom(kbdev, js,
1565                                                                 katom_idx1,
1566                                                                 action);
1567                                                 ret = true;
1568                                         }
1569                                 } else {
1570                                         /* idx1 is in NEXT registers - attempt
1571                                          * to remove */
1572                                         kbase_reg_write(kbdev,
1573                                                         JOB_SLOT_REG(js,
1574                                                         JS_COMMAND_NEXT),
1575                                                         JS_COMMAND_NOP, NULL);
1576
1577                                         if (kbase_reg_read(kbdev,
1578                                                         JOB_SLOT_REG(js,
1579                                                         JS_HEAD_NEXT_LO), NULL)
1580                                                                         != 0 ||
1581                                                 kbase_reg_read(kbdev,
1582                                                         JOB_SLOT_REG(js,
1583                                                         JS_HEAD_NEXT_HI), NULL)
1584                                                                         != 0) {
1585                                                 /* idx1 removed successfully,
1586                                                  * will be handled in IRQ */
1587                                                 kbase_job_evicted(katom_idx1);
1588                                                 kbase_gpu_remove_atom(kbdev,
1589                                                                 katom_idx1,
1590                                                                 action, true);
1591                                                 stop_x_dep_idx1 =
1592                                         should_stop_x_dep_slot(katom_idx1);
1593
1594                                                 /* stop idx0 if still on GPU */
1595                                                 kbase_gpu_stop_atom(kbdev, js,
1596                                                                 katom_idx0,
1597                                                                 action);
1598                                                 ret = true;
1599                                         } else if (katom_idx1_valid) {
1600                                                 /* idx0 has already completed,
1601                                                  * stop idx1 if needed */
1602                                                 kbase_gpu_stop_atom(kbdev, js,
1603                                                                 katom_idx1,
1604                                                                 action);
1605                                                 ret = true;
1606                                         }
1607                                 }
1608                         } else if (katom_idx1_valid) {
1609                                 /* idx1 not on GPU but must be dequeued */
1610
1611                                 /* idx1 will be handled in IRQ */
1612                                 kbase_gpu_remove_atom(kbdev, katom_idx1, action,
1613                                                                         false);
1614                                 /* stop idx0 */
1615                                 /* This will be repeated for anything removed
1616                                  * from the next registers, since their normal
1617                                  * flow was also interrupted, and this function
1618                                  * might not enter disjoint state e.g. if we
1619                                  * don't actually do a hard stop on the head
1620                                  * atom */
1621                                 kbase_gpu_stop_atom(kbdev, js, katom_idx0,
1622                                                                         action);
1623                                 ret = true;
1624                         } else {
1625                                 /* no atom in idx1 */
1626                                 /* just stop idx0 */
1627                                 kbase_gpu_stop_atom(kbdev, js, katom_idx0,
1628                                                                         action);
1629                                 ret = true;
1630                         }
1631                 }
1632         } else if (katom_idx1_valid) {
1633                 if (katom_idx1->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED) {
1634                         /* Mark for return */
1635                         /* idx1 will be returned once idx0 completes */
1636                         kbase_gpu_remove_atom(kbdev, katom_idx1, action,
1637                                                                         false);
1638                 } else {
1639                         /* idx1 is on GPU */
1640                         if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
1641                                                 JS_COMMAND_NEXT), NULL) == 0) {
1642                                 /* idx0 has already completed - stop idx1 */
1643                                 kbase_gpu_stop_atom(kbdev, js, katom_idx1,
1644                                                                         action);
1645                                 ret = true;
1646                         } else {
1647                                 /* idx1 is in NEXT registers - attempt to
1648                                  * remove */
1649                                 kbase_reg_write(kbdev, JOB_SLOT_REG(js,
1650                                                         JS_COMMAND_NEXT),
1651                                                         JS_COMMAND_NOP, NULL);
1652
1653                                 if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
1654                                                 JS_HEAD_NEXT_LO), NULL) != 0 ||
1655                                     kbase_reg_read(kbdev, JOB_SLOT_REG(js,
1656                                                 JS_HEAD_NEXT_HI), NULL) != 0) {
1657                                         /* idx1 removed successfully, will be
1658                                          * handled in IRQ once idx0 completes */
1659                                         kbase_job_evicted(katom_idx1);
1660                                         kbase_gpu_remove_atom(kbdev, katom_idx1,
1661                                                                         action,
1662                                                                         false);
1663                                 } else {
1664                                         /* idx0 has already completed - stop
1665                                          * idx1 */
1666                                         kbase_gpu_stop_atom(kbdev, js,
1667                                                                 katom_idx1,
1668                                                                 action);
1669                                         ret = true;
1670                                 }
1671                         }
1672                 }
1673         }
1674
1675
1676         if (stop_x_dep_idx0 != -1)
1677                 kbase_backend_soft_hard_stop_slot(kbdev, kctx, stop_x_dep_idx0,
1678                                                                 NULL, action);
1679
1680         if (stop_x_dep_idx1 != -1)
1681                 kbase_backend_soft_hard_stop_slot(kbdev, kctx, stop_x_dep_idx1,
1682                                                                 NULL, action);
1683
1684         return ret;
1685 }
1686
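/**
 * kbase_gpu_cacheclean - Clean and invalidate the GPU caches for an atom
 * @kbdev: Device pointer
 * @katom: Atom whose retained cores required the cache flush
 *
 * Issues GPU_COMMAND_CLEAN_INV_CACHES and polls GPU_IRQ_RAWSTAT for
 * CLEAN_CACHES_COMPLETED (bounded by KBASE_CLEAN_CACHE_MAX_LOOPS), clears the
 * interrupt and then releases the cores that were retained for the flush.
 */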
1687 void kbase_gpu_cacheclean(struct kbase_device *kbdev,
1688                                         struct kbase_jd_atom *katom)
1689 {
1690         /* Limit the number of loops to avoid a hang if the interrupt is missed
1691          */
1692         u32 max_loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
1693         unsigned long flags;
1694
1695         mutex_lock(&kbdev->cacheclean_lock);
1696
1697         /* use GPU_COMMAND completion solution */
1698         /* clean & invalidate the caches */
1699         KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0);
1700         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
1701                                         GPU_COMMAND_CLEAN_INV_CACHES, NULL);
1702
1703         /* wait for cache flush to complete before continuing */
1704         while (--max_loops &&
1705                 (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL) &
1706                                                 CLEAN_CACHES_COMPLETED) == 0)
1707                 ;
1708
1709         /* clear the CLEAN_CACHES_COMPLETED irq */
1710         KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u,
1711                                                         CLEAN_CACHES_COMPLETED);
1712         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR),
1713                                                 CLEAN_CACHES_COMPLETED, NULL);
1714         KBASE_DEBUG_ASSERT_MSG(kbdev->hwcnt.backend.state !=
1715                                                 KBASE_INSTR_STATE_CLEANING,
1716             "Instrumentation code was cleaning caches, but Job Management code cleared their IRQ - Instrumentation code will now hang.");
1717
1718         mutex_unlock(&kbdev->cacheclean_lock);
1719
1720         spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1721         kbase_pm_unrequest_cores(kbdev, false,
1722                                         katom->need_cache_flush_cores_retained);
1723         spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1724 }
1725
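/* Workqueue-context completion of an atom: perform any cache flush deferred
 * by a hardware workaround, apply the PRLAM-10969 fragment job rerun
 * workaround where applicable, and reset the atom's coreref state and
 * affinity. */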
1726 void kbase_backend_complete_wq(struct kbase_device *kbdev,
1727                                                 struct kbase_jd_atom *katom)
1728 {
1729         /*
1730          * If cache flush required due to HW workaround then perform the flush
1731          * now
1732          */
1733         if (katom->need_cache_flush_cores_retained) {
1734                 kbase_gpu_cacheclean(kbdev, katom);
1735                 katom->need_cache_flush_cores_retained = 0;
1736         }
1737
1738         if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10969)            &&
1739             (katom->core_req & BASE_JD_REQ_FS)                        &&
1740             katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT       &&
1741             (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED) &&
1742             !(katom->atom_flags & KBASE_KATOM_FLAGS_RERUN)) {
1743                 dev_dbg(kbdev->dev, "Soft-stopped fragment shader job got a TILE_RANGE_FAULT. Possible HW issue, trying SW workaround\n");
1744                 if (kbasep_10969_workaround_clamp_coordinates(katom)) {
1745                         /* The job had a TILE_RANGE_FAULT after it was soft-stopped.
1746                          * Due to an HW issue we try to execute the job again.
1747                          */
1748                         dev_dbg(kbdev->dev,
1749                                 "Clamping has been executed, try to rerun the job\n"
1750                         );
1751                         katom->event_code = BASE_JD_EVENT_STOPPED;
1752                         katom->atom_flags |= KBASE_KATOM_FLAGS_RERUN;
1753                 }
1754         }
1755
1756         /* Clear the coreref_state now - while check_deref_cores() may not have
1757          * been called yet, the caller will have taken a copy of this field. If
1758          * this is not done, then if the atom is re-scheduled (following a soft
1759          * stop) then the core reference would not be retaken. */
1760         katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
1761         katom->affinity = 0;
1762 }
1763
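/* Release the core references recorded for a completed atom and, if the
 * device has no remaining active users, re-evaluate whether the GPU can be
 * powered down. */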
1764 void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
1765                 base_jd_core_req core_req, u64 affinity,
1766                 enum kbase_atom_coreref_state coreref_state)
1767 {
1768         unsigned long flags;
1769
1770         spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1771         kbasep_js_job_check_deref_cores_nokatom(kbdev, core_req, affinity,
1772                         coreref_state);
1773         spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1774
1775         if (!kbdev->pm.active_count) {
1776                 mutex_lock(&kbdev->js_data.runpool_mutex);
1777                 mutex_lock(&kbdev->pm.lock);
1778                 kbase_pm_update_active(kbdev);
1779                 mutex_unlock(&kbdev->pm.lock);
1780                 mutex_unlock(&kbdev->js_data.runpool_mutex);
1781         }
1782 }
1783
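/* Debugging aid: log the contents of every slot ringbuffer (atom pointer and
 * GPU ringbuffer state, or "empty") while holding the HW access lock. */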
1784 void kbase_gpu_dump_slots(struct kbase_device *kbdev)
1785 {
1786         struct kbasep_js_device_data *js_devdata;
1787         unsigned long flags;
1788         int js;
1789
1790         js_devdata = &kbdev->js_data;
1791
1792         spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1793
1794         dev_info(kbdev->dev, "kbase_gpu_dump_slots:\n");
1795
1796         for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
1797                 int idx;
1798
1799                 for (idx = 0; idx < SLOT_RB_SIZE; idx++) {
1800                         struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev,
1801                                                                         js,
1802                                                                         idx);
1803
1804                         if (katom)
1805                                 dev_info(kbdev->dev,
1806                                 "  js%d idx%d : katom=%p gpu_rb_state=%d\n",
1807                                 js, idx, katom, katom->gpu_rb_state);
1808                         else
1809                                 dev_info(kbdev->dev, "  js%d idx%d : empty\n",
1810                                                                 js, idx);
1811                 }
1812         }
1813
1814         spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1815 }
1816
1817
1818