MALI: rockchip: upgrade midgard DDK to r11p0-00rel0
[firefly-linux-kernel-4.4.55.git] / drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c
1 /*
2  *
3  * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
4  *
5  * This program is free software and is provided to you under the terms of the
6  * GNU General Public License version 2 as published by the Free Software
7  * Foundation, and any use by you of this program is subject to the terms
8  * of such GNU licence.
9  *
10  * A copy of the licence is included with the program, and can also be obtained
11  * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12  * Boston, MA  02110-1301, USA.
13  *
14  */
15
16
17
18 /*
19  * Base kernel job manager APIs
20  */
21
22 #include <mali_kbase.h>
23 #include <mali_kbase_config.h>
24 #include <mali_midg_regmap.h>
25 #if defined(CONFIG_MALI_GATOR_SUPPORT)
26 #include <mali_kbase_gator.h>
27 #endif
28 #include <mali_kbase_tlstream.h>
29 #include <mali_kbase_hw.h>
30 #include <mali_kbase_config_defaults.h>
31 #include <mali_kbase_hwaccess_jm.h>
32 #include <backend/gpu/mali_kbase_device_internal.h>
33 #include <backend/gpu/mali_kbase_irq_internal.h>
34 #include <backend/gpu/mali_kbase_js_affinity.h>
35 #include <backend/gpu/mali_kbase_jm_internal.h>
36
37 #define beenthere(kctx, f, a...) \
38                         dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
39
40 #if KBASE_GPU_RESET_EN
41 static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev);
42 static void kbasep_reset_timeout_worker(struct work_struct *data);
43 static enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *timer);
44 #endif /* KBASE_GPU_RESET_EN */
45
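/* Return non-zero if job slot @js has nothing in its NEXT command register,
 * i.e. the slot is free to accept a new job. */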
46 static inline int kbasep_jm_is_js_free(struct kbase_device *kbdev, int js,
47                                                 struct kbase_context *kctx)
48 {
49         return !kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), kctx);
50 }
51
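/**
 * kbase_job_hw_submit - Submit an atom to a job slot
 * @kbdev: kbase device
 * @katom: atom to submit
 * @js:    job slot to submit to
 *
 * Writes the job chain head pointer, affinity mask and slot configuration to
 * the slot's NEXT registers and then issues JS_COMMAND_START. The slot's
 * NEXT command register must be free (asserted below).
 */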
52 void kbase_job_hw_submit(struct kbase_device *kbdev,
53                                 struct kbase_jd_atom *katom,
54                                 int js)
55 {
56         struct kbase_context *kctx;
57         u32 cfg;
58         u64 jc_head = katom->jc;
59
60         KBASE_DEBUG_ASSERT(kbdev);
61         KBASE_DEBUG_ASSERT(katom);
62
63         kctx = katom->kctx;
64
65         /* Command register must be available */
66         KBASE_DEBUG_ASSERT(kbasep_jm_is_js_free(kbdev, js, kctx));
67         /* Affinity must not be violated */
68         kbase_js_debug_log_current_affinities(kbdev);
69         KBASE_DEBUG_ASSERT(!kbase_js_affinity_would_violate(kbdev, js,
70                                                         katom->affinity));
71
72         kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO),
73                                                 jc_head & 0xFFFFFFFF, kctx);
74         kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI),
75                                                 jc_head >> 32, kctx);
76
77         kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_LO),
78                                         katom->affinity & 0xFFFFFFFF, kctx);
79         kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_HI),
80                                         katom->affinity >> 32, kctx);
81
82         /* start MMU, medium priority, cache clean/flush on end, clean/flush on
83          * start */
84         cfg = kctx->as_nr;
85         if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION))
86                 cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;
87
88 #ifndef CONFIG_MALI_COH_GPU
89         cfg |= JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;
90         cfg |= JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE;
91 #endif
92
93         cfg |= JS_CONFIG_START_MMU;
94         cfg |= JS_CONFIG_THREAD_PRI(8);
95
96         if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE) &&
97                 (katom->atom_flags & KBASE_KATOM_FLAG_SECURE))
98                 cfg |= JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK;
99
100         if (kbase_hw_has_feature(kbdev,
101                                 BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
102                 if (!kbdev->hwaccess.backend.slot_rb[js].job_chain_flag) {
103                         cfg |= JS_CONFIG_JOB_CHAIN_FLAG;
104                         katom->atom_flags |= KBASE_KATOM_FLAGS_JOBCHAIN;
105                         kbdev->hwaccess.backend.slot_rb[js].job_chain_flag =
106                                                                 true;
107                 } else {
108                         katom->atom_flags &= ~KBASE_KATOM_FLAGS_JOBCHAIN;
109                         kbdev->hwaccess.backend.slot_rb[js].job_chain_flag =
110                                                                 false;
111                 }
112         }
113
114         kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_CONFIG_NEXT), cfg, kctx);
115
116         if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION))
117                 kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_FLUSH_ID_NEXT),
118                                 katom->flush_id, kctx);
119
120         /* Write an approximate start timestamp.
121          * It's approximate because there might be a job in the HEAD register.
122          * In such cases, we'll try to make a better approximation in the IRQ
123  * handler (accurate to within KBASE_JS_IRQ_THROTTLE_TIME_US). */
124         katom->start_timestamp = ktime_get();
125
126         /* GO ! */
127         dev_dbg(kbdev->dev, "JS: Submitting atom %p from ctx %p to js[%d] with head=0x%llx, affinity=0x%llx",
128                                 katom, kctx, js, jc_head, katom->affinity);
129
130         KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_SUBMIT, kctx, katom, jc_head, js,
131                                                         (u32) katom->affinity);
132
133 #if defined(CONFIG_MALI_GATOR_SUPPORT)
134         kbase_trace_mali_job_slots_event(
135                                 GATOR_MAKE_EVENT(GATOR_JOB_SLOT_START, js),
136                                 kctx, kbase_jd_atom_id(kctx, katom));
137 #endif
138         kbase_tlstream_tl_attrib_atom_config(katom, jc_head,
139                         katom->affinity, cfg);
140         kbase_tlstream_tl_ret_ctx_lpu(
141                 kctx,
142                 &kbdev->gpu_props.props.raw_props.js_features[
143                         katom->slot_nr]);
144         kbase_tlstream_tl_ret_atom_as(katom, &kbdev->as[kctx->as_nr]);
145         kbase_tlstream_tl_ret_atom_lpu(
146                         katom,
147                         &kbdev->gpu_props.props.raw_props.js_features[js],
148                         "ctx_nr,atom_nr");
149 #ifdef CONFIG_GPU_TRACEPOINTS
150         if (!kbase_backend_nr_atoms_submitted(kbdev, js)) {
151                 /* If this is the only job on the slot, trace it as starting */
152                 char js_string[16];
153
154                 trace_gpu_sched_switch(
155                                 kbasep_make_job_slot_string(js, js_string),
156                                 ktime_to_ns(katom->start_timestamp),
157                                 (u32)katom->kctx->id, 0, katom->work_id);
158                 kbdev->hwaccess.backend.slot_rb[js].last_context = katom->kctx;
159         }
160 #endif
161         kbase_timeline_job_slot_submit(kbdev, kctx, katom, js);
162
163         kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT),
164                                                 JS_COMMAND_START, katom->kctx);
165 }
166
167 /**
168  * kbasep_job_slot_update_head_start_timestamp - Update timestamp
169  * @kbdev: kbase device
170  * @js: job slot
171  * @end_timestamp: timestamp
172  *
173  * Update the start_timestamp of the job currently in the HEAD, based on the
174  * fact that we got an IRQ for the previous set of completed jobs.
175  *
176  * The estimate also takes into account the %KBASE_JS_IRQ_THROTTLE_TIME_US and
177  * the time the job was submitted, to work out the best estimate (which might
178  * still result in an over-estimate of the calculated time spent)
179  */
180 static void kbasep_job_slot_update_head_start_timestamp(
181                                                 struct kbase_device *kbdev,
182                                                 int js,
183                                                 ktime_t end_timestamp)
184 {
185         if (kbase_backend_nr_atoms_on_slot(kbdev, js) > 0) {
186                 struct kbase_jd_atom *katom;
187                 ktime_t new_timestamp;
188                 ktime_t timestamp_diff;
189                 /* The atom in the HEAD */
190                 katom = kbase_gpu_inspect(kbdev, js, 0);
191
192                 KBASE_DEBUG_ASSERT(katom != NULL);
193
194                 /* Account for any IRQ Throttle time - makes an overestimate of
195                  * the time spent by the job */
196                 new_timestamp = ktime_sub_ns(end_timestamp,
197                                         KBASE_JS_IRQ_THROTTLE_TIME_US * 1000);
198                 timestamp_diff = ktime_sub(new_timestamp,
199                                                         katom->start_timestamp);
200                 if (ktime_to_ns(timestamp_diff) >= 0) {
201                         /* Only update the timestamp if it's a better estimate
202                          * than what's currently stored. This is because our
203                          * estimate that accounts for the throttle time may be
204                          * too much of an overestimate */
205                         katom->start_timestamp = new_timestamp;
206                 }
207         }
208 }
209
210 #if (defined(MALI_MIPE_ENABLED) && MALI_MIPE_ENABLED) || \
211         !defined(MALI_MIPE_ENABLED)
212 /**
213  * kbasep_trace_tl_nret_atom_lpu - Call nret_atom_lpu timeline tracepoint
214  * @kbdev: kbase device
215  * @i: job slot
216  *
217  * Get kbase atom by calling kbase_gpu_inspect for given job slot.
218  * Then use obtained katom and name of slot associated with the given
219  * job slot number in tracepoint call to the instrumentation module
220  * informing that given atom is no longer executed on given lpu (job slot).
221  */
222 static void kbasep_trace_tl_nret_atom_lpu(struct kbase_device *kbdev, int i)
223 {
224         struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, i, 0);
225
226         kbase_tlstream_tl_nret_atom_lpu(katom,
227                 &kbdev->gpu_props.props.raw_props.js_features[i]);
228 }
229 #endif
230
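/**
 * kbase_job_done - Process completed jobs signalled by the job IRQ
 * @kbdev: kbase device
 * @done:  bitmask of job slot interrupts to process; the low 16 bits flag
 *         slots that finished, the high 16 bits flag slots that failed
 *
 * For each signalled slot this acknowledges the interrupt, reads the failure
 * status where applicable and completes the corresponding atoms, re-reading
 * JOB_IRQ_RAWSTAT until no further completions are pending for that slot.
 */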
231 void kbase_job_done(struct kbase_device *kbdev, u32 done)
232 {
233         unsigned long flags;
234         int i;
235         u32 count = 0;
236         ktime_t end_timestamp = ktime_get();
237         struct kbasep_js_device_data *js_devdata;
238
239         KBASE_DEBUG_ASSERT(kbdev);
240         js_devdata = &kbdev->js_data;
241
242         KBASE_TRACE_ADD(kbdev, JM_IRQ, NULL, NULL, 0, done);
243
244         memset(&kbdev->slot_submit_count_irq[0], 0,
245                                         sizeof(kbdev->slot_submit_count_irq));
246
247         /* Write the IRQ throttle register; this prevents IRQs from occurring
248          * until the given number of GPU clock cycles has passed */
249         {
250                 int irq_throttle_cycles =
251                                 atomic_read(&kbdev->irq_throttle_cycles);
252
253                 kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_THROTTLE),
254                                                 irq_throttle_cycles, NULL);
255         }
256
257         spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
258
259         while (done) {
260                 u32 failed = done >> 16;
261
262                 /* treat failed slots as finished slots */
263                 u32 finished = (done & 0xFFFF) | failed;
264
265                 /* Note: This is inherently unfair, as we always check
266                  * for lower numbered interrupts before the higher
267                  * numbered ones.*/
268                 i = ffs(finished) - 1;
269                 KBASE_DEBUG_ASSERT(i >= 0);
270
271                 do {
272                         int nr_done;
273                         u32 active;
274                         u32 completion_code = BASE_JD_EVENT_DONE;/* assume OK */
275                         u64 job_tail = 0;
276
277                         if (failed & (1u << i)) {
278                                 /* read out the job slot status code if the job
279                                  * slot reported failure */
280                                 completion_code = kbase_reg_read(kbdev,
281                                         JOB_SLOT_REG(i, JS_STATUS), NULL);
282
283                                 switch (completion_code) {
284                                 case BASE_JD_EVENT_STOPPED:
285 #if defined(CONFIG_MALI_GATOR_SUPPORT)
286                                         kbase_trace_mali_job_slots_event(
287                                                 GATOR_MAKE_EVENT(
288                                                 GATOR_JOB_SLOT_SOFT_STOPPED, i),
289                                                                 NULL, 0);
290 #endif
291
292                                         kbase_tlstream_aux_job_softstop(i);
293
294 #if (defined(MALI_MIPE_ENABLED) && MALI_MIPE_ENABLED) || \
295         !defined(MALI_MIPE_ENABLED)
296                                         kbasep_trace_tl_nret_atom_lpu(
297                                                 kbdev, i);
298 #endif
299
300                                         /* Soft-stopped job - read the value of
301                                          * JS<n>_TAIL so that the job chain can
302                                          * be resumed */
303                                         job_tail = (u64)kbase_reg_read(kbdev,
304                                                 JOB_SLOT_REG(i, JS_TAIL_LO),
305                                                                         NULL) |
306                                                 ((u64)kbase_reg_read(kbdev,
307                                                 JOB_SLOT_REG(i, JS_TAIL_HI),
308                                                                 NULL) << 32);
309                                         break;
310                                 case BASE_JD_EVENT_NOT_STARTED:
311                                         /* PRLAM-10673 can cause a TERMINATED
312                                          * job to come back as NOT_STARTED, but
313                                          * the error interrupt helps us detect
314                                          * it */
315                                         completion_code =
316                                                 BASE_JD_EVENT_TERMINATED;
317                                         /* fall through */
318                                 default:
319                                         dev_warn(kbdev->dev, "error detected from slot %d, job status 0x%08x (%s)",
320                                                         i, completion_code,
321                                                         kbase_exception_name
322                                                         (kbdev,
323                                                         completion_code));
324                                 }
325
326                                 kbase_gpu_irq_evict(kbdev, i);
327                         }
328
329                         kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR),
330                                         done & ((1 << i) | (1 << (i + 16))),
331                                         NULL);
332                         active = kbase_reg_read(kbdev,
333                                         JOB_CONTROL_REG(JOB_IRQ_JS_STATE),
334                                         NULL);
335
336                         if (((active >> i) & 1) == 0 &&
337                                         (((done >> (i + 16)) & 1) == 0)) {
338                                 /* There is a potential race we must work
339                                  * around:
340                                  *
341                                  *  1. A job slot has a job in both current and
342                                  *     next registers
343                                  *  2. The job in current completes
344                                  *     successfully, the IRQ handler reads
345                                  *     RAWSTAT and calls this function with the
346                                  *     relevant bit set in "done"
347                                  *  3. The job in the next registers becomes the
348                                  *     current job on the GPU
349                                  *  4. Sometime before the JOB_IRQ_CLEAR line
350                                  *     above the job on the GPU _fails_
351                                  *  5. The IRQ_CLEAR clears the done bit but not
352                                  *     the failed bit. This atomically sets
353                                  *     JOB_IRQ_JS_STATE. However since both jobs
354                                  *     have now completed the relevant bits for
355                                  *     the slot are set to 0.
356                                  *
357                                  * If we now did nothing then we'd incorrectly
358                                  * assume that _both_ jobs had completed
359                                  * successfully (since we haven't yet observed
360                                  * the fail bit being set in RAWSTAT).
361                                  *
362                                  * So at this point if there are no active jobs
363                                  * left we check to see if RAWSTAT has a failure
364                                  * bit set for the job slot. If it does we know
365                                  * that there has been a new failure that we
366                                  * didn't previously know about, so we make sure
367                                  * that we record this in active (but we wait
368                                  * for the next loop to deal with it).
369                                  *
370                                  * If we were handling a job failure (i.e. done
371                                  * has the relevant high bit set) then we know
372                                  * that the value read back from
373                                  * JOB_IRQ_JS_STATE is the correct number of
374                                  * remaining jobs because the failed job will
375                                  * have prevented any further jobs from starting
376                                  * execution.
377                                  */
378                                 u32 rawstat = kbase_reg_read(kbdev,
379                                         JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL);
380
381                                 if ((rawstat >> (i + 16)) & 1) {
382                                         /* There is a failed job that we've
383                                          * missed - add it back to active */
384                                         active |= (1u << i);
385                                 }
386                         }
387
388                         dev_dbg(kbdev->dev, "Job ended with status 0x%08X\n",
389                                                         completion_code);
390
391                         nr_done = kbase_backend_nr_atoms_submitted(kbdev, i);
392                         nr_done -= (active >> i) & 1;
393                         nr_done -= (active >> (i + 16)) & 1;
394
395                         if (nr_done <= 0) {
396                                 dev_warn(kbdev->dev, "Spurious interrupt on slot %d",
397                                                                         i);
398
399                                 goto spurious;
400                         }
401
402                         count += nr_done;
403
404                         while (nr_done) {
405                                 if (nr_done == 1) {
406                                         kbase_gpu_complete_hw(kbdev, i,
407                                                                 completion_code,
408                                                                 job_tail,
409                                                                 &end_timestamp);
410                                         kbase_jm_try_kick_all(kbdev);
411                                 } else {
412                                         /* More than one job has completed.
413                                          * Since this is not the last job being
414                                          * reported this time it must have
415                                          * passed. This is because the hardware
416                                          * will not allow further jobs in a job
417                                          * slot to complete until the failed job
418                                          * is cleared from the IRQ status.
419                                          */
420                                         kbase_gpu_complete_hw(kbdev, i,
421                                                         BASE_JD_EVENT_DONE,
422                                                         0,
423                                                         &end_timestamp);
424                                 }
425                                 nr_done--;
426                         }
427  spurious:
428                         done = kbase_reg_read(kbdev,
429                                         JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL);
430
431                         if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10883)) {
432                                 /* Workaround for missing interrupt caused by
433                                  * PRLAM-10883 */
434                                 if (((active >> i) & 1) && (0 ==
435                                                 kbase_reg_read(kbdev,
436                                                         JOB_SLOT_REG(i,
437                                                         JS_STATUS), NULL))) {
438                                         /* Force job slot to be processed again
439                                          */
440                                         done |= (1u << i);
441                                 }
442                         }
443
444                         failed = done >> 16;
445                         finished = (done & 0xFFFF) | failed;
446                         if (done)
447                                 end_timestamp = ktime_get();
448                 } while (finished & (1 << i));
449
450                 kbasep_job_slot_update_head_start_timestamp(kbdev, i,
451                                                                 end_timestamp);
452         }
453
454         spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
455 #if KBASE_GPU_RESET_EN
456         if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
457                                                 KBASE_RESET_GPU_COMMITTED) {
458                 /* If we're trying to reset the GPU then we might be able to do
459                  * it early (without waiting for a timeout) because some jobs
460                  * have completed
461                  */
462                 kbasep_try_reset_gpu_early(kbdev);
463         }
464 #endif /* KBASE_GPU_RESET_EN */
465         KBASE_TRACE_ADD(kbdev, JM_IRQ_END, NULL, NULL, 0, count);
466 }
467 KBASE_EXPORT_TEST_API(kbase_job_done);
468
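/* A soft-stop must not be issued for atoms running in secure (protected)
 * mode, nor for atoms that use the tiler on GPUs affected by
 * BASE_HW_ISSUE_8408. */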
469 static bool kbasep_soft_stop_allowed(struct kbase_device *kbdev,
470                                         struct kbase_jd_atom *katom)
471 {
472         bool soft_stops_allowed = true;
473
474         if (kbase_jd_katom_is_secure(katom)) {
475                 soft_stops_allowed = false;
476         } else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408)) {
477                 if ((katom->core_req & BASE_JD_REQ_T) != 0)
478                         soft_stops_allowed = false;
479         }
480         return soft_stops_allowed;
481 }
482
483 static bool kbasep_hard_stop_allowed(struct kbase_device *kbdev,
484                                                                 u16 core_reqs)
485 {
486         bool hard_stops_allowed = true;
487
488         if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8394)) {
489                 if ((core_reqs & BASE_JD_REQ_T) != 0)
490                         hard_stops_allowed = false;
491         }
492         return hard_stops_allowed;
493 }
494
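/**
 * kbasep_job_slot_soft_or_hard_stop_do_action - Issue a stop command to a slot
 * @kbdev:        kbase device
 * @js:           job slot to stop
 * @action:       JS_COMMAND_SOFT_STOP or JS_COMMAND_HARD_STOP
 * @core_reqs:    core requirements of @target_katom
 * @target_katom: atom being stopped
 *
 * Checks whether the requested stop is allowed, applies the workaround for
 * BASE_HW_ISSUE_8316 where needed, selects the _0/_1 command variant when the
 * GPU supports job chain disambiguation, and finally writes the command to
 * the slot's JS_COMMAND register, recording the outcome in the trace buffer
 * when tracing is enabled.
 */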
495 void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
496                                         int js,
497                                         u32 action,
498                                         u16 core_reqs,
499                                         struct kbase_jd_atom *target_katom)
500 {
501         struct kbase_context *kctx = target_katom->kctx;
502 #if KBASE_TRACE_ENABLE
503         u32 status_reg_before;
504         u64 job_in_head_before;
505         u32 status_reg_after;
506
507         KBASE_DEBUG_ASSERT(!(action & (~JS_COMMAND_MASK)));
508
509         /* Check the head pointer */
510         job_in_head_before = ((u64) kbase_reg_read(kbdev,
511                                         JOB_SLOT_REG(js, JS_HEAD_LO), NULL))
512                         | (((u64) kbase_reg_read(kbdev,
513                                         JOB_SLOT_REG(js, JS_HEAD_HI), NULL))
514                                                                         << 32);
515         status_reg_before = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS),
516                                                                         NULL);
517 #endif
518
519         if (action == JS_COMMAND_SOFT_STOP) {
520                 bool soft_stop_allowed = kbasep_soft_stop_allowed(kbdev,
521                                                                 target_katom);
522
523                 if (!soft_stop_allowed) {
524 #ifdef CONFIG_MALI_DEBUG
525                         dev_dbg(kbdev->dev,
526                                         "Attempt made to soft-stop a job that cannot be soft-stopped. core_reqs = 0x%X",
527                                         (unsigned int)core_reqs);
528 #endif                          /* CONFIG_MALI_DEBUG */
529                         return;
530                 }
531
532                 /* We are about to issue a soft stop, so mark the atom as having
533                  * been soft stopped */
534                 target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED;
535
536                 /* Mark the point where we issue the soft-stop command */
537                 kbase_tlstream_aux_issue_job_softstop(target_katom);
538
539                 if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
540                         int i;
541
542                         for (i = 0;
543                              i < kbase_backend_nr_atoms_submitted(kbdev, js);
544                              i++) {
545                                 struct kbase_jd_atom *katom;
546
547                                 katom = kbase_gpu_inspect(kbdev, js, i);
548
549                                 KBASE_DEBUG_ASSERT(katom);
550
551                                 /* For HW_ISSUE_8316, only 'bad' jobs attacking
552                                  * the system can cause this issue: normally,
553                                  * all memory should be allocated in multiples
554                                  * of 4 pages, and growable memory should be
555                                  * resized in multiples of 4 pages.
556                                  *
557                                  * Whilst such 'bad' jobs can be cleared by a
558                                  * GPU reset, the locking up of a uTLB entry
559                                  * caused by the bad job could also stall other
560                                  * ASs, meaning that other ASs' jobs don't
561                                  * complete in the 'grace' period before the
562                                  * reset. We don't want to lose other ASs' jobs
563                                  * when they would normally complete fine, so we
564                                  * must 'poke' the MMU regularly to help other
565                                  * ASs complete */
566                                 kbase_as_poking_timer_retain_atom(
567                                                 kbdev, katom->kctx, katom);
568                         }
569                 }
570
571                 if (kbase_hw_has_feature(
572                                 kbdev,
573                                 BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
574                         action = (target_katom->atom_flags &
575                                         KBASE_KATOM_FLAGS_JOBCHAIN) ?
576                                 JS_COMMAND_SOFT_STOP_1 :
577                                 JS_COMMAND_SOFT_STOP_0;
578                 }
579         } else if (action == JS_COMMAND_HARD_STOP) {
580                 bool hard_stop_allowed = kbasep_hard_stop_allowed(kbdev,
581                                                                 core_reqs);
582
583                 if (!hard_stop_allowed) {
584                         /* Jobs can be hard-stopped for the following reasons:
585                          *  * CFS decides the job has been running too long (and
586                          *    soft-stop has not occurred). In this case the GPU
587                          *    will be reset by CFS if the job remains on the
588                          *    GPU.
589                          *
590                          *  * The context is destroyed, kbase_jd_zap_context
591                          *    will attempt to hard-stop the job. However it also
592                          *    has a watchdog which will cause the GPU to be
593                          *    reset if the job remains on the GPU.
594                          *
595                          *  * An (unhandled) MMU fault occurred. As long as
596                          *    BASE_HW_ISSUE_8245 is defined then the GPU will be
597                          *    reset.
598                          *
599                          * All three cases result in the GPU being reset if the
600                          * hard-stop fails, so it is safe to just return and
601                          * ignore the hard-stop request.
602                          */
603                         dev_warn(kbdev->dev,
604                                         "Attempt made to hard-stop a job that cannot be hard-stopped. core_reqs = 0x%X",
605                                         (unsigned int)core_reqs);
606                         return;
607                 }
608                 target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_HARD_STOPPED;
609
610                 if (kbase_hw_has_feature(
611                                 kbdev,
612                                 BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
613                         action = (target_katom->atom_flags &
614                                         KBASE_KATOM_FLAGS_JOBCHAIN) ?
615                                 JS_COMMAND_HARD_STOP_1 :
616                                 JS_COMMAND_HARD_STOP_0;
617                 }
618         }
619
620         kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND), action, kctx);
621
622 #if KBASE_TRACE_ENABLE
623         status_reg_after = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS),
624                                                                         NULL);
625         if (status_reg_after == BASE_JD_EVENT_ACTIVE) {
626                 struct kbase_jd_atom *head;
627                 struct kbase_context *head_kctx;
628
629                 head = kbase_gpu_inspect(kbdev, js, 0);
630                 head_kctx = head->kctx;
631
632                 if (status_reg_before == BASE_JD_EVENT_ACTIVE)
633                         KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, head_kctx,
634                                                 head, job_in_head_before, js);
635                 else
636                         KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL,
637                                                 0, js);
638
639                 switch (action) {
640                 case JS_COMMAND_SOFT_STOP:
641                         KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP, head_kctx,
642                                                         head, head->jc, js);
643                         break;
644                 case JS_COMMAND_SOFT_STOP_0:
645                         KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_0, head_kctx,
646                                                         head, head->jc, js);
647                         break;
648                 case JS_COMMAND_SOFT_STOP_1:
649                         KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_1, head_kctx,
650                                                         head, head->jc, js);
651                         break;
652                 case JS_COMMAND_HARD_STOP:
653                         KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP, head_kctx,
654                                                         head, head->jc, js);
655                         break;
656                 case JS_COMMAND_HARD_STOP_0:
657                         KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_0, head_kctx,
658                                                         head, head->jc, js);
659                         break;
660                 case JS_COMMAND_HARD_STOP_1:
661                         KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_1, head_kctx,
662                                                         head, head->jc, js);
663                         break;
664                 default:
665                         BUG();
666                         break;
667                 }
668         } else {
669                 if (status_reg_before == BASE_JD_EVENT_ACTIVE)
670                         KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL,
671                                                         job_in_head_before, js);
672                 else
673                         KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL,
674                                                         0, js);
675
676                 switch (action) {
677                 case JS_COMMAND_SOFT_STOP:
678                         KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP, NULL, NULL, 0,
679                                                         js);
680                         break;
681                 case JS_COMMAND_SOFT_STOP_0:
682                         KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_0, NULL, NULL,
683                                                         0, js);
684                         break;
685                 case JS_COMMAND_SOFT_STOP_1:
686                         KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_1, NULL, NULL,
687                                                         0, js);
688                         break;
689                 case JS_COMMAND_HARD_STOP:
690                         KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP, NULL, NULL, 0,
691                                                         js);
692                         break;
693                 case JS_COMMAND_HARD_STOP_0:
694                         KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_0, NULL, NULL,
695                                                         0, js);
696                         break;
697                 case JS_COMMAND_HARD_STOP_1:
698                         KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_1, NULL, NULL,
699                                                         0, js);
700                         break;
701                 default:
702                         BUG();
703                         break;
704                 }
705         }
706 #endif
707 }
708
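/* Cancel all jobs for a context that is being killed: pending atoms are
 * marked BASE_JD_EVENT_JOB_CANCELLED so they cannot be re-submitted, and
 * every job slot is hard-stopped to evict anything already on the GPU. */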
709 void kbase_backend_jm_kill_jobs_from_kctx(struct kbase_context *kctx)
710 {
711         unsigned long flags;
712         struct kbase_device *kbdev;
713         struct kbasep_js_device_data *js_devdata;
714         int i;
715
716         KBASE_DEBUG_ASSERT(kctx != NULL);
717         kbdev = kctx->kbdev;
718         KBASE_DEBUG_ASSERT(kbdev != NULL);
719         js_devdata = &kbdev->js_data;
720
721         /* Cancel any remaining running jobs for this kctx  */
722         mutex_lock(&kctx->jctx.lock);
723         spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
724
725         /* Invalidate all jobs in context, to prevent re-submitting */
726         for (i = 0; i < BASE_JD_ATOM_COUNT; i++) {
727                 if (!work_pending(&kctx->jctx.atoms[i].work))
728                         kctx->jctx.atoms[i].event_code =
729                                                 BASE_JD_EVENT_JOB_CANCELLED;
730         }
731
732         for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
733                 kbase_job_slot_hardstop(kctx, i, NULL);
734
735         spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
736         mutex_unlock(&kctx->jctx.lock);
737 }
738
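/* Soft-stop any atom from @kctx that is already on @target_katom's slot and
 * has a sched_priority value greater than @target_katom's, so that the
 * incoming atom is not held up behind lower-priority work. The runpool_irq
 * lock must be held (asserted below). */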
739 void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
740                                 struct kbase_jd_atom *target_katom)
741 {
742         struct kbase_device *kbdev;
743         struct kbasep_js_device_data *js_devdata;
744         int js = target_katom->slot_nr;
745         int priority = target_katom->sched_priority;
746         int i;
747
748         KBASE_DEBUG_ASSERT(kctx != NULL);
749         kbdev = kctx->kbdev;
750         KBASE_DEBUG_ASSERT(kbdev != NULL);
751         js_devdata = &kbdev->js_data;
752
753         lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
754
755         for (i = 0; i < kbase_backend_nr_atoms_on_slot(kbdev, js); i++) {
756                 struct kbase_jd_atom *katom;
757
758                 katom = kbase_gpu_inspect(kbdev, js, i);
759                 if (!katom)
760                         continue;
761
762                 if (katom->kctx != kctx)
763                         continue;
764
765                 if (katom->sched_priority > priority)
766                         kbase_job_slot_softstop(kbdev, js, katom);
767         }
768 }
769
770 struct zap_reset_data {
771         /* The stages are:
772          * 1. The timer has never been called
773          * 2. The zap has timed out, all slots are soft-stopped - the GPU reset
774          *    will happen. The GPU has been reset when
775          *    kbdev->hwaccess.backend.reset_waitq is signalled
776          *
777          * (-1 - The timer has been cancelled)
778          */
779         int stage;
780         struct kbase_device *kbdev;
781         struct hrtimer timer;
782         spinlock_t lock; /* protects updates to stage member */
783 };
784
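/* hrtimer callback armed by kbase_jm_wait_for_zero_jobs(): if the context's
 * jobs have not drained within ZAP_TIMEOUT the GPU is reset to force them
 * off the hardware. */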
785 static enum hrtimer_restart zap_timeout_callback(struct hrtimer *timer)
786 {
787         struct zap_reset_data *reset_data = container_of(timer,
788                                                 struct zap_reset_data, timer);
789         struct kbase_device *kbdev = reset_data->kbdev;
790         unsigned long flags;
791
792         spin_lock_irqsave(&reset_data->lock, flags);
793
794         if (reset_data->stage == -1)
795                 goto out;
796
797 #if KBASE_GPU_RESET_EN
798         if (kbase_prepare_to_reset_gpu(kbdev)) {
799                 dev_err(kbdev->dev, "Issuing GPU soft-reset because jobs failed to be killed (within %d ms) as part of context termination (e.g. process exit)\n",
800                                                                 ZAP_TIMEOUT);
801                 kbase_reset_gpu(kbdev);
802         }
803 #endif /* KBASE_GPU_RESET_EN */
804         reset_data->stage = 2;
805
806  out:
807         spin_unlock_irqrestore(&reset_data->lock, flags);
808
809         return HRTIMER_NORESTART;
810 }
811
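/**
 * kbase_jm_wait_for_zero_jobs - Wait for a context's jobs to leave the GPU
 * @kctx: context being zapped
 *
 * Blocks until the context has no outstanding jobs and is no longer
 * scheduled. A backstop hrtimer is armed first so that the GPU is reset if
 * the jobs fail to drain within ZAP_TIMEOUT; if that reset was triggered,
 * this also waits for it to complete before returning.
 */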
812 void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx)
813 {
814         struct kbase_device *kbdev = kctx->kbdev;
815         struct zap_reset_data reset_data;
816         unsigned long flags;
817
818         hrtimer_init_on_stack(&reset_data.timer, CLOCK_MONOTONIC,
819                                                         HRTIMER_MODE_REL);
820         reset_data.timer.function = zap_timeout_callback;
821
822         spin_lock_init(&reset_data.lock);
823
824         reset_data.kbdev = kbdev;
825         reset_data.stage = 1;
826
827         hrtimer_start(&reset_data.timer, HR_TIMER_DELAY_MSEC(ZAP_TIMEOUT),
828                                                         HRTIMER_MODE_REL);
829
830         /* Wait for all jobs to finish, and for the context to be not-scheduled
831          * (due to kbase_job_zap_context(), we also guarantee it's not in the JS
832          * policy queue either) */
833         wait_event(kctx->jctx.zero_jobs_wait, kctx->jctx.job_nr == 0);
834         wait_event(kctx->jctx.sched_info.ctx.is_scheduled_wait,
835                         kctx->jctx.sched_info.ctx.is_scheduled == false);
836
837         spin_lock_irqsave(&reset_data.lock, flags);
838         if (reset_data.stage == 1) {
839                 /* The timer hasn't run yet - so cancel it */
840                 reset_data.stage = -1;
841         }
842         spin_unlock_irqrestore(&reset_data.lock, flags);
843
844         hrtimer_cancel(&reset_data.timer);
845
846         if (reset_data.stage == 2) {
847                 /* The reset has already started.
848                  * Wait for the reset to complete
849                  */
850                 wait_event(kbdev->hwaccess.backend.reset_wait,
851                                 atomic_read(&kbdev->hwaccess.backend.reset_gpu)
852                                                 == KBASE_RESET_GPU_NOT_PENDING);
853         }
854         destroy_hrtimer_on_stack(&reset_data.timer);
855
856         dev_dbg(kbdev->dev, "Zap: Finished Context %p", kctx);
857
858         /* Ensure that the signallers of the waitqs have finished */
859         mutex_lock(&kctx->jctx.lock);
860         mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
861         mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
862         mutex_unlock(&kctx->jctx.lock);
863 }
864
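/* Return the GPU's current flush ID from the LATEST_FLUSH register when
 * flush reduction is supported and the GPU is powered, otherwise 0. */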
865 u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev)
866 {
867         u32 flush_id = 0;
868
869         if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION)) {
870                 mutex_lock(&kbdev->pm.lock);
871                 if (kbdev->pm.backend.gpu_powered)
872                         flush_id = kbase_reg_read(kbdev,
873                                         GPU_CONTROL_REG(LATEST_FLUSH), NULL);
874                 mutex_unlock(&kbdev->pm.lock);
875         }
876
877         return flush_id;
878 }
879
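/* When KBASE_GPU_RESET_EN is set, allocate the GPU reset workqueue and
 * initialise the reset worker and the reset backstop timer. */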
880 int kbase_job_slot_init(struct kbase_device *kbdev)
881 {
882 #if KBASE_GPU_RESET_EN
883         kbdev->hwaccess.backend.reset_workq = alloc_workqueue(
884                                                 "Mali reset workqueue", 0, 1);
885         if (NULL == kbdev->hwaccess.backend.reset_workq)
886                 return -EINVAL;
887
888         KBASE_DEBUG_ASSERT(0 ==
889                 object_is_on_stack(&kbdev->hwaccess.backend.reset_work));
890         INIT_WORK(&kbdev->hwaccess.backend.reset_work,
891                                                 kbasep_reset_timeout_worker);
892
893         hrtimer_init(&kbdev->hwaccess.backend.reset_timer, CLOCK_MONOTONIC,
894                                                         HRTIMER_MODE_REL);
895         kbdev->hwaccess.backend.reset_timer.function =
896                                                 kbasep_reset_timer_callback;
897 #endif
898
899         return 0;
900 }
901 KBASE_EXPORT_TEST_API(kbase_job_slot_init);
902
903 void kbase_job_slot_halt(struct kbase_device *kbdev)
904 {
905         CSTD_UNUSED(kbdev);
906 }
907
908 void kbase_job_slot_term(struct kbase_device *kbdev)
909 {
910 #if KBASE_GPU_RESET_EN
911         destroy_workqueue(kbdev->hwaccess.backend.reset_workq);
912 #endif
913 }
914 KBASE_EXPORT_TEST_API(kbase_job_slot_term);
915
916 #if KBASE_GPU_RESET_EN
917 /**
918  * kbasep_check_for_afbc_on_slot() - Check whether AFBC is in use on this slot
919  * @kbdev: kbase device pointer
920  * @kctx:  context to check against
921  * @js:    slot to check
922  * @target_katom: An atom to check, or NULL if all atoms from @kctx on
923  *                slot @js should be checked
924  *
925  * These checks are based upon parameters that would normally be passed to
926  * kbase_job_slot_hardstop().
927  *
928  * In the event of @target_katom being NULL, this will check the last jobs that
929  * are likely to be running on the slot to see if a) they belong to kctx, and
930  * so would be stopped, and b) whether they have AFBC
931  *
932  * In that case, it's guaranteed that a job currently executing on the HW with
933  * AFBC will be detected. However, this is a conservative check because it also
934  * detects jobs that have just completed too.
935  *
936  * Return: true when hard-stop _might_ stop an afbc atom, else false.
937  */
938 static bool kbasep_check_for_afbc_on_slot(struct kbase_device *kbdev,
939                 struct kbase_context *kctx, int js,
940                 struct kbase_jd_atom *target_katom)
941 {
942         bool ret = false;
943         int i;
944
945         lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
946
947         /* When we have an atom the decision can be made straight away. */
948         if (target_katom)
949                 return !!(target_katom->core_req & BASE_JD_REQ_FS_AFBC);
950
951         /* Otherwise, we must check the hardware to see if it has atoms from
952          * this context with AFBC. */
953         for (i = 0; i < kbase_backend_nr_atoms_on_slot(kbdev, js); i++) {
954                 struct kbase_jd_atom *katom;
955
956                 katom = kbase_gpu_inspect(kbdev, js, i);
957                 if (!katom)
958                         continue;
959
960                 /* Ignore atoms from other contexts, they won't be stopped when
961                  * we use this for checking if we should hard-stop them */
962                 if (katom->kctx != kctx)
963                         continue;
964
965                 /* An atom on this slot and this context: check for AFBC */
966                 if (katom->core_req & BASE_JD_REQ_FS_AFBC) {
967                         ret = true;
968                         break;
969                 }
970         }
971
972         return ret;
973 }
974 #endif /* KBASE_GPU_RESET_EN */
975
976 /**
977  * kbase_job_slot_softstop_swflags - Soft-stop a job with flags
978  * @kbdev:         The kbase device
979  * @js:            The job slot to soft-stop
980  * @target_katom:  The job that should be soft-stopped (or NULL for any job)
981  * @sw_flags:      Flags to pass in about the soft-stop
982  *
983  * Context:
984  *   The job slot lock must be held when calling this function.
985  *   The job slot must not already be in the process of being soft-stopped.
986  *
987  * Soft-stop the specified job slot, with extra information about the stop
988  *
989  * Where possible any job in the next register is evicted before the soft-stop.
990  */
991 void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js,
992                         struct kbase_jd_atom *target_katom, u32 sw_flags)
993 {
994         KBASE_DEBUG_ASSERT(!(sw_flags & JS_COMMAND_MASK));
995         kbase_backend_soft_hard_stop_slot(kbdev, NULL, js, target_katom,
996                         JS_COMMAND_SOFT_STOP | sw_flags);
997 }
998
999 /**
1000  * kbase_job_slot_softstop - Soft-stop the specified job slot
1001  * @kbdev:         The kbase device
1002  * @js:            The job slot to soft-stop
1003  * @target_katom:  The job that should be soft-stopped (or NULL for any job)
1004  * Context:
1005  *   The job slot lock must be held when calling this function.
1006  *   The job slot must not already be in the process of being soft-stopped.
1007  *
1008  * Where possible any job in the next register is evicted before the soft-stop.
1009  */
1010 void kbase_job_slot_softstop(struct kbase_device *kbdev, int js,
1011                                 struct kbase_jd_atom *target_katom)
1012 {
1013         kbase_job_slot_softstop_swflags(kbdev, js, target_katom, 0u);
1014 }
1015
1016 /**
1017  * kbase_job_slot_hardstop - Hard-stop the specified job slot
1018  * @kctx:         The kbase context that contains the job(s) that should
1019  *                be hard-stopped
1020  * @js:           The job slot to hard-stop
1021  * @target_katom: The job that should be hard-stopped (or NULL for all
1022  *                jobs from the context)
1023  * Context:
1024  *   The job slot lock must be held when calling this function.
1025  */
1026 void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
1027                                 struct kbase_jd_atom *target_katom)
1028 {
1029         struct kbase_device *kbdev = kctx->kbdev;
1030         bool stopped;
1031 #if KBASE_GPU_RESET_EN
1032         /* We make the check for AFBC before evicting/stopping atoms.  Note
1033          * that no other thread can modify the slots whilst we have the
1034          * runpool_irq lock. */
1035         int needs_workaround_for_afbc =
1036                         kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3542)
1037                         && kbasep_check_for_afbc_on_slot(kbdev, kctx, js,
1038                                          target_katom);
1039 #endif
1040
1041         stopped = kbase_backend_soft_hard_stop_slot(kbdev, kctx, js,
1042                                                         target_katom,
1043                                                         JS_COMMAND_HARD_STOP);
1044 #if KBASE_GPU_RESET_EN
1045         if (stopped && (kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_8401) ||
1046                         kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_9510) ||
1047                         needs_workaround_for_afbc)) {
1048                 /* MIDBASE-2916: if a fragment job with AFBC encoding is
1049                  * hard-stopped, also perform a soft reset in order to
1050                  * clear the GPU status.
1051                  * The workaround for HW issue 8401 has an issue of its own,
1052                  * so after hard-stopping just reset the GPU. This will
1053                  * ensure that the jobs leave the GPU. */
1054                 if (kbase_prepare_to_reset_gpu_locked(kbdev)) {
1055                         dev_err(kbdev->dev, "Issuing GPU soft-reset after hard stopping due to hardware issue");
1056                         kbase_reset_gpu_locked(kbdev);
1057                 }
1058         }
1059 #endif
1060 }
1061
1062 /**
1063  * kbase_job_check_enter_disjoint - potentially enter disjoint state
1064  * @kbdev: kbase device
1065  * @action: the event which has occurred
1066  * @core_reqs: core requirements of the atom
1067  * @target_katom: the atom which is being affected
1068  *
1069  * For a certain soft/hard-stop action, work out whether to enter disjoint
1070  * state.
1071  *
1072  * This does not register multiple disjoint events if the atom has already
1073  * started a disjoint period
1074  *
1075  * @core_reqs can be supplied as 0 if the atom had not started on the hardware
1076  * (and so a 'real' soft/hard-stop was not required, but it still interrupted
1077  * flow, perhaps on another context)
1078  *
1079  * kbase_job_check_leave_disjoint() should be used to end the disjoint
1080  * state when the soft/hard-stop action is complete
1081  */
1082 void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
1083                 u16 core_reqs, struct kbase_jd_atom *target_katom)
1084 {
1085         u32 hw_action = action & JS_COMMAND_MASK;
1086
1087         /* For hard-stop, don't enter if hard-stop not allowed */
1088         if (hw_action == JS_COMMAND_HARD_STOP &&
1089                         !kbasep_hard_stop_allowed(kbdev, core_reqs))
1090                 return;
1091
1092         /* For soft-stop, don't enter if soft-stop not allowed, or isn't
1093          * causing disjoint */
1094         if (hw_action == JS_COMMAND_SOFT_STOP &&
1095                         !(kbasep_soft_stop_allowed(kbdev, target_katom) &&
1096                           (action & JS_COMMAND_SW_CAUSES_DISJOINT)))
1097                 return;
1098
1099         /* Nothing to do if already logged disjoint state on this atom */
1100         if (target_katom->atom_flags & KBASE_KATOM_FLAG_IN_DISJOINT)
1101                 return;
1102
1103         target_katom->atom_flags |= KBASE_KATOM_FLAG_IN_DISJOINT;
1104         kbase_disjoint_state_up(kbdev);
1105 }
1106
1107 /**
1108  * kbase_job_check_leave_disjoint - potentially leave disjoint state
1109  * @kbdev: kbase device
1110  * @target_katom: atom which is finishing
1111  *
1112  * Work out whether to leave disjoint state when finishing an atom whose
1113  * disjoint state was entered by kbase_job_check_enter_disjoint().
1114  */
1115 void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
1116                 struct kbase_jd_atom *target_katom)
1117 {
1118         if (target_katom->atom_flags & KBASE_KATOM_FLAG_IN_DISJOINT) {
1119                 target_katom->atom_flags &= ~KBASE_KATOM_FLAG_IN_DISJOINT;
1120                 kbase_disjoint_state_down(kbdev);
1121         }
1122 }
1123
1124
1125 #if KBASE_GPU_RESET_EN
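/* Dump the job, MMU and GPU control registers that are most useful when
 * diagnosing a reset to the kernel log. */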
1126 static void kbase_debug_dump_registers(struct kbase_device *kbdev)
1127 {
1128         int i;
1129
1130         dev_err(kbdev->dev, "Register state:");
1131         dev_err(kbdev->dev, "  GPU_IRQ_RAWSTAT=0x%08x GPU_STATUS=0x%08x",
1132                 kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL),
1133                 kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS), NULL));
1134         dev_err(kbdev->dev, "  JOB_IRQ_RAWSTAT=0x%08x JOB_IRQ_JS_STATE=0x%08x JOB_IRQ_THROTTLE=0x%08x",
1135                 kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL),
1136                 kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_JS_STATE), NULL),
1137                 kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_THROTTLE), NULL));
1138         for (i = 0; i < 3; i++) {
1139                 dev_err(kbdev->dev, "  JS%d_STATUS=0x%08x      JS%d_HEAD_LO=0x%08x",
1140                         i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_STATUS),
1141                                         NULL),
1142                         i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_HEAD_LO),
1143                                         NULL));
1144         }
1145         dev_err(kbdev->dev, "  MMU_IRQ_RAWSTAT=0x%08x GPU_FAULTSTATUS=0x%08x",
1146                 kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_RAWSTAT), NULL),
1147                 kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS), NULL));
1148         dev_err(kbdev->dev, "  GPU_IRQ_MASK=0x%08x    JOB_IRQ_MASK=0x%08x     MMU_IRQ_MASK=0x%08x",
1149                 kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL),
1150                 kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), NULL),
1151                 kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL));
1152         dev_err(kbdev->dev, "  PWR_OVERRIDE0=0x%08x   PWR_OVERRIDE1=0x%08x",
1153                 kbase_reg_read(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE0), NULL),
1154                 kbase_reg_read(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE1), NULL));
1155         dev_err(kbdev->dev, "  SHADER_CONFIG=0x%08x   L2_MMU_CONFIG=0x%08x",
1156                 kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_CONFIG), NULL),
1157                 kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG), NULL));
1158 }
1159
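/* Read back the current hardware counter configuration (dump buffer address
 * and per-block enable bitmaps) from the PRFCNT registers into @hwcnt_setup. */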
1160 static void kbasep_save_hwcnt_setup(struct kbase_device *kbdev,
1161                                 struct kbase_context *kctx,
1162                                 struct kbase_uk_hwcnt_setup *hwcnt_setup)
1163 {
1164         hwcnt_setup->dump_buffer =
1165                 kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO), kctx) &
1166                                                                 0xffffffff;
1167         hwcnt_setup->dump_buffer |= (u64)
1168                 kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI), kctx) <<
1169                                                                         32;
1170         hwcnt_setup->jm_bm =
1171                 kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN), kctx);
1172         hwcnt_setup->shader_bm =
1173                 kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN), kctx);
1174         hwcnt_setup->tiler_bm =
1175                 kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), kctx);
1176         hwcnt_setup->mmu_l2_bm =
1177                 kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN), kctx);
1178 }
1179
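/* Worker that performs the actual GPU reset. It is queued either from the
 * reset timer callback once the reset timeout has expired, or earlier from
 * kbasep_try_reset_gpu_early_locked() when all job slots have drained. */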
1180 static void kbasep_reset_timeout_worker(struct work_struct *data)
1181 {
1182         unsigned long flags, mmu_flags;
1183         struct kbase_device *kbdev;
1184         int i;
1185         ktime_t end_timestamp = ktime_get();
1186         struct kbasep_js_device_data *js_devdata;
1187         struct kbase_uk_hwcnt_setup hwcnt_setup = { {0} };
1188         enum kbase_instr_state bckp_state;
1189         bool try_schedule = false;
1190         bool restore_hwc = false;
1191
1192         KBASE_DEBUG_ASSERT(data);
1193
1194         kbdev = container_of(data, struct kbase_device,
1195                                                 hwaccess.backend.reset_work);
1196
1197         KBASE_DEBUG_ASSERT(kbdev);
1198         js_devdata = &kbdev->js_data;
1199
1200         KBASE_TRACE_ADD(kbdev, JM_BEGIN_RESET_WORKER, NULL, NULL, 0u, 0);
1201
1202         /* Make sure the timer has completed - this cannot be done from
1203          * interrupt context, so it cannot be done within
1204          * kbasep_try_reset_gpu_early. */
1205         hrtimer_cancel(&kbdev->hwaccess.backend.reset_timer);
1206
1207         if (kbase_pm_context_active_handle_suspend(kbdev,
1208                                 KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
1209                 /* This would re-activate the GPU. Since it's already idle,
1210                  * there's no need to reset it */
1211                 atomic_set(&kbdev->hwaccess.backend.reset_gpu,
1212                                                 KBASE_RESET_GPU_NOT_PENDING);
1213                 kbase_disjoint_state_down(kbdev);
1214                 wake_up(&kbdev->hwaccess.backend.reset_wait);
1215                 return;
1216         }
1217
1218         KBASE_DEBUG_ASSERT(kbdev->irq_reset_flush == false);
1219
1220         spin_lock_irqsave(&kbdev->mmu_mask_change, mmu_flags);
1221         /* We're about to flush out the IRQs and their bottom halves */
1222         kbdev->irq_reset_flush = true;
1223
1224         /* Disable IRQs to prevent IRQ handlers from kicking in after we
1225          * release the spinlock; this also clears any outstanding interrupts */
1226         spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
1227         kbase_pm_disable_interrupts(kbdev);
1228         spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
1229
1230         spin_unlock_irqrestore(&kbdev->mmu_mask_change, mmu_flags);
1231
1232         /* Ensure that any IRQ handlers have finished. This must be done
1233          * without holding any locks that the IRQ handlers may take */
1234         kbase_synchronize_irqs(kbdev);
1235
1236         /* Flush out any in-flight work items */
1237         kbase_flush_mmu_wqs(kbdev);
1238
1239         /* The flush has completed so reset the active indicator */
1240         kbdev->irq_reset_flush = false;
1241
1242         mutex_lock(&kbdev->pm.lock);
1243         /* We hold the pm lock, so there ought to be a current policy */
1244         KBASE_DEBUG_ASSERT(kbdev->pm.backend.pm_current_policy);
1245
1246         /* All slots have been soft-stopped and we've waited
1247          * SOFT_STOP_RESET_TIMEOUT for the slots to clear. At this point we
1248          * assume that anything still left on the GPU is stuck there and
1249          * will be killed when we reset the GPU */
1250
1251         dev_err(kbdev->dev, "Resetting GPU (allowing up to %d ms)",
1252                                                                 RESET_TIMEOUT);
1253
1254         spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
1255
1256         if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) {
1257                 /* The GPU is already being reset (the same interrupt
1258                  * handler preempted itself); wait for that reset to complete */
1259                 spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
1260                 wait_event(kbdev->hwcnt.backend.wait,
1261                                         kbdev->hwcnt.backend.triggered != 0);
1262                 spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
1263         }
1264         /* Save the HW counters setup */
1265         if (kbdev->hwcnt.kctx != NULL) {
1266                 struct kbase_context *kctx = kbdev->hwcnt.kctx;
1267
1268                 if (kctx->jctx.sched_info.ctx.is_scheduled) {
1269                         kbasep_save_hwcnt_setup(kbdev, kctx, &hwcnt_setup);
1270
1271                         restore_hwc = true;
1272                 }
1273         }
1274
1275         /* Output the state of some interesting registers to help in the
1276          * debugging of GPU resets */
1277         kbase_debug_dump_registers(kbdev);
1278
1279         bckp_state = kbdev->hwcnt.backend.state;
1280         kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_RESETTING;
1281         kbdev->hwcnt.backend.triggered = 0;
1282
1283         spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
1284
1285         /* Reset the GPU */
1286         kbase_pm_init_hw(kbdev, 0);
1287
1288         /* Complete any jobs that were still on the GPU */
1289         spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
1290         kbase_backend_reset(kbdev, &end_timestamp);
1291         kbase_pm_metrics_update(kbdev, NULL);
1292         spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
1293
1294         mutex_unlock(&kbdev->pm.lock);
1295
1296         mutex_lock(&js_devdata->runpool_mutex);
1297
1298         /* Reprogram the GPU's MMU */
1299         for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
1300                 struct kbase_as *as = &kbdev->as[i];
1301
1302                 mutex_lock(&as->transaction_mutex);
1303                 spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
1304
1305                 if (js_devdata->runpool_irq.per_as_data[i].kctx)
1306                         kbase_mmu_update(
1307                                 js_devdata->runpool_irq.per_as_data[i].kctx);
1308                 else
1309                         kbase_mmu_disable_as(kbdev, i);
1310
1311                 spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
1312                 mutex_unlock(&as->transaction_mutex);
1313         }
1314
1315         kbase_pm_enable_interrupts(kbdev);
1316
1317         atomic_set(&kbdev->hwaccess.backend.reset_gpu,
1318                                                 KBASE_RESET_GPU_NOT_PENDING);
1319
1320         kbase_disjoint_state_down(kbdev);
1321
1322         wake_up(&kbdev->hwaccess.backend.reset_wait);
1323         dev_err(kbdev->dev, "Reset complete");
1324
1325         if (js_devdata->nr_contexts_pullable > 0 && !kbdev->poweroff_pending)
1326                 try_schedule = true;
1327
1328         mutex_unlock(&js_devdata->runpool_mutex);
1329
1330         spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
1331         /* Restore the HW counters setup */
1332         if (restore_hwc) {
1333                 struct kbase_context *kctx = kbdev->hwcnt.kctx;
1334                 u32 prfcnt_config = kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT;
1335
1336 #ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY
1337                 u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
1338                 u32 product_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID)
1339                         >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
1340                 int arch_v6 = GPU_ID_IS_NEW_FORMAT(product_id);
1341
1342                 if (arch_v6)
1343                         prfcnt_config |= 1 << PRFCNT_CONFIG_SETSELECT_SHIFT;
1344 #endif
1345
1346                 kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
1347                                 prfcnt_config | PRFCNT_CONFIG_MODE_OFF, kctx);
1348
1349                 kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
1350                                 hwcnt_setup.dump_buffer & 0xFFFFFFFF, kctx);
1351                 kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
1352                                 hwcnt_setup.dump_buffer >> 32,        kctx);
1353                 kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN),
1354                                 hwcnt_setup.jm_bm,                    kctx);
1355                 kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN),
1356                                 hwcnt_setup.shader_bm,                kctx);
1357                 kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN),
1358                                 hwcnt_setup.mmu_l2_bm,                kctx);
1359
1360                 /* Due to PRLAM-8186 we need to disable the tiler before we
1361                  * enable the HW counter dump. */
1362                 if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
1363                         kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
1364                                                 0, kctx);
1365                 else
1366                         kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
1367                                                 hwcnt_setup.tiler_bm, kctx);
1368
1369                 kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
1370                                 prfcnt_config | PRFCNT_CONFIG_MODE_MANUAL,
1371                                 kctx);
1372
1373                 /* If the HW has issue PRLAM-8186 we can now re-enable the
1374                  * tiler HW counter dump */
1375                 if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
1376                         kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
1377                                                 hwcnt_setup.tiler_bm, kctx);
1378         }
1379         kbdev->hwcnt.backend.state = bckp_state;
1380         switch (kbdev->hwcnt.backend.state) {
1381         /* Cases for waking kbasep_cache_clean_worker worker */
1382         case KBASE_INSTR_STATE_CLEANED:
1383                 /* Cache-clean IRQ occurred, but we reset:
1384                  * Wake up in case the waiter saw RESETTING */
1385         case KBASE_INSTR_STATE_REQUEST_CLEAN:
1386                 /* After a clean was requested, but before the regs were
1387                  * written:
1388                  * Wake up in case the waiter saw RESETTING */
1389                 wake_up(&kbdev->hwcnt.backend.cache_clean_wait);
1390                 break;
1391         case KBASE_INSTR_STATE_CLEANING:
1392                 /* Either:
1393                  * 1) We've not got the Cache-clean IRQ yet: it was lost, or:
1394                  * 2) We got it whilst resetting: it was voluntarily lost
1395                  *
1396                  * So, move to the next state and wake up: */
1397                 kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_CLEANED;
1398                 wake_up(&kbdev->hwcnt.backend.cache_clean_wait);
1399                 break;
1400
1401         /* Cases for waking anyone else */
1402         case KBASE_INSTR_STATE_DUMPING:
1403                 /* If dumping, abort the dump, because we may have lost the IRQ */
1404                 kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
1405                 kbdev->hwcnt.backend.triggered = 1;
1406                 wake_up(&kbdev->hwcnt.backend.wait);
1407                 break;
1408         case KBASE_INSTR_STATE_DISABLED:
1409         case KBASE_INSTR_STATE_IDLE:
1410         case KBASE_INSTR_STATE_FAULT:
1411                 /* Every other reason: wakeup in that state */
1412                 kbdev->hwcnt.backend.triggered = 1;
1413                 wake_up(&kbdev->hwcnt.backend.wait);
1414                 break;
1415
1416         /* Unhandled cases */
1417         case KBASE_INSTR_STATE_RESETTING:
1418         default:
1419                 BUG();
1420                 break;
1421         }
1422         spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
1423
1424         /* Resume the vinstr core */
1425         kbase_vinstr_hwc_resume(kbdev->vinstr_ctx);
1426
1427         /* Note: counter dumping may now resume */
1428
1429         mutex_lock(&kbdev->pm.lock);
1430
1431         /* Find out what cores are required now */
1432         kbase_pm_update_cores_state(kbdev);
1433
1434         /* Synchronously request and wait for those cores, because if
1435          * instrumentation is enabled it will need them immediately. */
1436         kbase_pm_check_transitions_sync(kbdev);
1437
1438         mutex_unlock(&kbdev->pm.lock);
1439
1440         /* Try submitting some jobs to restart processing */
1441         if (try_schedule) {
1442                 KBASE_TRACE_ADD(kbdev, JM_SUBMIT_AFTER_RESET, NULL, NULL, 0u,
1443                                                                         0);
1444                 kbase_js_sched_all(kbdev);
1445         }
1446
1447         kbase_pm_context_idle(kbdev);
1448         KBASE_TRACE_ADD(kbdev, JM_END_RESET_WORKER, NULL, NULL, 0u, 0);
1449 }
1450
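/* Timer callback fired when the reset timeout expires: if the reset is still
 * committed (no other path has started it yet), queue the reset worker. */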
1451 static enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *timer)
1452 {
1453         struct kbase_device *kbdev = container_of(timer, struct kbase_device,
1454                                                 hwaccess.backend.reset_timer);
1455
1456         KBASE_DEBUG_ASSERT(kbdev);
1457
1458         /* Reset still pending? */
1459         if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
1460                         KBASE_RESET_GPU_COMMITTED, KBASE_RESET_GPU_HAPPENING) ==
1461                                                 KBASE_RESET_GPU_COMMITTED)
1462                 queue_work(kbdev->hwaccess.backend.reset_workq,
1463                                         &kbdev->hwaccess.backend.reset_work);
1464
1465         return HRTIMER_NORESTART;
1466 }
1467
1468 /*
1469  * If all jobs are evicted from the GPU then we can reset the GPU
1470  * immediately instead of waiting for the timeout to elapse
1471  */
1472
1473 static void kbasep_try_reset_gpu_early_locked(struct kbase_device *kbdev)
1474 {
1475         int i;
1476         int pending_jobs = 0;
1477
1478         KBASE_DEBUG_ASSERT(kbdev);
1479
1480         /* Count the number of jobs */
1481         for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
1482                 pending_jobs += kbase_backend_nr_atoms_submitted(kbdev, i);
1483
1484         if (pending_jobs > 0) {
1485                 /* There are still jobs on the GPU - wait */
1486                 return;
1487         }
1488
1489         /* To avoid capturing incorrect register state when dumping a failed
1490          * job, skip the early reset.
1491          */
1492         if (kbdev->job_fault_debug != false)
1493                 return;
1494
1495         /* Check that the reset has been committed to (i.e. kbase_reset_gpu has
1496          * been called), and that no other thread beat this thread to starting
1497          * the reset */
1498         if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
1499                         KBASE_RESET_GPU_COMMITTED, KBASE_RESET_GPU_HAPPENING) !=
1500                                                 KBASE_RESET_GPU_COMMITTED) {
1501                 /* Reset has already occurred */
1502                 return;
1503         }
1504
1505         queue_work(kbdev->hwaccess.backend.reset_workq,
1506                                         &kbdev->hwaccess.backend.reset_work);
1507 }
1508
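/* As kbasep_try_reset_gpu_early_locked(), but takes the runpool_irq lock
 * itself. */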
1509 static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev)
1510 {
1511         unsigned long flags;
1512         struct kbasep_js_device_data *js_devdata;
1513
1514         js_devdata = &kbdev->js_data;
1515         spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
1516         kbasep_try_reset_gpu_early_locked(kbdev);
1517         spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
1518 }
1519
1520 /**
1521  * kbase_prepare_to_reset_gpu_locked - Prepare for resetting the GPU
1522  * @kbdev: kbase device
1523  *
1524  * This function just soft-stops all the slots to ensure that as many jobs as
1525  * possible are saved.
1526  *
1527  * Return:
1528  *   The function returns a boolean which should be interpreted as follows:
1529  *   true - Prepared for reset, kbase_reset_gpu should be called.
1530  *   false - Another thread is performing a reset, kbase_reset_gpu should
1531  *   not be called.
1532  */
1533 bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev)
1534 {
1535         int i;
1536
1537         KBASE_DEBUG_ASSERT(kbdev);
1538
1539         if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
1540                                                 KBASE_RESET_GPU_NOT_PENDING,
1541                                                 KBASE_RESET_GPU_PREPARED) !=
1542                                                 KBASE_RESET_GPU_NOT_PENDING) {
1543                 /* Some other thread is already resetting the GPU */
1544                 return false;
1545         }
1546
1547         kbase_disjoint_state_up(kbdev);
1548
1549         for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
1550                 kbase_job_slot_softstop(kbdev, i, NULL);
1551
1552         return true;
1553 }
1554
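/* As kbase_prepare_to_reset_gpu_locked(), but takes the runpool_irq lock
 * itself. */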
1555 bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev)
1556 {
1557         unsigned long flags;
1558         bool ret;
1559         struct kbasep_js_device_data *js_devdata;
1560
1561         js_devdata = &kbdev->js_data;
1562         spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
1563         ret = kbase_prepare_to_reset_gpu_locked(kbdev);
1564         spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
1565
1566         return ret;
1567 }
1568 KBASE_EXPORT_TEST_API(kbase_prepare_to_reset_gpu);
1569
1570 /*
1571  * This function should be called after kbase_prepare_to_reset_gpu if it
1572  * returns true. It should never be called without a corresponding call to
1573  * kbase_prepare_to_reset_gpu.
1574  *
1575  * After this function is called (or not called if kbase_prepare_to_reset_gpu
1576  * returned false), the caller should wait for
1577  * kbdev->hwaccess.backend.reset_wait to be signalled to know when the reset
1578  * has completed.
1579  */
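/*
 * A minimal usage sketch of the prepare/reset pairing described above (an
 * illustration only; it assumes a sleepable caller context with no spinlocks
 * held):
 *
 *   if (kbase_prepare_to_reset_gpu(kbdev))
 *           kbase_reset_gpu(kbdev);
 *
 *   wait_event(kbdev->hwaccess.backend.reset_wait,
 *              atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
 *                              KBASE_RESET_GPU_NOT_PENDING);
 */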
1580 void kbase_reset_gpu(struct kbase_device *kbdev)
1581 {
1582         KBASE_DEBUG_ASSERT(kbdev);
1583
1584         /* Note this is an assert/atomic_set because it is a software issue for
1585          * a race to be occurring here */
1586         KBASE_DEBUG_ASSERT(atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
1587                                                 KBASE_RESET_GPU_PREPARED);
1588         atomic_set(&kbdev->hwaccess.backend.reset_gpu,
1589                                                 KBASE_RESET_GPU_COMMITTED);
1590
1591         dev_err(kbdev->dev, "Preparing to soft-reset GPU: Waiting (up to %d ms) for all jobs to complete soft-stop\n",
1592                         kbdev->reset_timeout_ms);
1593
1594         hrtimer_start(&kbdev->hwaccess.backend.reset_timer,
1595                         HR_TIMER_DELAY_MSEC(kbdev->reset_timeout_ms),
1596                         HRTIMER_MODE_REL);
1597
1598         /* Try resetting early */
1599         kbasep_try_reset_gpu_early(kbdev);
1600 }
1601 KBASE_EXPORT_TEST_API(kbase_reset_gpu);
1602
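/* As kbase_reset_gpu(), but for callers that already hold the runpool_irq
 * lock (it calls kbasep_try_reset_gpu_early_locked() directly). */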
1603 void kbase_reset_gpu_locked(struct kbase_device *kbdev)
1604 {
1605         KBASE_DEBUG_ASSERT(kbdev);
1606
1607         /* Note this is an assert/atomic_set because it is a software issue for
1608          * a race to be occurring here */
1609         KBASE_DEBUG_ASSERT(atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
1610                                                 KBASE_RESET_GPU_PREPARED);
1611         atomic_set(&kbdev->hwaccess.backend.reset_gpu,
1612                                                 KBASE_RESET_GPU_COMMITTED);
1613
1614         dev_err(kbdev->dev, "Preparing to soft-reset GPU: Waiting (up to %d ms) for all jobs to complete soft-stop\n",
1615                         kbdev->reset_timeout_ms);
1616         hrtimer_start(&kbdev->hwaccess.backend.reset_timer,
1617                         HR_TIMER_DELAY_MSEC(kbdev->reset_timeout_ms),
1618                         HRTIMER_MODE_REL);
1619
1620         /* Try resetting early */
1621         kbasep_try_reset_gpu_early_locked(kbdev);
1622 }
1623 #endif /* KBASE_GPU_RESET_EN */