03ba23d54365113ac26bc85ea17c5701fe31ea4a
[firefly-linux-kernel-4.4.55.git] / drivers / gpu / arm / midgard / backend / gpu / mali_kbase_pm_driver.c
1 /*
2  *
3  * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
4  *
5  * This program is free software and is provided to you under the terms of the
6  * GNU General Public License version 2 as published by the Free Software
7  * Foundation, and any use by you of this program is subject to the terms
8  * of such GNU licence.
9  *
10  * A copy of the licence is included with the program, and can also be obtained
11  * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12  * Boston, MA  02110-1301, USA.
13  *
14  */
15
16
17
18
19
20 /*
21  * Base kernel Power Management hardware control
22  */
23
24 // #define ENABLE_DEBUG_LOG
25 #include "../../platform/rk/custom_log.h"
26
27 #include <mali_kbase.h>
28 #include <mali_kbase_config_defaults.h>
29 #include <mali_midg_regmap.h>
30 #if defined(CONFIG_MALI_GATOR_SUPPORT)
31 #include <mali_kbase_gator.h>
32 #endif
33 #include <mali_kbase_tlstream.h>
34 #include <mali_kbase_pm.h>
35 #include <mali_kbase_config_defaults.h>
36 #include <mali_kbase_smc.h>
37 #include <mali_kbase_hwaccess_jm.h>
38 #include <backend/gpu/mali_kbase_cache_policy_backend.h>
39 #include <backend/gpu/mali_kbase_device_internal.h>
40 #include <backend/gpu/mali_kbase_irq_internal.h>
41 #include <backend/gpu/mali_kbase_pm_internal.h>
42
43 #include <linux/of.h>
44
45 #if MALI_MOCK_TEST
46 #define MOCKABLE(function) function##_original
47 #else
48 #define MOCKABLE(function) function
49 #endif                          /* MALI_MOCK_TEST */
50
51 /* Special value to indicate that the JM_CONFIG reg isn't currently used. */
52 #define KBASE_JM_CONFIG_UNUSED (1<<31)
53
54 /**
55  * enum kbasep_pm_action - Actions that can be performed on a core.
56  *
57  * This enumeration is private to the file. Its values are set to allow
58  * core_type_to_reg() function, which decodes this enumeration, to be simpler
59  * and more efficient.
60  *
61  * @ACTION_PRESENT: The cores that are present
62  * @ACTION_READY: The cores that are ready
63  * @ACTION_PWRON: Power on the cores specified
64  * @ACTION_PWROFF: Power off the cores specified
65  * @ACTION_PWRTRANS: The cores that are transitioning
66  * @ACTION_PWRACTIVE: The cores that are active
67  */
68 enum kbasep_pm_action {
69         ACTION_PRESENT = 0,
70         ACTION_READY = (SHADER_READY_LO - SHADER_PRESENT_LO),
71         ACTION_PWRON = (SHADER_PWRON_LO - SHADER_PRESENT_LO),
72         ACTION_PWROFF = (SHADER_PWROFF_LO - SHADER_PRESENT_LO),
73         ACTION_PWRTRANS = (SHADER_PWRTRANS_LO - SHADER_PRESENT_LO),
74         ACTION_PWRACTIVE = (SHADER_PWRACTIVE_LO - SHADER_PRESENT_LO)
75 };
76
77 /*---------------------------------------------------------------------------*/
78
79 static bool is_action_of_powering_off_l2(enum kbase_pm_core_type core_type,
80                                          enum kbasep_pm_action active)
81 {
82         return (KBASE_PM_CORE_L2 == core_type) && (ACTION_PWROFF  == active);
83 }
84
85 static bool is_action_of_powering_off_shader(enum kbase_pm_core_type core_type,
86                                              enum kbasep_pm_action active)
87 {
88         return (KBASE_PM_CORE_SHADER == core_type) && (ACTION_PWROFF  == active);
89 }
90
91 static bool is_action_of_powering_off_tiler(enum kbase_pm_core_type core_type,
92                                             enum kbasep_pm_action active)
93 {
94         return (KBASE_PM_CORE_TILER == core_type) && (ACTION_PWROFF  == active);
95 }
96
97 static u64 kbase_pm_get_state(
98                 struct kbase_device *kbdev,
99                 enum kbase_pm_core_type core_type,
100                 enum kbasep_pm_action action);
101
102 /**
103  * core_type_to_reg - Decode a core type and action to a register.
104  *
105  * Given a core type (defined by kbase_pm_core_type) and an action (defined
106  * by kbasep_pm_action) this function will return the register offset that
107  * will perform the action on the core type. The register returned is the _LO
108  * register and an offset must be applied to use the _HI register.
109  *
110  * @core_type: The type of core
111  * @action:    The type of action
112  *
113  * Return: The register offset of the _LO register that performs an action of
114  * type @action on a core of type @core_type.
115  */
116 static u32 core_type_to_reg(enum kbase_pm_core_type core_type,
117                                                 enum kbasep_pm_action action)
118 {
119         return (u32)core_type + (u32)action;
120 }
121
122 #ifdef CONFIG_ARM64
123 static void mali_cci_flush_l2(struct kbase_device *kbdev)
124 {
125         const u32 mask = CLEAN_CACHES_COMPLETED | RESET_COMPLETED;
126         u32 loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
127         u32 raw;
128
129         /*
130          * Note that we don't take the cache flush mutex here since
131          * we expect to be the last user of the L2, all other L2 users
132          * would have dropped their references, to initiate L2 power
133          * down, L2 power down being the only valid place for this
134          * to be called from.
135          */
136
137         kbase_reg_write(kbdev,
138                         GPU_CONTROL_REG(GPU_COMMAND),
139                         GPU_COMMAND_CLEAN_INV_CACHES,
140                         NULL);
141
142         raw = kbase_reg_read(kbdev,
143                 GPU_CONTROL_REG(GPU_IRQ_RAWSTAT),
144                 NULL);
145
146         /* Wait for cache flush to complete before continuing, exit on
147          * gpu resets or loop expiry. */
148         while (((raw & mask) == 0) && --loops) {
149                 raw = kbase_reg_read(kbdev,
150                                         GPU_CONTROL_REG(GPU_IRQ_RAWSTAT),
151                                         NULL);
152         }
153 }
154 #endif
155
156 /**
157  * kbase_pm_invoke - Invokes an action on a core set
158  *
159  * This function performs the action given by @action on a set of cores of a
160  * type given by @core_type. It is a static function used by
161  * kbase_pm_transition_core_type()
162  *
163  * @kbdev:     The kbase device structure of the device
164  * @core_type: The type of core that the action should be performed on
165  * @cores:     A bit mask of cores to perform the action on (low 32 bits)
166  * @action:    The action to perform on the cores
167  */
168 static void kbase_pm_invoke(struct kbase_device *kbdev,
169                                         enum kbase_pm_core_type core_type,
170                                         u64 cores,
171                                         enum kbasep_pm_action action)
172 {
173         u32 reg;
174         u32 lo = cores & 0xFFFFFFFF;
175         u32 hi = (cores >> 32) & 0xFFFFFFFF;
176
177         lockdep_assert_held(&kbdev->pm.power_change_lock);
178
179         /*-------------------------------------------------------*/
180
181         if ( is_action_of_powering_off_l2(core_type, action) ) {
182                 D("not to power off l2 actually.");
183                 return;
184         }
185         if ( is_action_of_powering_off_shader(core_type, action) ) {
186                 D("not to power off shader actually. cores_lo : 0x%x, hi : 0x%x.",
187                   lo,
188                   hi);
189                 return;
190         }
191         if ( is_action_of_powering_off_tiler(core_type, action) ) {
192                 D("not to power off tiler actually.");
193                 return;
194         }
195
196         /*-------------------------------------------------------*/
197
198         reg = core_type_to_reg(core_type, action);
199
200         KBASE_DEBUG_ASSERT(reg);
201 #if defined(CONFIG_MALI_GATOR_SUPPORT)
202         if (cores) {
203                 if (action == ACTION_PWRON)
204                         kbase_trace_mali_pm_power_on(core_type, cores);
205                 else if (action == ACTION_PWROFF)
206                         kbase_trace_mali_pm_power_off(core_type, cores);
207         }
208 #endif
209
210         if (cores) {
211                 u64 state = kbase_pm_get_state(kbdev, core_type, ACTION_READY);
212
213                 if (action == ACTION_PWRON)
214                         state |= cores;
215                 else if (action == ACTION_PWROFF)
216                         state &= ~cores;
217                 kbase_tlstream_aux_pm_state(core_type, state);
218         }
219
220         /* Tracing */
221         if (cores) {
222                 if (action == ACTION_PWRON)
223                         switch (core_type) {
224                         case KBASE_PM_CORE_SHADER:
225                                 KBASE_TRACE_ADD(kbdev, PM_PWRON, NULL, NULL, 0u,
226                                                                         lo);
227                                 break;
228                         case KBASE_PM_CORE_TILER:
229                                 KBASE_TRACE_ADD(kbdev, PM_PWRON_TILER, NULL,
230                                                                 NULL, 0u, lo);
231                                 break;
232                         case KBASE_PM_CORE_L2:
233                                 KBASE_TRACE_ADD(kbdev, PM_PWRON_L2, NULL, NULL,
234                                                                         0u, lo);
235                                 break;
236                         default:
237                                 break;
238                         }
239                 else if (action == ACTION_PWROFF)
240                         switch (core_type) {
241                         case KBASE_PM_CORE_SHADER:
242                                 KBASE_TRACE_ADD(kbdev, PM_PWROFF, NULL, NULL,
243                                                                         0u, lo);
244                                 break;
245                         case KBASE_PM_CORE_TILER:
246                                 KBASE_TRACE_ADD(kbdev, PM_PWROFF_TILER, NULL,
247                                                                 NULL, 0u, lo);
248                                 break;
249                         case KBASE_PM_CORE_L2:
250                                 KBASE_TRACE_ADD(kbdev, PM_PWROFF_L2, NULL, NULL,
251                                                                         0u, lo);
252                                 /* disable snoops before L2 is turned off */
253                                 kbase_pm_cache_snoop_disable(kbdev);
254                                 break;
255                         default:
256                                 break;
257                         }
258         }
259
260         if (lo != 0)
261                 kbase_reg_write(kbdev, GPU_CONTROL_REG(reg), lo, NULL);
262
263         if (hi != 0)
264                 kbase_reg_write(kbdev, GPU_CONTROL_REG(reg + 4), hi, NULL);
265 }
266
267 /**
268  * kbase_pm_get_state - Get information about a core set
269  *
270  * This function gets information (chosen by @action) about a set of cores of
271  * a type given by @core_type. It is a static function used by
272  * kbase_pm_get_present_cores(), kbase_pm_get_active_cores(),
273  * kbase_pm_get_trans_cores() and kbase_pm_get_ready_cores().
274  *
275  * @kbdev:     The kbase device structure of the device
276  * @core_type: The type of core that the should be queried
277  * @action:    The property of the cores to query
278  *
279  * Return: A bit mask specifying the state of the cores
280  */
281 static u64 kbase_pm_get_state(struct kbase_device *kbdev,
282                                         enum kbase_pm_core_type core_type,
283                                         enum kbasep_pm_action action)
284 {
285         u32 reg;
286         u32 lo, hi;
287
288         reg = core_type_to_reg(core_type, action);
289
290         KBASE_DEBUG_ASSERT(reg);
291
292         lo = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg), NULL);
293         hi = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg + 4), NULL);
294
295         return (((u64) hi) << 32) | ((u64) lo);
296 }
297
298 void kbasep_pm_read_present_cores(struct kbase_device *kbdev)
299 {
300         kbdev->shader_inuse_bitmap = 0;
301         kbdev->shader_needed_bitmap = 0;
302         kbdev->shader_available_bitmap = 0;
303         kbdev->tiler_available_bitmap = 0;
304         kbdev->l2_users_count = 0;
305         kbdev->l2_available_bitmap = 0;
306         kbdev->tiler_needed_cnt = 0;
307         kbdev->tiler_inuse_cnt = 0;
308
309         memset(kbdev->shader_needed_cnt, 0, sizeof(kbdev->shader_needed_cnt));
310 }
311
312 KBASE_EXPORT_TEST_API(kbasep_pm_read_present_cores);
313
314 /**
315  * kbase_pm_get_present_cores - Get the cores that are present
316  *
317  * @kbdev: Kbase device
318  * @type: The type of cores to query
319  *
320  * Return: Bitmask of the cores that are present
321  */
322 u64 kbase_pm_get_present_cores(struct kbase_device *kbdev,
323                                                 enum kbase_pm_core_type type)
324 {
325         KBASE_DEBUG_ASSERT(kbdev != NULL);
326
327         switch (type) {
328         case KBASE_PM_CORE_L2:
329                 return kbdev->gpu_props.props.raw_props.l2_present;
330         case KBASE_PM_CORE_SHADER:
331                 return kbdev->gpu_props.props.raw_props.shader_present;
332         case KBASE_PM_CORE_TILER:
333                 return kbdev->gpu_props.props.raw_props.tiler_present;
334         }
335         KBASE_DEBUG_ASSERT(0);
336         return 0;
337 }
338
339 KBASE_EXPORT_TEST_API(kbase_pm_get_present_cores);
340
341 /**
342  * kbase_pm_get_active_cores - Get the cores that are "active"
343  *                             (busy processing work)
344  *
345  * @kbdev: Kbase device
346  * @type: The type of cores to query
347  *
348  * Return: Bitmask of cores that are active
349  */
350 u64 kbase_pm_get_active_cores(struct kbase_device *kbdev,
351                                                 enum kbase_pm_core_type type)
352 {
353         return kbase_pm_get_state(kbdev, type, ACTION_PWRACTIVE);
354 }
355
356 KBASE_EXPORT_TEST_API(kbase_pm_get_active_cores);
357
358 /**
359  * kbase_pm_get_trans_cores - Get the cores that are transitioning between
360  *                            power states
361  *
362  * @kbdev: Kbase device
363  * @type: The type of cores to query
364  *
365  * Return: Bitmask of cores that are transitioning
366  */
367 u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev,
368                                                 enum kbase_pm_core_type type)
369 {
370         return kbase_pm_get_state(kbdev, type, ACTION_PWRTRANS);
371 }
372
373 KBASE_EXPORT_TEST_API(kbase_pm_get_trans_cores);
374
375 /**
376  * kbase_pm_get_ready_cores - Get the cores that are powered on
377  *
378  * @kbdev: Kbase device
379  * @type: The type of cores to query
380  *
381  * Return: Bitmask of cores that are ready (powered on)
382  */
383 u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev,
384                                                 enum kbase_pm_core_type type)
385 {
386         u64 result;
387
388         result = kbase_pm_get_state(kbdev, type, ACTION_READY);
389
390         switch (type) {
391         case KBASE_PM_CORE_SHADER:
392                 KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED, NULL, NULL, 0u,
393                                                                 (u32) result);
394                 break;
395         case KBASE_PM_CORE_TILER:
396                 KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_TILER, NULL, NULL, 0u,
397                                                                 (u32) result);
398                 break;
399         case KBASE_PM_CORE_L2:
400                 KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_L2, NULL, NULL, 0u,
401                                                                 (u32) result);
402                 break;
403         default:
404                 break;
405         }
406
407         return result;
408 }
409
410 KBASE_EXPORT_TEST_API(kbase_pm_get_ready_cores);
411
412 /**
413  * kbase_pm_transition_core_type - Perform power transitions for a particular
414  *                                 core type.
415  *
416  * This function will perform any available power transitions to make the actual
417  * hardware state closer to the desired state. If a core is currently
418  * transitioning then changes to the power state of that call cannot be made
419  * until the transition has finished. Cores which are not present in the
420  * hardware are ignored if they are specified in the desired_state bitmask,
421  * however the return value will always be 0 in this case.
422  *
423  * @kbdev:             The kbase device
424  * @type:              The core type to perform transitions for
425  * @desired_state:     A bit mask of the desired state of the cores
426  * @in_use:            A bit mask of the cores that are currently running
427  *                     jobs. These cores have to be kept powered up because
428  *                     there are jobs running (or about to run) on them.
429  * @available:         Receives a bit mask of the cores that the job
430  *                     scheduler can use to submit jobs to. May be NULL if
431  *                     this is not needed.
432  * @powering_on:       Bit mask to update with cores that are
433  *                    transitioning to a power-on state.
434  *
435  * Return: true if the desired state has been reached, false otherwise
436  */
437 static bool kbase_pm_transition_core_type(struct kbase_device *kbdev,
438                                                 enum kbase_pm_core_type type,
439                                                 u64 desired_state,
440                                                 u64 in_use,
441                                                 u64 * const available,
442                                                 u64 *powering_on)
443 {
444         u64 present;
445         u64 ready;
446         u64 trans;
447         u64 powerup;
448         u64 powerdown;
449         u64 powering_on_trans;
450         u64 desired_state_in_use;
451
452         lockdep_assert_held(&kbdev->pm.power_change_lock);
453
454         /* Get current state */
455         present = kbase_pm_get_present_cores(kbdev, type);
456         trans = kbase_pm_get_trans_cores(kbdev, type);
457         ready = kbase_pm_get_ready_cores(kbdev, type);
458         /* mask off ready from trans in case transitions finished between the
459          * register reads */
460         trans &= ~ready;
461
462         powering_on_trans = trans & *powering_on;
463         *powering_on = powering_on_trans;
464
465         if (available != NULL)
466                 *available = (ready | powering_on_trans) & desired_state;
467
468         /* Update desired state to include the in-use cores. These have to be
469          * kept powered up because there are jobs running or about to run on
470          * these cores
471          */
472         desired_state_in_use = desired_state | in_use;
473
474         /* Update state of whether l2 caches are powered */
475         if (type == KBASE_PM_CORE_L2) {
476                 if ((ready == present) && (desired_state_in_use == ready) &&
477                                                                 (trans == 0)) {
478                         /* All are ready, none will be turned off, and none are
479                          * transitioning */
480                         kbdev->pm.backend.l2_powered = 1;
481                         /*
482                          * Ensure snoops are enabled after L2 is powered up,
483                          * note that kbase keeps track of the snoop state, so
484                          * safe to repeatedly call.
485                          */
486                         kbase_pm_cache_snoop_enable(kbdev);
487                         if (kbdev->l2_users_count > 0) {
488                                 /* Notify any registered l2 cache users
489                                  * (optimized out when no users waiting) */
490                                 wake_up(&kbdev->pm.backend.l2_powered_wait);
491                         }
492                 } else
493                         kbdev->pm.backend.l2_powered = 0;
494         }
495
496         if (desired_state_in_use == ready && (trans == 0))
497                 return true;
498
499         /* Restrict the cores to those that are actually present */
500         powerup = desired_state_in_use & present;
501         powerdown = (~desired_state_in_use) & present;
502
503         /* Restrict to cores that are not already in the desired state */
504         powerup &= ~ready;
505         powerdown &= ready;
506
507         /* Don't transition any cores that are already transitioning, except for
508          * Mali cores that support the following case:
509          *
510          * If the SHADER_PWRON or TILER_PWRON registers are written to turn on
511          * a core that is currently transitioning to power off, then this is
512          * remembered and the shader core is automatically powered up again once
513          * the original transition completes. Once the automatic power on is
514          * complete any job scheduled on the shader core should start.
515          */
516         powerdown &= ~trans;
517
518         if (kbase_hw_has_feature(kbdev,
519                                 BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS))
520                 if (KBASE_PM_CORE_SHADER == type || KBASE_PM_CORE_TILER == type)
521                         trans = powering_on_trans; /* for exception cases, only
522                                                     * mask off cores in power on
523                                                     * transitions */
524
525         powerup &= ~trans;
526
527         /* Perform transitions if any */
528         kbase_pm_invoke(kbdev, type, powerup, ACTION_PWRON);
529         kbase_pm_invoke(kbdev, type, powerdown, ACTION_PWROFF);
530
531         /* Recalculate cores transitioning on, and re-evaluate our state */
532         powering_on_trans |= powerup;
533         *powering_on = powering_on_trans;
534         if (available != NULL)
535                 *available = (ready | powering_on_trans) & desired_state;
536
537         return false;
538 }
539
540 KBASE_EXPORT_TEST_API(kbase_pm_transition_core_type);
541
542 /**
543  * get_desired_cache_status - Determine which caches should be on for a
544  *                            particular core state
545  *
546  * This function takes a bit mask of the present caches and the cores (or
547  * caches) that are attached to the caches that will be powered. It then
548  * computes which caches should be turned on to allow the cores requested to be
549  * powered up.
550  *
551  * @present:       The bit mask of present caches
552  * @cores_powered: A bit mask of cores (or L2 caches) that are desired to
553  *                 be powered
554  * @tilers_powered: The bit mask of tilers that are desired to be powered
555  *
556  * Return: A bit mask of the caches that should be turned on
557  */
558 static u64 get_desired_cache_status(u64 present, u64 cores_powered,
559                 u64 tilers_powered)
560 {
561         u64 desired = 0;
562
563         while (present) {
564                 /* Find out which is the highest set bit */
565                 u64 bit = fls64(present) - 1;
566                 u64 bit_mask = 1ull << bit;
567                 /* Create a mask which has all bits from 'bit' upwards set */
568
569                 u64 mask = ~(bit_mask - 1);
570
571                 /* If there are any cores powered at this bit or above (that
572                  * haven't previously been processed) then we need this core on
573                  */
574                 if (cores_powered & mask)
575                         desired |= bit_mask;
576
577                 /* Remove bits from cores_powered and present */
578                 cores_powered &= ~mask;
579                 present &= ~bit_mask;
580         }
581
582         /* Power up the required L2(s) for the tiler */
583         if (tilers_powered)
584                 desired |= 1;
585
586         return desired;
587 }
588
589 KBASE_EXPORT_TEST_API(get_desired_cache_status);
590
591 bool
592 MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
593 {
594         bool cores_are_available = false;
595         bool in_desired_state = true;
596         u64 desired_l2_state;
597         u64 cores_powered;
598         u64 tilers_powered;
599         u64 tiler_available_bitmap;
600         u64 shader_available_bitmap;
601         u64 shader_ready_bitmap;
602         u64 shader_transitioning_bitmap;
603         u64 l2_available_bitmap;
604         u64 prev_l2_available_bitmap;
605
606         KBASE_DEBUG_ASSERT(NULL != kbdev);
607         lockdep_assert_held(&kbdev->pm.power_change_lock);
608
609         spin_lock(&kbdev->pm.backend.gpu_powered_lock);
610         if (kbdev->pm.backend.gpu_powered == false) {
611                 spin_unlock(&kbdev->pm.backend.gpu_powered_lock);
612                 if (kbdev->pm.backend.desired_shader_state == 0 &&
613                                 kbdev->pm.backend.desired_tiler_state == 0)
614                         return true;
615                 return false;
616         }
617
618         /* Trace that a change-state is being requested, and that it took
619          * (effectively) no time to start it. This is useful for counting how
620          * many state changes occurred, in a way that's backwards-compatible
621          * with processing the trace data */
622         kbase_timeline_pm_send_event(kbdev,
623                                 KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE);
624         kbase_timeline_pm_handle_event(kbdev,
625                                 KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE);
626
627         /* If any cores are already powered then, we must keep the caches on */
628         cores_powered = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
629
630         cores_powered |= kbdev->pm.backend.desired_shader_state;
631
632         /* Work out which tilers want to be powered */
633         tilers_powered = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_TILER);
634         tilers_powered |= kbdev->pm.backend.desired_tiler_state;
635
636         /* If there are l2 cache users registered, keep all l2s powered even if
637          * all other cores are off. */
638         if (kbdev->l2_users_count > 0)
639                 cores_powered |= kbdev->gpu_props.props.raw_props.l2_present;
640
641         desired_l2_state = get_desired_cache_status(
642                         kbdev->gpu_props.props.raw_props.l2_present,
643                         cores_powered, tilers_powered);
644
645         /* If any l2 cache is on, then enable l2 #0, for use by job manager */
646         if (0 != desired_l2_state)
647                 desired_l2_state |= 1;
648
649         prev_l2_available_bitmap = kbdev->l2_available_bitmap;
650         in_desired_state &= kbase_pm_transition_core_type(kbdev,
651                                 KBASE_PM_CORE_L2, desired_l2_state, 0,
652                                 &l2_available_bitmap,
653                                 &kbdev->pm.backend.powering_on_l2_state);
654
655         if (kbdev->l2_available_bitmap != l2_available_bitmap)
656                 KBASE_TIMELINE_POWER_L2(kbdev, l2_available_bitmap);
657
658         kbdev->l2_available_bitmap = l2_available_bitmap;
659
660         if (in_desired_state) {
661                 in_desired_state &= kbase_pm_transition_core_type(kbdev,
662                                 KBASE_PM_CORE_TILER,
663                                 kbdev->pm.backend.desired_tiler_state,
664                                 0, &tiler_available_bitmap,
665                                 &kbdev->pm.backend.powering_on_tiler_state);
666                 in_desired_state &= kbase_pm_transition_core_type(kbdev,
667                                 KBASE_PM_CORE_SHADER,
668                                 kbdev->pm.backend.desired_shader_state,
669                                 kbdev->shader_inuse_bitmap,
670                                 &shader_available_bitmap,
671                                 &kbdev->pm.backend.powering_on_shader_state);
672
673                 if (kbdev->shader_available_bitmap != shader_available_bitmap) {
674                         KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL,
675                                                 NULL, 0u,
676                                                 (u32) shader_available_bitmap);
677                         KBASE_TIMELINE_POWER_SHADER(kbdev,
678                                                 shader_available_bitmap);
679                 }
680
681                 kbdev->shader_available_bitmap = shader_available_bitmap;
682
683                 if (kbdev->tiler_available_bitmap != tiler_available_bitmap) {
684                         KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER,
685                                                 NULL, NULL, 0u,
686                                                 (u32) tiler_available_bitmap);
687                         KBASE_TIMELINE_POWER_TILER(kbdev,
688                                                         tiler_available_bitmap);
689                 }
690
691                 kbdev->tiler_available_bitmap = tiler_available_bitmap;
692
693         } else if ((l2_available_bitmap &
694                         kbdev->gpu_props.props.raw_props.tiler_present) !=
695                         kbdev->gpu_props.props.raw_props.tiler_present) {
696                 tiler_available_bitmap = 0;
697
698                 if (kbdev->tiler_available_bitmap != tiler_available_bitmap)
699                         KBASE_TIMELINE_POWER_TILER(kbdev,
700                                                         tiler_available_bitmap);
701
702                 kbdev->tiler_available_bitmap = tiler_available_bitmap;
703         }
704
705         /* State updated for slow-path waiters */
706         kbdev->pm.backend.gpu_in_desired_state = in_desired_state;
707
708         shader_ready_bitmap = kbase_pm_get_ready_cores(kbdev,
709                                                         KBASE_PM_CORE_SHADER);
710         shader_transitioning_bitmap = kbase_pm_get_trans_cores(kbdev,
711                                                         KBASE_PM_CORE_SHADER);
712
713         /* Determine whether the cores are now available (even if the set of
714          * available cores is empty). Note that they can be available even if
715          * we've not finished transitioning to the desired state */
716         if ((kbdev->shader_available_bitmap &
717                                         kbdev->pm.backend.desired_shader_state)
718                                 == kbdev->pm.backend.desired_shader_state &&
719                 (kbdev->tiler_available_bitmap &
720                                         kbdev->pm.backend.desired_tiler_state)
721                                 == kbdev->pm.backend.desired_tiler_state) {
722                 cores_are_available = true;
723
724                 KBASE_TRACE_ADD(kbdev, PM_CORES_AVAILABLE, NULL, NULL, 0u,
725                                 (u32)(kbdev->shader_available_bitmap &
726                                 kbdev->pm.backend.desired_shader_state));
727                 KBASE_TRACE_ADD(kbdev, PM_CORES_AVAILABLE_TILER, NULL, NULL, 0u,
728                                 (u32)(kbdev->tiler_available_bitmap &
729                                 kbdev->pm.backend.desired_tiler_state));
730
731                 /* Log timelining information about handling events that power
732                  * up cores, to match up either with immediate submission either
733                  * because cores already available, or from PM IRQ */
734                 if (!in_desired_state)
735                         kbase_timeline_pm_send_event(kbdev,
736                                 KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
737         }
738
739         if (in_desired_state) {
740                 KBASE_DEBUG_ASSERT(cores_are_available);
741
742 #if defined(CONFIG_MALI_GATOR_SUPPORT)
743                 kbase_trace_mali_pm_status(KBASE_PM_CORE_L2,
744                                                 kbase_pm_get_ready_cores(kbdev,
745                                                         KBASE_PM_CORE_L2));
746                 kbase_trace_mali_pm_status(KBASE_PM_CORE_SHADER,
747                                                 kbase_pm_get_ready_cores(kbdev,
748                                                         KBASE_PM_CORE_SHADER));
749                 kbase_trace_mali_pm_status(KBASE_PM_CORE_TILER,
750                                                 kbase_pm_get_ready_cores(kbdev,
751                                                         KBASE_PM_CORE_TILER));
752 #endif
753
754                 kbase_tlstream_aux_pm_state(
755                                 KBASE_PM_CORE_L2,
756                                 kbase_pm_get_ready_cores(
757                                         kbdev, KBASE_PM_CORE_L2));
758                 kbase_tlstream_aux_pm_state(
759                                 KBASE_PM_CORE_SHADER,
760                                 kbase_pm_get_ready_cores(
761                                         kbdev, KBASE_PM_CORE_SHADER));
762                 kbase_tlstream_aux_pm_state(
763                                 KBASE_PM_CORE_TILER,
764                                 kbase_pm_get_ready_cores(
765                                         kbdev,
766                                         KBASE_PM_CORE_TILER));
767
768                 KBASE_TRACE_ADD(kbdev, PM_DESIRED_REACHED, NULL, NULL,
769                                 kbdev->pm.backend.gpu_in_desired_state,
770                                 (u32)kbdev->pm.backend.desired_shader_state);
771                 KBASE_TRACE_ADD(kbdev, PM_DESIRED_REACHED_TILER, NULL, NULL, 0u,
772                                 (u32)kbdev->pm.backend.desired_tiler_state);
773
774                 /* Log timelining information for synchronous waiters */
775                 kbase_timeline_pm_send_event(kbdev,
776                                 KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
777                 /* Wake slow-path waiters. Job scheduler does not use this. */
778                 KBASE_TRACE_ADD(kbdev, PM_WAKE_WAITERS, NULL, NULL, 0u, 0);
779                 wake_up(&kbdev->pm.backend.gpu_in_desired_state_wait);
780         }
781
782         spin_unlock(&kbdev->pm.backend.gpu_powered_lock);
783
784         /* kbase_pm_ca_update_core_status can cause one-level recursion into
785          * this function, so it must only be called once all changes to kbdev
786          * have been committed, and after the gpu_powered_lock has been
787          * dropped. */
788         if (kbdev->shader_ready_bitmap != shader_ready_bitmap ||
789             kbdev->shader_transitioning_bitmap != shader_transitioning_bitmap) {
790                 kbdev->shader_ready_bitmap = shader_ready_bitmap;
791                 kbdev->shader_transitioning_bitmap =
792                                                 shader_transitioning_bitmap;
793
794                 kbase_pm_ca_update_core_status(kbdev, shader_ready_bitmap,
795                                                 shader_transitioning_bitmap);
796         }
797
798         /* The core availability policy is not allowed to keep core group 0
799          * turned off (unless it was changing the l2 power state) */
800         if (!((shader_ready_bitmap | shader_transitioning_bitmap) &
801                 kbdev->gpu_props.props.coherency_info.group[0].core_mask) &&
802                 (prev_l2_available_bitmap == desired_l2_state) &&
803                 !(kbase_pm_ca_get_core_mask(kbdev) &
804                 kbdev->gpu_props.props.coherency_info.group[0].core_mask))
805                 BUG();
806
807         /* The core availability policy is allowed to keep core group 1 off,
808          * but all jobs specifically targeting CG1 must fail */
809         if (!((shader_ready_bitmap | shader_transitioning_bitmap) &
810                 kbdev->gpu_props.props.coherency_info.group[1].core_mask) &&
811                 !(kbase_pm_ca_get_core_mask(kbdev) &
812                 kbdev->gpu_props.props.coherency_info.group[1].core_mask))
813                 kbdev->pm.backend.cg1_disabled = true;
814         else
815                 kbdev->pm.backend.cg1_disabled = false;
816
817         return cores_are_available;
818 }
819 KBASE_EXPORT_TEST_API(kbase_pm_check_transitions_nolock);
820
821 /* Timeout for kbase_pm_check_transitions_sync when wait_event_killable has
822  * aborted due to a fatal signal. If the time spent waiting has exceeded this
823  * threshold then there is most likely a hardware issue. */
824 #define PM_TIMEOUT (5*HZ) /* 5s */
825
826 void kbase_pm_check_transitions_sync(struct kbase_device *kbdev)
827 {
828         unsigned long flags;
829         unsigned long timeout;
830         bool cores_are_available;
831         int ret;
832
833         /* Force the transition to be checked and reported - the cores may be
834          * 'available' (for job submission) but not fully powered up. */
835         spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
836         cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
837         /* Don't need 'cores_are_available', because we don't return anything */
838         CSTD_UNUSED(cores_are_available);
839         spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
840
841         timeout = jiffies + PM_TIMEOUT;
842
843         /* Wait for cores */
844         ret = wait_event_killable(kbdev->pm.backend.gpu_in_desired_state_wait,
845                         kbdev->pm.backend.gpu_in_desired_state);
846
847         if (ret < 0 && time_after(jiffies, timeout)) {
848                 dev_err(kbdev->dev, "Power transition timed out unexpectedly\n");
849                 dev_err(kbdev->dev, "Desired state :\n");
850                 dev_err(kbdev->dev, "\tShader=%016llx\n",
851                                 kbdev->pm.backend.desired_shader_state);
852                 dev_err(kbdev->dev, "\tTiler =%016llx\n",
853                                 kbdev->pm.backend.desired_tiler_state);
854                 dev_err(kbdev->dev, "Current state :\n");
855                 dev_err(kbdev->dev, "\tShader=%08x%08x\n",
856                                 kbase_reg_read(kbdev,
857                                         GPU_CONTROL_REG(SHADER_READY_HI), NULL),
858                                 kbase_reg_read(kbdev,
859                                         GPU_CONTROL_REG(SHADER_READY_LO),
860                                         NULL));
861                 dev_err(kbdev->dev, "\tTiler =%08x%08x\n",
862                                 kbase_reg_read(kbdev,
863                                         GPU_CONTROL_REG(TILER_READY_HI), NULL),
864                                 kbase_reg_read(kbdev,
865                                         GPU_CONTROL_REG(TILER_READY_LO), NULL));
866                 dev_err(kbdev->dev, "\tL2    =%08x%08x\n",
867                                 kbase_reg_read(kbdev,
868                                         GPU_CONTROL_REG(L2_READY_HI), NULL),
869                                 kbase_reg_read(kbdev,
870                                         GPU_CONTROL_REG(L2_READY_LO), NULL));
871                 dev_err(kbdev->dev, "Cores transitioning :\n");
872                 dev_err(kbdev->dev, "\tShader=%08x%08x\n",
873                                 kbase_reg_read(kbdev, GPU_CONTROL_REG(
874                                                 SHADER_PWRTRANS_HI), NULL),
875                                 kbase_reg_read(kbdev, GPU_CONTROL_REG(
876                                                 SHADER_PWRTRANS_LO), NULL));
877                 dev_err(kbdev->dev, "\tTiler =%08x%08x\n",
878                                 kbase_reg_read(kbdev, GPU_CONTROL_REG(
879                                                 TILER_PWRTRANS_HI), NULL),
880                                 kbase_reg_read(kbdev, GPU_CONTROL_REG(
881                                                 TILER_PWRTRANS_LO), NULL));
882                 dev_err(kbdev->dev, "\tL2    =%08x%08x\n",
883                                 kbase_reg_read(kbdev, GPU_CONTROL_REG(
884                                                 L2_PWRTRANS_HI), NULL),
885                                 kbase_reg_read(kbdev, GPU_CONTROL_REG(
886                                                 L2_PWRTRANS_LO), NULL));
887 #if KBASE_GPU_RESET_EN
888                 dev_err(kbdev->dev, "Sending reset to GPU - all running jobs will be lost\n");
889                 if (kbase_prepare_to_reset_gpu(kbdev))
890                         kbase_reset_gpu(kbdev);
891 #endif /* KBASE_GPU_RESET_EN */
892         } else {
893                 /* Log timelining information that a change in state has
894                  * completed */
895                 kbase_timeline_pm_handle_event(kbdev,
896                                 KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
897         }
898 }
899 KBASE_EXPORT_TEST_API(kbase_pm_check_transitions_sync);
900
901 void kbase_pm_enable_interrupts(struct kbase_device *kbdev)
902 {
903         unsigned long flags;
904
905         KBASE_DEBUG_ASSERT(NULL != kbdev);
906         /*
907          * Clear all interrupts,
908          * and unmask them all.
909          */
910         spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
911         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL,
912                                                                         NULL);
913         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), GPU_IRQ_REG_ALL,
914                                                                         NULL);
915         spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
916
917         kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF,
918                                                                         NULL);
919         kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0xFFFFFFFF, NULL);
920
921         kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF, NULL);
922         kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0xFFFFFFFF, NULL);
923 }
924
925 KBASE_EXPORT_TEST_API(kbase_pm_enable_interrupts);
926
927 void kbase_pm_disable_interrupts(struct kbase_device *kbdev)
928 {
929         unsigned long flags;
930
931         KBASE_DEBUG_ASSERT(NULL != kbdev);
932         /*
933          * Mask all interrupts,
934          * and clear them all.
935          */
936         spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
937         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), 0, NULL);
938         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL,
939                                                                         NULL);
940         spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
941
942         kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0, NULL);
943         kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF,
944                                                                         NULL);
945
946         kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL);
947         kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF, NULL);
948 }
949
950 KBASE_EXPORT_TEST_API(kbase_pm_disable_interrupts);
951
952 /*
953  * pmu layout:
954  * 0x0000: PMU TAG (RO) (0xCAFECAFE)
955  * 0x0004: PMU VERSION ID (RO) (0x00000000)
956  * 0x0008: CLOCK ENABLE (RW) (31:1 SBZ, 0 CLOCK STATE)
957  */
958 void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
959 {
960         bool reset_required = is_resume;
961         struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
962         unsigned long flags;
963         int i;
964
965         KBASE_DEBUG_ASSERT(NULL != kbdev);
966         lockdep_assert_held(&js_devdata->runpool_mutex);
967         lockdep_assert_held(&kbdev->pm.lock);
968
969         if (kbdev->pm.backend.gpu_powered) {
970                 /* Already turned on */
971                 if (kbdev->poweroff_pending)
972                         kbase_pm_enable_interrupts(kbdev);
973                 kbdev->poweroff_pending = false;
974                 KBASE_DEBUG_ASSERT(!is_resume);
975                 return;
976         }
977
978         kbdev->poweroff_pending = false;
979
980         KBASE_TRACE_ADD(kbdev, PM_GPU_ON, NULL, NULL, 0u, 0u);
981
982         if (is_resume && kbdev->pm.backend.callback_power_resume) {
983                 kbdev->pm.backend.callback_power_resume(kbdev);
984                 return;
985         } else if (kbdev->pm.backend.callback_power_on) {
986                 kbdev->pm.backend.callback_power_on(kbdev);
987                 /* If your platform properly keeps the GPU state you may use the
988                  * return value of the callback_power_on function to
989                  * conditionally reset the GPU on power up. Currently we are
990                  * conservative and always reset the GPU. */
991                 reset_required = true;
992         }
993
994         spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
995         kbdev->pm.backend.gpu_powered = true;
996         spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
997
998         if (reset_required) {
999                 /* GPU state was lost, reset GPU to ensure it is in a
1000                  * consistent state */
1001                 kbase_pm_init_hw(kbdev, PM_ENABLE_IRQS);
1002         }
1003
1004         /* Reprogram the GPU's MMU */
1005         for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
1006                 struct kbase_as *as = &kbdev->as[i];
1007
1008                 mutex_lock(&as->transaction_mutex);
1009                 spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
1010
1011                 if (js_devdata->runpool_irq.per_as_data[i].kctx)
1012                         kbase_mmu_update(
1013                                 js_devdata->runpool_irq.per_as_data[i].kctx);
1014                 else
1015                         kbase_mmu_disable_as(kbdev, i);
1016
1017                 spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
1018                 mutex_unlock(&as->transaction_mutex);
1019         }
1020
1021         /* Lastly, enable the interrupts */
1022         kbase_pm_enable_interrupts(kbdev);
1023 }
1024
1025 KBASE_EXPORT_TEST_API(kbase_pm_clock_on);
1026
1027 bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend)
1028 {
1029         unsigned long flags;
1030
1031         KBASE_DEBUG_ASSERT(NULL != kbdev);
1032         lockdep_assert_held(&kbdev->pm.lock);
1033
1034         /* ASSERT that the cores should now be unavailable. No lock needed. */
1035         KBASE_DEBUG_ASSERT(kbdev->shader_available_bitmap == 0u);
1036
1037         kbdev->poweroff_pending = true;
1038
1039         if (!kbdev->pm.backend.gpu_powered) {
1040                 /* Already turned off */
1041                 if (is_suspend && kbdev->pm.backend.callback_power_suspend)
1042                         kbdev->pm.backend.callback_power_suspend(kbdev);
1043                 return true;
1044         }
1045
1046         KBASE_TRACE_ADD(kbdev, PM_GPU_OFF, NULL, NULL, 0u, 0u);
1047
1048         /* Disable interrupts. This also clears any outstanding interrupts */
1049         kbase_pm_disable_interrupts(kbdev);
1050         /* Ensure that any IRQ handlers have finished */
1051         kbase_synchronize_irqs(kbdev);
1052
1053         spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
1054
1055         if (atomic_read(&kbdev->faults_pending)) {
1056                 /* Page/bus faults are still being processed. The GPU can not
1057                  * be powered off until they have completed */
1058                 spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
1059                                                                         flags);
1060                 return false;
1061         }
1062
1063         kbase_pm_cache_snoop_disable(kbdev);
1064
1065         /* The GPU power may be turned off from this point */
1066         kbdev->pm.backend.gpu_powered = false;
1067         spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
1068
1069         if (is_suspend && kbdev->pm.backend.callback_power_suspend)
1070                 kbdev->pm.backend.callback_power_suspend(kbdev);
1071         else if (kbdev->pm.backend.callback_power_off)
1072                 kbdev->pm.backend.callback_power_off(kbdev);
1073         return true;
1074 }
1075
1076 KBASE_EXPORT_TEST_API(kbase_pm_clock_off);
1077
1078 struct kbasep_reset_timeout_data {
1079         struct hrtimer timer;
1080         bool timed_out;
1081         struct kbase_device *kbdev;
1082 };
1083
1084 void kbase_pm_reset_done(struct kbase_device *kbdev)
1085 {
1086         KBASE_DEBUG_ASSERT(kbdev != NULL);
1087         kbdev->pm.backend.reset_done = true;
1088         wake_up(&kbdev->pm.backend.reset_done_wait);
1089 }
1090
1091 /**
1092  * kbase_pm_wait_for_reset - Wait for a reset to happen
1093  *
1094  * Wait for the %RESET_COMPLETED IRQ to occur, then reset the waiting state.
1095  *
1096  * @kbdev: Kbase device
1097  */
1098 static void kbase_pm_wait_for_reset(struct kbase_device *kbdev)
1099 {
1100         lockdep_assert_held(&kbdev->pm.lock);
1101
1102         wait_event(kbdev->pm.backend.reset_done_wait,
1103                                                 (kbdev->pm.backend.reset_done));
1104         kbdev->pm.backend.reset_done = false;
1105 }
1106
1107 KBASE_EXPORT_TEST_API(kbase_pm_reset_done);
1108
1109 static enum hrtimer_restart kbasep_reset_timeout(struct hrtimer *timer)
1110 {
1111         struct kbasep_reset_timeout_data *rtdata =
1112                 container_of(timer, struct kbasep_reset_timeout_data, timer);
1113
1114         rtdata->timed_out = 1;
1115
1116         /* Set the wait queue to wake up kbase_pm_init_hw even though the reset
1117          * hasn't completed */
1118         kbase_pm_reset_done(rtdata->kbdev);
1119
1120         return HRTIMER_NORESTART;
1121 }
1122
1123 static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
1124 {
1125         struct device_node *np = kbdev->dev->of_node;
1126         u32 jm_values[4];
1127         const u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
1128         const u32 prod_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
1129                 GPU_ID_VERSION_PRODUCT_ID_SHIFT;
1130         const u32 major = (gpu_id & GPU_ID_VERSION_MAJOR) >>
1131                 GPU_ID_VERSION_MAJOR_SHIFT;
1132
1133         kbdev->hw_quirks_sc = 0;
1134
1135         /* Needed due to MIDBASE-1494: LS_PAUSEBUFFER_DISABLE. See PRLAM-8443.
1136          * and needed due to MIDGLES-3539. See PRLAM-11035 */
1137         if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8443) ||
1138                         kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11035))
1139                 kbdev->hw_quirks_sc |= SC_LS_PAUSEBUFFER_DISABLE;
1140
1141         /* Needed due to MIDBASE-2054: SDC_DISABLE_OQ_DISCARD. See PRLAM-10327.
1142          */
1143         if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10327))
1144                 kbdev->hw_quirks_sc |= SC_SDC_DISABLE_OQ_DISCARD;
1145
1146 #ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY
1147         /* Enable alternative hardware counter selection if configured. */
1148         if (!GPU_ID_IS_NEW_FORMAT(prod_id))
1149                 kbdev->hw_quirks_sc |= SC_ALT_COUNTERS;
1150 #endif
1151
1152         /* Needed due to MIDBASE-2795. ENABLE_TEXGRD_FLAGS. See PRLAM-10797. */
1153         if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10797))
1154                 kbdev->hw_quirks_sc |= SC_ENABLE_TEXGRD_FLAGS;
1155
1156         if (!kbase_hw_has_issue(kbdev, GPUCORE_1619)) {
1157                 if (prod_id < 0x750 || prod_id == 0x6956) /* T60x, T62x, T72x */
1158                         kbdev->hw_quirks_sc |= SC_LS_ATTR_CHECK_DISABLE;
1159                 else if (prod_id >= 0x750 && prod_id <= 0x880) /* T76x, T8xx */
1160                         kbdev->hw_quirks_sc |= SC_LS_ALLOW_ATTR_TYPES;
1161         }
1162
1163         kbdev->hw_quirks_tiler = kbase_reg_read(kbdev,
1164                         GPU_CONTROL_REG(TILER_CONFIG), NULL);
1165
1166         /* Set tiler clock gate override if required */
1167         if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3953))
1168                 kbdev->hw_quirks_tiler |= TC_CLOCK_GATE_OVERRIDE;
1169
1170         /* Limit the GPU bus bandwidth if the platform needs this. */
1171         kbdev->hw_quirks_mmu = kbase_reg_read(kbdev,
1172                         GPU_CONTROL_REG(L2_MMU_CONFIG), NULL);
1173
1174         /* Limit read ID width for AXI */
1175         kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_READS);
1176         kbdev->hw_quirks_mmu |= (DEFAULT_ARID_LIMIT & 0x3) <<
1177                                 L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT;
1178
1179         /* Limit write ID width for AXI */
1180         kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES);
1181         kbdev->hw_quirks_mmu |= (DEFAULT_AWID_LIMIT & 0x3) <<
1182                                 L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT;
1183
1184         if (kbdev->system_coherency == COHERENCY_ACE) {
1185                 /* Allow memory configuration disparity to be ignored, we
1186                  * optimize the use of shared memory and thus we expect
1187                  * some disparity in the memory configuration */
1188                 kbdev->hw_quirks_mmu |= L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY;
1189         }
1190
1191         /* Only for T86x/T88x-based products after r2p0 */
1192         if (prod_id >= 0x860 && prod_id <= 0x880 && major >= 2) {
1193                 /* The JM_CONFIG register is specified as follows in the
1194                  T86x/T88x Engineering Specification Supplement:
1195                  The values are read from device tree in order.
1196                 */
1197 #define TIMESTAMP_OVERRIDE  1
1198 #define CLOCK_GATE_OVERRIDE (1<<1)
1199 #define JOB_THROTTLE_ENABLE (1<<2)
1200 #define JOB_THROTTLE_LIMIT_SHIFT 3
1201
1202                 /* 6 bits in the register */
1203                 const u32 jm_max_limit = 0x3F;
1204
1205                 if (of_property_read_u32_array(np,
1206                                         "jm_config",
1207                                         &jm_values[0],
1208                                         ARRAY_SIZE(jm_values))) {
1209                         /* Entry not in device tree, use defaults  */
1210                         jm_values[0] = 0;
1211                         jm_values[1] = 0;
1212                         jm_values[2] = 0;
1213                         jm_values[3] = jm_max_limit; /* Max value */
1214                 }
1215
1216                 /* Limit throttle limit to 6 bits*/
1217                 if (jm_values[3] > jm_max_limit) {
1218                         dev_dbg(kbdev->dev, "JOB_THROTTLE_LIMIT supplied in device tree is too large. Limiting to MAX (63).");
1219                         jm_values[3] = jm_max_limit;
1220                 }
1221
1222                 /* Aggregate to one integer. */
1223                 kbdev->hw_quirks_jm = (jm_values[0] ? TIMESTAMP_OVERRIDE : 0);
1224                 kbdev->hw_quirks_jm |= (jm_values[1] ? CLOCK_GATE_OVERRIDE : 0);
1225                 kbdev->hw_quirks_jm |= (jm_values[2] ? JOB_THROTTLE_ENABLE : 0);
1226                 kbdev->hw_quirks_jm |= (jm_values[3] <<
1227                                 JOB_THROTTLE_LIMIT_SHIFT);
1228         } else {
1229                 kbdev->hw_quirks_jm = KBASE_JM_CONFIG_UNUSED;
1230         }
1231
1232
1233 }
1234
1235 static void kbase_pm_hw_issues_apply(struct kbase_device *kbdev)
1236 {
1237         if (kbdev->hw_quirks_sc)
1238                 kbase_reg_write(kbdev, GPU_CONTROL_REG(SHADER_CONFIG),
1239                                 kbdev->hw_quirks_sc, NULL);
1240
1241         kbase_reg_write(kbdev, GPU_CONTROL_REG(TILER_CONFIG),
1242                         kbdev->hw_quirks_tiler, NULL);
1243
1244         kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG),
1245                         kbdev->hw_quirks_mmu, NULL);
1246
1247
1248         if (kbdev->hw_quirks_jm != KBASE_JM_CONFIG_UNUSED)
1249                 kbase_reg_write(kbdev, GPU_CONTROL_REG(JM_CONFIG),
1250                                 kbdev->hw_quirks_jm, NULL);
1251
1252 }
1253
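/*
 * On ACE-coherent systems the CCI snoops for the GPU are controlled through
 * firmware: if the platform supplied an SMC function ID it is invoked here
 * (ARM64 only), and the cached cci_snoop_enabled state is updated so that
 * enable/disable requests are not repeated needlessly.
 */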
1254 void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev)
1255 {
1256         if ((kbdev->system_coherency == COHERENCY_ACE) &&
1257                 !kbdev->cci_snoop_enabled) {
1258 #ifdef CONFIG_ARM64
1259                 if (kbdev->snoop_enable_smc != 0)
1260                         kbase_invoke_smc_fid(kbdev->snoop_enable_smc, 0, 0, 0);
1261 #endif /* CONFIG_ARM64 */
1262                 dev_dbg(kbdev->dev, "MALI - CCI Snoops Enabled\n");
1263                 kbdev->cci_snoop_enabled = true;
1264         }
1265 }
1266
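/*
 * Mirror of kbase_pm_cache_snoop_enable(): the GPU L2 is flushed through the
 * CCI before snoops are switched off via the platform's SMC call.
 */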
1267 void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev)
1268 {
1269         if ((kbdev->system_coherency == COHERENCY_ACE) &&
1270                 kbdev->cci_snoop_enabled) {
1271 #ifdef CONFIG_ARM64
1272                 if (kbdev->snoop_disable_smc != 0) {
1273                         mali_cci_flush_l2(kbdev);
1274                         kbase_invoke_smc_fid(kbdev->snoop_disable_smc, 0, 0, 0);
1275                 }
1276 #endif /* CONFIG_ARM64 */
1277                 dev_dbg(kbdev->dev, "MALI - CCI Snoops Disabled\n");
1278                 kbdev->cci_snoop_enabled = false;
1279         }
1280 }
1281
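/*
 * Conventional reset path: issue a SOFT_RESET and wait up to RESET_TIMEOUT ms
 * for the RESET_COMPLETED interrupt. If the interrupt is pending in
 * GPU_IRQ_RAWSTAT but never reached the CPU, interrupt routing is broken and
 * we give up. Otherwise a HARD_RESET is attempted with the same timeout
 * before the reset is reported as failed.
 */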
1282 static int kbase_pm_reset_do_normal(struct kbase_device *kbdev)
1283 {
1284         struct kbasep_reset_timeout_data rtdata;
1285
1286         KBASE_TRACE_ADD(kbdev, CORE_GPU_SOFT_RESET, NULL, NULL, 0u, 0);
1287
1288         kbase_tlstream_jd_gpu_soft_reset(kbdev);
1289
1290         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
1291                                                 GPU_COMMAND_SOFT_RESET, NULL);
1292
1293         /* Unmask the reset complete interrupt only */
1294         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), RESET_COMPLETED,
1295                                                                         NULL);
1296
1297         /* Initialize a structure for tracking the status of the reset */
1298         rtdata.kbdev = kbdev;
1299         rtdata.timed_out = 0;
1300
1301         /* Create a timer to use as a timeout on the reset */
1302         hrtimer_init_on_stack(&rtdata.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1303         rtdata.timer.function = kbasep_reset_timeout;
1304
1305         hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT),
1306                                                         HRTIMER_MODE_REL);
1307
1308         /* Wait for the RESET_COMPLETED interrupt to be raised */
1309         kbase_pm_wait_for_reset(kbdev);
1310
1311         if (rtdata.timed_out == 0) {
1312                 /* GPU has been reset */
1313                 hrtimer_cancel(&rtdata.timer);
1314                 destroy_hrtimer_on_stack(&rtdata.timer);
1315                 return 0;
1316         }
1317
1318         /* No interrupt has been received - check if the RAWSTAT register says
1319          * the reset has completed */
1320         if (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL) &
1321                                                         RESET_COMPLETED) {
1322                 /* The interrupt is set in the RAWSTAT; this suggests that the
1323                  * interrupts are not getting to the CPU */
1324                 dev_err(kbdev->dev, "Reset interrupt didn't reach CPU. Check interrupt assignments.\n");
1325                 /* If interrupts aren't working we can't continue. */
1326                 destroy_hrtimer_on_stack(&rtdata.timer);
1327                 return -EINVAL;
1328         }
1329
1330         /* The GPU doesn't seem to be responding to the reset so try a hard
1331          * reset */
1332         dev_err(kbdev->dev, "Failed to soft-reset GPU (timed out after %d ms), now attempting a hard reset\n",
1333                                                                 RESET_TIMEOUT);
1334         KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
1335         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
1336                                                 GPU_COMMAND_HARD_RESET, NULL);
1337
1338         /* Restart the timer to wait for the hard reset to complete */
1339         rtdata.timed_out = 0;
1340
1341         hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT),
1342                                                         HRTIMER_MODE_REL);
1343
1344         /* Wait for the RESET_COMPLETED interrupt to be raised */
1345         kbase_pm_wait_for_reset(kbdev);
1346
1347         if (rtdata.timed_out == 0) {
1348                 /* GPU has been reset */
1349                 hrtimer_cancel(&rtdata.timer);
1350                 destroy_hrtimer_on_stack(&rtdata.timer);
1351                 return 0;
1352         }
1353
1354         destroy_hrtimer_on_stack(&rtdata.timer);
1355
1356         dev_err(kbdev->dev, "Failed to hard-reset the GPU (timed out after %d ms)\n",
1357                                                                 RESET_TIMEOUT);
1358
1359         return -EINVAL;
1360 }
1361
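/*
 * Protected-mode reset path: the reset is delegated to the platform's
 * protected_mode_reset() callback instead of the SOFT_RESET/HARD_RESET
 * sequence above.
 */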
1362 static int kbase_pm_reset_do_protected(struct kbase_device *kbdev)
1363 {
1364         KBASE_TRACE_ADD(kbdev, CORE_GPU_SOFT_RESET, NULL, NULL, 0u, 0);
1365         kbase_tlstream_jd_gpu_soft_reset(kbdev);
1366
1367         return kbdev->protected_ops->protected_mode_reset(kbdev);
1368 }
1369
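/*
 * Bring the GPU to a known state: power it on if necessary, mask and clear
 * interrupts, disable cache snoops, reset the GPU (normal or protected path),
 * then re-apply the hardware issue workarounds and the coherency mode.
 * Depending on flags, hardware issue detection is re-run, the cycle counters
 * are restarted for existing users, and interrupts are re-enabled before
 * returning.
 */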
1370 int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
1371 {
1372         unsigned long irq_flags;
1373         int err;
1374         bool resume_vinstr = false;
1375
1376         KBASE_DEBUG_ASSERT(NULL != kbdev);
1377         lockdep_assert_held(&kbdev->pm.lock);
1378
1379         /* Ensure the clock is on before attempting to access the hardware */
1380         if (!kbdev->pm.backend.gpu_powered) {
1381                 if (kbdev->pm.backend.callback_power_on)
1382                         kbdev->pm.backend.callback_power_on(kbdev);
1383
1384                 spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock,
1385                                                                 irq_flags);
1386                 kbdev->pm.backend.gpu_powered = true;
1387                 spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
1388                                                                 irq_flags);
1389         }
1390
1391         /* Ensure interrupts are off to begin with; this also clears any
1392          * outstanding interrupts. */
1393         kbase_pm_disable_interrupts(kbdev);
1394         /* Ensure cache snoops are disabled before reset. */
1395         kbase_pm_cache_snoop_disable(kbdev);
1396         /* Prepare for the soft-reset */
1397         kbdev->pm.backend.reset_done = false;
1398
1399         /* The cores should be made unavailable due to the reset */
1400         spin_lock_irqsave(&kbdev->pm.power_change_lock, irq_flags);
1401         if (kbdev->shader_available_bitmap != 0u)
1402                 KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL,
1403                                 NULL, 0u, (u32)0u);
1404         if (kbdev->tiler_available_bitmap != 0u)
1405                 KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER,
1406                                 NULL, NULL, 0u, (u32)0u);
1407         kbdev->shader_available_bitmap = 0u;
1408         kbdev->tiler_available_bitmap = 0u;
1409         kbdev->l2_available_bitmap = 0u;
1410         spin_unlock_irqrestore(&kbdev->pm.power_change_lock, irq_flags);
1411
1412         /* Soft reset the GPU */
1413         if (kbdev->protected_mode_support &&
1414                         kbdev->protected_ops->protected_mode_reset)
1415                 err = kbase_pm_reset_do_protected(kbdev);
1416         else
1417                 err = kbase_pm_reset_do_normal(kbdev);
1418
1419         spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, irq_flags);
1420         if (kbdev->protected_mode)
1421                 resume_vinstr = true;
1422         kbdev->protected_mode_transition = false;
1423         kbdev->protected_mode = false;
1424         spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, irq_flags);
1425
1426         if (err)
1427                 goto exit;
1428
1429         if (flags & PM_HW_ISSUES_DETECT)
1430                 kbase_pm_hw_issues_detect(kbdev);
1431
1432         kbase_pm_hw_issues_apply(kbdev);
1433
1434         kbase_cache_set_coherency_mode(kbdev, kbdev->system_coherency);
1435
1436         /* Sanity-check that protected mode was exited by the reset */
1437         if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
1438                 u32 gpu_status = kbase_reg_read(kbdev,
1439                                 GPU_CONTROL_REG(GPU_STATUS), NULL);
1440
1441                 WARN_ON(gpu_status & GPU_STATUS_PROTECTED_MODE_ACTIVE);
1442         }
1443
1444         /* If the cycle counter was in use, re-enable it. PM_ENABLE_IRQS is
1445          * only clear when this is called from kbase_pm_powerup(). */
1446         if (kbdev->pm.backend.gpu_cycle_counter_requests &&
1447                                                 (flags & PM_ENABLE_IRQS)) {
1448                 /* enable interrupts as the L2 may have to be powered on */
1449                 kbase_pm_enable_interrupts(kbdev);
1450                 kbase_pm_request_l2_caches(kbdev);
1451
1452                 /* Re-enable the counters if we need to */
1453                 spin_lock_irqsave(
1454                         &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
1455                                                                 irq_flags);
1456                 if (kbdev->pm.backend.gpu_cycle_counter_requests)
1457                         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
1458                                         GPU_COMMAND_CYCLE_COUNT_START, NULL);
1459                 spin_unlock_irqrestore(
1460                         &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
1461                                                                 irq_flags);
1462
1463                 kbase_pm_release_l2_caches(kbdev);
1464                 kbase_pm_disable_interrupts(kbdev);
1465         }
1466
1467         if (flags & PM_ENABLE_IRQS)
1468                 kbase_pm_enable_interrupts(kbdev);
1469
1470 exit:
1471         /* If the GPU is leaving protected mode, resume vinstr operation. */
1472         if (kbdev->vinstr_ctx && resume_vinstr)
1473                 kbase_vinstr_resume(kbdev->vinstr_ctx);
1474
1475         return err;
1476 }
1477
1478 /**
1479  * kbase_pm_request_gpu_cycle_counter_do_request - Request cycle counters
1480  *
1481  * Increase the count of cycle counter users and turn the cycle counters on if
1482  * they were previously off.
1483  *
1484  * This function is designed to be called only by
1485  * kbase_pm_request_gpu_cycle_counter() or
1486  * kbase_pm_request_gpu_cycle_counter_l2_is_on().
1487  *
1488  * When this function is called the L2 cache must be on and the L2 cache
1489  * users count must have been incremented by a call to either
1490  * kbase_pm_request_l2_caches() or kbase_pm_request_l2_caches_l2_on().
1491  *
1492  * @kbdev:     The kbase device structure of the device
1493  */
1494 static void
1495 kbase_pm_request_gpu_cycle_counter_do_request(struct kbase_device *kbdev)
1496 {
1497         unsigned long flags;
1498
1499         spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
1500                                                                         flags);
1501
1502         ++kbdev->pm.backend.gpu_cycle_counter_requests;
1503
1504         if (1 == kbdev->pm.backend.gpu_cycle_counter_requests)
1505                 kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
1506                                         GPU_COMMAND_CYCLE_COUNT_START, NULL);
1507
1508         spin_unlock_irqrestore(
1509                         &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
1510                                                                         flags);
1511 }
1512
1513 void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev)
1514 {
1515         KBASE_DEBUG_ASSERT(kbdev != NULL);
1516
1517         KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
1518
1519         KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests <
1520                                                                 INT_MAX);
1521
1522         kbase_pm_request_l2_caches(kbdev);
1523
1524         kbase_pm_request_gpu_cycle_counter_do_request(kbdev);
1525 }
1526
1527 KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter);
1528
1529 void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev)
1530 {
1531         KBASE_DEBUG_ASSERT(kbdev != NULL);
1532
1533         KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
1534
1535         KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests <
1536                                                                 INT_MAX);
1537
1538         kbase_pm_request_l2_caches_l2_is_on(kbdev);
1539
1540         kbase_pm_request_gpu_cycle_counter_do_request(kbdev);
1541 }
1542
1543 KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter_l2_is_on);
1544
1545 void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev)
1546 {
1547         unsigned long flags;
1548
1549         KBASE_DEBUG_ASSERT(kbdev != NULL);
1550
1551         spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
1552                                                                         flags);
1553
1554         KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests > 0);
1555
1556         --kbdev->pm.backend.gpu_cycle_counter_requests;
1557
1558         if (0 == kbdev->pm.backend.gpu_cycle_counter_requests)
1559                 kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
1560                                         GPU_COMMAND_CYCLE_COUNT_STOP, NULL);
1561
1562         spin_unlock_irqrestore(
1563                         &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
1564                                                                         flags);
1565
1566         kbase_pm_release_l2_caches(kbdev);
1567 }
1568
1569 KBASE_EXPORT_TEST_API(kbase_pm_release_gpu_cycle_counter);
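
/*
 * Cycle counter requests are reference counted and must be balanced: the
 * counters are started on the first request and stopped when the last user
 * releases them. An illustrative usage sketch:
 *
 *     kbase_pm_request_gpu_cycle_counter(kbdev);
 *     ... read the GPU cycle count / timestamp ...
 *     kbase_pm_release_gpu_cycle_counter(kbdev);
 */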