MALI: rockchip: upgrade midgard DDK to r11p0-00rel0
drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c  [firefly-linux-kernel-4.4.55.git]
1 /*
2  *
3  * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
4  *
5  * This program is free software and is provided to you under the terms of the
6  * GNU General Public License version 2 as published by the Free Software
7  * Foundation, and any use by you of this program is subject to the terms
8  * of such GNU licence.
9  *
10  * A copy of the licence is included with the program, and can also be obtained
11  * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12  * Boston, MA  02110-1301, USA.
13  *
14  */
15
16
17
18
19
20 /*
21  * Base kernel Power Management hardware control
22  */
23
24 #include <mali_kbase.h>
25 #include <mali_kbase_config_defaults.h>
26 #include <mali_midg_regmap.h>
27 #if defined(CONFIG_MALI_GATOR_SUPPORT)
28 #include <mali_kbase_gator.h>
29 #endif
30 #include <mali_kbase_tlstream.h>
31 #include <mali_kbase_pm.h>
32 #include <mali_kbase_cache_policy.h>
33 #include <mali_kbase_config_defaults.h>
34 #include <mali_kbase_smc.h>
35 #include <mali_kbase_hwaccess_jm.h>
36 #include <backend/gpu/mali_kbase_cache_policy_backend.h>
37 #include <backend/gpu/mali_kbase_device_internal.h>
38 #include <backend/gpu/mali_kbase_irq_internal.h>
39 #include <backend/gpu/mali_kbase_pm_internal.h>
40
41 #include <linux/of.h>
42
43 #if MALI_MOCK_TEST
44 #define MOCKABLE(function) function##_original
45 #else
46 #define MOCKABLE(function) function
47 #endif                          /* MALI_MOCK_TEST */
48
49 /* Special value to indicate that the JM_CONFIG reg isn't currently used. */
50 #define KBASE_JM_CONFIG_UNUSED (1<<31)
51
52 /**
53  * enum kbasep_pm_action - Actions that can be performed on a core.
54  *
55  * This enumeration is private to the file. Its values are chosen so that
56  * core_type_to_reg(), which decodes this enumeration, can be simpler and
57  * more efficient.
58  *
59  * @ACTION_PRESENT: The cores that are present
60  * @ACTION_READY: The cores that are ready
61  * @ACTION_PWRON: Power on the cores specified
62  * @ACTION_PWROFF: Power off the cores specified
63  * @ACTION_PWRTRANS: The cores that are transitioning
64  * @ACTION_PWRACTIVE: The cores that are active
65  */
66 enum kbasep_pm_action {
67         ACTION_PRESENT = 0,
68         ACTION_READY = (SHADER_READY_LO - SHADER_PRESENT_LO),
69         ACTION_PWRON = (SHADER_PWRON_LO - SHADER_PRESENT_LO),
70         ACTION_PWROFF = (SHADER_PWROFF_LO - SHADER_PRESENT_LO),
71         ACTION_PWRTRANS = (SHADER_PWRTRANS_LO - SHADER_PRESENT_LO),
72         ACTION_PWRACTIVE = (SHADER_PWRACTIVE_LO - SHADER_PRESENT_LO)
73 };
74
75 static u64 kbase_pm_get_state(
76                 struct kbase_device *kbdev,
77                 enum kbase_pm_core_type core_type,
78                 enum kbasep_pm_action action);
79
80 /**
81  * core_type_to_reg - Decode a core type and action to a register.
82  *
83  * Given a core type (defined by kbase_pm_core_type) and an action (defined
84  * by kbasep_pm_action) this function will return the register offset that
85  * will perform the action on the core type. The register returned is the _LO
86  * register and an offset must be applied to use the _HI register.
87  *
88  * @core_type: The type of core
89  * @action:    The type of action
90  *
91  * Return: The register offset of the _LO register that performs an action of
92  * type @action on a core of type @core_type.
93  */
94 static u32 core_type_to_reg(enum kbase_pm_core_type core_type,
95                                                 enum kbasep_pm_action action)
96 {
97         return (u32)core_type + (u32)action;
98 }
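/*
 * Illustrative sketch, not built into the driver: core_type_to_reg()
 * works because the kbasep_pm_action values above are offsets from the
 * *_PRESENT_LO register and, as assumed here in line with how the rest
 * of this file uses them, the kbase_pm_core_type values are the
 * *_PRESENT_LO register offsets themselves, so the sum selects the
 * matching _LO register.
 */
#if 0
static u64 example_read_shader_ready(struct kbase_device *kbdev)
{
        /* reg == SHADER_PRESENT_LO + (SHADER_READY_LO - SHADER_PRESENT_LO)
         *     == SHADER_READY_LO */
        u32 reg = core_type_to_reg(KBASE_PM_CORE_SHADER, ACTION_READY);
        u32 lo = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg), NULL);
        u32 hi = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg + 4), NULL);

        return (((u64) hi) << 32) | ((u64) lo);
}
#endif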
99
100 #ifdef CONFIG_ARM64
101 static void mali_cci_flush_l2(struct kbase_device *kbdev)
102 {
103         const u32 mask = CLEAN_CACHES_COMPLETED | RESET_COMPLETED;
104         u32 loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
105         u32 raw;
106
107         /*
108          * Note that we don't take the cache flush mutex here since
109          * we expect to be the last user of the L2: all other L2 users
110          * must have dropped their references to initiate L2 power down,
111          * and L2 power down is the only valid place from which this
112          * function may be called.
113          */
114
115         kbase_reg_write(kbdev,
116                         GPU_CONTROL_REG(GPU_COMMAND),
117                         GPU_COMMAND_CLEAN_INV_CACHES,
118                         NULL);
119
120         raw = kbase_reg_read(kbdev,
121                 GPU_CONTROL_REG(GPU_IRQ_RAWSTAT),
122                 NULL);
123
124         /* Wait for the cache flush to complete before continuing; exit early
125          * on GPU reset or when the loop expires. */
126         while (((raw & mask) == 0) && --loops) {
127                 raw = kbase_reg_read(kbdev,
128                                         GPU_CONTROL_REG(GPU_IRQ_RAWSTAT),
129                                         NULL);
130         }
131 }
132 #endif
133
134 /**
135  * kbase_pm_invoke - Invokes an action on a core set
136  *
137  * This function performs the action given by @action on a set of cores of a
138  * type given by @core_type. It is a static function used by
139  * kbase_pm_transition_core_type()
140  *
141  * @kbdev:     The kbase device structure of the device
142  * @core_type: The type of core that the action should be performed on
143  * @cores:     A 64-bit mask of the cores to perform the action on
144  * @action:    The action to perform on the cores
145  */
146 static void kbase_pm_invoke(struct kbase_device *kbdev,
147                                         enum kbase_pm_core_type core_type,
148                                         u64 cores,
149                                         enum kbasep_pm_action action)
150 {
151         u32 reg;
152         u32 lo = cores & 0xFFFFFFFF;
153         u32 hi = (cores >> 32) & 0xFFFFFFFF;
154
155         lockdep_assert_held(&kbdev->pm.power_change_lock);
156
157         reg = core_type_to_reg(core_type, action);
158
159         KBASE_DEBUG_ASSERT(reg);
160 #if defined(CONFIG_MALI_GATOR_SUPPORT)
161         if (cores) {
162                 if (action == ACTION_PWRON)
163                         kbase_trace_mali_pm_power_on(core_type, cores);
164                 else if (action == ACTION_PWROFF)
165                         kbase_trace_mali_pm_power_off(core_type, cores);
166         }
167 #endif
168
169         if (cores) {
170                 u64 state = kbase_pm_get_state(kbdev, core_type, ACTION_READY);
171
172                 if (action == ACTION_PWRON)
173                         state |= cores;
174                 else if (action == ACTION_PWROFF)
175                         state &= ~cores;
176                 kbase_tlstream_aux_pm_state(core_type, state);
177         }
178
179         /* Tracing */
180         if (cores) {
181                 if (action == ACTION_PWRON)
182                         switch (core_type) {
183                         case KBASE_PM_CORE_SHADER:
184                                 KBASE_TRACE_ADD(kbdev, PM_PWRON, NULL, NULL, 0u,
185                                                                         lo);
186                                 break;
187                         case KBASE_PM_CORE_TILER:
188                                 KBASE_TRACE_ADD(kbdev, PM_PWRON_TILER, NULL,
189                                                                 NULL, 0u, lo);
190                                 break;
191                         case KBASE_PM_CORE_L2:
192                                 KBASE_TRACE_ADD(kbdev, PM_PWRON_L2, NULL, NULL,
193                                                                         0u, lo);
194                                 break;
195                         default:
196                                 break;
197                         }
198                 else if (action == ACTION_PWROFF)
199                         switch (core_type) {
200                         case KBASE_PM_CORE_SHADER:
201                                 KBASE_TRACE_ADD(kbdev, PM_PWROFF, NULL, NULL,
202                                                                         0u, lo);
203                                 break;
204                         case KBASE_PM_CORE_TILER:
205                                 KBASE_TRACE_ADD(kbdev, PM_PWROFF_TILER, NULL,
206                                                                 NULL, 0u, lo);
207                                 break;
208                         case KBASE_PM_CORE_L2:
209                                 KBASE_TRACE_ADD(kbdev, PM_PWROFF_L2, NULL, NULL,
210                                                                         0u, lo);
211                                 /* disable snoops before L2 is turned off */
212                                 kbase_pm_cache_snoop_disable(kbdev);
213                                 break;
214                         default:
215                                 break;
216                         }
217         }
218
219         if (lo != 0)
220                 kbase_reg_write(kbdev, GPU_CONTROL_REG(reg), lo, NULL);
221
222         if (hi != 0)
223                 kbase_reg_write(kbdev, GPU_CONTROL_REG(reg + 4), hi, NULL);
224 }
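/*
 * Usage sketch, not built into the driver: this mirrors the calls made
 * later from kbase_pm_transition_core_type(). The caller must hold
 * kbdev->pm.power_change_lock, and the 64-bit core mask is split into
 * the _LO/_HI register pair by kbase_pm_invoke() itself. The mask value
 * below is purely hypothetical.
 */
#if 0
static void example_power_on_shaders(struct kbase_device *kbdev)
{
        u64 powerup = 0x0000000300000003ull; /* hypothetical shader mask */

        /* Writes 0x00000003 to SHADER_PWRON_LO and to SHADER_PWRON_HI */
        kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER, powerup, ACTION_PWRON);
}
#endif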
225
226 /**
227  * kbase_pm_get_state - Get information about a core set
228  *
229  * This function gets information (chosen by @action) about a set of cores of
230  * a type given by @core_type. It is a static function used by
231  * kbase_pm_get_present_cores(), kbase_pm_get_active_cores(),
232  * kbase_pm_get_trans_cores() and kbase_pm_get_ready_cores().
233  *
234  * @kbdev:     The kbase device structure of the device
235  * @core_type: The type of core that should be queried
236  * @action:    The property of the cores to query
237  *
238  * Return: A bit mask specifying the state of the cores
239  */
240 static u64 kbase_pm_get_state(struct kbase_device *kbdev,
241                                         enum kbase_pm_core_type core_type,
242                                         enum kbasep_pm_action action)
243 {
244         u32 reg;
245         u32 lo, hi;
246
247         reg = core_type_to_reg(core_type, action);
248
249         KBASE_DEBUG_ASSERT(reg);
250
251         lo = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg), NULL);
252         hi = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg + 4), NULL);
253
254         return (((u64) hi) << 32) | ((u64) lo);
255 }
256
257 void kbasep_pm_read_present_cores(struct kbase_device *kbdev)
258 {
259         kbdev->shader_inuse_bitmap = 0;
260         kbdev->shader_needed_bitmap = 0;
261         kbdev->shader_available_bitmap = 0;
262         kbdev->tiler_available_bitmap = 0;
263         kbdev->l2_users_count = 0;
264         kbdev->l2_available_bitmap = 0;
265         kbdev->tiler_needed_cnt = 0;
266         kbdev->tiler_inuse_cnt = 0;
267
268         memset(kbdev->shader_needed_cnt, 0, sizeof(kbdev->shader_needed_cnt));
269 }
270
271 KBASE_EXPORT_TEST_API(kbasep_pm_read_present_cores);
272
273 /**
274  * kbase_pm_get_present_cores - Get the cores that are present
275  *
276  * @kbdev: Kbase device
277  * @type: The type of cores to query
278  *
279  * Return: Bitmask of the cores that are present
280  */
281 u64 kbase_pm_get_present_cores(struct kbase_device *kbdev,
282                                                 enum kbase_pm_core_type type)
283 {
284         KBASE_DEBUG_ASSERT(kbdev != NULL);
285
286         switch (type) {
287         case KBASE_PM_CORE_L2:
288                 return kbdev->gpu_props.props.raw_props.l2_present;
289         case KBASE_PM_CORE_SHADER:
290                 return kbdev->gpu_props.props.raw_props.shader_present;
291         case KBASE_PM_CORE_TILER:
292                 return kbdev->gpu_props.props.raw_props.tiler_present;
293         }
294         KBASE_DEBUG_ASSERT(0);
295         return 0;
296 }
297
298 KBASE_EXPORT_TEST_API(kbase_pm_get_present_cores);
299
300 /**
301  * kbase_pm_get_active_cores - Get the cores that are "active"
302  *                             (busy processing work)
303  *
304  * @kbdev: Kbase device
305  * @type: The type of cores to query
306  *
307  * Return: Bitmask of cores that are active
308  */
309 u64 kbase_pm_get_active_cores(struct kbase_device *kbdev,
310                                                 enum kbase_pm_core_type type)
311 {
312         return kbase_pm_get_state(kbdev, type, ACTION_PWRACTIVE);
313 }
314
315 KBASE_EXPORT_TEST_API(kbase_pm_get_active_cores);
316
317 /**
318  * kbase_pm_get_trans_cores - Get the cores that are transitioning between
319  *                            power states
320  *
321  * @kbdev: Kbase device
322  * @type: The type of cores to query
323  *
324  * Return: Bitmask of cores that are transitioning
325  */
326 u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev,
327                                                 enum kbase_pm_core_type type)
328 {
329         return kbase_pm_get_state(kbdev, type, ACTION_PWRTRANS);
330 }
331
332 KBASE_EXPORT_TEST_API(kbase_pm_get_trans_cores);
333
334 /**
335  * kbase_pm_get_ready_cores - Get the cores that are powered on
336  *
337  * @kbdev: Kbase device
338  * @type: The type of cores to query
339  *
340  * Return: Bitmask of cores that are ready (powered on)
341  */
342 u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev,
343                                                 enum kbase_pm_core_type type)
344 {
345         u64 result;
346
347         result = kbase_pm_get_state(kbdev, type, ACTION_READY);
348
349         switch (type) {
350         case KBASE_PM_CORE_SHADER:
351                 KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED, NULL, NULL, 0u,
352                                                                 (u32) result);
353                 break;
354         case KBASE_PM_CORE_TILER:
355                 KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_TILER, NULL, NULL, 0u,
356                                                                 (u32) result);
357                 break;
358         case KBASE_PM_CORE_L2:
359                 KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_L2, NULL, NULL, 0u,
360                                                                 (u32) result);
361                 break;
362         default:
363                 break;
364         }
365
366         return result;
367 }
368
369 KBASE_EXPORT_TEST_API(kbase_pm_get_ready_cores);
370
371 /**
372  * kbase_pm_transition_core_type - Perform power transitions for a particular
373  *                                 core type.
374  *
375  * This function will perform any available power transitions to make the
376  * actual hardware state closer to the desired state. If a core is currently
377  * transitioning then changes to the power state of that core cannot be made
378  * until the transition has finished. Cores which are not present in the
379  * hardware are ignored if they are specified in the desired_state bitmask;
380  * however, the return value will always be false in this case.
381  *
382  * @kbdev:             The kbase device
383  * @type:              The core type to perform transitions for
384  * @desired_state:     A bit mask of the desired state of the cores
385  * @in_use:            A bit mask of the cores that are currently running
386  *                     jobs. These cores have to be kept powered up because
387  *                     there are jobs running (or about to run) on them.
388  * @available:         Receives a bit mask of the cores that the job
389  *                     scheduler can use to submit jobs to. May be NULL if
390  *                     this is not needed.
391  * @powering_on:       Bit mask to update with cores that are
392  *                    transitioning to a power-on state.
393  *
394  * Return: true if the desired state has been reached, false otherwise
395  */
396 static bool kbase_pm_transition_core_type(struct kbase_device *kbdev,
397                                                 enum kbase_pm_core_type type,
398                                                 u64 desired_state,
399                                                 u64 in_use,
400                                                 u64 * const available,
401                                                 u64 *powering_on)
402 {
403         u64 present;
404         u64 ready;
405         u64 trans;
406         u64 powerup;
407         u64 powerdown;
408         u64 powering_on_trans;
409         u64 desired_state_in_use;
410
411         lockdep_assert_held(&kbdev->pm.power_change_lock);
412
413         /* Get current state */
414         present = kbase_pm_get_present_cores(kbdev, type);
415         trans = kbase_pm_get_trans_cores(kbdev, type);
416         ready = kbase_pm_get_ready_cores(kbdev, type);
417         /* mask off ready from trans in case transitions finished between the
418          * register reads */
419         trans &= ~ready;
420
421         powering_on_trans = trans & *powering_on;
422         *powering_on = powering_on_trans;
423
424         if (available != NULL)
425                 *available = (ready | powering_on_trans) & desired_state;
426
427         /* Update desired state to include the in-use cores. These have to be
428          * kept powered up because there are jobs running or about to run on
429          * these cores
430          */
431         desired_state_in_use = desired_state | in_use;
432
433         /* Update state of whether l2 caches are powered */
434         if (type == KBASE_PM_CORE_L2) {
435                 if ((ready == present) && (desired_state_in_use == ready) &&
436                                                                 (trans == 0)) {
437                         /* All are ready, none will be turned off, and none are
438                          * transitioning */
439                         kbdev->pm.backend.l2_powered = 1;
440                         /*
441                          * Ensure snoops are enabled after the L2 is powered up.
442                          * Note that kbase keeps track of the snoop state, so it
443                          * is safe to call this repeatedly.
444                          */
445                         kbase_pm_cache_snoop_enable(kbdev);
446                         if (kbdev->l2_users_count > 0) {
447                                 /* Notify any registered l2 cache users
448                                  * (optimized out when no users waiting) */
449                                 wake_up(&kbdev->pm.backend.l2_powered_wait);
450                         }
451                 } else
452                         kbdev->pm.backend.l2_powered = 0;
453         }
454
455         if (desired_state_in_use == ready && (trans == 0))
456                 return true;
457
458         /* Restrict the cores to those that are actually present */
459         powerup = desired_state_in_use & present;
460         powerdown = (~desired_state_in_use) & present;
461
462         /* Restrict to cores that are not already in the desired state */
463         powerup &= ~ready;
464         powerdown &= ready;
465
466         /* Don't transition any cores that are already transitioning, except for
467          * Mali cores that support the following case:
468          *
469          * If the SHADER_PWRON or TILER_PWRON registers are written to turn on
470          * a core that is currently transitioning to power off, then this is
471          * remembered and the shader core is automatically powered up again once
472          * the original transition completes. Once the automatic power on is
473          * complete any job scheduled on the shader core should start.
474          */
475         powerdown &= ~trans;
476
477         if (kbase_hw_has_feature(kbdev,
478                                 BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS))
479                 if (KBASE_PM_CORE_SHADER == type || KBASE_PM_CORE_TILER == type)
480                         trans = powering_on_trans; /* for exception cases, only
481                                                     * mask off cores in power on
482                                                     * transitions */
483
484         powerup &= ~trans;
485
486         /* Perform transitions if any */
487         kbase_pm_invoke(kbdev, type, powerup, ACTION_PWRON);
488         kbase_pm_invoke(kbdev, type, powerdown, ACTION_PWROFF);
489
490         /* Recalculate cores transitioning on, and re-evaluate our state */
491         powering_on_trans |= powerup;
492         *powering_on = powering_on_trans;
493         if (available != NULL)
494                 *available = (ready | powering_on_trans) & desired_state;
495
496         return false;
497 }
498
499 KBASE_EXPORT_TEST_API(kbase_pm_transition_core_type);
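/*
 * Worked example (sketch) of the bitmask logic above for a call with
 * type == KBASE_PM_CORE_SHADER, four present cores and nothing currently
 * transitioning:
 *
 *   present              = 0b1111
 *   desired_state_in_use = 0b0011   (cores 0 and 1 wanted)
 *   ready                = 0b0101   (cores 0 and 2 powered)
 *   trans                = 0b0000
 *
 *   powerup   = desired_state_in_use & present & ~ready  = 0b0010
 *   powerdown = ~desired_state_in_use & present &  ready = 0b0100
 *
 * Core 1 is powered on, core 2 is powered off, and the function returns
 * false because the desired state has not yet been reached.
 */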
500
501 /**
502  * get_desired_cache_status - Determine which caches should be on for a
503  *                            particular core state
504  *
505  * This function takes a bit mask of the present caches and a bit mask of the
506  * cores (or caches) that are attached to those caches and that should be
507  * powered. It then computes which caches must be turned on so that the
508  * requested cores can be powered up.
509  *
510  * @present:       The bit mask of present caches
511  * @cores_powered: A bit mask of cores (or L2 caches) that are desired to
512  *                 be powered
513  *
514  * Return: A bit mask of the caches that should be turned on
515  */
516 static u64 get_desired_cache_status(u64 present, u64 cores_powered)
517 {
518         u64 desired = 0;
519
520         while (present) {
521                 /* Find out which is the highest set bit */
522                 u64 bit = fls64(present) - 1;
523                 u64 bit_mask = 1ull << bit;
524
525                 /* Create a mask which has all bits from 'bit' upwards set */
526                 u64 mask = ~(bit_mask - 1);
527
528                 /* If there are any cores powered at this bit or above (that
529                  * haven't previously been processed) then we need this core on
530                  */
531                 if (cores_powered & mask)
532                         desired |= bit_mask;
533
534                 /* Remove bits from cores_powered and present */
535                 cores_powered &= ~mask;
536                 present &= ~bit_mask;
537         }
538
539         return desired;
540 }
541
542 KBASE_EXPORT_TEST_API(get_desired_cache_status);
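/*
 * Worked example (sketch): with two L2 caches present at bits 0 and 4
 * (present = 0x11) and only shader core 5 powered (cores_powered = 0x20),
 * the first iteration picks cache bit 4, finds a powered core at or
 * above it and sets bit 4 in the result; the second iteration finds no
 * remaining powered cores for cache bit 0. The function therefore
 * returns 0x10: only the cache serving cores 4 and above needs to be on.
 */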
543
544 bool
545 MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
546 {
547         bool cores_are_available = false;
548         bool in_desired_state = true;
549         u64 desired_l2_state;
550         u64 cores_powered;
551         u64 tiler_available_bitmap;
552         u64 shader_available_bitmap;
553         u64 shader_ready_bitmap;
554         u64 shader_transitioning_bitmap;
555         u64 l2_available_bitmap;
556         u64 prev_l2_available_bitmap;
557
558         KBASE_DEBUG_ASSERT(NULL != kbdev);
559         lockdep_assert_held(&kbdev->pm.power_change_lock);
560
561         spin_lock(&kbdev->pm.backend.gpu_powered_lock);
562         if (kbdev->pm.backend.gpu_powered == false) {
563                 spin_unlock(&kbdev->pm.backend.gpu_powered_lock);
564                 if (kbdev->pm.backend.desired_shader_state == 0 &&
565                                 kbdev->pm.backend.desired_tiler_state == 0)
566                         return true;
567                 return false;
568         }
569
570         /* Trace that a change-state is being requested, and that it took
571          * (effectively) no time to start it. This is useful for counting how
572          * many state changes occurred, in a way that's backwards-compatible
573          * with processing the trace data */
574         kbase_timeline_pm_send_event(kbdev,
575                                 KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE);
576         kbase_timeline_pm_handle_event(kbdev,
577                                 KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE);
578
579         /* If any cores are already powered then we must keep the caches on */
580         cores_powered = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
581
582         cores_powered |= kbdev->pm.backend.desired_shader_state;
583
584         /* If there are l2 cache users registered, keep all l2s powered even if
585          * all other cores are off. */
586         if (kbdev->l2_users_count > 0)
587                 cores_powered |= kbdev->gpu_props.props.raw_props.l2_present;
588
589         desired_l2_state = get_desired_cache_status(
590                         kbdev->gpu_props.props.raw_props.l2_present,
591                         cores_powered);
592
593         /* If any l2 cache is on, then enable l2 #0, for use by job manager */
594         if (0 != desired_l2_state) {
595                 desired_l2_state |= 1;
596                 /* Also enable tiler if l2 cache is powered */
597                 kbdev->pm.backend.desired_tiler_state =
598                         kbdev->gpu_props.props.raw_props.tiler_present;
599         } else {
600                 kbdev->pm.backend.desired_tiler_state = 0;
601         }
602
603         prev_l2_available_bitmap = kbdev->l2_available_bitmap;
604         in_desired_state &= kbase_pm_transition_core_type(kbdev,
605                                 KBASE_PM_CORE_L2, desired_l2_state, 0,
606                                 &l2_available_bitmap,
607                                 &kbdev->pm.backend.powering_on_l2_state);
608
609         if (kbdev->l2_available_bitmap != l2_available_bitmap)
610                 KBASE_TIMELINE_POWER_L2(kbdev, l2_available_bitmap);
611
612         kbdev->l2_available_bitmap = l2_available_bitmap;
613
614         if (in_desired_state) {
615                 in_desired_state &= kbase_pm_transition_core_type(kbdev,
616                                 KBASE_PM_CORE_TILER,
617                                 kbdev->pm.backend.desired_tiler_state,
618                                 0, &tiler_available_bitmap,
619                                 &kbdev->pm.backend.powering_on_tiler_state);
620                 in_desired_state &= kbase_pm_transition_core_type(kbdev,
621                                 KBASE_PM_CORE_SHADER,
622                                 kbdev->pm.backend.desired_shader_state,
623                                 kbdev->shader_inuse_bitmap,
624                                 &shader_available_bitmap,
625                                 &kbdev->pm.backend.powering_on_shader_state);
626
627                 if (kbdev->shader_available_bitmap != shader_available_bitmap) {
628                         KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL,
629                                                 NULL, 0u,
630                                                 (u32) shader_available_bitmap);
631                         KBASE_TIMELINE_POWER_SHADER(kbdev,
632                                                 shader_available_bitmap);
633                 }
634
635                 kbdev->shader_available_bitmap = shader_available_bitmap;
636
637                 if (kbdev->tiler_available_bitmap != tiler_available_bitmap) {
638                         KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER,
639                                                 NULL, NULL, 0u,
640                                                 (u32) tiler_available_bitmap);
641                         KBASE_TIMELINE_POWER_TILER(kbdev,
642                                                         tiler_available_bitmap);
643                 }
644
645                 kbdev->tiler_available_bitmap = tiler_available_bitmap;
646
647         } else if ((l2_available_bitmap &
648                         kbdev->gpu_props.props.raw_props.tiler_present) !=
649                         kbdev->gpu_props.props.raw_props.tiler_present) {
650                 tiler_available_bitmap = 0;
651
652                 if (kbdev->tiler_available_bitmap != tiler_available_bitmap)
653                         KBASE_TIMELINE_POWER_TILER(kbdev,
654                                                         tiler_available_bitmap);
655
656                 kbdev->tiler_available_bitmap = tiler_available_bitmap;
657         }
658
659         /* State updated for slow-path waiters */
660         kbdev->pm.backend.gpu_in_desired_state = in_desired_state;
661
662         shader_ready_bitmap = kbase_pm_get_ready_cores(kbdev,
663                                                         KBASE_PM_CORE_SHADER);
664         shader_transitioning_bitmap = kbase_pm_get_trans_cores(kbdev,
665                                                         KBASE_PM_CORE_SHADER);
666
667         /* Determine whether the cores are now available (even if the set of
668          * available cores is empty). Note that they can be available even if
669          * we've not finished transitioning to the desired state */
670         if ((kbdev->shader_available_bitmap &
671                                         kbdev->pm.backend.desired_shader_state)
672                                 == kbdev->pm.backend.desired_shader_state &&
673                 (kbdev->tiler_available_bitmap &
674                                         kbdev->pm.backend.desired_tiler_state)
675                                 == kbdev->pm.backend.desired_tiler_state) {
676                 cores_are_available = true;
677
678                 KBASE_TRACE_ADD(kbdev, PM_CORES_AVAILABLE, NULL, NULL, 0u,
679                                 (u32)(kbdev->shader_available_bitmap &
680                                 kbdev->pm.backend.desired_shader_state));
681                 KBASE_TRACE_ADD(kbdev, PM_CORES_AVAILABLE_TILER, NULL, NULL, 0u,
682                                 (u32)(kbdev->tiler_available_bitmap &
683                                 kbdev->pm.backend.desired_tiler_state));
684
685                 /* Log timelining information about handling events that power
686                  * up cores, to match up either with immediate submission (when
687                  * cores were already available) or with submission from PM IRQ */
688                 if (!in_desired_state)
689                         kbase_timeline_pm_send_event(kbdev,
690                                 KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
691         }
692
693         if (in_desired_state) {
694                 KBASE_DEBUG_ASSERT(cores_are_available);
695
696 #if defined(CONFIG_MALI_GATOR_SUPPORT)
697                 kbase_trace_mali_pm_status(KBASE_PM_CORE_L2,
698                                                 kbase_pm_get_ready_cores(kbdev,
699                                                         KBASE_PM_CORE_L2));
700                 kbase_trace_mali_pm_status(KBASE_PM_CORE_SHADER,
701                                                 kbase_pm_get_ready_cores(kbdev,
702                                                         KBASE_PM_CORE_SHADER));
703                 kbase_trace_mali_pm_status(KBASE_PM_CORE_TILER,
704                                                 kbase_pm_get_ready_cores(kbdev,
705                                                         KBASE_PM_CORE_TILER));
706 #endif
707
708                 kbase_tlstream_aux_pm_state(
709                                 KBASE_PM_CORE_L2,
710                                 kbase_pm_get_ready_cores(
711                                         kbdev, KBASE_PM_CORE_L2));
712                 kbase_tlstream_aux_pm_state(
713                                 KBASE_PM_CORE_SHADER,
714                                 kbase_pm_get_ready_cores(
715                                         kbdev, KBASE_PM_CORE_SHADER));
716                 kbase_tlstream_aux_pm_state(
717                                 KBASE_PM_CORE_TILER,
718                                 kbase_pm_get_ready_cores(
719                                         kbdev,
720                                         KBASE_PM_CORE_TILER));
721
722                 KBASE_TRACE_ADD(kbdev, PM_DESIRED_REACHED, NULL, NULL,
723                                 kbdev->pm.backend.gpu_in_desired_state,
724                                 (u32)kbdev->pm.backend.desired_shader_state);
725                 KBASE_TRACE_ADD(kbdev, PM_DESIRED_REACHED_TILER, NULL, NULL, 0u,
726                                 (u32)kbdev->pm.backend.desired_tiler_state);
727
728                 /* Log timelining information for synchronous waiters */
729                 kbase_timeline_pm_send_event(kbdev,
730                                 KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
731                 /* Wake slow-path waiters. Job scheduler does not use this. */
732                 KBASE_TRACE_ADD(kbdev, PM_WAKE_WAITERS, NULL, NULL, 0u, 0);
733                 wake_up(&kbdev->pm.backend.gpu_in_desired_state_wait);
734         }
735
736         spin_unlock(&kbdev->pm.backend.gpu_powered_lock);
737
738         /* kbase_pm_ca_update_core_status can cause one-level recursion into
739          * this function, so it must only be called once all changes to kbdev
740          * have been committed, and after the gpu_powered_lock has been
741          * dropped. */
742         if (kbdev->shader_ready_bitmap != shader_ready_bitmap ||
743             kbdev->shader_transitioning_bitmap != shader_transitioning_bitmap) {
744                 kbdev->shader_ready_bitmap = shader_ready_bitmap;
745                 kbdev->shader_transitioning_bitmap =
746                                                 shader_transitioning_bitmap;
747
748                 kbase_pm_ca_update_core_status(kbdev, shader_ready_bitmap,
749                                                 shader_transitioning_bitmap);
750         }
751
752         /* The core availability policy is not allowed to keep core group 0
753          * turned off (unless it was changing the l2 power state) */
754         if (!((shader_ready_bitmap | shader_transitioning_bitmap) &
755                 kbdev->gpu_props.props.coherency_info.group[0].core_mask) &&
756                 (prev_l2_available_bitmap == desired_l2_state) &&
757                 !(kbase_pm_ca_get_core_mask(kbdev) &
758                 kbdev->gpu_props.props.coherency_info.group[0].core_mask))
759                 BUG();
760
761         /* The core availability policy is allowed to keep core group 1 off,
762          * but all jobs specifically targeting CG1 must fail */
763         if (!((shader_ready_bitmap | shader_transitioning_bitmap) &
764                 kbdev->gpu_props.props.coherency_info.group[1].core_mask) &&
765                 !(kbase_pm_ca_get_core_mask(kbdev) &
766                 kbdev->gpu_props.props.coherency_info.group[1].core_mask))
767                 kbdev->pm.backend.cg1_disabled = true;
768         else
769                 kbdev->pm.backend.cg1_disabled = false;
770
771         return cores_are_available;
772 }
773 KBASE_EXPORT_TEST_API(kbase_pm_check_transitions_nolock);
774
775 /* Timeout for kbase_pm_check_transitions_sync when wait_event_killable has
776  * aborted due to a fatal signal. If the time spent waiting has exceeded this
777  * threshold then there is most likely a hardware issue. */
778 #define PM_TIMEOUT (5*HZ) /* 5s */
779
780 void kbase_pm_check_transitions_sync(struct kbase_device *kbdev)
781 {
782         unsigned long flags;
783         unsigned long timeout;
784         bool cores_are_available;
785         int ret;
786
787         /* Force the transition to be checked and reported - the cores may be
788          * 'available' (for job submission) but not fully powered up. */
789         spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
790         cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
791         /* Don't need 'cores_are_available', because we don't return anything */
792         CSTD_UNUSED(cores_are_available);
793         spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
794
795         timeout = jiffies + PM_TIMEOUT;
796
797         /* Wait for cores */
798         ret = wait_event_killable(kbdev->pm.backend.gpu_in_desired_state_wait,
799                         kbdev->pm.backend.gpu_in_desired_state);
800
801         if (ret < 0 && time_after(jiffies, timeout)) {
802                 dev_err(kbdev->dev, "Power transition timed out unexpectedly\n");
803                 dev_err(kbdev->dev, "Desired state :\n");
804                 dev_err(kbdev->dev, "\tShader=%016llx\n",
805                                 kbdev->pm.backend.desired_shader_state);
806                 dev_err(kbdev->dev, "\tTiler =%016llx\n",
807                                 kbdev->pm.backend.desired_tiler_state);
808                 dev_err(kbdev->dev, "Current state :\n");
809                 dev_err(kbdev->dev, "\tShader=%08x%08x\n",
810                                 kbase_reg_read(kbdev,
811                                         GPU_CONTROL_REG(SHADER_READY_HI), NULL),
812                                 kbase_reg_read(kbdev,
813                                         GPU_CONTROL_REG(SHADER_READY_LO),
814                                         NULL));
815                 dev_err(kbdev->dev, "\tTiler =%08x%08x\n",
816                                 kbase_reg_read(kbdev,
817                                         GPU_CONTROL_REG(TILER_READY_HI), NULL),
818                                 kbase_reg_read(kbdev,
819                                         GPU_CONTROL_REG(TILER_READY_LO), NULL));
820                 dev_err(kbdev->dev, "\tL2    =%08x%08x\n",
821                                 kbase_reg_read(kbdev,
822                                         GPU_CONTROL_REG(L2_READY_HI), NULL),
823                                 kbase_reg_read(kbdev,
824                                         GPU_CONTROL_REG(L2_READY_LO), NULL));
825                 dev_err(kbdev->dev, "Cores transitioning :\n");
826                 dev_err(kbdev->dev, "\tShader=%08x%08x\n",
827                                 kbase_reg_read(kbdev, GPU_CONTROL_REG(
828                                                 SHADER_PWRTRANS_HI), NULL),
829                                 kbase_reg_read(kbdev, GPU_CONTROL_REG(
830                                                 SHADER_PWRTRANS_LO), NULL));
831                 dev_err(kbdev->dev, "\tTiler =%08x%08x\n",
832                                 kbase_reg_read(kbdev, GPU_CONTROL_REG(
833                                                 TILER_PWRTRANS_HI), NULL),
834                                 kbase_reg_read(kbdev, GPU_CONTROL_REG(
835                                                 TILER_PWRTRANS_LO), NULL));
836                 dev_err(kbdev->dev, "\tL2    =%08x%08x\n",
837                                 kbase_reg_read(kbdev, GPU_CONTROL_REG(
838                                                 L2_PWRTRANS_HI), NULL),
839                                 kbase_reg_read(kbdev, GPU_CONTROL_REG(
840                                                 L2_PWRTRANS_LO), NULL));
841 #if KBASE_GPU_RESET_EN
842                 dev_err(kbdev->dev, "Sending reset to GPU - all running jobs will be lost\n");
843                 if (kbase_prepare_to_reset_gpu(kbdev))
844                         kbase_reset_gpu(kbdev);
845 #endif /* KBASE_GPU_RESET_EN */
846         } else {
847                 /* Log timelining information that a change in state has
848                  * completed */
849                 kbase_timeline_pm_handle_event(kbdev,
850                                 KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
851         }
852 }
853 KBASE_EXPORT_TEST_API(kbase_pm_check_transitions_sync);
854
855 void kbase_pm_enable_interrupts(struct kbase_device *kbdev)
856 {
857         unsigned long flags;
858
859         KBASE_DEBUG_ASSERT(NULL != kbdev);
860         /*
861          * Clear all interrupts,
862          * and unmask them all.
863          */
864         spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
865         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL,
866                                                                         NULL);
867         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), GPU_IRQ_REG_ALL,
868                                                                         NULL);
869         spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
870
871         kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF,
872                                                                         NULL);
873         kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0xFFFFFFFF, NULL);
874
875         kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF, NULL);
876         kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0xFFFFFFFF, NULL);
877 }
878
879 KBASE_EXPORT_TEST_API(kbase_pm_enable_interrupts);
880
881 void kbase_pm_disable_interrupts(struct kbase_device *kbdev)
882 {
883         unsigned long flags;
884
885         KBASE_DEBUG_ASSERT(NULL != kbdev);
886         /*
887          * Mask all interrupts,
888          * and clear them all.
889          */
890         spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
891         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), 0, NULL);
892         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL,
893                                                                         NULL);
894         spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
895
896         kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0, NULL);
897         kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF,
898                                                                         NULL);
899
900         kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL);
901         kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF, NULL);
902 }
903
904 KBASE_EXPORT_TEST_API(kbase_pm_disable_interrupts);
905
906 /*
907  * pmu layout:
908  * 0x0000: PMU TAG (RO) (0xCAFECAFE)
909  * 0x0004: PMU VERSION ID (RO) (0x00000000)
910  * 0x0008: CLOCK ENABLE (RW) (31:1 SBZ, 0 CLOCK STATE)
911  */
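/*
 * Illustrative sketch only, not part of this driver: a hypothetical
 * platform callback_power_on() could drive a PMU with the layout above
 * by setting bit 0 of the CLOCK ENABLE register, e.g. with a pmu_base
 * pointer obtained from a platform-specific ioremap():
 *
 *   writel(1, pmu_base + 0x0008);      set CLOCK STATE
 *   (void)readl(pmu_base + 0x0000);    PMU TAG, expected 0xCAFECAFE
 */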
912 void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
913 {
914         bool reset_required = is_resume;
915         struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
916         unsigned long flags;
917         int i;
918
919         KBASE_DEBUG_ASSERT(NULL != kbdev);
920         lockdep_assert_held(&js_devdata->runpool_mutex);
921         lockdep_assert_held(&kbdev->pm.lock);
922
923         if (kbdev->pm.backend.gpu_powered) {
924                 /* Already turned on */
925                 if (kbdev->poweroff_pending)
926                         kbase_pm_enable_interrupts(kbdev);
927                 kbdev->poweroff_pending = false;
928                 KBASE_DEBUG_ASSERT(!is_resume);
929                 return;
930         }
931
932         kbdev->poweroff_pending = false;
933
934         KBASE_TRACE_ADD(kbdev, PM_GPU_ON, NULL, NULL, 0u, 0u);
935
936         if (is_resume && kbdev->pm.backend.callback_power_resume) {
937                 kbdev->pm.backend.callback_power_resume(kbdev);
938                 return;
939         } else if (kbdev->pm.backend.callback_power_on) {
940                 kbdev->pm.backend.callback_power_on(kbdev);
941                 /* If your platform properly keeps the GPU state you may use the
942                  * return value of the callback_power_on function to
943                  * conditionally reset the GPU on power up. Currently we are
944                  * conservative and always reset the GPU. */
945                 reset_required = true;
946         }
947
948         spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
949         kbdev->pm.backend.gpu_powered = true;
950         spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
951
952         if (reset_required) {
953                 /* GPU state was lost, reset GPU to ensure it is in a
954                  * consistent state */
955                 kbase_pm_init_hw(kbdev, PM_ENABLE_IRQS);
956         }
957
958         /* Reprogram the GPU's MMU */
959         for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
960                 struct kbase_as *as = &kbdev->as[i];
961
962                 mutex_lock(&as->transaction_mutex);
963                 spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
964
965                 if (js_devdata->runpool_irq.per_as_data[i].kctx)
966                         kbase_mmu_update(
967                                 js_devdata->runpool_irq.per_as_data[i].kctx);
968                 else
969                         kbase_mmu_disable_as(kbdev, i);
970
971                 spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
972                 mutex_unlock(&as->transaction_mutex);
973         }
974
975         /* Lastly, enable the interrupts */
976         kbase_pm_enable_interrupts(kbdev);
977 }
978
979 KBASE_EXPORT_TEST_API(kbase_pm_clock_on);
980
981 bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend)
982 {
983         unsigned long flags;
984
985         KBASE_DEBUG_ASSERT(NULL != kbdev);
986         lockdep_assert_held(&kbdev->pm.lock);
987
988         /* ASSERT that the cores should now be unavailable. No lock needed. */
989         KBASE_DEBUG_ASSERT(kbdev->shader_available_bitmap == 0u);
990
991         kbdev->poweroff_pending = true;
992
993         if (!kbdev->pm.backend.gpu_powered) {
994                 /* Already turned off */
995                 if (is_suspend && kbdev->pm.backend.callback_power_suspend)
996                         kbdev->pm.backend.callback_power_suspend(kbdev);
997                 return true;
998         }
999
1000         KBASE_TRACE_ADD(kbdev, PM_GPU_OFF, NULL, NULL, 0u, 0u);
1001
1002         /* Disable interrupts. This also clears any outstanding interrupts */
1003         kbase_pm_disable_interrupts(kbdev);
1004         /* Ensure that any IRQ handlers have finished */
1005         kbase_synchronize_irqs(kbdev);
1006
1007         spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
1008
1009         if (atomic_read(&kbdev->faults_pending)) {
1010                 /* Page/bus faults are still being processed. The GPU cannot
1011                  * be powered off until they have completed */
1012                 spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
1013                                                                         flags);
1014                 return false;
1015         }
1016
1017         kbase_pm_cache_snoop_disable(kbdev);
1018
1019         /* The GPU power may be turned off from this point */
1020         kbdev->pm.backend.gpu_powered = false;
1021         spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
1022
1023         if (is_suspend && kbdev->pm.backend.callback_power_suspend)
1024                 kbdev->pm.backend.callback_power_suspend(kbdev);
1025         else if (kbdev->pm.backend.callback_power_off)
1026                 kbdev->pm.backend.callback_power_off(kbdev);
1027         return true;
1028 }
1029
1030 KBASE_EXPORT_TEST_API(kbase_pm_clock_off);
1031
1032 struct kbasep_reset_timeout_data {
1033         struct hrtimer timer;
1034         bool timed_out;
1035         struct kbase_device *kbdev;
1036 };
1037
1038 void kbase_pm_reset_done(struct kbase_device *kbdev)
1039 {
1040         KBASE_DEBUG_ASSERT(kbdev != NULL);
1041         kbdev->pm.backend.reset_done = true;
1042         wake_up(&kbdev->pm.backend.reset_done_wait);
1043 }
1044
1045 /**
1046  * kbase_pm_wait_for_reset - Wait for a reset to happen
1047  *
1048  * Wait for the %RESET_COMPLETED IRQ to occur, then reset the waiting state.
1049  *
1050  * @kbdev: Kbase device
1051  */
1052 static void kbase_pm_wait_for_reset(struct kbase_device *kbdev)
1053 {
1054         lockdep_assert_held(&kbdev->pm.lock);
1055
1056         wait_event(kbdev->pm.backend.reset_done_wait,
1057                                                 (kbdev->pm.backend.reset_done));
1058         kbdev->pm.backend.reset_done = false;
1059 }
1060
1061 KBASE_EXPORT_TEST_API(kbase_pm_reset_done);
1062
1063 static enum hrtimer_restart kbasep_reset_timeout(struct hrtimer *timer)
1064 {
1065         struct kbasep_reset_timeout_data *rtdata =
1066                 container_of(timer, struct kbasep_reset_timeout_data, timer);
1067
1068         rtdata->timed_out = 1;
1069
1070         /* Set the wait queue to wake up kbase_pm_init_hw even though the reset
1071          * hasn't completed */
1072         kbase_pm_reset_done(rtdata->kbdev);
1073
1074         return HRTIMER_NORESTART;
1075 }
1076
1077 static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
1078 {
1079         struct device_node *np = kbdev->dev->of_node;
1080         u32 jm_values[4];
1081         const u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
1082         const u32 prod_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
1083                 GPU_ID_VERSION_PRODUCT_ID_SHIFT;
1084         const u32 major = (gpu_id & GPU_ID_VERSION_MAJOR) >>
1085                 GPU_ID_VERSION_MAJOR_SHIFT;
1086
1087         kbdev->hw_quirks_sc = 0;
1088
1089         /* Needed due to MIDBASE-1494: LS_PAUSEBUFFER_DISABLE (see PRLAM-8443)
1090          * and due to MIDGLES-3539 (see PRLAM-11035). */
1091         if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8443) ||
1092                         kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11035))
1093                 kbdev->hw_quirks_sc |= SC_LS_PAUSEBUFFER_DISABLE;
1094
1095         /* Needed due to MIDBASE-2054: SDC_DISABLE_OQ_DISCARD. See PRLAM-10327.
1096          */
1097         if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10327))
1098                 kbdev->hw_quirks_sc |= SC_SDC_DISABLE_OQ_DISCARD;
1099
1100 #ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY
1101         /* Enable alternative hardware counter selection if configured. */
1102         if (!GPU_ID_IS_NEW_FORMAT(prod_id))
1103                 kbdev->hw_quirks_sc |= SC_ALT_COUNTERS;
1104 #endif
1105
1106         /* Needed due to MIDBASE-2795. ENABLE_TEXGRD_FLAGS. See PRLAM-10797. */
1107         if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10797))
1108                 kbdev->hw_quirks_sc |= SC_ENABLE_TEXGRD_FLAGS;
1109
1110         if (!kbase_hw_has_issue(kbdev, GPUCORE_1619)) {
1111                 if (prod_id < 0x760 || prod_id == 0x6956) /* T60x, T62x, T72x */
1112                         kbdev->hw_quirks_sc |= SC_LS_ATTR_CHECK_DISABLE;
1113                 else if (prod_id >= 0x760 && prod_id <= 0x880) /* T76x, T8xx */
1114                         kbdev->hw_quirks_sc |= SC_LS_ALLOW_ATTR_TYPES;
1115         }
1116
1117         kbdev->hw_quirks_tiler = kbase_reg_read(kbdev,
1118                         GPU_CONTROL_REG(TILER_CONFIG), NULL);
1119
1120         /* Set tiler clock gate override if required */
1121         if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3953))
1122                 kbdev->hw_quirks_tiler |= TC_CLOCK_GATE_OVERRIDE;
1123
1124         /* Limit the GPU bus bandwidth if the platform needs this. */
1125         kbdev->hw_quirks_mmu = kbase_reg_read(kbdev,
1126                         GPU_CONTROL_REG(L2_MMU_CONFIG), NULL);
1127
1128         /* Limit read ID width for AXI */
1129         kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_READS);
1130         kbdev->hw_quirks_mmu |= (DEFAULT_ARID_LIMIT & 0x3) <<
1131                                 L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT;
1132
1133         /* Limit write ID width for AXI */
1134         kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES);
1135         kbdev->hw_quirks_mmu |= (DEFAULT_AWID_LIMIT & 0x3) <<
1136                                 L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT;
1137
1138         if (kbdev->system_coherency == COHERENCY_ACE) {
1139                 /* Allow memory configuration disparity to be ignored, we
1140                  * optimize the use of shared memory and thus we expect
1141                  * some disparity in the memory configuration */
1142                 kbdev->hw_quirks_mmu |= L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY;
1143         }
1144
1145         /* Only for T86x/T88x-based products from r2p0 onwards */
1146         if (prod_id >= 0x860 && prod_id <= 0x880 && major >= 2) {
1147                 /* The JM_CONFIG register layout is specified in the
1148                  * T86x/T88x Engineering Specification Supplement. The
1149                  * values below are read from the device tree in this
1150                  * order. */
1151 #define TIMESTAMP_OVERRIDE  1
1152 #define CLOCK_GATE_OVERRIDE (1<<1)
1153 #define JOB_THROTTLE_ENABLE (1<<2)
1154 #define JOB_THROTTLE_LIMIT_SHIFT 3
1155
1156                 /* 6 bits in the register */
1157                 const u32 jm_max_limit = 0x3F;
1158
1159                 if (of_property_read_u32_array(np,
1160                                         "jm_config",
1161                                         &jm_values[0],
1162                                         ARRAY_SIZE(jm_values))) {
1163                         /* Entry not in device tree, use defaults  */
1164                         jm_values[0] = 0;
1165                         jm_values[1] = 0;
1166                         jm_values[2] = 0;
1167                         jm_values[3] = jm_max_limit; /* Max value */
1168                 }
1169
1170                 /* Clamp the throttle limit to 6 bits */
1171                 if (jm_values[3] > jm_max_limit) {
1172                         dev_dbg(kbdev->dev, "JOB_THROTTLE_LIMIT supplied in device tree is too large. Limiting to MAX (63).");
1173                         jm_values[3] = jm_max_limit;
1174                 }
1175
1176                 /* Aggregate to one integer. */
1177                 kbdev->hw_quirks_jm = (jm_values[0] ? TIMESTAMP_OVERRIDE : 0);
1178                 kbdev->hw_quirks_jm |= (jm_values[1] ? CLOCK_GATE_OVERRIDE : 0);
1179                 kbdev->hw_quirks_jm |= (jm_values[2] ? JOB_THROTTLE_ENABLE : 0);
1180                 kbdev->hw_quirks_jm |= (jm_values[3] <<
1181                                 JOB_THROTTLE_LIMIT_SHIFT);
1182         } else {
1183                 kbdev->hw_quirks_jm = KBASE_JM_CONFIG_UNUSED;
1184         }
1185
1186
1187 }
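/*
 * Illustrative sketch (node name and values are hypothetical): the
 * optional "jm_config" device tree property read above supplies four
 * cells, in order TIMESTAMP_OVERRIDE, CLOCK_GATE_OVERRIDE,
 * JOB_THROTTLE_ENABLE and the job throttle limit (0-63):
 *
 *   gpu@ffa30000 {
 *           ...
 *           jm_config = <0 0 1 16>;
 *   };
 *
 * With these values hw_quirks_jm becomes
 * JOB_THROTTLE_ENABLE | (16 << JOB_THROTTLE_LIMIT_SHIFT).
 */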
1188
1189 static void kbase_pm_hw_issues_apply(struct kbase_device *kbdev)
1190 {
1191         if (kbdev->hw_quirks_sc)
1192                 kbase_reg_write(kbdev, GPU_CONTROL_REG(SHADER_CONFIG),
1193                                 kbdev->hw_quirks_sc, NULL);
1194
1195         kbase_reg_write(kbdev, GPU_CONTROL_REG(TILER_CONFIG),
1196                         kbdev->hw_quirks_tiler, NULL);
1197
1198         kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG),
1199                         kbdev->hw_quirks_mmu, NULL);
1200
1201
1202         if (kbdev->hw_quirks_jm != KBASE_JM_CONFIG_UNUSED)
1203                 kbase_reg_write(kbdev, GPU_CONTROL_REG(JM_CONFIG),
1204                                 kbdev->hw_quirks_jm, NULL);
1205
1206 }
1207
1208 void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev)
1209 {
1210         if ((kbdev->system_coherency == COHERENCY_ACE) &&
1211                 !kbdev->cci_snoop_enabled) {
1212 #ifdef CONFIG_ARM64
1213                 if (kbdev->snoop_enable_smc != 0)
1214                         kbase_invoke_smc_fid(kbdev->snoop_enable_smc, 0, 0, 0);
1215 #endif /* CONFIG_ARM64 */
1216                 dev_dbg(kbdev->dev, "MALI - CCI Snoops - Enabled\n");
1217                 kbdev->cci_snoop_enabled = true;
1218         }
1219 }
1220
1221 void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev)
1222 {
1223         if ((kbdev->system_coherency == COHERENCY_ACE) &&
1224                 kbdev->cci_snoop_enabled) {
1225 #ifdef CONFIG_ARM64
1226                 if (kbdev->snoop_disable_smc != 0) {
1227                         mali_cci_flush_l2(kbdev);
1228                         kbase_invoke_smc_fid(kbdev->snoop_disable_smc, 0, 0, 0);
1229                 }
1230 #endif /* CONFIG_ARM64 */
1231                 dev_dbg(kbdev->dev, "MALI - CCI Snoops Disabled\n");
1232                 kbdev->cci_snoop_enabled = false;
1233         }
1234 }
1235
1236 int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
1237 {
1238         unsigned long irq_flags;
1239         struct kbasep_reset_timeout_data rtdata;
1240
1241         KBASE_DEBUG_ASSERT(NULL != kbdev);
1242         lockdep_assert_held(&kbdev->pm.lock);
1243
1244         /* Ensure the clock is on before attempting to access the hardware */
1245         if (!kbdev->pm.backend.gpu_powered) {
1246                 if (kbdev->pm.backend.callback_power_on)
1247                         kbdev->pm.backend.callback_power_on(kbdev);
1248
1249                 spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock,
1250                                                                 irq_flags);
1251                 kbdev->pm.backend.gpu_powered = true;
1252                 spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
1253                                                                 irq_flags);
1254         }
1255
1256         /* Ensure interrupts are off to begin with; this also clears any
1257          * outstanding interrupts */
1258         kbase_pm_disable_interrupts(kbdev);
1259         /* Ensure cache snoops are disabled before reset. */
1260         kbase_pm_cache_snoop_disable(kbdev);
1261         /* Prepare for the soft-reset */
1262         kbdev->pm.backend.reset_done = false;
1263
1264         /* The cores should be made unavailable due to the reset */
1265         spin_lock_irqsave(&kbdev->pm.power_change_lock, irq_flags);
1266         if (kbdev->shader_available_bitmap != 0u)
1267                 KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL,
1268                                 NULL, 0u, (u32)0u);
1269         if (kbdev->tiler_available_bitmap != 0u)
1270                 KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER,
1271                                 NULL, NULL, 0u, (u32)0u);
1272         kbdev->shader_available_bitmap = 0u;
1273         kbdev->tiler_available_bitmap = 0u;
1274         kbdev->l2_available_bitmap = 0u;
1275         spin_unlock_irqrestore(&kbdev->pm.power_change_lock, irq_flags);
1276
1277         /* Soft reset the GPU */
1278         KBASE_TRACE_ADD(kbdev, CORE_GPU_SOFT_RESET, NULL, NULL, 0u, 0);
1279
1280         kbase_tlstream_jd_gpu_soft_reset(kbdev);
1281
1282         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
1283                                                 GPU_COMMAND_SOFT_RESET, NULL);
1284
1285         /* Unmask the reset complete interrupt only */
1286         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), RESET_COMPLETED,
1287                                                                         NULL);
1288
1289         /* Initialize a structure for tracking the status of the reset */
1290         rtdata.kbdev = kbdev;
1291         rtdata.timed_out = 0;
1292
1293         /* Create a timer to use as a timeout on the reset */
1294         hrtimer_init_on_stack(&rtdata.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1295         rtdata.timer.function = kbasep_reset_timeout;
1296
1297         hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT),
1298                                                         HRTIMER_MODE_REL);
1299
1300         /* Wait for the RESET_COMPLETED interrupt to be raised */
1301         kbase_pm_wait_for_reset(kbdev);
1302
1303         if (rtdata.timed_out == 0) {
1304                 /* GPU has been reset */
1305                 hrtimer_cancel(&rtdata.timer);
1306                 destroy_hrtimer_on_stack(&rtdata.timer);
1307                 goto out;
1308         }
1309
1310         /* No interrupt has been received - check if the RAWSTAT register says
1311          * the reset has completed */
1312         if (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL) &
1313                                                         RESET_COMPLETED) {
1314                 /* The interrupt is set in the RAWSTAT; this suggests that the
1315                  * interrupts are not getting to the CPU */
1316                 dev_err(kbdev->dev, "Reset interrupt didn't reach CPU. Check interrupt assignments.\n");
1317                 /* If interrupts aren't working we can't continue. */
1318                 destroy_hrtimer_on_stack(&rtdata.timer);
1319                 return -EINVAL;
1320         }
1321
1322         /* The GPU doesn't seem to be responding to the reset, so try a hard
1323          * reset */
1324         dev_err(kbdev->dev, "Failed to soft-reset GPU (timed out after %d ms), now attempting a hard reset\n",
1325                                                                 RESET_TIMEOUT);
1326         KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
1327         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
1328                                                 GPU_COMMAND_HARD_RESET, NULL);
1329
1330         /* Restart the timer to wait for the hard reset to complete */
1331         rtdata.timed_out = 0;
1332
1333         hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT),
1334                                                         HRTIMER_MODE_REL);
1335
1336         /* Wait for the RESET_COMPLETED interrupt to be raised */
1337         kbase_pm_wait_for_reset(kbdev);
1338
1339         if (rtdata.timed_out == 0) {
1340                 /* GPU has been reset */
1341                 hrtimer_cancel(&rtdata.timer);
1342                 destroy_hrtimer_on_stack(&rtdata.timer);
1343                 goto out;
1344         }
1345
1346         destroy_hrtimer_on_stack(&rtdata.timer);
1347
1348         dev_err(kbdev->dev, "Failed to hard-reset the GPU (timed out after %d ms)\n",
1349                                                                 RESET_TIMEOUT);
1350
1351         /* The GPU still hasn't reset, give up */
1352         return -EINVAL;
1353
1354 out:
1355
1356         if (flags & PM_HW_ISSUES_DETECT)
1357                 kbase_pm_hw_issues_detect(kbdev);
1358
1359         kbase_pm_hw_issues_apply(kbdev);
1360
1361         kbase_cache_set_coherency_mode(kbdev, kbdev->system_coherency);
1362
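        /*
         * On GPUs with the protected mode feature, sample GPU_STATUS so the
         * driver's idea of whether protected (secure) mode is active matches
         * the hardware state after the reset.
         */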
1363         if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
1364                 u32 gpu_status = kbase_reg_read(kbdev,
1365                                 GPU_CONTROL_REG(GPU_STATUS), NULL);
1366
1367                 kbdev->secure_mode = (gpu_status &
1368                                 GPU_STATUS_PROTECTED_MODE_ACTIVE) != 0;
1369         }
1370
1371         /* If the cycle counter was in use, re-enable it. PM_ENABLE_IRQS will
1372          * only be clear in flags when called from kbase_pm_powerup() */
1373         if (kbdev->pm.backend.gpu_cycle_counter_requests &&
1374                                                 (flags & PM_ENABLE_IRQS)) {
1375                 /* enable interrupts as the L2 may have to be powered on */
1376                 kbase_pm_enable_interrupts(kbdev);
1377                 kbase_pm_request_l2_caches(kbdev);
1378
1379                 /* Re-enable the counters if we need to */
1380                 spin_lock_irqsave(
1381                         &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
1382                                                                 irq_flags);
1383                 if (kbdev->pm.backend.gpu_cycle_counter_requests)
1384                         kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
1385                                         GPU_COMMAND_CYCLE_COUNT_START, NULL);
1386                 spin_unlock_irqrestore(
1387                         &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
1388                                                                 irq_flags);
1389
1390                 kbase_pm_release_l2_caches(kbdev);
1391                 kbase_pm_disable_interrupts(kbdev);
1392         }
1393
1394         if (flags & PM_ENABLE_IRQS)
1395                 kbase_pm_enable_interrupts(kbdev);
1396
1397         return 0;
1398 }
1399
1400 /**
1401  * kbase_pm_request_gpu_cycle_counter_do_request - Request cycle counters
1402  *
1403  * Increase the count of cycle counter users and turn the cycle counters on if
1404  * they were previously off.
1405  *
1406  * This function is designed to be called only by
1407  * kbase_pm_request_gpu_cycle_counter() or
1408  * kbase_pm_request_gpu_cycle_counter_l2_is_on().
1409  *
1410  * When this function is called, the L2 cache must be on and the L2 cache
1411  * user count must have been incremented by a call to either
1412  * kbase_pm_request_l2_caches() or kbase_pm_request_l2_caches_l2_is_on().
1413  *
1414  * @kbdev:     The kbase device structure of the device
1415  */
1416 static void
1417 kbase_pm_request_gpu_cycle_counter_do_request(struct kbase_device *kbdev)
1418 {
1419         unsigned long flags;
1420
1421         spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
1422                                                                         flags);
1423
1424         ++kbdev->pm.backend.gpu_cycle_counter_requests;
1425
1426         if (1 == kbdev->pm.backend.gpu_cycle_counter_requests)
1427                 kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
1428                                         GPU_COMMAND_CYCLE_COUNT_START, NULL);
1429
1430         spin_unlock_irqrestore(
1431                         &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
1432                                                                         flags);
1433 }
1434
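/*
 * Usage sketch (illustrative, not lifted from a real caller): cycle counter
 * requests are reference counted, so every successful request must be
 * balanced by a release; the counters are only started on the first request
 * and stopped again when the last user releases them.
 *
 *     kbase_pm_request_gpu_cycle_counter(kbdev);
 *     ... sample the GPU cycle count / timestamp registers ...
 *     kbase_pm_release_gpu_cycle_counter(kbdev);
 */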
1435 void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev)
1436 {
1437         KBASE_DEBUG_ASSERT(kbdev != NULL);
1438
1439         KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
1440
1441         KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests <
1442                                                                 INT_MAX);
1443
1444         kbase_pm_request_l2_caches(kbdev);
1445
1446         kbase_pm_request_gpu_cycle_counter_do_request(kbdev);
1447 }
1448
1449 KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter);
1450
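/*
 * Variant of the above for callers that already know the L2 cache is
 * powered: it takes its L2 reference through
 * kbase_pm_request_l2_caches_l2_is_on() instead of
 * kbase_pm_request_l2_caches().
 */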
1451 void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev)
1452 {
1453         KBASE_DEBUG_ASSERT(kbdev != NULL);
1454
1455         KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
1456
1457         KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests <
1458                                                                 INT_MAX);
1459
1460         kbase_pm_request_l2_caches_l2_is_on(kbdev);
1461
1462         kbase_pm_request_gpu_cycle_counter_do_request(kbdev);
1463 }
1464
1465 KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter_l2_is_on);
1466
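/*
 * Releasing a cycle counter reference: the counters are stopped once the
 * request count drops back to zero, and the L2 cache reference taken by the
 * corresponding request call is dropped afterwards.
 */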
1467 void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev)
1468 {
1469         unsigned long flags;
1470
1471         KBASE_DEBUG_ASSERT(kbdev != NULL);
1472
1473         spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
1474                                                                         flags);
1475
1476         KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests > 0);
1477
1478         --kbdev->pm.backend.gpu_cycle_counter_requests;
1479
1480         if (0 == kbdev->pm.backend.gpu_cycle_counter_requests)
1481                 kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
1482                                         GPU_COMMAND_CYCLE_COUNT_STOP, NULL);
1483
1484         spin_unlock_irqrestore(
1485                         &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
1486                                                                         flags);
1487
1488         kbase_pm_release_l2_caches(kbdev);
1489 }
1490
1491 KBASE_EXPORT_TEST_API(kbase_pm_release_gpu_cycle_counter);