/*
 *
 * (C) COPYRIGHT ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */
/**
 * @file mali_kbase_pm_driver.c
 * Base kernel Power Management hardware control
 */
#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_gator.h>
#include <mali_kbase_pm.h>
#include <mali_kbase_config_defaults.h>
#if MALI_MOCK_TEST
#define MOCKABLE(function) function##_original
#else
#define MOCKABLE(function) function
#endif /* MALI_MOCK_TEST */
/** Actions that can be performed on a core.
 *
 * This enumeration is private to the file. Its values are set to allow the @ref core_type_to_reg function,
 * which decodes this enumeration, to be simpler and more efficient.
 */
typedef enum kbasep_pm_action {
	ACTION_PRESENT = 0,
	ACTION_READY = (SHADER_READY_LO - SHADER_PRESENT_LO),
	ACTION_PWRON = (SHADER_PWRON_LO - SHADER_PRESENT_LO),
	ACTION_PWROFF = (SHADER_PWROFF_LO - SHADER_PRESENT_LO),
	ACTION_PWRTRANS = (SHADER_PWRTRANS_LO - SHADER_PRESENT_LO),
	ACTION_PWRACTIVE = (SHADER_PWRACTIVE_LO - SHADER_PRESENT_LO)
} kbasep_pm_action;
/** Decode a core type and action to a register.
 *
 * Given a core type (defined by @ref kbase_pm_core_type) and an action (defined by @ref kbasep_pm_action) this
 * function will return the register offset that will perform the action on the core type. The register returned is
 * the \c _LO register and an offset must be applied to use the \c _HI register.
 *
 * @param core_type The type of core
 * @param action    The type of action
 *
 * @return The register offset of the \c _LO register that performs an action of type \c action on a core of type \c
 * core_type
 */
static u32 core_type_to_reg(kbase_pm_core_type core_type, kbasep_pm_action action)
{
	return core_type + action;
}
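
/*
 * Worked example (a sketch, not part of the original source): the values of
 * kbase_pm_core_type are assumed to be the corresponding *_PRESENT_LO register
 * offsets, which is what makes the single addition above sufficient:
 *
 *	core_type_to_reg(KBASE_PM_CORE_SHADER, ACTION_READY)
 *		== SHADER_PRESENT_LO + (SHADER_READY_LO - SHADER_PRESENT_LO)
 *		== SHADER_READY_LO
 *
 * The matching _HI register is then always at the returned offset + 4.
 */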
/** Invokes an action on a core set
 *
 * This function performs the action given by \c action on a set of cores of a type given by \c core_type. It is a
 * static function used by @ref kbase_pm_transition_core_type
 *
 * @param kbdev     The kbase device structure of the device
 * @param core_type The type of core that the action should be performed on
 * @param cores     A bit mask of the cores to perform the action on
 * @param action    The action to perform on the cores
 */
STATIC void kbase_pm_invoke(kbase_device *kbdev, kbase_pm_core_type core_type, u64 cores, kbasep_pm_action action)
{
	u32 reg;
	u32 lo = cores & 0xFFFFFFFF;
	u32 hi = (cores >> 32) & 0xFFFFFFFF;

	lockdep_assert_held(&kbdev->pm.power_change_lock);

	reg = core_type_to_reg(core_type, action);

	KBASE_DEBUG_ASSERT(reg);

#ifdef CONFIG_MALI_GATOR_SUPPORT
	if (cores) {
		if (action == ACTION_PWRON)
			kbase_trace_mali_pm_power_on(core_type, cores);
		else if (action == ACTION_PWROFF)
			kbase_trace_mali_pm_power_off(core_type, cores);
	}
#endif /* CONFIG_MALI_GATOR_SUPPORT */

	/* Tracing */
	if (cores) {
		if (action == ACTION_PWRON)
			switch (core_type) {
			case KBASE_PM_CORE_SHADER:
				KBASE_TRACE_ADD(kbdev, PM_PWRON, NULL, NULL, 0u, lo);
				break;
			case KBASE_PM_CORE_TILER:
				KBASE_TRACE_ADD(kbdev, PM_PWRON_TILER, NULL, NULL, 0u, lo);
				break;
			case KBASE_PM_CORE_L2:
				KBASE_TRACE_ADD(kbdev, PM_PWRON_L2, NULL, NULL, 0u, lo);
				break;
			default:
				break;
			}
		else if (action == ACTION_PWROFF)
			switch (core_type) {
			case KBASE_PM_CORE_SHADER:
				KBASE_TRACE_ADD(kbdev, PM_PWROFF, NULL, NULL, 0u, lo);
				break;
			case KBASE_PM_CORE_TILER:
				KBASE_TRACE_ADD(kbdev, PM_PWROFF_TILER, NULL, NULL, 0u, lo);
				break;
			case KBASE_PM_CORE_L2:
				KBASE_TRACE_ADD(kbdev, PM_PWROFF_L2, NULL, NULL, 0u, lo);
				break;
			default:
				break;
			}
	}

	if (lo != 0)
		kbase_reg_write(kbdev, GPU_CONTROL_REG(reg), lo, NULL);

	if (hi != 0)
		kbase_reg_write(kbdev, GPU_CONTROL_REG(reg + 4), hi, NULL);
}
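
/*
 * Usage sketch (hypothetical mask, not from the original source): a caller
 * holding power_change_lock can begin powering on shader cores 0-3 with
 *
 *	kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER, 0xFull, ACTION_PWRON);
 *
 * which writes 0xF to SHADER_PWRON_LO. The cores then report in
 * SHADER_PWRTRANS while transitioning and appear in SHADER_READY once powered.
 */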
/** Get information about a core set
 *
 * This function gets information (chosen by \c action) about a set of cores of a type given by \c core_type. It is a
 * static function used by @ref kbase_pm_get_present_cores, @ref kbase_pm_get_active_cores, @ref
 * kbase_pm_get_trans_cores and @ref kbase_pm_get_ready_cores.
 *
 * @param kbdev     The kbase device structure of the device
 * @param core_type The type of core that should be queried
 * @param action    The property of the cores to query
 *
 * @return A bit mask specifying the state of the cores
 */
static u64 kbase_pm_get_state(kbase_device *kbdev, kbase_pm_core_type core_type, kbasep_pm_action action)
{
	u32 reg;
	u32 lo, hi;

	reg = core_type_to_reg(core_type, action);

	KBASE_DEBUG_ASSERT(reg);

	lo = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg), NULL);
	hi = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg + 4), NULL);

	return (((u64) hi) << 32) | ((u64) lo);
}
void kbasep_pm_read_present_cores(kbase_device *kbdev)
{
	kbdev->shader_present_bitmap = kbase_pm_get_state(kbdev, KBASE_PM_CORE_SHADER, ACTION_PRESENT);
	kbdev->tiler_present_bitmap = kbase_pm_get_state(kbdev, KBASE_PM_CORE_TILER, ACTION_PRESENT);
	kbdev->l2_present_bitmap = kbase_pm_get_state(kbdev, KBASE_PM_CORE_L2, ACTION_PRESENT);
	kbdev->l3_present_bitmap = kbase_pm_get_state(kbdev, KBASE_PM_CORE_L3, ACTION_PRESENT);

	kbdev->shader_inuse_bitmap = 0;
	kbdev->shader_needed_bitmap = 0;
	kbdev->shader_available_bitmap = 0;
	kbdev->tiler_available_bitmap = 0;
	kbdev->l2_users_count = 0;
	kbdev->l2_available_bitmap = 0;
	kbdev->tiler_needed_cnt = 0;
	kbdev->tiler_inuse_cnt = 0;

	memset(kbdev->shader_needed_cnt, 0, sizeof(kbdev->shader_needed_cnt));
}

KBASE_EXPORT_TEST_API(kbasep_pm_read_present_cores)
/** Get the cores that are present
 */
u64 kbase_pm_get_present_cores(kbase_device *kbdev, kbase_pm_core_type type)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	switch (type) {
	case KBASE_PM_CORE_L3:
		return kbdev->l3_present_bitmap;
	case KBASE_PM_CORE_L2:
		return kbdev->l2_present_bitmap;
	case KBASE_PM_CORE_SHADER:
		return kbdev->shader_present_bitmap;
	case KBASE_PM_CORE_TILER:
		return kbdev->tiler_present_bitmap;
	}
	KBASE_DEBUG_ASSERT(0);
	return 0;
}

KBASE_EXPORT_TEST_API(kbase_pm_get_present_cores)
/** Get the cores that are "active" (busy processing work)
 */
u64 kbase_pm_get_active_cores(kbase_device *kbdev, kbase_pm_core_type type)
{
	return kbase_pm_get_state(kbdev, type, ACTION_PWRACTIVE);
}

KBASE_EXPORT_TEST_API(kbase_pm_get_active_cores)
/** Get the cores that are transitioning between power states
 */
u64 kbase_pm_get_trans_cores(kbase_device *kbdev, kbase_pm_core_type type)
{
	return kbase_pm_get_state(kbdev, type, ACTION_PWRTRANS);
}

KBASE_EXPORT_TEST_API(kbase_pm_get_trans_cores)
/** Get the cores that are powered on
 */
u64 kbase_pm_get_ready_cores(kbase_device *kbdev, kbase_pm_core_type type)
{
	u64 result;

	result = kbase_pm_get_state(kbdev, type, ACTION_READY);

	switch (type) {
	case KBASE_PM_CORE_SHADER:
		KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED, NULL, NULL, 0u, (u32) result);
		break;
	case KBASE_PM_CORE_TILER:
		KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_TILER, NULL, NULL, 0u, (u32) result);
		break;
	case KBASE_PM_CORE_L2:
		KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_L2, NULL, NULL, 0u, (u32) result);
		break;
	default:
		break; /* NB: L3 not currently traced */
	}

	return result;
}

KBASE_EXPORT_TEST_API(kbase_pm_get_ready_cores)
/** Perform power transitions for a particular core type.
 *
 * This function will perform any available power transitions to make the actual hardware state closer to the desired
 * state. If a core is currently transitioning then changes to the power state of that core cannot be made until the
 * transition has finished. Cores which are not present in the hardware are ignored if they are specified in the
 * desired_state bitmask, however the return value will always be 0 in this case.
 *
 * @param kbdev         The kbase device
 * @param type          The core type to perform transitions for
 * @param desired_state A bit mask of the desired state of the cores
 * @param in_use        A bit mask of the cores that are currently running jobs.
 *                      These cores have to be kept powered up because there are jobs
 *                      running (or about to run) on them.
 * @param[out] available Receives a bit mask of the cores that the job scheduler can use to submit jobs to.
 *                       May be NULL if this is not needed.
 * @param[in,out] powering_on Bit mask to update with cores that are transitioning to a power-on state.
 *
 * @return MALI_TRUE if the desired state has been reached, MALI_FALSE otherwise
 */
STATIC mali_bool kbase_pm_transition_core_type(kbase_device *kbdev, kbase_pm_core_type type, u64 desired_state,
								u64 in_use, u64 * const available, u64 *powering_on)
{
	u64 present;
	u64 trans;
	u64 ready;
	u64 powerup;
	u64 powerdown;
	u64 powering_on_trans;
	u64 desired_state_in_use;

	lockdep_assert_held(&kbdev->pm.power_change_lock);

	/* Get current state */
	present = kbase_pm_get_present_cores(kbdev, type);
	trans = kbase_pm_get_trans_cores(kbdev, type);
	ready = kbase_pm_get_ready_cores(kbdev, type);

	powering_on_trans = trans & *powering_on;
	*powering_on = powering_on_trans;
	if (available != NULL)
		*available = (ready | powering_on_trans) & desired_state;

	/* Update desired state to include the in-use cores. These have to be kept powered up because there are jobs
	 * running or about to run on these cores
	 */
	desired_state_in_use = desired_state | in_use;

	/* Update state of whether l2 caches are powered */
	if (type == KBASE_PM_CORE_L2) {
		if ((ready == present) && (desired_state_in_use == ready) && (trans == 0)) {
			/* All are ready, none will be turned off, and none are transitioning */
			kbdev->pm.l2_powered = 1;
			if (kbdev->l2_users_count > 0) {
				/* Notify any registered l2 cache users (optimized out when no users waiting) */
				wake_up(&kbdev->pm.l2_powered_wait);
			}
		} else {
			kbdev->pm.l2_powered = 0;
		}
	}

	if (desired_state_in_use == ready && (trans == 0))
		return MALI_TRUE;

	/* Restrict the cores to those that are actually present */
	powerup = desired_state_in_use & present;
	powerdown = (~desired_state_in_use) & present;
	/* Restrict to cores that are not already in the desired state */
	powerup &= ~ready;
	powerdown &= ready;

	/* Don't transition any cores that are already transitioning, except for
	 * Mali cores that support the following case:
	 *
	 * If the SHADER_PWRON or TILER_PWRON registers are written to turn on
	 * a core that is currently transitioning to power off, then this is
	 * remembered and the shader core is automatically powered up again once
	 * the original transition completes. Once the automatic power on is
	 * complete any job scheduled on the shader core should start.
	 */
	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS))
		if (KBASE_PM_CORE_SHADER == type || KBASE_PM_CORE_TILER == type)
			trans = powering_on_trans; /* for exception cases, only mask off cores in power-on transitions */

	powerup &= ~trans;
	powerdown &= ~trans;

	/* Perform transitions if any */
	kbase_pm_invoke(kbdev, type, powerup, ACTION_PWRON);
	kbase_pm_invoke(kbdev, type, powerdown, ACTION_PWROFF);
	/* Recalculate cores transitioning on, and re-evaluate our state */
	powering_on_trans |= powerup;
	*powering_on = powering_on_trans;
	if (available != NULL)
		*available = (ready | powering_on_trans) & desired_state;

	return MALI_FALSE;
}

KBASE_EXPORT_TEST_API(kbase_pm_transition_core_type)
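
/*
 * Worked example (hypothetical masks, not from the original source): with
 * present = 0b1111, ready = 0b0110, trans = 0, in_use = 0 and
 * desired_state = 0b0011:
 *
 *	powerup   = (0b0011 & present) & ~ready = 0b0001  -> PWRON for core 0
 *	powerdown = (~0b0011 & present) & ready = 0b0100  -> PWROFF for core 2
 *
 * Core 1 is already ready and core 3 is already off, so neither is written.
 * This call returns MALI_FALSE; a later call that observes
 * desired_state_in_use == ready with nothing transitioning returns MALI_TRUE.
 */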
/** Determine which caches should be on for a particular core state.
 *
 * This function takes a bit mask of the present caches and the cores (or caches) that are attached to the caches that
 * will be powered. It then computes which caches should be turned on to allow the cores requested to be powered up.
 *
 * @param present       The bit mask of present caches
 * @param cores_powered A bit mask of cores (or L2 caches) that are desired to be powered
 *
 * @return A bit mask of the caches that should be turned on
 */
STATIC u64 get_desired_cache_status(u64 present, u64 cores_powered)
{
	u64 desired = 0;

	while (present) {
		/* Find out which is the highest set bit */
		u64 bit = fls64(present) - 1;
		u64 bit_mask = 1ull << bit;
		/* Create a mask which has all bits from 'bit' upwards set */
		u64 mask = ~(bit_mask - 1);

		/* If there are any cores powered at this bit or above (that haven't previously been processed) then we need
		 * this core on */
		if (cores_powered & mask)
			desired |= bit_mask;

		/* Remove bits from cores_powered and present */
		cores_powered &= ~mask;
		present &= ~bit_mask;
	}

	return desired;
}

KBASE_EXPORT_TEST_API(get_desired_cache_status)
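
/*
 * Worked example (a sketch, not from the original source): two L2 caches,
 * present = 0b11, with only cores above L2 #1 powered, cores_powered = 0b10:
 *
 *	pass 1: bit = 1, bit_mask = 0b10, mask = ~0b01
 *		cores_powered & mask != 0  -> desired |= 0b10
 *		cores_powered -> 0, present -> 0b01
 *	pass 2: bit = 0, bit_mask = 0b01, mask = ~0
 *		cores_powered & mask == 0  -> L2 #0 left off
 *
 * Result: desired = 0b10, so only the cache that feeds powered cores is
 * requested (before the caller separately forces L2 #0 on for the job
 * manager, as kbase_pm_check_transitions_nolock does below).
 */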
mali_bool MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
{
	mali_bool cores_are_available = MALI_FALSE;
	mali_bool in_desired_state = MALI_TRUE;
	u64 desired_l2_state;
	u64 desired_l3_state;
	u64 cores_powered;
	u64 tiler_available_bitmap;
	u64 shader_available_bitmap;
	u64 shader_ready_bitmap;
	u64 shader_transitioning_bitmap;
	u64 l2_available_bitmap;

	KBASE_DEBUG_ASSERT(NULL != kbdev);
	lockdep_assert_held(&kbdev->pm.power_change_lock);
	spin_lock(&kbdev->pm.gpu_powered_lock);
	if (kbdev->pm.gpu_powered == MALI_FALSE) {
		spin_unlock(&kbdev->pm.gpu_powered_lock);
		if (kbdev->pm.desired_shader_state == 0 && kbdev->pm.desired_tiler_state == 0)
			return MALI_TRUE;
		return MALI_FALSE;
	}

	/* Trace that a change-state is being requested, and that it took
	 * (effectively) no time to start it. This is useful for counting how many
	 * state changes occurred, in a way that's backwards-compatible with
	 * processing the trace data */
	kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE);
	kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE);
	/* If any cores are already powered then we must keep the caches on */
	cores_powered = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);

	cores_powered |= kbdev->pm.desired_shader_state;

	/* If there are l2 cache users registered, keep all l2s powered even if all other cores are off. */
	if (kbdev->l2_users_count > 0)
		cores_powered |= kbdev->l2_present_bitmap;

	desired_l2_state = get_desired_cache_status(kbdev->l2_present_bitmap, cores_powered);

	/* If any l2 cache is on, then enable l2 #0, for use by job manager */
	if (0 != desired_l2_state) {
		desired_l2_state |= 1;
		/* Also enable tiler if l2 cache is powered */
		kbdev->pm.desired_tiler_state = kbdev->tiler_present_bitmap;
	} else {
		kbdev->pm.desired_tiler_state = 0;
	}

	desired_l3_state = get_desired_cache_status(kbdev->l3_present_bitmap, desired_l2_state);
	in_desired_state &= kbase_pm_transition_core_type(kbdev, KBASE_PM_CORE_L3, desired_l3_state, 0, NULL, &kbdev->pm.powering_on_l3_state);
	in_desired_state &= kbase_pm_transition_core_type(kbdev, KBASE_PM_CORE_L2, desired_l2_state, 0, &l2_available_bitmap, &kbdev->pm.powering_on_l2_state);

	if (kbdev->l2_available_bitmap != l2_available_bitmap)
		KBASE_TIMELINE_POWER_L2(kbdev, l2_available_bitmap);

	kbdev->l2_available_bitmap = l2_available_bitmap;
	if (in_desired_state) {
		in_desired_state &= kbase_pm_transition_core_type(kbdev, KBASE_PM_CORE_TILER, kbdev->pm.desired_tiler_state, 0, &tiler_available_bitmap, &kbdev->pm.powering_on_tiler_state);
		in_desired_state &= kbase_pm_transition_core_type(kbdev, KBASE_PM_CORE_SHADER, kbdev->pm.desired_shader_state, kbdev->shader_inuse_bitmap, &shader_available_bitmap, &kbdev->pm.powering_on_shader_state);

		if (kbdev->shader_available_bitmap != shader_available_bitmap) {
			KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL, NULL, 0u, (u32) shader_available_bitmap);
			KBASE_TIMELINE_POWER_SHADER(kbdev, shader_available_bitmap);
		}

		kbdev->shader_available_bitmap = shader_available_bitmap;

		if (kbdev->tiler_available_bitmap != tiler_available_bitmap) {
			KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER, NULL, NULL, 0u, (u32) tiler_available_bitmap);
			KBASE_TIMELINE_POWER_TILER(kbdev, tiler_available_bitmap);
		}

		kbdev->tiler_available_bitmap = tiler_available_bitmap;
	} else if ((l2_available_bitmap & kbdev->tiler_present_bitmap) != kbdev->tiler_present_bitmap) {
		tiler_available_bitmap = 0;

		if (kbdev->tiler_available_bitmap != tiler_available_bitmap)
			KBASE_TIMELINE_POWER_TILER(kbdev, tiler_available_bitmap);

		kbdev->tiler_available_bitmap = tiler_available_bitmap;
	}
	/* State updated for slow-path waiters */
	kbdev->pm.gpu_in_desired_state = in_desired_state;

	shader_ready_bitmap = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
	shader_transitioning_bitmap = kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_SHADER);

	/* Determine whether the cores are now available (even if the set of
	 * available cores is empty). Note that they can be available even if we've
	 * not finished transitioning to the desired state */
	if ((kbdev->shader_available_bitmap & kbdev->pm.desired_shader_state) == kbdev->pm.desired_shader_state &&
	    (kbdev->tiler_available_bitmap & kbdev->pm.desired_tiler_state) == kbdev->pm.desired_tiler_state) {
		cores_are_available = MALI_TRUE;

		KBASE_TRACE_ADD(kbdev, PM_CORES_AVAILABLE, NULL, NULL, 0u, (u32)(kbdev->shader_available_bitmap & kbdev->pm.desired_shader_state));
		KBASE_TRACE_ADD(kbdev, PM_CORES_AVAILABLE_TILER, NULL, NULL, 0u, (u32)(kbdev->tiler_available_bitmap & kbdev->pm.desired_tiler_state));

		/* Log timelining information about handling events that power up
		 * cores, to match up with either immediate submission (when cores are
		 * already available) or the PM IRQ */
		if (!in_desired_state)
			kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
	}
	if (in_desired_state) {
		KBASE_DEBUG_ASSERT(cores_are_available);

#ifdef CONFIG_MALI_GATOR_SUPPORT
		kbase_trace_mali_pm_status(KBASE_PM_CORE_L3, kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L3));
		kbase_trace_mali_pm_status(KBASE_PM_CORE_L2, kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2));
		kbase_trace_mali_pm_status(KBASE_PM_CORE_SHADER, kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER));
		kbase_trace_mali_pm_status(KBASE_PM_CORE_TILER, kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_TILER));
#endif /* CONFIG_MALI_GATOR_SUPPORT */

		KBASE_TRACE_ADD(kbdev, PM_DESIRED_REACHED, NULL, NULL, kbdev->pm.gpu_in_desired_state, (u32)kbdev->pm.desired_shader_state);
		KBASE_TRACE_ADD(kbdev, PM_DESIRED_REACHED_TILER, NULL, NULL, 0u, (u32)kbdev->pm.desired_tiler_state);

		/* Log timelining information for synchronous waiters */
		kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
		/* Wake slow-path waiters. Job scheduler does not use this. */
		KBASE_TRACE_ADD(kbdev, PM_WAKE_WAITERS, NULL, NULL, 0u, 0);
		wake_up(&kbdev->pm.gpu_in_desired_state_wait);
	}
	spin_unlock(&kbdev->pm.gpu_powered_lock);

	/* kbase_pm_ca_update_core_status can cause one-level recursion into
	 * this function, so it must only be called once all changes to kbdev
	 * have been committed, and after the gpu_powered_lock has been
	 * released. */
	if (kbdev->shader_ready_bitmap != shader_ready_bitmap ||
	    kbdev->shader_transitioning_bitmap != shader_transitioning_bitmap) {
		kbdev->shader_ready_bitmap = shader_ready_bitmap;
		kbdev->shader_transitioning_bitmap = shader_transitioning_bitmap;

		kbase_pm_ca_update_core_status(kbdev, shader_ready_bitmap, shader_transitioning_bitmap);
	}
	/* The core availability policy is not allowed to keep core group 0 off */
	if (!((shader_ready_bitmap | shader_transitioning_bitmap) & kbdev->gpu_props.props.coherency_info.group[0].core_mask) &&
	    !(kbase_pm_ca_get_core_mask(kbdev) & kbdev->gpu_props.props.coherency_info.group[0].core_mask))
		BUG();

	/* The core availability policy is allowed to keep core group 1 off,
	 * but all jobs specifically targeting CG1 must fail */
	if (!((shader_ready_bitmap | shader_transitioning_bitmap) & kbdev->gpu_props.props.coherency_info.group[1].core_mask) &&
	    !(kbase_pm_ca_get_core_mask(kbdev) & kbdev->gpu_props.props.coherency_info.group[1].core_mask))
		kbdev->pm.cg1_disabled = MALI_TRUE;
	else
		kbdev->pm.cg1_disabled = MALI_FALSE;

	return cores_are_available;
}

KBASE_EXPORT_TEST_API(kbase_pm_check_transitions_nolock)
void kbase_pm_check_transitions_sync(struct kbase_device *kbdev)
{
	unsigned long flags;
	mali_bool cores_are_available;

	/* Force the transition to be checked and reported - the cores may be
	 * 'available' (for job submission) but not fully powered up. */
	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
	cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
	/* Don't need 'cores_are_available', because we don't return anything */
	CSTD_UNUSED(cores_are_available);
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	wait_event(kbdev->pm.gpu_in_desired_state_wait, kbdev->pm.gpu_in_desired_state);

	/* Log timelining information that a change in state has completed */
	kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
}

KBASE_EXPORT_TEST_API(kbase_pm_check_transitions_sync)
void kbase_pm_enable_interrupts(kbase_device *kbdev)
{
	unsigned long flags;

	KBASE_DEBUG_ASSERT(NULL != kbdev);
	/*
	 * Clear all interrupts,
	 * and unmask them all.
	 */
	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL, NULL);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), GPU_IRQ_REG_ALL, NULL);
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF, NULL);
	kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0xFFFFFFFF, NULL);

	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF, NULL);
	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0xFFFFFFFF, NULL);
}

KBASE_EXPORT_TEST_API(kbase_pm_enable_interrupts)
void kbase_pm_disable_interrupts(kbase_device *kbdev)
{
	unsigned long flags;

	KBASE_DEBUG_ASSERT(NULL != kbdev);
	/*
	 * Mask all interrupts,
	 * and clear them all.
	 */
	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), 0, NULL);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL, NULL);
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0, NULL);
	kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF, NULL);

	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL);
	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF, NULL);
}

KBASE_EXPORT_TEST_API(kbase_pm_disable_interrupts)
/*
 * pmu layout:
 * 0x0000: PMU TAG (RO) (0xCAFECAFE)
 * 0x0004: PMU VERSION ID (RO) (0x00000000)
 * 0x0008: CLOCK ENABLE (RW) (31:1 SBZ, 0 CLOCK STATE)
 */
void kbase_pm_clock_on(kbase_device *kbdev, mali_bool is_resume)
{
	mali_bool reset_required = is_resume;
	unsigned long flags;

	KBASE_DEBUG_ASSERT(NULL != kbdev);
	lockdep_assert_held(&kbdev->pm.lock);

	if (kbdev->pm.gpu_powered) {
		/* Already turned on */
		KBASE_DEBUG_ASSERT(!is_resume);
		return;
	}

	KBASE_TRACE_ADD(kbdev, PM_GPU_ON, NULL, NULL, 0u, 0u);

	if (is_resume && kbdev->pm.callback_power_resume) {
		kbdev->pm.callback_power_resume(kbdev);
	} else if (kbdev->pm.callback_power_on) {
		if (kbdev->pm.callback_power_on(kbdev))
			reset_required = MALI_TRUE;
	}

	spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
	kbdev->pm.gpu_powered = MALI_TRUE;
	spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);

	if (reset_required) {
		/* GPU state was lost, reset GPU to ensure it is in a
		 * consistent state */
		kbase_pm_init_hw(kbdev, MALI_TRUE);
	}

	/* Lastly, enable the interrupts */
	kbase_pm_enable_interrupts(kbdev);
}

KBASE_EXPORT_TEST_API(kbase_pm_clock_on)
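
/*
 * Illustration (hypothetical platform glue, not part of this driver): the pmu
 * layout documented above suggests how a platform's callback_power_on might
 * gate the GPU clock, assuming the PMU block has been ioremap()ed to pmu_base:
 *
 *	static int example_power_on_callback(kbase_device *kbdev)
 *	{
 *		writel(1, pmu_base + 0x0008);	// CLOCK ENABLE: bit 0 = clock state
 *		return 1;			// state was lost: ask for a GPU reset
 *	}
 *
 * A non-zero return makes kbase_pm_clock_on() treat the GPU state as lost and
 * reset it via kbase_pm_init_hw(), as the code above shows.
 */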
void kbase_pm_clock_off(kbase_device *kbdev, mali_bool is_suspend)
{
	unsigned long flags;

	KBASE_DEBUG_ASSERT(NULL != kbdev);
	lockdep_assert_held(&kbdev->pm.lock);

	/* ASSERT that the cores should now be unavailable. No lock needed. */
	KBASE_DEBUG_ASSERT(kbdev->shader_available_bitmap == 0u);

	if (!kbdev->pm.gpu_powered) {
		/* Already turned off */
		if (is_suspend && kbdev->pm.callback_power_suspend)
			kbdev->pm.callback_power_suspend(kbdev);
		return;
	}

	KBASE_TRACE_ADD(kbdev, PM_GPU_OFF, NULL, NULL, 0u, 0u);

	/* Disable interrupts. This also clears any outstanding interrupts */
	kbase_pm_disable_interrupts(kbdev);
	/* Ensure that any IRQ handlers have finished */
	kbase_synchronize_irqs(kbdev);

	/* The GPU power may be turned off from this point */
	spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
	kbdev->pm.gpu_powered = MALI_FALSE;
	spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);

	if (is_suspend && kbdev->pm.callback_power_suspend)
		kbdev->pm.callback_power_suspend(kbdev);
	else if (kbdev->pm.callback_power_off)
		kbdev->pm.callback_power_off(kbdev);
}

KBASE_EXPORT_TEST_API(kbase_pm_clock_off)
struct kbasep_reset_timeout_data {
	struct hrtimer timer;
	mali_bool timed_out;
	kbase_device *kbdev;
};

void kbase_pm_reset_done(kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);
	kbdev->pm.reset_done = MALI_TRUE;
	wake_up(&kbdev->pm.reset_done_wait);
}
/**
 * Wait for the RESET_COMPLETED IRQ to occur, then reset the waiting state.
 */
STATIC void kbase_pm_wait_for_reset(kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->pm.lock);

	wait_event(kbdev->pm.reset_done_wait, (kbdev->pm.reset_done));
	kbdev->pm.reset_done = MALI_FALSE;
}

KBASE_EXPORT_TEST_API(kbase_pm_reset_done)
static enum hrtimer_restart kbasep_reset_timeout(struct hrtimer *timer)
{
	struct kbasep_reset_timeout_data *rtdata = container_of(timer, struct kbasep_reset_timeout_data, timer);

	rtdata->timed_out = 1;

	/* Set the wait queue to wake up kbase_pm_init_hw even though the reset hasn't completed */
	kbase_pm_reset_done(rtdata->kbdev);

	return HRTIMER_NORESTART;
}
static void kbase_pm_hw_issues(kbase_device *kbdev)
{
	u32 value = 0;
	u32 config_value;

	/* Needed due to MIDBASE-1494: LS_PAUSEBUFFER_DISABLE. See PRLAM-8443. */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8443))
		value |= SC_LS_PAUSEBUFFER_DISABLE;

	/* Needed due to MIDBASE-2054: SDC_DISABLE_OQ_DISCARD. See PRLAM-10327. */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10327))
		value |= SC_SDC_DISABLE_OQ_DISCARD;

	/* Enable alternative hardware counter selection if configured. */
	if (DEFAULT_ALTERNATIVE_HWC)
		value |= SC_ALT_COUNTERS;

	/* Use software control of forward pixel kill when needed. See MIDEUR-174. */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_2121))
		value |= SC_OVERRIDE_FWD_PIXEL_KILL;

	/* Needed due to MIDBASE-2795. ENABLE_TEXGRD_FLAGS. See PRLAM-10797. */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10797))
		value |= SC_ENABLE_TEXGRD_FLAGS;

	if (value != 0)
		kbase_reg_write(kbdev, GPU_CONTROL_REG(SHADER_CONFIG), value, NULL);

	/* Limit the GPU bus bandwidth if the platform needs this. */
	value = kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG), NULL);

	/* Limit read ID width for AXI */
	config_value = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_ARID_LIMIT);
	value &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_READS);
	value |= (config_value & 0x3) << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT;

	/* Limit write ID width for AXI */
	config_value = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_AWID_LIMIT);
	value &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES);
	value |= (config_value & 0x3) << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT;

	kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG), value, NULL);
}
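
/*
 * Worked example (hypothetical attribute value, not from the original source):
 * if the platform sets KBASE_CONFIG_ATTR_ARID_LIMIT to 0x1, then
 *
 *	value &= ~L2_MMU_CONFIG_LIMIT_EXTERNAL_READS;
 *	value |= (0x1 & 0x3) << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT;
 *
 * clears the old 2-bit read-ID field and writes 0x1 into it, leaving every
 * other bit of L2_MMU_CONFIG untouched; the write-ID field is handled the
 * same way.
 */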
mali_error kbase_pm_init_hw(kbase_device *kbdev, mali_bool enable_irqs)
{
	unsigned long flags;
	struct kbasep_reset_timeout_data rtdata;

	KBASE_DEBUG_ASSERT(NULL != kbdev);
	lockdep_assert_held(&kbdev->pm.lock);

	/* Ensure the clock is on before attempting to access the hardware */
	if (!kbdev->pm.gpu_powered) {
		if (kbdev->pm.callback_power_on)
			kbdev->pm.callback_power_on(kbdev);

		spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
		kbdev->pm.gpu_powered = MALI_TRUE;
		spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
	}
	/* Ensure interrupts are off to begin with, this also clears any outstanding interrupts */
	kbase_pm_disable_interrupts(kbdev);

	/* Prepare for the soft-reset */
	kbdev->pm.reset_done = MALI_FALSE;

	/* The cores should be made unavailable due to the reset */
	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
	if (kbdev->shader_available_bitmap != 0u)
		KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL, NULL, 0u, (u32)0u);
	if (kbdev->tiler_available_bitmap != 0u)
		KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER, NULL, NULL, 0u, (u32)0u);
	kbdev->shader_available_bitmap = 0u;
	kbdev->tiler_available_bitmap = 0u;
	kbdev->l2_available_bitmap = 0u;
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
	/* Soft reset the GPU */
	KBASE_TRACE_ADD(kbdev, CORE_GPU_SOFT_RESET, NULL, NULL, 0u, 0);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_SOFT_RESET, NULL);

	/* Unmask the reset complete interrupt only */
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), RESET_COMPLETED, NULL);

	/* Initialize a structure for tracking the status of the reset */
	rtdata.kbdev = kbdev;
	rtdata.timed_out = 0;

	/* Create a timer to use as a timeout on the reset */
	hrtimer_init_on_stack(&rtdata.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rtdata.timer.function = kbasep_reset_timeout;

	hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT), HRTIMER_MODE_REL);

	/* Wait for the RESET_COMPLETED interrupt to be raised */
	kbase_pm_wait_for_reset(kbdev);
	if (rtdata.timed_out == 0) {
		/* GPU has been reset */
		hrtimer_cancel(&rtdata.timer);
		destroy_hrtimer_on_stack(&rtdata.timer);
		goto out;
	}

	/* No interrupt has been received - check if the RAWSTAT register says the reset has completed */
	if (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL) & RESET_COMPLETED) {
		/* The interrupt is set in the RAWSTAT; this suggests that the interrupts are not getting to the CPU */
		dev_warn(kbdev->dev, "Reset interrupt didn't reach CPU. Check interrupt assignments.\n");
		/* If interrupts aren't working we can't continue. */
		destroy_hrtimer_on_stack(&rtdata.timer);
		return MALI_ERROR_FUNCTION_FAILED;
	}

	/* The GPU doesn't seem to be responding to the reset so try a hard reset */
	dev_err(kbdev->dev, "Failed to soft-reset GPU (timed out after %d ms), now attempting a hard reset\n", RESET_TIMEOUT);
	KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET, NULL);

	/* Restart the timer to wait for the hard reset to complete */
	rtdata.timed_out = 0;

	hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT), HRTIMER_MODE_REL);

	/* Wait for the RESET_COMPLETED interrupt to be raised */
	kbase_pm_wait_for_reset(kbdev);

	if (rtdata.timed_out == 0) {
		/* GPU has been reset */
		hrtimer_cancel(&rtdata.timer);
		destroy_hrtimer_on_stack(&rtdata.timer);
		goto out;
	}

	destroy_hrtimer_on_stack(&rtdata.timer);

	dev_err(kbdev->dev, "Failed to hard-reset the GPU (timed out after %d ms)\n", RESET_TIMEOUT);

	/* The GPU still hasn't reset, give up */
	return MALI_ERROR_FUNCTION_FAILED;
 out:
	/* Re-enable interrupts if requested */
	if (enable_irqs)
		kbase_pm_enable_interrupts(kbdev);

	/* If the cycle counter was in use, re-enable it */
	spin_lock_irqsave(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);

	if (kbdev->pm.gpu_cycle_counter_requests)
		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_CYCLE_COUNT_START, NULL);

	spin_unlock_irqrestore(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);

	kbase_pm_hw_issues(kbdev);

	return MALI_ERROR_NONE;
}

KBASE_EXPORT_TEST_API(kbase_pm_init_hw)
void kbase_pm_request_gpu_cycle_counter(kbase_device *kbdev)
{
	unsigned long flags;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	KBASE_DEBUG_ASSERT(kbdev->pm.gpu_powered);

	spin_lock_irqsave(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);

	KBASE_DEBUG_ASSERT(kbdev->pm.gpu_cycle_counter_requests < INT_MAX);

	++kbdev->pm.gpu_cycle_counter_requests;

	if (1 == kbdev->pm.gpu_cycle_counter_requests)
		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_CYCLE_COUNT_START, NULL);

	spin_unlock_irqrestore(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
}

KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter)
void kbase_pm_release_gpu_cycle_counter(kbase_device *kbdev)
{
	unsigned long flags;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	spin_lock_irqsave(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);

	KBASE_DEBUG_ASSERT(kbdev->pm.gpu_cycle_counter_requests > 0);

	--kbdev->pm.gpu_cycle_counter_requests;

	if (0 == kbdev->pm.gpu_cycle_counter_requests)
		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_CYCLE_COUNT_STOP, NULL);

	spin_unlock_irqrestore(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
}

KBASE_EXPORT_TEST_API(kbase_pm_release_gpu_cycle_counter)
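
/*
 * Usage sketch (hypothetical caller, not from the original source): the
 * request/release pair is reference counted, so the counter is started on the
 * first request, stopped on the last release, and nested users are safe:
 *
 *	kbase_pm_request_gpu_cycle_counter(kbdev);
 *	... read the GPU cycle count / timestamp registers ...
 *	kbase_pm_release_gpu_cycle_counter(kbdev);
 *
 * The GPU must already be powered when requesting, as asserted above.
 */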