/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/*
 * Power management API definitions used internally by GPU backend
 */
24 #ifndef _KBASE_BACKEND_PM_INTERNAL_H_
25 #define _KBASE_BACKEND_PM_INTERNAL_H_
27 #include <mali_kbase_hwaccess_pm.h>
29 #include "mali_kbase_pm_ca.h"
30 #include "mali_kbase_pm_policy.h"
/**
 * kbase_pm_dev_idle - The GPU is idle.
 *
 * The OS may choose to turn off idle devices
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_dev_idle(struct kbase_device *kbdev);

/**
 * kbase_pm_dev_activate - The GPU is active.
 *
 * The OS should avoid opportunistically turning off the GPU while it is active
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_dev_activate(struct kbase_device *kbdev);
52 * kbase_pm_get_present_cores - Get details of the cores that are present in
55 * This function can be called by the active power policy to return a bitmask of
56 * the cores (of a specified type) present in the GPU device and also a count of
57 * the number of cores.
59 * @kbdev: The kbase device structure for the device (must be a valid
61 * @type: The type of core (see the enum kbase_pm_core_type enumeration)
63 * Return: The bit mask of cores present
65 u64 kbase_pm_get_present_cores(struct kbase_device *kbdev,
66 enum kbase_pm_core_type type);
69 * kbase_pm_get_active_cores - Get details of the cores that are currently
70 * active in the device.
72 * This function can be called by the active power policy to return a bitmask of
73 * the cores (of a specified type) that are actively processing work (i.e.
74 * turned on *and* busy).
76 * @kbdev: The kbase device structure for the device (must be a valid pointer)
77 * @type: The type of core (see the enum kbase_pm_core_type enumeration)
79 * Return: The bit mask of active cores
81 u64 kbase_pm_get_active_cores(struct kbase_device *kbdev,
82 enum kbase_pm_core_type type);
85 * kbase_pm_get_trans_cores - Get details of the cores that are currently
86 * transitioning between power states.
88 * This function can be called by the active power policy to return a bitmask of
89 * the cores (of a specified type) that are currently transitioning between
92 * @kbdev: The kbase device structure for the device (must be a valid pointer)
93 * @type: The type of core (see the enum kbase_pm_core_type enumeration)
95 * Return: The bit mask of transitioning cores
97 u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev,
98 enum kbase_pm_core_type type);
101 * kbase_pm_get_ready_cores - Get details of the cores that are currently
102 * powered and ready for jobs.
104 * This function can be called by the active power policy to return a bitmask of
105 * the cores (of a specified type) that are powered and ready for jobs (they may
106 * or may not be currently executing jobs).
108 * @kbdev: The kbase device structure for the device (must be a valid pointer)
109 * @type: The type of core (see the enum kbase_pm_core_type enumeration)
111 * Return: The bit mask of ready cores
113 u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev,
114 enum kbase_pm_core_type type);
/**
 * kbase_pm_clock_on - Turn the clock for the device on, and enable device
 *                     interrupts.
 *
 * This function can be used by a power policy to turn the clock for the GPU on.
 * It should be modified during integration to perform the necessary actions to
 * ensure that the GPU is fully powered and clocked.
 *
 * @kbdev:     The kbase device structure for the device (must be a valid
 *             pointer)
 * @is_resume: true if clock on due to resume after suspend, false otherwise
 */
void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume);

/**
 * kbase_pm_clock_off - Disable device interrupts, and turn the clock for the
 *                      device off.
 *
 * This function can be used by a power policy to turn the clock for the GPU
 * off. It should be modified during integration to perform the necessary
 * actions to turn the clock off (if this is possible in the integration).
 *
 * @kbdev:      The kbase device structure for the device (must be a valid
 *              pointer)
 * @is_suspend: true if clock off due to suspend, false otherwise
 *
 * Return: true  if clock was turned off, or
 *         false if clock can not be turned off due to pending page/bus fault
 *               workers. Caller must flush MMU workqueues and retry
 */
bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend);
/**
 * kbase_pm_enable_interrupts - Enable interrupts on the device.
 *
 * Interrupts are also enabled after a call to kbase_pm_clock_on().
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_enable_interrupts(struct kbase_device *kbdev);

/**
 * kbase_pm_disable_interrupts - Disable interrupts on the device.
 *
 * This prevents delivery of Power Management interrupts to the CPU so that
 * kbase_pm_check_transitions_nolock() will not be called from the IRQ handler
 * until kbase_pm_enable_interrupts() or kbase_pm_clock_on() is called.
 *
 * Interrupts are also disabled after a call to kbase_pm_clock_off().
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_disable_interrupts(struct kbase_device *kbdev);
/**
 * kbase_pm_init_hw - Initialize the hardware.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @flags: Flags specifying the type of PM init
 *
 * This function checks the GPU ID register to ensure that the GPU is supported
 * by the driver and performs a reset on the device so that it is in a known
 * state before the device is used.
 *
 * Return: 0 if the device is supported and successfully reset.
 */
int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags);

/**
 * kbase_pm_reset_done - The GPU has been reset successfully.
 *
 * This function must be called by the GPU interrupt handler when the
 * RESET_COMPLETED bit is set. It signals to the power management initialization
 * code that the GPU has been successfully reset.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_reset_done(struct kbase_device *kbdev);
/**
 * kbase_pm_check_transitions_nolock - Check if there are any power transitions
 *                                     to make, and if so start them.
 *
 * This function will check the desired_xx_state members of
 * struct kbase_pm_device_data and the actual status of the hardware to see if
 * any power transitions can be made at this time to make the hardware state
 * closer to the state desired by the power policy.
 *
 * The return value can be used to check whether all the desired cores are
 * available, and so whether it's worth submitting a job (e.g. from a Power
 * Management IRQ).
 *
 * Note that this still returns true when desired_xx_state has no
 * cores. That is: of the no cores desired, none were *un*available. In
 * this case, the caller may still need to try submitting jobs. This is because
 * the Core Availability Policy might have taken us to an intermediate state
 * where no cores are powered, before powering on more cores (e.g. for core
 * rotation)
 *
 * The caller must hold kbase_device.pm.power_change_lock
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Return: non-zero when all desired cores are available. That is,
 *         it's worthwhile for the caller to submit a job.
 */
bool kbase_pm_check_transitions_nolock(struct kbase_device *kbdev);

/**
 * kbase_pm_check_transitions_sync - Synchronous and locking variant of
 *                                   kbase_pm_check_transitions_nolock()
 *
 * On returning, the desired state at the time of the call will have been met.
 *
 * There is nothing to stop the core being switched off by calls to
 * kbase_pm_release_cores() or kbase_pm_unrequest_cores(). Therefore, the
 * caller must have already made a call to
 * kbase_pm_request_cores()/kbase_pm_request_cores_sync() previously.
 *
 * The usual use-case for this is to ensure cores are 'READY' after performing
 * a GPU Reset.
 *
 * Unlike kbase_pm_check_transitions_nolock(), the caller must not hold
 * kbase_device.pm.power_change_lock, because this function will take that
 * lock itself.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_check_transitions_sync(struct kbase_device *kbdev);

/**
 * kbase_pm_update_cores_state_nolock - Variant of kbase_pm_update_cores_state()
 *                                      where the caller must hold
 *                                      kbase_device.pm.power_change_lock
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev);
/**
 * kbase_pm_update_cores_state - Update the desired state of shader cores from
 *                               the Power Policy, and begin any power
 *                               transitions.
 *
 * This function will update the desired_xx_state members of
 * struct kbase_pm_device_data by calling into the current Power Policy. It will
 * then begin power transitions to make the hardware achieve the desired shader
 * core state.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_update_cores_state(struct kbase_device *kbdev);

/**
 * kbase_pm_cancel_deferred_poweroff - Cancel any pending requests to power off
 *                                     the GPU and/or shader cores.
 *
 * This should be called by any functions which directly power off the GPU.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_cancel_deferred_poweroff(struct kbase_device *kbdev);
/**
 * kbasep_pm_read_present_cores - Read the bitmasks of present cores.
 *
 * This information is cached to avoid having to perform register reads whenever
 * the information is required.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbasep_pm_read_present_cores(struct kbase_device *kbdev);

/**
 * kbasep_pm_metrics_init - Initialize the metrics gathering framework.
 *
 * This must be called before other metric gathering APIs are called.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Return: 0 on success, error code on error
 */
int kbasep_pm_metrics_init(struct kbase_device *kbdev);

/**
 * kbasep_pm_metrics_term - Terminate the metrics gathering framework.
 *
 * This must be called when metric gathering is no longer required. It is an
 * error to call any metrics gathering function (other than
 * kbasep_pm_metrics_init()) after calling this function.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbasep_pm_metrics_term(struct kbase_device *kbdev);
/**
 * kbase_pm_report_vsync - Function to be called by the frame buffer driver to
 *                         update the vsync metric.
 *
 * This function should be called by the frame buffer driver to update whether
 * the system is hitting the vsync target or not. buffer_updated should be true
 * if the vsync corresponded with a new frame being displayed, otherwise it
 * should be false. This function does not need to be called every vsync, but
 * only when the value of @buffer_updated differs from a previous call.
 *
 * @kbdev:          The kbase device structure for the device (must be a
 *                  valid pointer)
 * @buffer_updated: True if the buffer has been updated on this VSync,
 *                  false otherwise
 */
void kbase_pm_report_vsync(struct kbase_device *kbdev, int buffer_updated);

/**
 * kbase_pm_get_dvfs_action - Determine whether the DVFS system should change
 *                            the clock speed of the GPU.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function should be called regularly by the DVFS system to check whether
 * the clock speed of the GPU needs updating.
 */
void kbase_pm_get_dvfs_action(struct kbase_device *kbdev);
/**
 * kbase_pm_request_gpu_cycle_counter - Mark that the GPU cycle counter is
 *                                      needed
 *
 * If the caller is the first caller then the GPU cycle counters will be enabled
 * along with the l2 cache
 *
 * The GPU must be powered when calling this function (i.e.
 * kbase_pm_context_active() must have been called).
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev);

/**
 * kbase_pm_request_gpu_cycle_counter_l2_is_on - Mark GPU cycle counter is
 *                                               needed (l2 cache already on)
 *
 * This is a version of the above function
 * (kbase_pm_request_gpu_cycle_counter()) suitable for being called when the
 * l2 cache is known to be on and assured to be on until the subsequent call of
 * kbase_pm_release_gpu_cycle_counter() such as when a job is submitted. It does
 * not sleep and can be called from atomic functions.
 *
 * The GPU must be powered when calling this function (i.e.
 * kbase_pm_context_active() must have been called) and the l2 cache must be
 * powered on.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev);

/**
 * kbase_pm_release_gpu_cycle_counter - Mark that the GPU cycle counter is no
 *                                      longer in use
 *
 * If the caller is the
 * last caller then the GPU cycle counters will be disabled. A request must have
 * been made before a call to this.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev);
/**
 * kbase_pm_register_access_enable - Enable access to GPU registers
 *
 * Enables access to the GPU registers before power management has powered up
 * the GPU with kbase_pm_powerup().
 *
 * Access to registers should be done using kbase_os_reg_read()/write() at this
 * stage, not kbase_reg_read()/write().
 *
 * This results in the power management callbacks provided in the driver
 * configuration to get called to turn on power and/or clocks to the GPU. See
 * kbase_pm_callback_conf.
 *
 * This should only be used before power management is powered up with
 * kbase_pm_powerup()
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_register_access_enable(struct kbase_device *kbdev);

/**
 * kbase_pm_register_access_disable - Disable early register access
 *
 * Disables access to the GPU registers enabled earlier by a call to
 * kbase_pm_register_access_enable().
 *
 * This results in the power management callbacks provided in the driver
 * configuration to get called to turn off power and/or clocks to the GPU. See
 * kbase_pm_callback_conf
 *
 * This should only be used before power management is powered up with
 * kbase_pm_powerup()
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_register_access_disable(struct kbase_device *kbdev);
/* NOTE: kbase_pm_is_suspending is in mali_kbase.h, because it is an inline
 * function */

/**
 * kbase_pm_metrics_is_active - Check if the power management metrics
 *                              collection is active.
 *
 * Note that this returns if the power management metrics collection was
 * active at the time of calling, it is possible that after the call the metrics
 * collection enable may have changed state.
 *
 * The caller must handle the consequence that the state may have changed.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * Return: true if metrics collection was active else false.
 */
bool kbase_pm_metrics_is_active(struct kbase_device *kbdev);
/**
 * kbase_pm_do_poweron - Power on the GPU, and any cores that are requested.
 *
 * @kbdev:     The kbase device structure for the device (must be a valid
 *             pointer)
 * @is_resume: true if power on due to resume after suspend,
 *             false otherwise
 */
void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume);

/**
 * kbase_pm_do_poweroff - Power off the GPU, and any cores that have been
 *                        requested.
 *
 * @kbdev:      The kbase device structure for the device (must be a valid
 *              pointer)
 * @is_suspend: true if power off due to suspend,
 *              false otherwise
 *
 * Return: true  if power was turned off, else
 *         false if power can not be turned off due to pending page/bus
 *               fault workers. Caller must flush MMU workqueues and retry
 */
bool kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend);
#ifdef CONFIG_PM_DEVFREQ
/* Devfreq hooks: report and reset the time the GPU has spent busy/idle.
 * Only compiled in when the kernel devfreq framework is enabled. */
void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev,
		unsigned long *total, unsigned long *busy);
void kbase_pm_reset_dvfs_utilisation(struct kbase_device *kbdev);
#endif /* CONFIG_PM_DEVFREQ */
#ifdef CONFIG_MALI_MIDGARD_DVFS
/**
 * kbase_platform_dvfs_event - Report utilisation to DVFS code
 *
 * Function provided by platform specific code when DVFS is enabled to allow
 * the power management metrics system to report utilisation.
 *
 * @kbdev:         The kbase device structure for the device (must be a
 *                 valid pointer)
 * @utilisation:   The current calculated utilisation by the metrics system.
 * @util_gl_share: The current calculated gl share of utilisation.
 * @util_cl_share: The current calculated cl share of utilisation per core
 *                 group
 *
 * Return: Returns 0 on failure and non zero on success.
 */
int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation,
	u32 util_gl_share, u32 util_cl_share[2]);
#endif /* CONFIG_MALI_MIDGARD_DVFS */
/* NOTE(review): no kernel-doc was present for this declaration in the
 * extracted source; presumably called when the GPU power state has changed —
 * confirm against the backend implementation before relying on this. */
void kbase_pm_power_changed(struct kbase_device *kbdev);
494 * kbase_pm_metrics_update - Inform the metrics system that an atom is either
495 * about to be run or has just completed.
496 * @kbdev: The kbase device structure for the device (must be a valid pointer)
497 * @now: Pointer to the timestamp of the change, or NULL to use current time
499 * Caller must hold runpool_irq.lock
501 void kbase_pm_metrics_update(struct kbase_device *kbdev,
505 * kbase_pm_cache_snoop_enable - Allow CPU snoops on the GPU
506 * If the GPU does not have coherency this is a no-op
507 * @kbdev: Device pointer
509 * This function should be called after L2 power up.
512 void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev);
515 * kbase_pm_cache_snoop_disable - Prevent CPU snoops on the GPU
516 * If the GPU does not have coherency this is a no-op
517 * @kbdev: Device pointer
519 * This function should be called before L2 power off.
521 void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev);
523 #endif /* _KBASE_BACKEND_PM_INTERNAL_H_ */