/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/*
 * GPU backend implementation of base kernel power management APIs
 */
/* #define ENABLE_DEBUG_LOG */
#include "../../platform/rk/custom_log.h"

#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_config_defaults.h>
#ifdef CONFIG_MALI_PLATFORM_DEVICETREE
#include <linux/pm_runtime.h>
#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */

#include <mali_kbase_pm.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
#include <backend/gpu/mali_kbase_js_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
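
/*
 * Drop the GPU clock to a low, known-safe rate before the cores are
 * powered off, so the GPU is never gated at a high frequency.
 * 'mutex_for_clk' serializes this against the Rockchip DVFS code, and
 * 'is_power_off' tells that code not to touch the clock while the GPU
 * is down. The 200 MHz figure is this platform glue's choice of a safe
 * low rate; see the platform code under platform/rk for context.
 */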
static int rk_slowdown_clk_gpu_before_poweroff_cores(struct kbase_device *kbdev)
{
	int ret = 0;
	const unsigned long freq = 200 * 1000 * 1000; /* 200 MHz */

	mutex_lock(&kbdev->mutex_for_clk);
	ret = clk_set_rate(kbdev->clock, freq);
	if (ret)
		E("Failed to set clock to %lu.", freq);
	kbdev->is_power_off = true;
	mutex_unlock(&kbdev->mutex_for_clk);

	return ret;
}
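
/*
 * Counterpart of rk_slowdown_clk_gpu_before_poweroff_cores(): once the
 * GPU is powered again, restore the rate last requested by DVFS (cached
 * in kbdev->freq) and clear 'is_power_off' so DVFS may run again.
 */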
static int rk_restore_clk_gpu(struct kbase_device *kbdev)
{
	int ret = 0;

	mutex_lock(&kbdev->mutex_for_clk);
	ret = clk_set_rate(kbdev->clock, kbdev->freq);
	if (ret)
		E("Failed to set clock to %lu.", kbdev->freq);
	kbdev->is_power_off = false;
	mutex_unlock(&kbdev->mutex_for_clk);

	return ret;
}
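
/*
 * The two helpers below bracket raw register access during early init
 * and shutdown, before/after the full PM state machine is running: they
 * invoke the platform power on/off callbacks directly (if the platform
 * provides any) and keep 'gpu_powered' in sync so that register
 * accesses are legal.
 */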
void kbase_pm_register_access_enable(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *callbacks;

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

	if (callbacks)
		callbacks->power_on_callback(kbdev);

	kbdev->pm.backend.gpu_powered = true;
}
void kbase_pm_register_access_disable(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *callbacks;

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

	if (callbacks)
		callbacks->power_off_callback(kbdev);

	kbdev->pm.backend.gpu_powered = false;
}
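
/*
 * One-time PM backend setup: initialise locks, wait queues and state
 * flags, wire up the platform power-management callbacks (or NULL them
 * out if the platform provides none), then bring up the metrics, core
 * availability (CA) and power-policy subsystems, unwinding in reverse
 * order on failure.
 */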
int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
{
	int ret = 0;
	struct kbase_pm_callback_conf *callbacks;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_init(&kbdev->pm.lock);

	kbdev->pm.backend.gpu_powered = false;
	kbdev->pm.suspending = false;
#ifdef CONFIG_MALI_DEBUG
	kbdev->pm.backend.driver_ready_for_irqs = false;
#endif /* CONFIG_MALI_DEBUG */
	kbdev->pm.backend.gpu_in_desired_state = true;
	init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait);
	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
	if (callbacks) {
		kbdev->pm.backend.callback_power_on =
					callbacks->power_on_callback;
		kbdev->pm.backend.callback_power_off =
					callbacks->power_off_callback;
		kbdev->pm.backend.callback_power_suspend =
					callbacks->power_suspend_callback;
		kbdev->pm.backend.callback_power_resume =
					callbacks->power_resume_callback;
		kbdev->pm.callback_power_runtime_init =
					callbacks->power_runtime_init_callback;
		kbdev->pm.callback_power_runtime_term =
					callbacks->power_runtime_term_callback;
		kbdev->pm.backend.callback_power_runtime_on =
					callbacks->power_runtime_on_callback;
		kbdev->pm.backend.callback_power_runtime_off =
					callbacks->power_runtime_off_callback;
		kbdev->pm.backend.callback_power_runtime_idle =
					callbacks->power_runtime_idle_callback;
	} else {
		kbdev->pm.backend.callback_power_on = NULL;
		kbdev->pm.backend.callback_power_off = NULL;
		kbdev->pm.backend.callback_power_suspend = NULL;
		kbdev->pm.backend.callback_power_resume = NULL;
		kbdev->pm.callback_power_runtime_init = NULL;
		kbdev->pm.callback_power_runtime_term = NULL;
		kbdev->pm.backend.callback_power_runtime_on = NULL;
		kbdev->pm.backend.callback_power_runtime_off = NULL;
		kbdev->pm.backend.callback_power_runtime_idle = NULL;
	}
	/* Initialise the metrics subsystem */
	ret = kbasep_pm_metrics_init(kbdev);
	if (ret)
		return ret;

	init_waitqueue_head(&kbdev->pm.backend.l2_powered_wait);
	kbdev->pm.backend.l2_powered = 0;

	init_waitqueue_head(&kbdev->pm.backend.reset_done_wait);
	kbdev->pm.backend.reset_done = false;

	init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
	kbdev->pm.active_count = 0;

	spin_lock_init(&kbdev->pm.power_change_lock);
	spin_lock_init(&kbdev->pm.backend.gpu_cycle_counter_requests_lock);
	spin_lock_init(&kbdev->pm.backend.gpu_powered_lock);
	if (kbase_pm_ca_init(kbdev) != 0)
		goto workq_fail;

	if (kbase_pm_policy_init(kbdev) != 0)
		goto pm_policy_fail;

	return 0;

pm_policy_fail:
	kbase_pm_ca_term(kbdev);
workq_fail:
	kbasep_pm_metrics_term(kbdev);
	return -EINVAL;
}
void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
{
	lockdep_assert_held(&kbdev->pm.lock);

	/* Turn clocks and interrupts on - no-op if we haven't done a previous
	 * kbase_pm_clock_off() */
	kbase_pm_clock_on(kbdev, is_resume);

	/* Update core status as required by the policy */
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START);
	kbase_pm_update_cores_state(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END);

	/* NOTE: We don't wait to reach the desired state, since running atoms
	 * will wait for that state to be reached anyway */

	D("to restore clk_gpu.");
	rk_restore_clk_gpu(kbdev);
}
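
/*
 * Power-off sequence: first drop the GPU clock (Rockchip-specific, see
 * above), then force every shader/tiler/L2 core to the "off" state,
 * and finally disable interrupts and gate the clock. Returns false if
 * outstanding page/bus faults prevent the clock being turned off; the
 * caller must then flush the MMU workqueues and retry.
 */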
bool kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend)
{
	unsigned long flags;
	bool cores_are_available;

	lockdep_assert_held(&kbdev->pm.lock);

	D("to slowdown clk_gpu before poweroff pm_cores.");
	rk_slowdown_clk_gpu_before_poweroff_cores(kbdev);

	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

	/* Force all cores off */
	kbdev->pm.backend.desired_shader_state = 0;
	kbdev->pm.backend.desired_tiler_state = 0;

	/* Force all cores to be unavailable, in the situation where
	 * transitions are in progress for some cores but not others,
	 * and kbase_pm_check_transitions_nolock can not immediately
	 * power off the cores */
	kbdev->shader_available_bitmap = 0;
	kbdev->tiler_available_bitmap = 0;
	kbdev->l2_available_bitmap = 0;

	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START);
	cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END);
	/* Don't need 'cores_are_available', because we don't return anything */
	CSTD_UNUSED(cores_are_available);

	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	/* NOTE: We won't wait to reach the core's desired state, even if we're
	 * powering off the GPU itself too. It's safe to cut the power whilst
	 * they're transitioning to off, because the cores should be idle and
	 * all cache flushes should already have occurred */

	/* Consume any change-state events */
	kbase_timeline_pm_check_handle_event(kbdev,
				KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
	/* Disable interrupts and turn the clock off */
	return kbase_pm_clock_off(kbdev, is_suspend);
}
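
/*
 * First power-up of the GPU on the probe path: reset the hardware, read
 * the present-core bitmaps, enable IRQs once the power policy is in
 * place, then power on whatever cores the policy asks for.
 */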
int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
		unsigned int flags)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	unsigned long irq_flags;
	int ret;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

	/* A suspend won't happen during startup/insmod */
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

	/* Power up the GPU, don't enable IRQs as we are not ready to receive
	 * them. */
	ret = kbase_pm_init_hw(kbdev, flags);
	if (ret) {
		mutex_unlock(&kbdev->pm.lock);
		mutex_unlock(&js_devdata->runpool_mutex);
		return ret;
	}

	kbasep_pm_read_present_cores(kbdev);

	kbdev->pm.debug_core_mask_all = kbdev->pm.debug_core_mask[0] =
			kbdev->pm.debug_core_mask[1] =
			kbdev->pm.debug_core_mask[2] =
			kbdev->gpu_props.props.raw_props.shader_present;

	/* Pretend the GPU is active to prevent a power policy turning the GPU
	 * cores off */
	kbdev->pm.active_count = 1;

	spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
								irq_flags);
	/* Ensure cycle counter is off */
	kbdev->pm.backend.gpu_cycle_counter_requests = 0;
	spin_unlock_irqrestore(
			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
								irq_flags);

	/* We are ready to receive IRQ's now as power policy is set up, so
	 * enable them now. */
#ifdef CONFIG_MALI_DEBUG
	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
	kbdev->pm.backend.driver_ready_for_irqs = true;
	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
#endif
	kbase_pm_enable_interrupts(kbdev);

	/* Turn on the GPU and any cores needed by the policy */
	kbase_pm_do_poweron(kbdev, false);
	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);

	/* Idle the GPU and/or cores, if the policy wants it to */
	kbase_pm_context_idle(kbdev);

	return 0;
}
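
/*
 * Power the GPU off synchronously. If pending page/bus faults keep the
 * first kbase_pm_do_poweroff() from completing, pm.lock is dropped so
 * the MMU workqueues can drain the faults, then power-off is retried.
 */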
void kbase_hwaccess_pm_halt(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&kbdev->pm.lock);
	kbase_pm_cancel_deferred_poweroff(kbdev);
	if (!kbase_pm_do_poweroff(kbdev, false)) {
		/* Page/bus faults are pending, must drop pm.lock to process.
		 * Interrupts are disabled so no more faults should be
		 * generated at this point */
		mutex_unlock(&kbdev->pm.lock);
		kbase_flush_mmu_wqs(kbdev);
		mutex_lock(&kbdev->pm.lock);
		WARN_ON(!kbase_pm_do_poweroff(kbdev, false));
	}
	mutex_unlock(&kbdev->pm.lock);
}

KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt);
void kbase_hwaccess_pm_term(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);
	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests == 0);

	/* Free any resources the policy allocated */
	kbase_pm_policy_term(kbdev);
	kbase_pm_ca_term(kbdev);

	/* Shut down the metrics subsystem */
	kbasep_pm_metrics_term(kbdev);
}
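
/*
 * Called from the IRQ handler when a power transition completes: if the
 * requested cores are now available, record the state change on the
 * timeline and kick the job slots so queued atoms can start running on
 * the newly powered cores.
 */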
void kbase_pm_power_changed(struct kbase_device *kbdev)
{
	bool cores_are_available;
	unsigned long flags;

	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START);
	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
	cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END);

	if (cores_are_available) {
		/* Log timelining information that a change in state has
		 * occurred */
		kbase_timeline_pm_handle_event(kbdev,
				KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);

		spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
		kbase_gpu_slot_update(kbdev);
		spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
	}
}
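
/*
 * Restrict each job slot (JS0-JS2) to a caller-supplied mask of shader
 * cores, typically driven from debugfs. 'debug_core_mask_all' is kept
 * as the union of the per-slot masks so the core state machine knows
 * which cores may ever be needed. Callers are expected to hold
 * power_change_lock, as the _nolock suffix of the update call implies.
 */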
void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
		u64 new_core_mask_js0, u64 new_core_mask_js1,
		u64 new_core_mask_js2)
{
	kbdev->pm.debug_core_mask[0] = new_core_mask_js0;
	kbdev->pm.debug_core_mask[1] = new_core_mask_js1;
	kbdev->pm.debug_core_mask[2] = new_core_mask_js2;
	kbdev->pm.debug_core_mask_all = new_core_mask_js0 | new_core_mask_js1 |
			new_core_mask_js2;

	kbase_pm_update_cores_state_nolock(kbdev);
}
void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev)
{
	kbase_pm_update_active(kbdev);
}

void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev)
{
	kbase_pm_update_active(kbdev);
}
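
/*
 * System-suspend entry point: force the GPU and all cores off
 * (regardless of the active power policy) and stop the scheduling
 * timers. Pending faults are handled the same way as in
 * kbase_hwaccess_pm_halt().
 */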
void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;

	/* Force power off the GPU and all cores (regardless of policy), only
	 * after the PM active count reaches zero (otherwise, we risk turning it
	 * off prematurely) */
	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);
	kbase_pm_cancel_deferred_poweroff(kbdev);
	if (!kbase_pm_do_poweroff(kbdev, true)) {
		/* Page/bus faults are pending, must drop pm.lock to process.
		 * Interrupts are disabled so no more faults should be
		 * generated at this point */
		mutex_unlock(&kbdev->pm.lock);
		kbase_flush_mmu_wqs(kbdev);
		mutex_lock(&kbdev->pm.lock);
		WARN_ON(!kbase_pm_do_poweroff(kbdev, false));
	}

	kbase_backend_timer_suspend(kbdev);

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);
}
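
/*
 * System-resume entry point: clear the suspending flag, power the GPU
 * back on (passing is_resume = true so kbase_pm_clock_on() takes the
 * platform's resume callback rather than the normal power-on one) and
 * restart the scheduling timers.
 */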
void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

	kbdev->pm.suspending = false;
	kbase_pm_do_poweron(kbdev, true);

	kbase_backend_timer_resume(kbdev);

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);
}