/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/*
 * GPU backend implementation of base kernel power management APIs
 */

#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_config_defaults.h>
#ifdef CONFIG_MALI_PLATFORM_DEVICETREE
#include <linux/pm_runtime.h>
#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */

#include <mali_kbase_pm.h>
#include <mali_kbase_hwaccess_jm.h>
#include <backend/gpu/mali_kbase_js_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>

static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data);
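
/*
 * kbase_pm_register_access_enable()/kbase_pm_register_access_disable()
 * bracket a window of raw GPU register access (e.g. early during device
 * probe, before the PM framework is fully initialised). They invoke the
 * platform's power on/off callbacks directly, when a platform supplies
 * them via POWER_MANAGEMENT_CALLBACKS, and record the resulting state in
 * pm.backend.gpu_powered.
 */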

void kbase_pm_register_access_enable(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *callbacks;

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

	if (callbacks)
		callbacks->power_on_callback(kbdev);

	kbdev->pm.backend.gpu_powered = true;
}

void kbase_pm_register_access_disable(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *callbacks;

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

	if (callbacks)
		callbacks->power_off_callback(kbdev);

	kbdev->pm.backend.gpu_powered = false;
}
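
/*
 * kbase_hwaccess_pm_init() sets up everything the backend needs before
 * the GPU is first powered: the PM lock, the deferred power-off
 * workqueue, the platform callbacks (all left NULL when no
 * POWER_MANAGEMENT_CALLBACKS are configured), the metrics subsystem,
 * core availability (CA), and the power policy. Failures unwind in
 * reverse order via the labels at the end of the function.
 */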

int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
{
	int ret = 0;
	struct kbase_pm_callback_conf *callbacks;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_init(&kbdev->pm.lock);

	kbdev->pm.backend.gpu_poweroff_wait_wq = alloc_workqueue("kbase_pm_poweroff_wait",
			WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!kbdev->pm.backend.gpu_poweroff_wait_wq)
		return -ENOMEM;

	INIT_WORK(&kbdev->pm.backend.gpu_poweroff_wait_work,
			kbase_pm_gpu_poweroff_wait_wq);

	kbdev->pm.backend.gpu_powered = false;
	kbdev->pm.suspending = false;
#ifdef CONFIG_MALI_DEBUG
	kbdev->pm.backend.driver_ready_for_irqs = false;
#endif /* CONFIG_MALI_DEBUG */
	kbdev->pm.backend.gpu_in_desired_state = true;
	init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait);

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
	if (callbacks) {
		kbdev->pm.backend.callback_power_on =
					callbacks->power_on_callback;
		kbdev->pm.backend.callback_power_off =
					callbacks->power_off_callback;
		kbdev->pm.backend.callback_power_suspend =
					callbacks->power_suspend_callback;
		kbdev->pm.backend.callback_power_resume =
					callbacks->power_resume_callback;
		kbdev->pm.callback_power_runtime_init =
					callbacks->power_runtime_init_callback;
		kbdev->pm.callback_power_runtime_term =
					callbacks->power_runtime_term_callback;
		kbdev->pm.backend.callback_power_runtime_on =
					callbacks->power_runtime_on_callback;
		kbdev->pm.backend.callback_power_runtime_off =
					callbacks->power_runtime_off_callback;
		kbdev->pm.backend.callback_power_runtime_idle =
					callbacks->power_runtime_idle_callback;
	} else {
		kbdev->pm.backend.callback_power_on = NULL;
		kbdev->pm.backend.callback_power_off = NULL;
		kbdev->pm.backend.callback_power_suspend = NULL;
		kbdev->pm.backend.callback_power_resume = NULL;
		kbdev->pm.callback_power_runtime_init = NULL;
		kbdev->pm.callback_power_runtime_term = NULL;
		kbdev->pm.backend.callback_power_runtime_on = NULL;
		kbdev->pm.backend.callback_power_runtime_off = NULL;
		kbdev->pm.backend.callback_power_runtime_idle = NULL;
	}

	/* Initialise the metrics subsystem */
	ret = kbasep_pm_metrics_init(kbdev);
	if (ret)
		return ret;

	init_waitqueue_head(&kbdev->pm.backend.l2_powered_wait);
	kbdev->pm.backend.l2_powered = 0;

	init_waitqueue_head(&kbdev->pm.backend.reset_done_wait);
	kbdev->pm.backend.reset_done = false;

	init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
	kbdev->pm.active_count = 0;

	spin_lock_init(&kbdev->pm.backend.gpu_cycle_counter_requests_lock);
	spin_lock_init(&kbdev->pm.backend.gpu_powered_lock);

	init_waitqueue_head(&kbdev->pm.backend.poweroff_wait);

	if (kbase_pm_ca_init(kbdev) != 0)
		goto workq_fail;

	if (kbase_pm_policy_init(kbdev) != 0)
		goto pm_policy_fail;

	return 0;

pm_policy_fail:
	kbase_pm_ca_term(kbdev);
workq_fail:
	kbasep_pm_metrics_term(kbdev);
	return -EINVAL;
}
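
/*
 * kbase_pm_do_poweron() turns the GPU clock and interrupts back on and
 * asks the active policy which cores to power. Callers must hold
 * kbdev->pm.lock, as the lockdep assertion below documents.
 */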

void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
{
	lockdep_assert_held(&kbdev->pm.lock);

	/* Turn clocks and interrupts on - no-op if we haven't done a previous
	 * kbase_pm_clock_off() */
	kbase_pm_clock_on(kbdev, is_resume);

	/* Update core status as required by the policy */
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
			SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START);
	kbase_pm_update_cores_state(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
			SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END);

	/* NOTE: We don't wait to reach the desired state, since running atoms
	 * will wait for that state to be reached anyway */
}
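
/*
 * Deferred power-off worker, queued by kbase_pm_do_poweroff(). The
 * rk_ext macro below makes this DDK r14 variant skip the synchronous
 * wait for core power transitions before switching the clock off
 * (Rockchip's "solution_1_for_glitch" adaptation); the stock wait is
 * kept in the dead #else branch for reference. If power-on became
 * required while the worker was running, the GPU is powered straight
 * back on before waiters on poweroff_wait are woken.
 */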

static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
{
	struct kbase_device *kbdev = container_of(data, struct kbase_device,
			pm.backend.gpu_poweroff_wait_work);
	struct kbase_pm_device_data *pm = &kbdev->pm;
	struct kbase_pm_backend_data *backend = &pm->backend;
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	unsigned long flags;

/* rk_ext: adaptation in DDK r14 for solution_1_for_glitch. */
#define NOT_TO_WAIT_CORES_POWER_TRANSITIONS_BEFORE_POWER_OFF_GPU

#ifdef NOT_TO_WAIT_CORES_POWER_TRANSITIONS_BEFORE_POWER_OFF_GPU
#else
	/* Wait for power transitions to complete. We do this with no locks held
	 * so that we don't deadlock with any pending workqueues */
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
			SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START);
	kbase_pm_check_transitions_sync(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
			SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END);
#endif

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

	if (!backend->poweron_required) {
		WARN_ON(kbdev->l2_available_bitmap ||
				kbdev->shader_available_bitmap ||
				kbdev->tiler_available_bitmap);

		/* Consume any change-state events */
		kbase_timeline_pm_check_handle_event(kbdev,
				KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);

		/* Disable interrupts and turn the clock off */
		if (!kbase_pm_clock_off(kbdev, backend->poweroff_is_suspend)) {
			/*
			 * Page/bus faults are pending, must drop locks to
			 * process. Interrupts are disabled so no more faults
			 * should be generated at this point.
			 */
			mutex_unlock(&kbdev->pm.lock);
			mutex_unlock(&js_devdata->runpool_mutex);
			kbase_flush_mmu_wqs(kbdev);
			mutex_lock(&js_devdata->runpool_mutex);
			mutex_lock(&kbdev->pm.lock);

			/* Turn off clock now that faults have been handled. We
			 * dropped locks so poweron_required may have changed -
			 * power back on if this is the case. */
			if (backend->poweron_required)
				kbase_pm_clock_on(kbdev, false);
			else
				WARN_ON(!kbase_pm_clock_off(kbdev,
						backend->poweroff_is_suspend));
		}
	}

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	backend->poweroff_wait_in_progress = false;
	if (backend->poweron_required) {
		backend->poweron_required = false;
		kbase_pm_update_cores_state_nolock(kbdev);
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);

	wake_up(&kbdev->pm.backend.poweroff_wait);
}
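
/*
 * kbase_pm_do_poweroff() only records the intent to power off (clearing
 * the desired/available core bitmaps so no new cores are handed out)
 * and queues the worker above; the actual clock-off happens
 * asynchronously. Callers that need the GPU to actually be off must
 * follow up with kbase_pm_wait_for_poweroff_complete().
 */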

void kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend)
{
	unsigned long flags;

	lockdep_assert_held(&kbdev->pm.lock);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	if (!kbdev->pm.backend.poweroff_wait_in_progress) {
		/* Force all cores off */
		kbdev->pm.backend.desired_shader_state = 0;
		kbdev->pm.backend.desired_tiler_state = 0;

		/* Force all cores to be unavailable, in the situation where
		 * transitions are in progress for some cores but not others,
		 * and kbase_pm_check_transitions_nolock can not immediately
		 * power off the cores */
		kbdev->shader_available_bitmap = 0;
		kbdev->tiler_available_bitmap = 0;
		kbdev->l2_available_bitmap = 0;

		kbdev->pm.backend.poweroff_wait_in_progress = true;
		kbdev->pm.backend.poweroff_is_suspend = is_suspend;

		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

		/* Kick off the wq here. Callers will have to wait. */
		queue_work(kbdev->pm.backend.gpu_poweroff_wait_wq,
				&kbdev->pm.backend.gpu_poweroff_wait_work);
	} else {
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	}
}
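
/*
 * Note: despite its name, is_poweroff_in_progress() returns true once
 * the power-off is no longer in progress; it is written this way so it
 * can serve directly as the wait_event_killable() condition below. A
 * typical caller pairs it with the power-off request, as
 * kbase_hwaccess_pm_suspend() does later in this file:
 *
 *	kbase_pm_do_poweroff(kbdev, true);
 *	...
 *	kbase_pm_wait_for_poweroff_complete(kbdev);
 */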

static bool is_poweroff_in_progress(struct kbase_device *kbdev)
{
	bool ret;
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	ret = (kbdev->pm.backend.poweroff_wait_in_progress == false);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return ret;
}

void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev)
{
	wait_event_killable(kbdev->pm.backend.poweroff_wait,
			is_poweroff_in_progress(kbdev));
}
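
/*
 * kbase_hwaccess_pm_powerup() brings the GPU up at driver init:
 * hardware init runs first with IRQs still disabled, the debug core
 * masks are seeded from the full shader_present set, IRQ delivery is
 * then enabled, and the power-on happens under a temporary active count
 * of 1 so no policy can power the GPU off mid-sequence. The closing
 * kbase_pm_context_idle() drops that artificial reference.
 */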

int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
		unsigned int flags)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	unsigned long irq_flags;
	int ret;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

	/* A suspend won't happen during startup/insmod */
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

	/* Power up the GPU, don't enable IRQs as we are not ready to receive
	 * them. */
	ret = kbase_pm_init_hw(kbdev, flags);
	if (ret) {
		mutex_unlock(&kbdev->pm.lock);
		mutex_unlock(&js_devdata->runpool_mutex);
		return ret;
	}

	kbasep_pm_read_present_cores(kbdev);

	kbdev->pm.debug_core_mask_all = kbdev->pm.debug_core_mask[0] =
			kbdev->pm.debug_core_mask[1] =
			kbdev->pm.debug_core_mask[2] =
			kbdev->gpu_props.props.raw_props.shader_present;

	/* Pretend the GPU is active to prevent a power policy turning the GPU
	 * cores off */
	kbdev->pm.active_count = 1;

	spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
			irq_flags);
	/* Ensure cycle counter is off */
	kbdev->pm.backend.gpu_cycle_counter_requests = 0;
	spin_unlock_irqrestore(
			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
			irq_flags);

	/* We are ready to receive IRQs now as the power policy is set up, so
	 * enable them now. */
#ifdef CONFIG_MALI_DEBUG
	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
	kbdev->pm.backend.driver_ready_for_irqs = true;
	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
#endif
	kbase_pm_enable_interrupts(kbdev);

	/* Turn on the GPU and any cores needed by the policy */
	kbase_pm_do_poweron(kbdev, false);
	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);

	/* Idle the GPU and/or cores, if the policy wants it to */
	kbase_pm_context_idle(kbdev);

	return 0;
}
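
/*
 * kbase_hwaccess_pm_halt() forces a power-off regardless of policy,
 * cancelling any deferred power-off first. Like kbase_pm_do_poweroff()
 * itself, it does not wait for the power-off to complete.
 */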

void kbase_hwaccess_pm_halt(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&kbdev->pm.lock);
	kbase_pm_cancel_deferred_poweroff(kbdev);
	kbase_pm_do_poweroff(kbdev, false);
	mutex_unlock(&kbdev->pm.lock);
}

KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt);

void kbase_hwaccess_pm_term(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);
	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests == 0);

	/* Free any resources the policy allocated */
	kbase_pm_policy_term(kbdev);
	kbase_pm_ca_term(kbdev);

	/* Shut down the metrics subsystem */
	kbasep_pm_metrics_term(kbdev);

	destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wait_wq);
}
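
/*
 * kbase_pm_power_changed() is the backend's notification that a core
 * power transition finished (typically from the GPU interrupt path, as
 * the CHECKTRANS trace points below suggest). It re-evaluates the
 * transition state and, once the desired cores are available, kicks the
 * job slots so queued atoms can run.
 */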

void kbase_pm_power_changed(struct kbase_device *kbdev)
{
	bool cores_are_available;
	unsigned long flags;

	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
			SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START);
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
			SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END);

	if (cores_are_available) {
		/* Log timelining information that a change in state has
		 * completed */
		kbase_timeline_pm_handle_event(kbdev,
				KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);

		kbase_backend_slot_update(kbdev);
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
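
/*
 * kbase_pm_set_debug_core_mask() narrows the shader cores each job slot
 * may use (this is what the driver's core_mask control feeds into). The
 * _nolock call at the end implies callers are expected to already hold
 * kbdev->hwaccess_lock.
 */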

void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
		u64 new_core_mask_js0, u64 new_core_mask_js1,
		u64 new_core_mask_js2)
{
	kbdev->pm.debug_core_mask[0] = new_core_mask_js0;
	kbdev->pm.debug_core_mask[1] = new_core_mask_js1;
	kbdev->pm.debug_core_mask[2] = new_core_mask_js2;
	kbdev->pm.debug_core_mask_all = new_core_mask_js0 | new_core_mask_js1 |
			new_core_mask_js2;

	kbase_pm_update_cores_state_nolock(kbdev);
}
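
/*
 * Both hooks below funnel into kbase_pm_update_active(); the
 * active/idle distinction lives in pm.active_count, which the common
 * PM code is expected to have updated before calling in here.
 */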

void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev)
{
	kbase_pm_update_active(kbdev);
}

void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev)
{
	kbase_pm_update_active(kbdev);
}
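
/*
 * System suspend/resume pair. Suspend forces a full power-off with
 * is_suspend = true (so kbase_pm_clock_off() can route to the
 * platform's suspend callback), parks the backend timers, and, uniquely
 * among the callers in this file, blocks until the asynchronous
 * power-off has completed. Resume mirrors it: clear pm.suspending,
 * power back on, restart the timers.
 */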

void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;

	/* Force power off the GPU and all cores (regardless of policy), only
	 * after the PM active count reaches zero (otherwise, we risk turning it
	 * off prematurely) */
	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

	kbase_pm_cancel_deferred_poweroff(kbdev);
	kbase_pm_do_poweroff(kbdev, true);

	kbase_backend_timer_suspend(kbdev);

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);

	kbase_pm_wait_for_poweroff_complete(kbdev);
}

void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

	kbdev->pm.suspending = false;
	kbase_pm_do_poweron(kbdev, true);

	kbase_backend_timer_resume(kbdev);

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);
}