/*
 *
 * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/*
 * GPU backend implementation of base kernel power management APIs
 */
#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_config_defaults.h>
#ifdef CONFIG_MALI_PLATFORM_DEVICETREE
#include <linux/pm_runtime.h>
#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */

#include <mali_kbase_pm.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
#include <backend/gpu/mali_kbase_js_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
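
/*
 * Enable bare register access: invoke the platform's power-on callback, if
 * one is configured via POWER_MANAGEMENT_CALLBACKS, and record that the GPU
 * is powered so register reads/writes are permitted.
 */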
void kbase_pm_register_access_enable(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *callbacks;

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

	if (callbacks)
		callbacks->power_on_callback(kbdev);

	kbdev->pm.backend.gpu_powered = true;
}
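
/*
 * Disable bare register access: invoke the platform's power-off callback, if
 * any, and record that the GPU is no longer powered, so register access is
 * no longer safe.
 */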
void kbase_pm_register_access_disable(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *callbacks;

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

	if (callbacks)
		callbacks->power_off_callback(kbdev);

	kbdev->pm.backend.gpu_powered = false;
}
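
/*
 * One-time initialisation of the PM backend: set up locks, wait queues and
 * initial state, wire up the platform power callbacks (or leave them NULL
 * when none are configured), then bring up the metrics subsystem, the core
 * availability (CA) framework and the power policy.
 */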
int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
{
	int ret = 0;
	struct kbase_pm_callback_conf *callbacks;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_init(&kbdev->pm.lock);

	kbdev->pm.backend.gpu_powered = false;
	kbdev->pm.suspending = false;
#ifdef CONFIG_MALI_DEBUG
	kbdev->pm.backend.driver_ready_for_irqs = false;
#endif /* CONFIG_MALI_DEBUG */
	kbdev->pm.backend.gpu_in_desired_state = true;
	init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait);

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
	if (callbacks) {
		kbdev->pm.backend.callback_power_on =
					callbacks->power_on_callback;
		kbdev->pm.backend.callback_power_off =
					callbacks->power_off_callback;
		kbdev->pm.backend.callback_power_suspend =
					callbacks->power_suspend_callback;
		kbdev->pm.backend.callback_power_resume =
					callbacks->power_resume_callback;
		kbdev->pm.callback_power_runtime_init =
					callbacks->power_runtime_init_callback;
		kbdev->pm.callback_power_runtime_term =
					callbacks->power_runtime_term_callback;
		kbdev->pm.backend.callback_power_runtime_on =
					callbacks->power_runtime_on_callback;
		kbdev->pm.backend.callback_power_runtime_off =
					callbacks->power_runtime_off_callback;
		kbdev->pm.backend.callback_power_runtime_idle =
					callbacks->power_runtime_idle_callback;
	} else {
		kbdev->pm.backend.callback_power_on = NULL;
		kbdev->pm.backend.callback_power_off = NULL;
		kbdev->pm.backend.callback_power_suspend = NULL;
		kbdev->pm.backend.callback_power_resume = NULL;
		kbdev->pm.callback_power_runtime_init = NULL;
		kbdev->pm.callback_power_runtime_term = NULL;
		kbdev->pm.backend.callback_power_runtime_on = NULL;
		kbdev->pm.backend.callback_power_runtime_off = NULL;
		kbdev->pm.backend.callback_power_runtime_idle = NULL;
	}

	/* Initialise the metrics subsystem */
	ret = kbasep_pm_metrics_init(kbdev);
	if (ret)
		return ret;

	init_waitqueue_head(&kbdev->pm.backend.l2_powered_wait);
	kbdev->pm.backend.l2_powered = 0;

	init_waitqueue_head(&kbdev->pm.backend.reset_done_wait);
	kbdev->pm.backend.reset_done = false;

	init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
	kbdev->pm.active_count = 0;

	spin_lock_init(&kbdev->pm.power_change_lock);
	spin_lock_init(&kbdev->pm.backend.gpu_cycle_counter_requests_lock);
	spin_lock_init(&kbdev->pm.backend.gpu_powered_lock);

	if (kbase_pm_ca_init(kbdev) != 0)
		goto workq_fail;

	if (kbase_pm_policy_init(kbdev) != 0)
		goto pm_policy_fail;

	return 0;

pm_policy_fail:
	kbase_pm_ca_term(kbdev);
workq_fail:
	kbasep_pm_metrics_term(kbdev);
	return -EINVAL;
}
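
/*
 * Power up the GPU clock and let the active policy decide which cores to
 * switch on. The caller must hold kbdev->pm.lock.
 */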
void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
{
	lockdep_assert_held(&kbdev->pm.lock);

	/* Turn clocks and interrupts on - no-op if we haven't done a previous
	 * kbase_pm_clock_off() */
	kbase_pm_clock_on(kbdev, is_resume);

	/* Update core status as required by the policy */
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
			SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START);
	kbase_pm_update_cores_state(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
			SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END);

	/* NOTE: We don't wait to reach the desired state, since running atoms
	 * will wait for that state to be reached anyway */
}
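
/*
 * Power down all cores and then the GPU clock. Returns false if pending
 * page/bus faults prevented the clock from being turned off, in which case
 * the caller is expected to flush the MMU workqueues and retry. The caller
 * must hold kbdev->pm.lock.
 */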
bool kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend)
{
	unsigned long flags;
	bool cores_are_available;

	lockdep_assert_held(&kbdev->pm.lock);

	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

	/* Force all cores off */
	kbdev->pm.backend.desired_shader_state = 0;

	/* Force all cores to be unavailable, in the situation where
	 * transitions are in progress for some cores but not others,
	 * and kbase_pm_check_transitions_nolock cannot immediately
	 * power off the cores */
	kbdev->shader_available_bitmap = 0;
	kbdev->tiler_available_bitmap = 0;
	kbdev->l2_available_bitmap = 0;

	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
			SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START);
	cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
			SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END);
	/* 'cores_are_available' is not needed here - the value returned below
	 * reflects only whether the clock could be turned off */
	CSTD_UNUSED(cores_are_available);

	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	/* NOTE: We won't wait to reach the cores' desired state, even if we're
	 * powering off the GPU itself too. It's safe to cut the power whilst
	 * they're transitioning to off, because the cores should be idle and
	 * all cache flushes should already have occurred */

	/* Consume any change-state events */
	kbase_timeline_pm_check_handle_event(kbdev,
				KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
	/* Disable interrupts and turn the clock off */
	return kbase_pm_clock_off(kbdev, is_suspend);
}
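
/*
 * Bring the GPU up at driver start-up: initialise the hardware, read the
 * present cores, enable interrupts and power on whatever the policy asks
 * for. Interrupts are only enabled once the power policy is set up. Returns
 * 0 on success, or the error from kbase_pm_init_hw() on failure.
 */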
int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
		unsigned int flags)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	unsigned long irq_flags;
	int ret;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

	/* A suspend won't happen during startup/insmod */
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

	/* Power up the GPU, don't enable IRQs as we are not ready to receive
	 * them. */
	ret = kbase_pm_init_hw(kbdev, flags);
	if (ret) {
		mutex_unlock(&kbdev->pm.lock);
		mutex_unlock(&js_devdata->runpool_mutex);
		return ret;
	}

	kbasep_pm_read_present_cores(kbdev);

	kbdev->pm.debug_core_mask_all = kbdev->pm.debug_core_mask[0] =
			kbdev->pm.debug_core_mask[1] =
			kbdev->pm.debug_core_mask[2] =
			kbdev->gpu_props.props.raw_props.shader_present;

	/* Pretend the GPU is active to prevent a power policy turning the GPU
	 * cores off */
	kbdev->pm.active_count = 1;

	spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
								irq_flags);
	/* Ensure cycle counter is off */
	kbdev->pm.backend.gpu_cycle_counter_requests = 0;
	spin_unlock_irqrestore(
			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
								irq_flags);

	/* We are ready to receive IRQs now as the power policy is set up, so
	 * enable them now. */
#ifdef CONFIG_MALI_DEBUG
	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
	kbdev->pm.backend.driver_ready_for_irqs = true;
	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
#endif /* CONFIG_MALI_DEBUG */
	kbase_pm_enable_interrupts(kbdev);

	/* Turn on the GPU and any cores needed by the policy */
	kbase_pm_do_poweron(kbdev, false);
	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);

	/* Idle the GPU and/or cores, if the policy wants it to */
	kbase_pm_context_idle(kbdev);

	return 0;
}
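
/*
 * Halt the PM backend: cancel any deferred power-off and force the GPU off,
 * flushing the MMU workqueues first if outstanding page/bus faults block the
 * power-off.
 */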
void kbase_hwaccess_pm_halt(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&kbdev->pm.lock);
	kbase_pm_cancel_deferred_poweroff(kbdev);
	if (!kbase_pm_do_poweroff(kbdev, false)) {
		/* Page/bus faults are pending, must drop pm.lock to process.
		 * Interrupts are disabled so no more faults should be
		 * generated at this point */
		mutex_unlock(&kbdev->pm.lock);
		kbase_flush_mmu_wqs(kbdev);
		mutex_lock(&kbdev->pm.lock);
		WARN_ON(!kbase_pm_do_poweroff(kbdev, false));
	}
	mutex_unlock(&kbdev->pm.lock);
}

KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt);
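
/*
 * Final tear-down of the PM backend. The caller must already have dropped
 * the last PM active reference and released all cycle counter requests, as
 * the asserts below check.
 */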
void kbase_hwaccess_pm_term(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);
	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests == 0);

	/* Free any resources the policy allocated */
	kbase_pm_policy_term(kbdev);
	kbase_pm_ca_term(kbdev);

	/* Shut down the metrics subsystem */
	kbasep_pm_metrics_term(kbdev);
}
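
/*
 * Re-evaluate core transitions after a power-change interrupt and, if the
 * requested cores are now available, update the job slots so that waiting
 * atoms can be submitted.
 */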
void kbase_pm_power_changed(struct kbase_device *kbdev)
{
	bool cores_are_available;
	unsigned long flags;

	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
			SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START);
	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
	cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
			SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END);

	if (cores_are_available) {
		/* Log timelining information that a change in state has
		 * occurred */
		kbase_timeline_pm_handle_event(kbdev,
				KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);

		spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
		kbase_gpu_slot_update(kbdev);
		spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock,
									flags);
	}
}
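
/*
 * Override the shader core mask used for each job slot (a debug facility),
 * recompute the combined mask and re-run the core state machine.
 */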
void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
		u64 new_core_mask_js0, u64 new_core_mask_js1,
		u64 new_core_mask_js2)
{
	kbdev->pm.debug_core_mask[0] = new_core_mask_js0;
	kbdev->pm.debug_core_mask[1] = new_core_mask_js1;
	kbdev->pm.debug_core_mask[2] = new_core_mask_js2;
	kbdev->pm.debug_core_mask_all = new_core_mask_js0 | new_core_mask_js1 |
			new_core_mask_js2;

	kbase_pm_update_cores_state_nolock(kbdev);
}
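
/*
 * The two hooks below forward active/idle notifications from the hwaccess
 * layer to the PM core, which updates the policy's view of whether the GPU
 * is in use.
 */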
void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev)
{
	kbase_pm_update_active(kbdev);
}
void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev)
{
	kbase_pm_update_active(kbdev);
}
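
/*
 * System suspend: force the GPU and all cores off regardless of policy and
 * stop the scheduling timers. Only called once the PM active count has
 * reached zero.
 */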
void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;

	/* Force power off the GPU and all cores (regardless of policy), only
	 * after the PM active count reaches zero (otherwise, we risk turning it
	 * off prematurely) */
	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);
	kbase_pm_cancel_deferred_poweroff(kbdev);
	if (!kbase_pm_do_poweroff(kbdev, true)) {
		/* Page/bus faults are pending, must drop pm.lock to process.
		 * Interrupts are disabled so no more faults should be
		 * generated at this point */
		mutex_unlock(&kbdev->pm.lock);
		kbase_flush_mmu_wqs(kbdev);
		mutex_lock(&kbdev->pm.lock);
		WARN_ON(!kbase_pm_do_poweroff(kbdev, false));
	}

	kbase_backend_timer_suspend(kbdev);

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);
}
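
/*
 * System resume: clear the suspending flag, restore power according to the
 * policy and restart the scheduling timers.
 */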
void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

	kbdev->pm.suspending = false;
	kbase_pm_do_poweron(kbdev, true);

	kbase_backend_timer_resume(kbdev);

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);
}