firefly-linux-kernel-4.4.55.git: drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c
/*
 *
 * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */




/*
 * GPU backend implementation of base kernel power management APIs
 */

#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_config_defaults.h>
#ifdef CONFIG_MALI_PLATFORM_DEVICETREE
#include <linux/pm_runtime.h>
#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */

#include <mali_kbase_pm.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>

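/*
 * Power the GPU on via the platform power_on callback (when one is
 * configured through POWER_MANAGEMENT_CALLBACKS) and mark it as powered, so
 * that registers can be accessed without going through the PM state machine.
 */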
void kbase_pm_register_access_enable(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *callbacks;

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

	if (callbacks)
		callbacks->power_on_callback(kbdev);

	kbdev->pm.backend.gpu_powered = true;
}

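/*
 * Counterpart of kbase_pm_register_access_enable(): power the GPU back off
 * via the platform power_off callback (if configured) and clear the
 * gpu_powered flag.
 */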
void kbase_pm_register_access_disable(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *callbacks;

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

	if (callbacks)
		callbacks->power_off_callback(kbdev);

	kbdev->pm.backend.gpu_powered = false;
}

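/*
 * Initialise the PM backend: set up locks and wait queues, copy the
 * platform power-management callbacks (if any) into kbdev, and bring up the
 * metrics, core availability and power policy subsystems. Returns 0 on
 * success or a negative error code on failure.
 */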
int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
{
	int ret = 0;
	struct kbase_pm_callback_conf *callbacks;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_init(&kbdev->pm.lock);

	kbdev->pm.backend.gpu_powered = false;
	kbdev->pm.suspending = false;
#ifdef CONFIG_MALI_DEBUG
	kbdev->pm.backend.driver_ready_for_irqs = false;
#endif /* CONFIG_MALI_DEBUG */
	kbdev->pm.backend.gpu_in_desired_state = true;
	init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait);

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
	if (callbacks) {
		kbdev->pm.backend.callback_power_on =
					callbacks->power_on_callback;
		kbdev->pm.backend.callback_power_off =
					callbacks->power_off_callback;
		kbdev->pm.backend.callback_power_suspend =
					callbacks->power_suspend_callback;
		kbdev->pm.backend.callback_power_resume =
					callbacks->power_resume_callback;
		kbdev->pm.callback_power_runtime_init =
					callbacks->power_runtime_init_callback;
		kbdev->pm.callback_power_runtime_term =
					callbacks->power_runtime_term_callback;
		kbdev->pm.backend.callback_power_runtime_on =
					callbacks->power_runtime_on_callback;
		kbdev->pm.backend.callback_power_runtime_off =
					callbacks->power_runtime_off_callback;
		kbdev->pm.backend.callback_power_runtime_idle =
					callbacks->power_runtime_idle_callback;
	} else {
		kbdev->pm.backend.callback_power_on = NULL;
		kbdev->pm.backend.callback_power_off = NULL;
		kbdev->pm.backend.callback_power_suspend = NULL;
		kbdev->pm.backend.callback_power_resume = NULL;
		kbdev->pm.callback_power_runtime_init = NULL;
		kbdev->pm.callback_power_runtime_term = NULL;
		kbdev->pm.backend.callback_power_runtime_on = NULL;
		kbdev->pm.backend.callback_power_runtime_off = NULL;
		kbdev->pm.backend.callback_power_runtime_idle = NULL;
	}

	/* Initialise the metrics subsystem */
	ret = kbasep_pm_metrics_init(kbdev);
	if (ret)
		return ret;

	init_waitqueue_head(&kbdev->pm.backend.l2_powered_wait);
	kbdev->pm.backend.l2_powered = 0;

	init_waitqueue_head(&kbdev->pm.backend.reset_done_wait);
	kbdev->pm.backend.reset_done = false;

	init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
	kbdev->pm.active_count = 0;

	spin_lock_init(&kbdev->pm.power_change_lock);
	spin_lock_init(&kbdev->pm.backend.gpu_cycle_counter_requests_lock);
	spin_lock_init(&kbdev->pm.backend.gpu_powered_lock);

	if (kbase_pm_ca_init(kbdev) != 0)
		goto workq_fail;

	if (kbase_pm_policy_init(kbdev) != 0)
		goto pm_policy_fail;

	return 0;

pm_policy_fail:
	kbase_pm_ca_term(kbdev);
workq_fail:
	kbasep_pm_metrics_term(kbdev);
	return -EINVAL;
}

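/*
 * Turn the GPU clock and interrupts on and ask the active power policy to
 * start whichever cores it currently wants. Callers must hold
 * kbdev->pm.lock; the function does not wait for the cores to finish
 * powering up.
 */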
void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
{
	lockdep_assert_held(&kbdev->pm.lock);

	/* Turn clocks and interrupts on - no-op if we haven't done a previous
	 * kbase_pm_clock_off() */
	kbase_pm_clock_on(kbdev, is_resume);

	/* Update core status as required by the policy */
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START);
	kbase_pm_update_cores_state(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END);

	/* NOTE: We don't wait to reach the desired state, since running atoms
	 * will wait for that state to be reached anyway */
}

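/*
 * Force all shader, tiler and L2 cores to the unavailable/off state, then
 * disable interrupts and turn the GPU clock off. Returns the result of
 * kbase_pm_clock_off(): false if pending page/bus faults prevented the
 * clock from being switched off. Callers must hold kbdev->pm.lock.
 */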
bool kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend)
{
	unsigned long flags;
	bool cores_are_available;

	lockdep_assert_held(&kbdev->pm.lock);

	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

	/* Force all cores off */
	kbdev->pm.backend.desired_shader_state = 0;

	/* Force all cores to be unavailable, in the situation where
	 * transitions are in progress for some cores but not others,
	 * and kbase_pm_check_transitions_nolock cannot immediately
	 * power off the cores */
	kbdev->shader_available_bitmap = 0;
	kbdev->tiler_available_bitmap = 0;
	kbdev->l2_available_bitmap = 0;

	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START);
	cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END);
	/* 'cores_are_available' is not used here; mark it as used to avoid a
	 * compiler warning */
	CSTD_UNUSED(cores_are_available);

	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	/* NOTE: We won't wait to reach the cores' desired state, even if we're
	 * powering off the GPU itself too. It's safe to cut the power whilst
	 * they're transitioning to off, because the cores should be idle and
	 * all cache flushes should already have occurred */

	/* Consume any change-state events */
	kbase_timeline_pm_check_handle_event(kbdev,
				KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
	/* Disable interrupts and turn the clock off */
	return kbase_pm_clock_off(kbdev, is_suspend);
}

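/*
 * One-time power-up performed at driver initialisation. The expected
 * life-cycle, as suggested by the init/term pairing in this file, is
 * roughly:
 *
 *	kbase_hwaccess_pm_init(kbdev);
 *	kbase_hwaccess_pm_powerup(kbdev, flags);
 *	...normal operation, suspend/resume...
 *	kbase_hwaccess_pm_halt(kbdev);
 *	kbase_hwaccess_pm_term(kbdev);
 *
 * The PM active count is held at 1 across the power-up sequence so that the
 * policy cannot power the cores back off half-way through; it is released
 * again via kbase_pm_context_idle() at the end.
 */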
int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
		unsigned int flags)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	unsigned long irq_flags;
	int ret;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

	/* A suspend won't happen during startup/insmod */
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

	/* Power up the GPU, don't enable IRQs as we are not ready to receive
	 * them. */
	ret = kbase_pm_init_hw(kbdev, flags);
	if (ret) {
		mutex_unlock(&kbdev->pm.lock);
		mutex_unlock(&js_devdata->runpool_mutex);
		return ret;
	}

	kbasep_pm_read_present_cores(kbdev);

	kbdev->pm.debug_core_mask_all = kbdev->pm.debug_core_mask[0] =
			kbdev->pm.debug_core_mask[1] =
			kbdev->pm.debug_core_mask[2] =
			kbdev->gpu_props.props.raw_props.shader_present;

	/* Pretend the GPU is active to prevent a power policy turning the GPU
	 * cores off */
	kbdev->pm.active_count = 1;

	spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
								irq_flags);
	/* Ensure cycle counter is off */
	kbdev->pm.backend.gpu_cycle_counter_requests = 0;
	spin_unlock_irqrestore(
			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
								irq_flags);

	/* We are ready to receive IRQs now as the power policy is set up, so
	 * enable them now. */
#ifdef CONFIG_MALI_DEBUG
	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
	kbdev->pm.backend.driver_ready_for_irqs = true;
	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
#endif
	kbase_pm_enable_interrupts(kbdev);

	/* Turn on the GPU and any cores needed by the policy */
	kbase_pm_do_poweron(kbdev, false);
	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);

	/* Idle the GPU and/or cores, if the policy wants it to */
	kbase_pm_context_idle(kbdev);

	return 0;
}

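/*
 * Halt the PM backend: cancel any deferred power-off and force the GPU off.
 * If the first power-off attempt fails because page/bus faults are still
 * pending, pm.lock is dropped, the MMU workqueues are flushed and the
 * power-off is retried.
 */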
void kbase_hwaccess_pm_halt(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&kbdev->pm.lock);
	kbase_pm_cancel_deferred_poweroff(kbdev);
	if (!kbase_pm_do_poweroff(kbdev, false)) {
		/* Page/bus faults are pending, must drop pm.lock to process.
		 * Interrupts are disabled so no more faults should be
		 * generated at this point */
		mutex_unlock(&kbdev->pm.lock);
		kbase_flush_mmu_wqs(kbdev);
		mutex_lock(&kbdev->pm.lock);
		WARN_ON(!kbase_pm_do_poweroff(kbdev, false));
	}
	mutex_unlock(&kbdev->pm.lock);
}

KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt);

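/*
 * Undo kbase_hwaccess_pm_init(): tear down the power policy, the core
 * availability policy and the metrics subsystem. The GPU is expected to be
 * idle (active_count == 0) and holding no cycle-counter requests.
 */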
void kbase_hwaccess_pm_term(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);
	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests == 0);

	/* Free any resources the policy allocated */
	kbase_pm_policy_term(kbdev);
	kbase_pm_ca_term(kbdev);

	/* Shut down the metrics subsystem */
	kbasep_pm_metrics_term(kbdev);
}

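/*
 * Called once a power transition has made progress (the surrounding
 * GPU_INTERRUPT timeline events suggest this runs from the GPU IRQ path).
 * Re-runs the core state machine and, when the desired cores have become
 * available, kicks the job slots so queued atoms can be submitted.
 */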
void kbase_pm_power_changed(struct kbase_device *kbdev)
{
	bool cores_are_available;
	unsigned long flags;

	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START);
	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
	cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END);

	if (cores_are_available) {
		/* Log timelining information that a change in state has
		 * completed */
		kbase_timeline_pm_handle_event(kbdev,
				KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);

		spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
		kbase_gpu_slot_update(kbdev);
		spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
	}
}

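/*
 * Install a per-job-slot debug core mask (JS0..JS2), recompute the combined
 * mask and let the core state machine apply the new restriction.
 */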
void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
		u64 new_core_mask_js0, u64 new_core_mask_js1,
		u64 new_core_mask_js2)
{
	kbdev->pm.debug_core_mask[0] = new_core_mask_js0;
	kbdev->pm.debug_core_mask[1] = new_core_mask_js1;
	kbdev->pm.debug_core_mask[2] = new_core_mask_js2;
	kbdev->pm.debug_core_mask_all = new_core_mask_js0 | new_core_mask_js1 |
			new_core_mask_js2;

	kbase_pm_update_cores_state_nolock(kbdev);
}

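/*
 * kbase_hwaccess_pm_gpu_active() and kbase_hwaccess_pm_gpu_idle() simply
 * forward the active/idle notification to kbase_pm_update_active() so the
 * backend can reevaluate whether the GPU needs to stay powered.
 */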
void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev)
{
	kbase_pm_update_active(kbdev);
}

void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev)
{
	kbase_pm_update_active(kbdev);
}

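/*
 * System-suspend entry point: force the GPU and all cores off regardless of
 * the active policy, using the same fault-flush retry as
 * kbase_hwaccess_pm_halt() but passing is_suspend = true to the initial
 * power-off.
 */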
void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;

	/* Force power off the GPU and all cores (regardless of policy), only
	 * after the PM active count reaches zero (otherwise, we risk turning it
	 * off prematurely) */
	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);
	kbase_pm_cancel_deferred_poweroff(kbdev);
	if (!kbase_pm_do_poweroff(kbdev, true)) {
		/* Page/bus faults are pending, must drop pm.lock to process.
		 * Interrupts are disabled so no more faults should be
		 * generated at this point */
		mutex_unlock(&kbdev->pm.lock);
		kbase_flush_mmu_wqs(kbdev);
		mutex_lock(&kbdev->pm.lock);
		WARN_ON(!kbase_pm_do_poweroff(kbdev, false));
	}

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);
}

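/*
 * System-resume entry point: clear the suspending flag and power the GPU
 * back on, passing is_resume = true through kbase_pm_do_poweron().
 */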
void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);
	kbdev->pm.suspending = false;
	kbase_pm_do_poweron(kbdev, true);
	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);
}