/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */




/*
 * GPU backend implementation of base kernel power management APIs
 */

#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_config_defaults.h>
#ifdef CONFIG_MALI_PLATFORM_DEVICETREE
#include <linux/pm_runtime.h>
#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */

#include <mali_kbase_pm.h>
#include <mali_kbase_hwaccess_jm.h>
#include <backend/gpu/mali_kbase_js_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>

static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data);

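/* Enable access to GPU registers: invoke the platform power-on callback (if
 * one is configured) and mark the GPU as powered. */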
void kbase_pm_register_access_enable(struct kbase_device *kbdev)
{
        struct kbase_pm_callback_conf *callbacks;

        callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

        if (callbacks)
                callbacks->power_on_callback(kbdev);

        kbdev->pm.backend.gpu_powered = true;
}

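/* Disable access to GPU registers: invoke the platform power-off callback (if
 * one is configured) and mark the GPU as no longer powered. */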
void kbase_pm_register_access_disable(struct kbase_device *kbdev)
{
        struct kbase_pm_callback_conf *callbacks;

        callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

        if (callbacks)
                callbacks->power_off_callback(kbdev);

        kbdev->pm.backend.gpu_powered = false;
}

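/* Initialise the PM backend: create the power-off wait workqueue, install the
 * platform PM callbacks (if any), and set up the metrics subsystem, core
 * availability framework and power policy. Returns 0 on success or a negative
 * error code. */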
int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
{
        int ret = 0;
        struct kbase_pm_callback_conf *callbacks;

        KBASE_DEBUG_ASSERT(kbdev != NULL);

        mutex_init(&kbdev->pm.lock);

        kbdev->pm.backend.gpu_poweroff_wait_wq = alloc_workqueue("kbase_pm_poweroff_wait",
                        WQ_HIGHPRI | WQ_UNBOUND, 1);
        if (!kbdev->pm.backend.gpu_poweroff_wait_wq)
                return -ENOMEM;

        INIT_WORK(&kbdev->pm.backend.gpu_poweroff_wait_work,
                        kbase_pm_gpu_poweroff_wait_wq);

        kbdev->pm.backend.gpu_powered = false;
        kbdev->pm.suspending = false;
#ifdef CONFIG_MALI_DEBUG
        kbdev->pm.backend.driver_ready_for_irqs = false;
#endif /* CONFIG_MALI_DEBUG */
        kbdev->pm.backend.gpu_in_desired_state = true;
        init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait);

        callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
        if (callbacks) {
                kbdev->pm.backend.callback_power_on =
                                        callbacks->power_on_callback;
                kbdev->pm.backend.callback_power_off =
                                        callbacks->power_off_callback;
                kbdev->pm.backend.callback_power_suspend =
                                        callbacks->power_suspend_callback;
                kbdev->pm.backend.callback_power_resume =
                                        callbacks->power_resume_callback;
                kbdev->pm.callback_power_runtime_init =
                                        callbacks->power_runtime_init_callback;
                kbdev->pm.callback_power_runtime_term =
                                        callbacks->power_runtime_term_callback;
                kbdev->pm.backend.callback_power_runtime_on =
                                        callbacks->power_runtime_on_callback;
                kbdev->pm.backend.callback_power_runtime_off =
                                        callbacks->power_runtime_off_callback;
                kbdev->pm.backend.callback_power_runtime_idle =
                                        callbacks->power_runtime_idle_callback;
        } else {
                kbdev->pm.backend.callback_power_on = NULL;
                kbdev->pm.backend.callback_power_off = NULL;
                kbdev->pm.backend.callback_power_suspend = NULL;
                kbdev->pm.backend.callback_power_resume = NULL;
                kbdev->pm.callback_power_runtime_init = NULL;
                kbdev->pm.callback_power_runtime_term = NULL;
                kbdev->pm.backend.callback_power_runtime_on = NULL;
                kbdev->pm.backend.callback_power_runtime_off = NULL;
                kbdev->pm.backend.callback_power_runtime_idle = NULL;
        }

        /* Initialise the metrics subsystem */
        ret = kbasep_pm_metrics_init(kbdev);
        if (ret) {
                /* Don't leak the workqueue on the error path */
                destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wait_wq);
                return ret;
        }

        init_waitqueue_head(&kbdev->pm.backend.l2_powered_wait);
        kbdev->pm.backend.l2_powered = 0;

        init_waitqueue_head(&kbdev->pm.backend.reset_done_wait);
        kbdev->pm.backend.reset_done = false;

        init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
        kbdev->pm.active_count = 0;

        spin_lock_init(&kbdev->pm.backend.gpu_cycle_counter_requests_lock);
        spin_lock_init(&kbdev->pm.backend.gpu_powered_lock);

        init_waitqueue_head(&kbdev->pm.backend.poweroff_wait);

        if (kbase_pm_ca_init(kbdev) != 0)
                goto workq_fail;

        if (kbase_pm_policy_init(kbdev) != 0)
                goto pm_policy_fail;

        return 0;

pm_policy_fail:
        kbase_pm_ca_term(kbdev);
workq_fail:
        kbasep_pm_metrics_term(kbdev);
        destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wait_wq);
        return -EINVAL;
}

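/* Turn the GPU clock and interrupts on and let the power policy bring up the
 * required cores. Callers must hold kbdev->pm.lock; this does not wait for
 * the cores to reach the desired state. */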
void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
{
        lockdep_assert_held(&kbdev->pm.lock);

        /* Turn clocks and interrupts on - no-op if we haven't done a previous
         * kbase_pm_clock_off() */
        kbase_pm_clock_on(kbdev, is_resume);

        /* Update core status as required by the policy */
        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                                SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START);
        kbase_pm_update_cores_state(kbdev);
        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                                SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END);

        /* NOTE: We don't wait to reach the desired state, since running atoms
         * will wait for that state to be reached anyway */
}

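/* Work item that completes a deferred GPU power-off: unless power-on became
 * required in the meantime, turn the clock off (flushing pending MMU faults
 * first if needed), clear poweroff_wait_in_progress and wake anyone blocked
 * in kbase_pm_wait_for_poweroff_complete(). */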
static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
{
        struct kbase_device *kbdev = container_of(data, struct kbase_device,
                        pm.backend.gpu_poweroff_wait_work);
        struct kbase_pm_device_data *pm = &kbdev->pm;
        struct kbase_pm_backend_data *backend = &pm->backend;
        struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
        unsigned long flags;

/* rk_ext: adaptation in DDK r14 for solution_1_for_glitch. */
#define NOT_TO_WAIT_CORES_POWER_TRANSITIONS_BEFORE_POWER_OFF_GPU

#ifndef NOT_TO_WAIT_CORES_POWER_TRANSITIONS_BEFORE_POWER_OFF_GPU
        /* Wait for power transitions to complete. We do this with no locks held
         * so that we don't deadlock with any pending workqueues */
        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                                SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START);
        kbase_pm_check_transitions_sync(kbdev);
        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                                SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END);
#endif

        mutex_lock(&js_devdata->runpool_mutex);
        mutex_lock(&kbdev->pm.lock);

        if (!backend->poweron_required) {
                WARN_ON(kbdev->l2_available_bitmap ||
                                kbdev->shader_available_bitmap ||
                                kbdev->tiler_available_bitmap);

                /* Consume any change-state events */
                kbase_timeline_pm_check_handle_event(kbdev,
                                        KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);

                /* Disable interrupts and turn the clock off */
                if (!kbase_pm_clock_off(kbdev, backend->poweroff_is_suspend)) {
                        /*
                         * Page/bus faults are pending, must drop locks to
                         * process.  Interrupts are disabled so no more faults
                         * should be generated at this point.
                         */
                        mutex_unlock(&kbdev->pm.lock);
                        mutex_unlock(&js_devdata->runpool_mutex);
                        kbase_flush_mmu_wqs(kbdev);
                        mutex_lock(&js_devdata->runpool_mutex);
                        mutex_lock(&kbdev->pm.lock);

                        /* Turn off the clock now that the faults have been
                         * handled. We dropped locks, so poweron_required may
                         * have changed - power back on if this is the case. */
                        if (backend->poweron_required)
                                kbase_pm_clock_on(kbdev, false);
                        else
                                WARN_ON(!kbase_pm_clock_off(kbdev,
                                                backend->poweroff_is_suspend));
                }
        }

        spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
        backend->poweroff_wait_in_progress = false;
        if (backend->poweron_required) {
                backend->poweron_required = false;
                kbase_pm_update_cores_state_nolock(kbdev);
        }
        spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

        mutex_unlock(&kbdev->pm.lock);
        mutex_unlock(&js_devdata->runpool_mutex);

        wake_up(&kbdev->pm.backend.poweroff_wait);
}

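/* Request that the GPU and all cores be powered off. The actual clock-off is
 * deferred to the gpu_poweroff_wait workqueue; callers that need the GPU to
 * be fully off must use kbase_pm_wait_for_poweroff_complete(). Callers must
 * hold kbdev->pm.lock. */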
void kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend)
{
        unsigned long flags;

        lockdep_assert_held(&kbdev->pm.lock);

        spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
        if (!kbdev->pm.backend.poweroff_wait_in_progress) {
                /* Force all cores off */
                kbdev->pm.backend.desired_shader_state = 0;
                kbdev->pm.backend.desired_tiler_state = 0;

                /* Force all cores to be unavailable, in the situation where
                 * transitions are in progress for some cores but not others,
                 * and kbase_pm_check_transitions_nolock cannot immediately
                 * power off the cores */
                kbdev->shader_available_bitmap = 0;
                kbdev->tiler_available_bitmap = 0;
                kbdev->l2_available_bitmap = 0;

                kbdev->pm.backend.poweroff_wait_in_progress = true;
                kbdev->pm.backend.poweroff_is_suspend = is_suspend;

                spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
                /* Kick off the power-off work here. Callers will have to wait
                 * for it to complete. */
                queue_work(kbdev->pm.backend.gpu_poweroff_wait_wq,
                                &kbdev->pm.backend.gpu_poweroff_wait_work);
        } else {
                spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
        }
}

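/* Despite its name, this returns true once no power-off is in progress; it is
 * the wake-up condition used by kbase_pm_wait_for_poweroff_complete(). */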
static bool is_poweroff_in_progress(struct kbase_device *kbdev)
{
        bool ret;
        unsigned long flags;

        spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
        ret = !kbdev->pm.backend.poweroff_wait_in_progress;
        spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

        return ret;
}

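/* Block (killably) until any power-off started by kbase_pm_do_poweroff() has
 * completed. */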
void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev)
{
        wait_event_killable(kbdev->pm.backend.poweroff_wait,
                        is_poweroff_in_progress(kbdev));
}

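/* One-time GPU power-up: initialise the hardware, read the present cores,
 * enable interrupts and power on whatever the active policy requires.
 * Returns 0 on success or the error from kbase_pm_init_hw(). */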
int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
                unsigned int flags)
{
        struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
        unsigned long irq_flags;
        int ret;

        KBASE_DEBUG_ASSERT(kbdev != NULL);

        mutex_lock(&js_devdata->runpool_mutex);
        mutex_lock(&kbdev->pm.lock);

        /* A suspend won't happen during startup/insmod */
        KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

        /* Power up the GPU, but don't enable IRQs as we are not ready to
         * receive them. */
        ret = kbase_pm_init_hw(kbdev, flags);
        if (ret) {
                mutex_unlock(&kbdev->pm.lock);
                mutex_unlock(&js_devdata->runpool_mutex);
                return ret;
        }

        kbasep_pm_read_present_cores(kbdev);

        kbdev->pm.debug_core_mask_all = kbdev->pm.debug_core_mask[0] =
                        kbdev->pm.debug_core_mask[1] =
                        kbdev->pm.debug_core_mask[2] =
                        kbdev->gpu_props.props.raw_props.shader_present;

        /* Pretend the GPU is active to prevent a power policy turning the GPU
         * cores off */
        kbdev->pm.active_count = 1;

        spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
                                                                irq_flags);
        /* Ensure cycle counter is off */
        kbdev->pm.backend.gpu_cycle_counter_requests = 0;
        spin_unlock_irqrestore(
                        &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
                                                                irq_flags);

        /* We are ready to receive IRQs now as the power policy is set up, so
         * enable them. */
#ifdef CONFIG_MALI_DEBUG
        spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
        kbdev->pm.backend.driver_ready_for_irqs = true;
        spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
#endif
        kbase_pm_enable_interrupts(kbdev);

        /* Turn on the GPU and any cores needed by the policy */
        kbase_pm_do_poweron(kbdev, false);
        mutex_unlock(&kbdev->pm.lock);
        mutex_unlock(&js_devdata->runpool_mutex);

        /* Idle the GPU and/or cores, if the policy wants it to */
        kbase_pm_context_idle(kbdev);

        return 0;
}

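/* Halt the PM backend: cancel any deferred power-off and request an immediate
 * power-off of the GPU. */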
void kbase_hwaccess_pm_halt(struct kbase_device *kbdev)
{
        KBASE_DEBUG_ASSERT(kbdev != NULL);

        mutex_lock(&kbdev->pm.lock);
        kbase_pm_cancel_deferred_poweroff(kbdev);
        kbase_pm_do_poweroff(kbdev, false);
        mutex_unlock(&kbdev->pm.lock);
}

KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt);

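/* Tear down the PM backend: release policy and core availability resources,
 * shut down the metrics subsystem and destroy the power-off wait workqueue.
 * The PM active count must already have dropped to zero. */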
void kbase_hwaccess_pm_term(struct kbase_device *kbdev)
{
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);
        KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests == 0);

        /* Free any resources the policy allocated */
        kbase_pm_policy_term(kbdev);
        kbase_pm_ca_term(kbdev);

        /* Shut down the metrics subsystem */
        kbasep_pm_metrics_term(kbdev);

        destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wait_wq);
}

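/* Re-check core power transitions (typically from the GPU power-change
 * interrupt path, as the timeline markers indicate) and, once the requested
 * cores are available, let the job scheduler submit to them. */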
void kbase_pm_power_changed(struct kbase_device *kbdev)
{
        bool cores_are_available;
        unsigned long flags;

        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                                SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START);
        spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
        cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                                SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END);

        if (cores_are_available) {
                /* Log timelining information that a change in state has
                 * completed */
                kbase_timeline_pm_handle_event(kbdev,
                                KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);

                kbase_backend_slot_update(kbdev);
        }
        spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

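/* Set the per-job-slot debug core masks (and their union) and update the
 * desired core state accordingly. */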
void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
                u64 new_core_mask_js0, u64 new_core_mask_js1,
                u64 new_core_mask_js2)
{
        kbdev->pm.debug_core_mask[0] = new_core_mask_js0;
        kbdev->pm.debug_core_mask[1] = new_core_mask_js1;
        kbdev->pm.debug_core_mask[2] = new_core_mask_js2;
        kbdev->pm.debug_core_mask_all = new_core_mask_js0 | new_core_mask_js1 |
                        new_core_mask_js2;

        kbase_pm_update_cores_state_nolock(kbdev);
}

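/* Notify the backend that the GPU has become active so the power policy can
 * re-evaluate the desired power state. */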
void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev)
{
        kbase_pm_update_active(kbdev);
}

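/* Notify the backend that the GPU has become idle so the power policy can
 * re-evaluate the desired power state. */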
void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev)
{
        kbase_pm_update_active(kbdev);
}

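/* System-suspend handler: force a power-off of the GPU and all cores
 * (regardless of policy), suspend the scheduling timers and wait for the
 * power-off to complete. */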
void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
{
        struct kbasep_js_device_data *js_devdata = &kbdev->js_data;

        /* Force power off the GPU and all cores (regardless of policy), only
         * after the PM active count reaches zero (otherwise, we risk turning it
         * off prematurely) */
        mutex_lock(&js_devdata->runpool_mutex);
        mutex_lock(&kbdev->pm.lock);

        kbase_pm_cancel_deferred_poweroff(kbdev);
        kbase_pm_do_poweroff(kbdev, true);

        kbase_backend_timer_suspend(kbdev);

        mutex_unlock(&kbdev->pm.lock);
        mutex_unlock(&js_devdata->runpool_mutex);

        kbase_pm_wait_for_poweroff_complete(kbdev);
}

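/* System-resume handler: clear the suspending flag, power the GPU back on and
 * resume the scheduling timers. */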
void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
{
        struct kbasep_js_device_data *js_devdata = &kbdev->js_data;

        mutex_lock(&js_devdata->runpool_mutex);
        mutex_lock(&kbdev->pm.lock);

        kbdev->pm.suspending = false;
        kbase_pm_do_poweron(kbdev, true);

        kbase_backend_timer_resume(kbdev);

        mutex_unlock(&kbdev->pm.lock);
        mutex_unlock(&js_devdata->runpool_mutex);
}