MALI: midgard: RK: add separate src dir of Midgard driver for RK Linux device
firefly-linux-kernel-4.4.55.git: drivers/gpu/arm/midgard_for_linux/backend/gpu/mali_kbase_pm_backend.c
/*
 *
 * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */



/*
 * GPU backend implementation of base kernel power management APIs
 */

#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_config_defaults.h>
#ifdef CONFIG_MALI_PLATFORM_DEVICETREE
#include <linux/pm_runtime.h>
#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */

#include <mali_kbase_pm.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
#include <backend/gpu/mali_kbase_js_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>

/* Enable register access: invoke the platform power-on callback (if one is
 * configured) and mark the GPU as powered. */
void kbase_pm_register_access_enable(struct kbase_device *kbdev)
{
        struct kbase_pm_callback_conf *callbacks;

        callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

        if (callbacks)
                callbacks->power_on_callback(kbdev);

        kbdev->pm.backend.gpu_powered = true;
}

/* Disable register access: invoke the platform power-off callback (if one is
 * configured) and mark the GPU as no longer powered. */
void kbase_pm_register_access_disable(struct kbase_device *kbdev)
{
        struct kbase_pm_callback_conf *callbacks;

        callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

        if (callbacks)
                callbacks->power_off_callback(kbdev);

        kbdev->pm.backend.gpu_powered = false;
}

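/*
 * Illustrative sketch, not part of this file: a platform integration
 * typically supplies POWER_MANAGEMENT_CALLBACKS from its platform
 * configuration as a pointer to a struct kbase_pm_callback_conf. The field
 * names below are the ones consumed by kbase_hwaccess_pm_init(); the
 * example_* handlers are hypothetical and would follow the prototypes
 * declared for struct kbase_pm_callback_conf.
 *
 *        static struct kbase_pm_callback_conf example_pm_callbacks = {
 *                .power_on_callback = example_power_on,
 *                .power_off_callback = example_power_off,
 *                .power_suspend_callback = NULL,
 *                .power_resume_callback = NULL,
 *                .power_runtime_init_callback = NULL,
 *                .power_runtime_term_callback = NULL,
 *                .power_runtime_on_callback = NULL,
 *                .power_runtime_off_callback = NULL,
 *                .power_runtime_idle_callback = NULL,
 *        };
 */
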
int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
{
        int ret = 0;
        struct kbase_pm_callback_conf *callbacks;

        KBASE_DEBUG_ASSERT(kbdev != NULL);

        mutex_init(&kbdev->pm.lock);

        kbdev->pm.backend.gpu_powered = false;
        kbdev->pm.suspending = false;
#ifdef CONFIG_MALI_DEBUG
        kbdev->pm.backend.driver_ready_for_irqs = false;
#endif /* CONFIG_MALI_DEBUG */
        kbdev->pm.backend.gpu_in_desired_state = true;
        init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait);

        callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
        if (callbacks) {
                kbdev->pm.backend.callback_power_on =
                                        callbacks->power_on_callback;
                kbdev->pm.backend.callback_power_off =
                                        callbacks->power_off_callback;
                kbdev->pm.backend.callback_power_suspend =
                                        callbacks->power_suspend_callback;
                kbdev->pm.backend.callback_power_resume =
                                        callbacks->power_resume_callback;
                kbdev->pm.callback_power_runtime_init =
                                        callbacks->power_runtime_init_callback;
                kbdev->pm.callback_power_runtime_term =
                                        callbacks->power_runtime_term_callback;
                kbdev->pm.backend.callback_power_runtime_on =
                                        callbacks->power_runtime_on_callback;
                kbdev->pm.backend.callback_power_runtime_off =
                                        callbacks->power_runtime_off_callback;
                kbdev->pm.backend.callback_power_runtime_idle =
                                        callbacks->power_runtime_idle_callback;
        } else {
                kbdev->pm.backend.callback_power_on = NULL;
                kbdev->pm.backend.callback_power_off = NULL;
                kbdev->pm.backend.callback_power_suspend = NULL;
                kbdev->pm.backend.callback_power_resume = NULL;
                kbdev->pm.callback_power_runtime_init = NULL;
                kbdev->pm.callback_power_runtime_term = NULL;
                kbdev->pm.backend.callback_power_runtime_on = NULL;
                kbdev->pm.backend.callback_power_runtime_off = NULL;
                kbdev->pm.backend.callback_power_runtime_idle = NULL;
        }

        /* Initialise the metrics subsystem */
        ret = kbasep_pm_metrics_init(kbdev);
        if (ret)
                return ret;

        init_waitqueue_head(&kbdev->pm.backend.l2_powered_wait);
        kbdev->pm.backend.l2_powered = 0;

        init_waitqueue_head(&kbdev->pm.backend.reset_done_wait);
        kbdev->pm.backend.reset_done = false;

        init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
        kbdev->pm.active_count = 0;

        spin_lock_init(&kbdev->pm.power_change_lock);
        spin_lock_init(&kbdev->pm.backend.gpu_cycle_counter_requests_lock);
        spin_lock_init(&kbdev->pm.backend.gpu_powered_lock);

        if (kbase_pm_ca_init(kbdev) != 0)
                goto workq_fail;

        if (kbase_pm_policy_init(kbdev) != 0)
                goto pm_policy_fail;

        return 0;

pm_policy_fail:
        kbase_pm_ca_term(kbdev);
workq_fail:
        kbasep_pm_metrics_term(kbdev);
        return -EINVAL;
}

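/*
 * Sketch of the expected init/term pairing, assuming a hypothetical caller
 * in the device setup and teardown paths (the real call sites live outside
 * this file):
 *
 *        err = kbase_hwaccess_pm_init(kbdev);
 *        if (err)
 *                return err;
 *        ...
 *        kbase_hwaccess_pm_term(kbdev);
 */
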
void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
{
        lockdep_assert_held(&kbdev->pm.lock);

        /* Turn clocks and interrupts on - no-op if we haven't done a previous
         * kbase_pm_clock_off() */
        kbase_pm_clock_on(kbdev, is_resume);

        /* Update core status as required by the policy */
        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                                SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START);
        kbase_pm_update_cores_state(kbdev);
        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                                SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END);

        /* NOTE: We don't wait to reach the desired state, since running atoms
         * will wait for that state to be reached anyway */
}

/* Power off all cores and then the GPU clock. Returns the result of
 * kbase_pm_clock_off(): false means page/bus fault work is still pending and
 * the caller must flush it and retry before the power can be cut. */
bool kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend)
{
        unsigned long flags;
        bool cores_are_available;

        lockdep_assert_held(&kbdev->pm.lock);

        spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

        /* Force all cores off */
        kbdev->pm.backend.desired_shader_state = 0;

        /* Force all cores to be unavailable, in the situation where
         * transitions are in progress for some cores but not others,
         * and kbase_pm_check_transitions_nolock cannot immediately
         * power off the cores */
        kbdev->shader_available_bitmap = 0;
        kbdev->tiler_available_bitmap = 0;
        kbdev->l2_available_bitmap = 0;

        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                                SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START);
        cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                                SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END);
        /* 'cores_are_available' is not needed here; the return value below
         * comes from kbase_pm_clock_off() instead */
        CSTD_UNUSED(cores_are_available);

        spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

        /* NOTE: We won't wait to reach the cores' desired state, even if we're
         * powering off the GPU itself too. It's safe to cut the power whilst
         * they're transitioning to off, because the cores should be idle and
         * all cache flushes should already have occurred */

        /* Consume any change-state events */
        kbase_timeline_pm_check_handle_event(kbdev,
                                KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
        /* Disable interrupts and turn the clock off */
        return kbase_pm_clock_off(kbdev, is_suspend);
}

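/*
 * Sketch of the retry pattern used by the callers of kbase_pm_do_poweroff()
 * later in this file (kbase_hwaccess_pm_halt() and
 * kbase_hwaccess_pm_suspend()); pm.lock is dropped so that pending MMU fault
 * work can complete before the power-off is retried:
 *
 *        if (!kbase_pm_do_poweroff(kbdev, false)) {
 *                mutex_unlock(&kbdev->pm.lock);
 *                kbase_flush_mmu_wqs(kbdev);
 *                mutex_lock(&kbdev->pm.lock);
 *                WARN_ON(!kbase_pm_do_poweroff(kbdev, false));
 *        }
 */
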
int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
                unsigned int flags)
{
        struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
        unsigned long irq_flags;
        int ret;

        KBASE_DEBUG_ASSERT(kbdev != NULL);

        mutex_lock(&js_devdata->runpool_mutex);
        mutex_lock(&kbdev->pm.lock);

        /* A suspend won't happen during startup/insmod */
        KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

        /* Power up the GPU, but don't enable IRQs yet, as we are not ready to
         * receive them. */
        ret = kbase_pm_init_hw(kbdev, flags);
        if (ret) {
                mutex_unlock(&kbdev->pm.lock);
                mutex_unlock(&js_devdata->runpool_mutex);
                return ret;
        }

        kbasep_pm_read_present_cores(kbdev);

        kbdev->pm.debug_core_mask_all = kbdev->pm.debug_core_mask[0] =
                        kbdev->pm.debug_core_mask[1] =
                        kbdev->pm.debug_core_mask[2] =
                        kbdev->gpu_props.props.raw_props.shader_present;

        /* Pretend the GPU is active to prevent a power policy turning the GPU
         * cores off */
        kbdev->pm.active_count = 1;

        spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
                                                                irq_flags);
        /* Ensure cycle counter is off */
        kbdev->pm.backend.gpu_cycle_counter_requests = 0;
        spin_unlock_irqrestore(
                        &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
                                                                irq_flags);

        /* The power policy is now set up, so we are ready to receive IRQs;
         * enable them. */
#ifdef CONFIG_MALI_DEBUG
        spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
        kbdev->pm.backend.driver_ready_for_irqs = true;
        spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
#endif
        kbase_pm_enable_interrupts(kbdev);

        /* Turn on the GPU and any cores needed by the policy */
        kbase_pm_do_poweron(kbdev, false);
        mutex_unlock(&kbdev->pm.lock);
        mutex_unlock(&js_devdata->runpool_mutex);

        /* Idle the GPU and/or cores, if the policy wants it to */
        kbase_pm_context_idle(kbdev);

        return 0;
}

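/*
 * The active_count = 1 / kbase_pm_context_idle() pattern in
 * kbase_hwaccess_pm_powerup() mirrors the reference-counted usage found
 * elsewhere in the driver. A hypothetical GPU user would bracket its work
 * like this (sketch only):
 *
 *        kbase_pm_context_active(kbdev);
 *        ... submit and run work on the GPU ...
 *        kbase_pm_context_idle(kbdev);
 */
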
void kbase_hwaccess_pm_halt(struct kbase_device *kbdev)
{
        KBASE_DEBUG_ASSERT(kbdev != NULL);

        mutex_lock(&kbdev->pm.lock);
        kbase_pm_cancel_deferred_poweroff(kbdev);
        if (!kbase_pm_do_poweroff(kbdev, false)) {
                /* Page/bus faults are pending, must drop pm.lock to process.
                 * Interrupts are disabled so no more faults should be
                 * generated at this point */
                mutex_unlock(&kbdev->pm.lock);
                kbase_flush_mmu_wqs(kbdev);
                mutex_lock(&kbdev->pm.lock);
                WARN_ON(!kbase_pm_do_poweroff(kbdev, false));
        }
        mutex_unlock(&kbdev->pm.lock);
}

KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt);

void kbase_hwaccess_pm_term(struct kbase_device *kbdev)
{
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);
        KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests == 0);

        /* Free any resources the policy allocated */
        kbase_pm_policy_term(kbdev);
        kbase_pm_ca_term(kbdev);

        /* Shut down the metrics subsystem */
        kbasep_pm_metrics_term(kbdev);
}

void kbase_pm_power_changed(struct kbase_device *kbdev)
{
        bool cores_are_available;
        unsigned long flags;

        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                                SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START);
        spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
        cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
        spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                                SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END);

        if (cores_are_available) {
                /* Log timelining information that a change in state has
                 * completed */
                kbase_timeline_pm_handle_event(kbdev,
                                KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);

                spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
                kbase_gpu_slot_update(kbdev);
                spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
        }
}

void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
                u64 new_core_mask_js0, u64 new_core_mask_js1,
                u64 new_core_mask_js2)
{
        kbdev->pm.debug_core_mask[0] = new_core_mask_js0;
        kbdev->pm.debug_core_mask[1] = new_core_mask_js1;
        kbdev->pm.debug_core_mask[2] = new_core_mask_js2;
        kbdev->pm.debug_core_mask_all = new_core_mask_js0 | new_core_mask_js1 |
                        new_core_mask_js2;

        kbase_pm_update_cores_state_nolock(kbdev);
}

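/*
 * Hypothetical usage sketch for the function above: restrict job slot 0 to
 * the first two shader cores while leaving slots 1 and 2 unrestricted. The
 * shader_present mask is the same GPU property used in
 * kbase_hwaccess_pm_powerup(); the _nolock suffix of
 * kbase_pm_update_cores_state_nolock() suggests the caller is expected to
 * hold kbdev->pm.power_change_lock, so this shows only the call itself, not
 * the required locking context.
 *
 *        u64 present = kbdev->gpu_props.props.raw_props.shader_present;
 *
 *        kbase_pm_set_debug_core_mask(kbdev, present & 0x3, present, present);
 */
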
void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev)
{
        kbase_pm_update_active(kbdev);
}

void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev)
{
        kbase_pm_update_active(kbdev);
}

void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
{
        struct kbasep_js_device_data *js_devdata = &kbdev->js_data;

        /* Force power off the GPU and all cores (regardless of policy), only
         * after the PM active count reaches zero (otherwise, we risk turning it
         * off prematurely) */
        mutex_lock(&js_devdata->runpool_mutex);
        mutex_lock(&kbdev->pm.lock);
        kbase_pm_cancel_deferred_poweroff(kbdev);
        if (!kbase_pm_do_poweroff(kbdev, true)) {
                /* Page/bus faults are pending, must drop pm.lock to process.
                 * Interrupts are disabled so no more faults should be
                 * generated at this point */
                mutex_unlock(&kbdev->pm.lock);
                kbase_flush_mmu_wqs(kbdev);
                mutex_lock(&kbdev->pm.lock);
                WARN_ON(!kbase_pm_do_poweroff(kbdev, false));
        }

        kbase_backend_timer_suspend(kbdev);

        mutex_unlock(&kbdev->pm.lock);
        mutex_unlock(&js_devdata->runpool_mutex);
}

void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
{
        struct kbasep_js_device_data *js_devdata = &kbdev->js_data;

        mutex_lock(&js_devdata->runpool_mutex);
        mutex_lock(&kbdev->pm.lock);

        kbdev->pm.suspending = false;
        kbase_pm_do_poweron(kbdev, true);

        kbase_backend_timer_resume(kbdev);

        mutex_unlock(&kbdev->pm.lock);
        mutex_unlock(&js_devdata->runpool_mutex);
}
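
/*
 * Hedged sketch of the intended pairing of the two functions above. The
 * actual call sites live outside this file, in the driver's system power
 * management path; the sequence is only illustrative:
 *
 *        kbase_hwaccess_pm_suspend(kbdev);
 *        ... system sleeps ...
 *        kbase_hwaccess_pm_resume(kbdev);
 */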