drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c
/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */

/*
 * GPU backend implementation of base kernel power management APIs
 */

#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_config_defaults.h>
#ifdef CONFIG_MALI_PLATFORM_DEVICETREE
#include <linux/pm_runtime.h>
#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */

#include <mali_kbase_pm.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
#include <backend/gpu/mali_kbase_js_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>

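/* Enable register access: invoke the platform power-on callback (if one is
 * configured) and mark the GPU as powered. */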
void kbase_pm_register_access_enable(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *callbacks;

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

	if (callbacks)
		callbacks->power_on_callback(kbdev);

	kbdev->pm.backend.gpu_powered = true;
}

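/* Disable register access: invoke the platform power-off callback (if one is
 * configured) and mark the GPU as no longer powered. */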
void kbase_pm_register_access_disable(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *callbacks;

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

	if (callbacks)
		callbacks->power_off_callback(kbdev);

	kbdev->pm.backend.gpu_powered = false;
}

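/* Initialise the power management backend: set up locks, wait queues and
 * platform callbacks, then bring up the metrics subsystem, the core
 * availability (CA) policy and the power policy. */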
int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
{
	int ret = 0;
	struct kbase_pm_callback_conf *callbacks;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_init(&kbdev->pm.lock);

	kbdev->pm.backend.gpu_powered = false;
	kbdev->pm.suspending = false;
#ifdef CONFIG_MALI_DEBUG
	kbdev->pm.backend.driver_ready_for_irqs = false;
#endif /* CONFIG_MALI_DEBUG */
	kbdev->pm.backend.gpu_in_desired_state = true;
	init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait);

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
	if (callbacks) {
		kbdev->pm.backend.callback_power_on =
					callbacks->power_on_callback;
		kbdev->pm.backend.callback_power_off =
					callbacks->power_off_callback;
		kbdev->pm.backend.callback_power_suspend =
					callbacks->power_suspend_callback;
		kbdev->pm.backend.callback_power_resume =
					callbacks->power_resume_callback;
		kbdev->pm.callback_power_runtime_init =
					callbacks->power_runtime_init_callback;
		kbdev->pm.callback_power_runtime_term =
					callbacks->power_runtime_term_callback;
		kbdev->pm.backend.callback_power_runtime_on =
					callbacks->power_runtime_on_callback;
		kbdev->pm.backend.callback_power_runtime_off =
					callbacks->power_runtime_off_callback;
		kbdev->pm.backend.callback_power_runtime_idle =
					callbacks->power_runtime_idle_callback;
	} else {
		kbdev->pm.backend.callback_power_on = NULL;
		kbdev->pm.backend.callback_power_off = NULL;
		kbdev->pm.backend.callback_power_suspend = NULL;
		kbdev->pm.backend.callback_power_resume = NULL;
		kbdev->pm.callback_power_runtime_init = NULL;
		kbdev->pm.callback_power_runtime_term = NULL;
		kbdev->pm.backend.callback_power_runtime_on = NULL;
		kbdev->pm.backend.callback_power_runtime_off = NULL;
		kbdev->pm.backend.callback_power_runtime_idle = NULL;
	}

	/* Initialise the metrics subsystem */
	ret = kbasep_pm_metrics_init(kbdev);
	if (ret)
		return ret;

	init_waitqueue_head(&kbdev->pm.backend.l2_powered_wait);
	kbdev->pm.backend.l2_powered = 0;

	init_waitqueue_head(&kbdev->pm.backend.reset_done_wait);
	kbdev->pm.backend.reset_done = false;

	init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
	kbdev->pm.active_count = 0;

	spin_lock_init(&kbdev->pm.power_change_lock);
	spin_lock_init(&kbdev->pm.backend.gpu_cycle_counter_requests_lock);
	spin_lock_init(&kbdev->pm.backend.gpu_powered_lock);

	if (kbase_pm_ca_init(kbdev) != 0)
		goto workq_fail;

	if (kbase_pm_policy_init(kbdev) != 0)
		goto pm_policy_fail;

	return 0;

pm_policy_fail:
	kbase_pm_ca_term(kbdev);
workq_fail:
	kbasep_pm_metrics_term(kbdev);
	return -EINVAL;
}

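/* Turn the GPU clocks and interrupts on and ask the active power policy to
 * transition the required cores. Called with pm.lock held. */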
void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
{
	lockdep_assert_held(&kbdev->pm.lock);

	/* Turn clocks and interrupts on - no-op if we haven't done a previous
	 * kbase_pm_clock_off() */
	kbase_pm_clock_on(kbdev, is_resume);

	/* Update core status as required by the policy */
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START);
	kbase_pm_update_cores_state(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END);

	/* NOTE: We don't wait to reach the desired state, since running atoms
	 * will wait for that state to be reached anyway */
}

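/* Force all shader, tiler and L2 cores towards the off state and then turn
 * the GPU clock off. Returns false if pending page/bus faults prevented the
 * clock from being turned off. Called with pm.lock held. */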
bool kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend)
{
	unsigned long flags;
	bool cores_are_available;

	lockdep_assert_held(&kbdev->pm.lock);

	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

	/* Force all cores off */
	kbdev->pm.backend.desired_shader_state = 0;
	kbdev->pm.backend.desired_tiler_state = 0;

	/* Force all cores to be unavailable, in the situation where
	 * transitions are in progress for some cores but not others,
	 * and kbase_pm_check_transitions_nolock can not immediately
	 * power off the cores */
	kbdev->shader_available_bitmap = 0;
	kbdev->tiler_available_bitmap = 0;
	kbdev->l2_available_bitmap = 0;

	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START);
	cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END);
	/* 'cores_are_available' is not used here */
	CSTD_UNUSED(cores_are_available);

	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	/* NOTE: We won't wait to reach the cores' desired state, even if we're
	 * powering off the GPU itself too. It's safe to cut the power whilst
	 * they're transitioning to off, because the cores should be idle and
	 * all cache flushes should already have occurred */

	/* Consume any change-state events */
	kbase_timeline_pm_check_handle_event(kbdev,
				KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
	/* Disable interrupts and turn the clock off */
	return kbase_pm_clock_off(kbdev, is_suspend);
}

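/* Power up the GPU at driver initialisation time: initialise the hardware,
 * read the present cores, enable interrupts and power on whatever the active
 * policy requires. */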
int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
		unsigned int flags)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	unsigned long irq_flags;
	int ret;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

	/* A suspend won't happen during startup/insmod */
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

	/* Power up the GPU, don't enable IRQs as we are not ready to receive
	 * them. */
	ret = kbase_pm_init_hw(kbdev, flags);
	if (ret) {
		mutex_unlock(&kbdev->pm.lock);
		mutex_unlock(&js_devdata->runpool_mutex);
		return ret;
	}

	kbasep_pm_read_present_cores(kbdev);

	kbdev->pm.debug_core_mask_all = kbdev->pm.debug_core_mask[0] =
			kbdev->pm.debug_core_mask[1] =
			kbdev->pm.debug_core_mask[2] =
			kbdev->gpu_props.props.raw_props.shader_present;

	/* Pretend the GPU is active to prevent a power policy turning the GPU
	 * cores off */
	kbdev->pm.active_count = 1;

	spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
								irq_flags);
	/* Ensure cycle counter is off */
	kbdev->pm.backend.gpu_cycle_counter_requests = 0;
	spin_unlock_irqrestore(
			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
								irq_flags);

	/* We are ready to receive IRQs now that the power policy is set up,
	 * so enable them. */
#ifdef CONFIG_MALI_DEBUG
	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
	kbdev->pm.backend.driver_ready_for_irqs = true;
	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
#endif
	kbase_pm_enable_interrupts(kbdev);

	/* Turn on the GPU and any cores needed by the policy */
	kbase_pm_do_poweron(kbdev, false);
	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);

	/* Idle the GPU and/or cores, if the policy wants it to */
	kbase_pm_context_idle(kbdev);

	return 0;
}

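/* Power off the GPU when the driver is halted, retrying once after flushing
 * the MMU workqueues if pending page/bus faults block the first attempt. */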
void kbase_hwaccess_pm_halt(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&kbdev->pm.lock);
	kbase_pm_cancel_deferred_poweroff(kbdev);
	if (!kbase_pm_do_poweroff(kbdev, false)) {
		/* Page/bus faults are pending, must drop pm.lock to process.
		 * Interrupts are disabled so no more faults should be
		 * generated at this point */
		mutex_unlock(&kbdev->pm.lock);
		kbase_flush_mmu_wqs(kbdev);
		mutex_lock(&kbdev->pm.lock);
		WARN_ON(!kbase_pm_do_poweroff(kbdev, false));
	}
	mutex_unlock(&kbdev->pm.lock);
}

KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt);

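/* Tear down the power management backend: the power policy, the core
 * availability policy and the metrics subsystem. */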
void kbase_hwaccess_pm_term(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);
	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests == 0);

	/* Free any resources the policy allocated */
	kbase_pm_policy_term(kbdev);
	kbase_pm_ca_term(kbdev);

	/* Shut down the metrics subsystem */
	kbasep_pm_metrics_term(kbdev);
}

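/* Called on a power-change interrupt: re-evaluate core transitions and, if
 * the desired cores are now available, update the job slots. */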
void kbase_pm_power_changed(struct kbase_device *kbdev)
{
	bool cores_are_available;
	unsigned long flags;

	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START);
	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
	cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
				SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END);

	if (cores_are_available) {
		/* Log timelining information that a change in state has
		 * completed */
		kbase_timeline_pm_handle_event(kbdev,
				KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);

		spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);
		kbase_gpu_slot_update(kbdev);
		spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
	}
}

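/* Apply new debug core masks for job slots 0-2 and re-evaluate the desired
 * core state. */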
void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
		u64 new_core_mask_js0, u64 new_core_mask_js1,
		u64 new_core_mask_js2)
{
	kbdev->pm.debug_core_mask[0] = new_core_mask_js0;
	kbdev->pm.debug_core_mask[1] = new_core_mask_js1;
	kbdev->pm.debug_core_mask[2] = new_core_mask_js2;
	kbdev->pm.debug_core_mask_all = new_core_mask_js0 | new_core_mask_js1 |
			new_core_mask_js2;

	kbase_pm_update_cores_state_nolock(kbdev);
}

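/* Notify the backend that the GPU has become active. */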
void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev)
{
	kbase_pm_update_active(kbdev);
}

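/* Notify the backend that the GPU has become idle. */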
void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev)
{
	kbase_pm_update_active(kbdev);
}

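/* Suspend the GPU: force the GPU and all cores off regardless of the active
 * policy, then suspend the backend timers. */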
void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;

	/* Force power off the GPU and all cores (regardless of policy), only
	 * after the PM active count reaches zero (otherwise, we risk turning it
	 * off prematurely) */
	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);
	kbase_pm_cancel_deferred_poweroff(kbdev);
	if (!kbase_pm_do_poweroff(kbdev, true)) {
		/* Page/bus faults are pending, must drop pm.lock to process.
		 * Interrupts are disabled so no more faults should be
		 * generated at this point */
		mutex_unlock(&kbdev->pm.lock);
		kbase_flush_mmu_wqs(kbdev);
		mutex_lock(&kbdev->pm.lock);
		WARN_ON(!kbase_pm_do_poweroff(kbdev, false));
	}

	kbase_backend_timer_suspend(kbdev);

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);
}

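/* Resume the GPU after a system suspend: power it back on and resume the
 * backend timers. */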
void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

	kbdev->pm.suspending = false;
	kbase_pm_do_poweron(kbdev, true);

	kbase_backend_timer_resume(kbdev);

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);
}