rk3288 gpu: update GPU driver to r4p0_eac version
[firefly-linux-kernel-4.4.55.git] drivers/gpu/arm/midgard/mali_kbase_pm.c
/*
 *
 * (C) COPYRIGHT ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */

/**
 * @file mali_kbase_pm.c
 * Base kernel power management APIs
 */

#include <mali_kbase.h>
#include <mali_midg_regmap.h>

#include <mali_kbase_pm.h>

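/** Enable register access by powering on the GPU.
 *
 * Looks up the platform power management callbacks from the device
 * configuration attributes and, if a set is registered, invokes the
 * power-on callback so that GPU registers can be accessed.
 *
 * @param kbdev The kbase device structure for the device
 */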
void kbase_pm_register_access_enable(kbase_device *kbdev)
{
        kbase_pm_callback_conf *callbacks;

        callbacks = (kbase_pm_callback_conf *)kbasep_get_config_value(kbdev,
                        kbdev->config_attributes,
                        KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS);

        if (callbacks)
                callbacks->power_on_callback(kbdev);
}

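/** Disable register access by powering off the GPU.
 *
 * The counterpart of kbase_pm_register_access_enable(): invokes the
 * registered power-off callback, if any, after which GPU registers must
 * not be accessed.
 *
 * @param kbdev The kbase device structure for the device
 */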
void kbase_pm_register_access_disable(kbase_device *kbdev)
{
        kbase_pm_callback_conf *callbacks;

        callbacks = (kbase_pm_callback_conf *)kbasep_get_config_value(kbdev,
                        kbdev->config_attributes,
                        KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS);

        if (callbacks)
                callbacks->power_off_callback(kbdev);
}

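/** Initialise the power management framework for a device.
 *
 * Sets up the PM lock, state flags and wait queues, resolves the platform
 * power management callbacks from the configuration attributes, and brings
 * up the metrics, core availability (CA) and power policy subsystems. On
 * failure, anything already initialised is torn down again.
 *
 * @param kbdev The kbase device structure for the device
 *
 * @return MALI_ERROR_NONE on success, or an error code on failure
 */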
mali_error kbase_pm_init(kbase_device *kbdev)
{
        mali_error ret = MALI_ERROR_NONE;
        kbase_pm_callback_conf *callbacks;

        KBASE_DEBUG_ASSERT(kbdev != NULL);

        mutex_init(&kbdev->pm.lock);

        kbdev->pm.gpu_powered = MALI_FALSE;
        kbdev->pm.suspending = MALI_FALSE;
#ifdef CONFIG_MALI_DEBUG
        kbdev->pm.driver_ready_for_irqs = MALI_FALSE;
#endif /* CONFIG_MALI_DEBUG */
        kbdev->pm.gpu_in_desired_state = MALI_TRUE;
        init_waitqueue_head(&kbdev->pm.gpu_in_desired_state_wait);

        callbacks = (kbase_pm_callback_conf *)kbasep_get_config_value(kbdev,
                        kbdev->config_attributes,
                        KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS);
        if (callbacks) {
                kbdev->pm.callback_power_on = callbacks->power_on_callback;
                kbdev->pm.callback_power_off = callbacks->power_off_callback;
                kbdev->pm.callback_power_suspend =
                                        callbacks->power_suspend_callback;
                kbdev->pm.callback_power_resume =
                                        callbacks->power_resume_callback;
                kbdev->pm.callback_power_runtime_init =
                                        callbacks->power_runtime_init_callback;
                kbdev->pm.callback_power_runtime_term =
                                        callbacks->power_runtime_term_callback;
                kbdev->pm.callback_power_runtime_on =
                                        callbacks->power_runtime_on_callback;
                kbdev->pm.callback_power_runtime_off =
                                        callbacks->power_runtime_off_callback;
        } else {
                kbdev->pm.callback_power_on = NULL;
                kbdev->pm.callback_power_off = NULL;
                kbdev->pm.callback_power_suspend = NULL;
                kbdev->pm.callback_power_resume = NULL;
                kbdev->pm.callback_power_runtime_init = NULL;
                kbdev->pm.callback_power_runtime_term = NULL;
                kbdev->pm.callback_power_runtime_on = NULL;
                kbdev->pm.callback_power_runtime_off = NULL;
        }

        kbdev->pm.platform_dvfs_frequency = (u32)kbasep_get_config_value(kbdev,
                        kbdev->config_attributes,
                        KBASE_CONFIG_ATTR_POWER_MANAGEMENT_DVFS_FREQ);

        /* Initialise the metrics subsystem */
        ret = kbasep_pm_metrics_init(kbdev);
        if (MALI_ERROR_NONE != ret)
                return ret;

        init_waitqueue_head(&kbdev->pm.l2_powered_wait);
        kbdev->pm.l2_powered = 0;

        init_waitqueue_head(&kbdev->pm.reset_done_wait);
        kbdev->pm.reset_done = MALI_FALSE;

        init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
        kbdev->pm.active_count = 0;

        spin_lock_init(&kbdev->pm.power_change_lock);
        spin_lock_init(&kbdev->pm.gpu_cycle_counter_requests_lock);
        spin_lock_init(&kbdev->pm.gpu_powered_lock);

        if (MALI_ERROR_NONE != kbase_pm_ca_init(kbdev))
                goto workq_fail;

        if (MALI_ERROR_NONE != kbase_pm_policy_init(kbdev))
                goto pm_policy_fail;

        return MALI_ERROR_NONE;

pm_policy_fail:
        kbase_pm_ca_term(kbdev);
workq_fail:
        kbasep_pm_metrics_term(kbdev);
        return MALI_ERROR_FUNCTION_FAILED;
}

KBASE_EXPORT_TEST_API(kbase_pm_init)

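/** Power on the GPU and any cores requested by the power policy.
 *
 * Turns the clock and interrupts on (a no-op if no previous
 * kbase_pm_clock_off() call was made) and asks the policy to transition
 * the cores. Does not wait for the desired state to be reached.
 *
 * The caller must hold kbdev->pm.lock.
 *
 * @param kbdev     The kbase device structure for the device
 * @param is_resume MALI_TRUE when called as part of kbase_pm_resume()
 */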
void kbase_pm_do_poweron(kbase_device *kbdev, mali_bool is_resume)
{
        lockdep_assert_held(&kbdev->pm.lock);

        /* Turn clocks and interrupts on - no-op if we haven't done a previous
         * kbase_pm_clock_off() */
        kbase_pm_clock_on(kbdev, is_resume);

        /* Update core status as required by the policy */
        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                        SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START);
        kbase_pm_update_cores_state(kbdev);
        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                        SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END);

        /* NOTE: We don't wait to reach the desired state, since running atoms
         * will wait for that state to be reached anyway */
}

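/** Power off the GPU and force all cores off.
 *
 * Clears the desired and available core bitmaps so that in-progress core
 * transitions cannot keep cores available, then disables interrupts and
 * turns the clock off.
 *
 * The caller must hold kbdev->pm.lock.
 *
 * @param kbdev      The kbase device structure for the device
 * @param is_suspend MALI_TRUE when this power-off is part of a suspend
 */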
void kbase_pm_do_poweroff(kbase_device *kbdev, mali_bool is_suspend)
{
        unsigned long flags;
        mali_bool cores_are_available;

        lockdep_assert_held(&kbdev->pm.lock);

        spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

        /* Force all cores off */
        kbdev->pm.desired_shader_state = 0;

        /* Force all cores to be unavailable, in the situation where
         * transitions are in progress for some cores but not others,
         * and kbase_pm_check_transitions_nolock cannot immediately
         * power off the cores */
        kbdev->shader_available_bitmap = 0;
        kbdev->tiler_available_bitmap = 0;
        kbdev->l2_available_bitmap = 0;

        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                        SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START);
        cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
        KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
                        SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END);
        /* Don't need 'cores_are_available', because we don't return anything */
        CSTD_UNUSED(cores_are_available);

        spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

        /* NOTE: We won't wait to reach the cores' desired state, even if we're
         * powering off the GPU itself too. It's safe to cut the power whilst
         * they're transitioning to off, because the cores should be idle and
         * all cache flushes should already have occurred */

        /* Consume any change-state events */
        kbase_timeline_pm_check_handle_event(kbdev,
                        KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
        /* Disable interrupts and turn the clock off */
        kbase_pm_clock_off(kbdev, is_suspend);
}

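/** Perform the initial power-up of the GPU during device initialisation.
 *
 * Resets the GPU with IRQs disabled, reads the present core bitmaps, stops
 * the cycle counter, then enables interrupts and hands control to the power
 * policy via kbase_pm_do_poweron(). A temporary active reference is held
 * across the power-up and dropped at the end so the policy may idle the GPU
 * if nothing needs it.
 *
 * @param kbdev The kbase device structure for the device
 *
 * @return MALI_ERROR_NONE on success, or the error from kbase_pm_init_hw()
 */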
mali_error kbase_pm_powerup(kbase_device *kbdev)
{
        unsigned long flags;
        mali_error ret;

        KBASE_DEBUG_ASSERT(kbdev != NULL);

        mutex_lock(&kbdev->pm.lock);

        /* A suspend won't happen during startup/insmod */
        KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

        /* Power up the GPU, don't enable IRQs as we are not ready to receive
         * them. */
        ret = kbase_pm_init_hw(kbdev, MALI_FALSE);
        if (ret != MALI_ERROR_NONE) {
                mutex_unlock(&kbdev->pm.lock);
                return ret;
        }

        kbasep_pm_read_present_cores(kbdev);

        kbdev->pm.debug_core_mask = kbdev->shader_present_bitmap;

        /* Pretend the GPU is active to prevent a power policy turning the GPU
         * cores off */
        kbdev->pm.active_count = 1;

        spin_lock_irqsave(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
        /* Ensure cycle counter is off */
        kbdev->pm.gpu_cycle_counter_requests = 0;
        kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
                        GPU_COMMAND_CYCLE_COUNT_STOP, NULL);
        spin_unlock_irqrestore(&kbdev->pm.gpu_cycle_counter_requests_lock,
                        flags);

        /* We are ready to receive IRQs now as the power policy is set up, so
         * enable them. */
#ifdef CONFIG_MALI_DEBUG
        spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
        kbdev->pm.driver_ready_for_irqs = MALI_TRUE;
        spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
#endif
        kbase_pm_enable_interrupts(kbdev);

        /* Turn on the GPU and any cores needed by the policy */
        kbase_pm_do_poweron(kbdev, MALI_FALSE);
        mutex_unlock(&kbdev->pm.lock);

        /* Idle the GPU and/or cores, if the policy wants it to */
        kbase_pm_context_idle(kbdev);

        return MALI_ERROR_NONE;
}

KBASE_EXPORT_TEST_API(kbase_pm_powerup)

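/** Take a PM active reference, powering the GPU on if it was idle.
 *
 * Equivalent to kbase_pm_context_active_handle_suspend() with
 * KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE; must not be called while a
 * suspend is in progress.
 *
 * @param kbdev The kbase device structure for the device
 */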
void kbase_pm_context_active(kbase_device *kbdev)
{
        (void)kbase_pm_context_active_handle_suspend(kbdev,
                        KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE);
}

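/** Take a PM active reference, handling any suspend in progress.
 *
 * As kbase_pm_context_active(), but @suspend_handler selects what to do if
 * the device is suspending: refuse the reference outright, or refuse it
 * only when the active count is currently zero.
 *
 * @param kbdev           The kbase device structure for the device
 * @param suspend_handler How to handle a suspend in progress
 *
 * @return 0 on success, 1 if the reference was refused due to a suspend
 */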
int kbase_pm_context_active_handle_suspend(kbase_device *kbdev,
                kbase_pm_suspend_handler suspend_handler)
{
        int c;
        int old_count;

        KBASE_DEBUG_ASSERT(kbdev != NULL);

        /* Trace timeline information about how long it took to handle the
         * decision to powerup. Sometimes the event might be missed due to
         * reading the count outside of the mutex, but this is necessary to
         * get the trace timing correct. */
        old_count = kbdev->pm.active_count;
        if (old_count == 0)
                kbase_timeline_pm_send_event(kbdev,
                                KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);

        mutex_lock(&kbdev->pm.lock);
        if (kbase_pm_is_suspending(kbdev)) {
                switch (suspend_handler) {
                case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
                        if (kbdev->pm.active_count != 0)
                                break;
                        /* FALLTHROUGH */
                case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
                        mutex_unlock(&kbdev->pm.lock);
                        if (old_count == 0)
                                kbase_timeline_pm_handle_event(kbdev,
                                                KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
                        return 1;

                case KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE:
                        /* FALLTHROUGH */
                default:
                        KBASE_DEBUG_ASSERT_MSG(MALI_FALSE, "unreachable");
                        break;
                }
        }
        c = ++kbdev->pm.active_count;

        KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_ACTIVE, NULL, NULL, 0u, c);

        /* Trace the event being handled */
        if (old_count == 0)
                kbase_timeline_pm_handle_event(kbdev,
                                KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);

        if (c == 1) {
                /* First context active: Power on the GPU and any cores
                 * requested by the policy */
                kbase_pm_update_active(kbdev);

                kbasep_pm_record_gpu_active(kbdev);
        }

        mutex_unlock(&kbdev->pm.lock);

        return 0;
}

KBASE_EXPORT_TEST_API(kbase_pm_context_active)

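/** Release a PM active reference, powering down when the count hits zero.
 *
 * When the last reference is dropped, the power policy is updated and
 * anyone waiting on the zero_active_count_wait queue (e.g. a suspend) is
 * woken. Waiters must synchronize with this path by taking pm.lock after
 * waiting.
 *
 * @param kbdev The kbase device structure for the device
 */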
void kbase_pm_context_idle(kbase_device *kbdev)
{
        int c;
        int old_count;

        KBASE_DEBUG_ASSERT(kbdev != NULL);

        /* Trace timeline information about how long it took to handle the
         * decision to powerdown. Sometimes the event might be missed due to
         * reading the count outside of the mutex, but this is necessary to
         * get the trace timing correct. */
        old_count = kbdev->pm.active_count;
        if (old_count == 0)
                kbase_timeline_pm_send_event(kbdev,
                                KBASE_TIMELINE_PM_EVENT_GPU_IDLE);

        mutex_lock(&kbdev->pm.lock);

        c = --kbdev->pm.active_count;

        KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c);

        KBASE_DEBUG_ASSERT(c >= 0);

        /* Trace the event being handled */
        if (old_count == 0)
                kbase_timeline_pm_handle_event(kbdev,
                                KBASE_TIMELINE_PM_EVENT_GPU_IDLE);

        if (c == 0) {
                /* Last context has gone idle */
                kbase_pm_update_active(kbdev);

                kbasep_pm_record_gpu_idle(kbdev);

                /* Wake up anyone waiting for this to become 0 (e.g. suspend).
                 * The waiters must synchronize with us by locking the pm.lock
                 * after waiting */
                wake_up(&kbdev->pm.zero_active_count_wait);
        }

        mutex_unlock(&kbdev->pm.lock);
}

KBASE_EXPORT_TEST_API(kbase_pm_context_idle)

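/** Halt the power management framework.
 *
 * Cancels any deferred power-off and synchronously forces the GPU and all
 * cores off, regardless of the power policy.
 *
 * @param kbdev The kbase device structure for the device
 */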
void kbase_pm_halt(kbase_device *kbdev)
{
        KBASE_DEBUG_ASSERT(kbdev != NULL);

        mutex_lock(&kbdev->pm.lock);
        kbase_pm_cancel_deferred_poweroff(kbdev);
        kbase_pm_do_poweroff(kbdev, MALI_FALSE);
        mutex_unlock(&kbdev->pm.lock);
}

KBASE_EXPORT_TEST_API(kbase_pm_halt)

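/** Shut down the power management framework.
 *
 * Tears down the policy, core availability and metrics subsystems in the
 * reverse order of kbase_pm_init(). There must be no outstanding PM active
 * references or cycle counter requests when this is called.
 *
 * @param kbdev The kbase device structure for the device
 */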
void kbase_pm_term(kbase_device *kbdev)
{
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);
        KBASE_DEBUG_ASSERT(kbdev->pm.gpu_cycle_counter_requests == 0);

        /* Free any resources the policy allocated */
        kbase_pm_policy_term(kbdev);
        kbase_pm_ca_term(kbdev);

        /* Shut down the metrics subsystem */
        kbasep_pm_metrics_term(kbdev);
}

KBASE_EXPORT_TEST_API(kbase_pm_term)

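/** Suspend the GPU as part of a system suspend.
 *
 * Marks the device as suspending, releases the PM active references held by
 * the job scheduler, hardware counter collection and any keep_gpu_powered
 * contexts, waits for the active count to reach zero, and then forces the
 * GPU and all cores off.
 *
 * @param kbdev The kbase device structure for the device
 */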
void kbase_pm_suspend(struct kbase_device *kbdev)
{
        int nr_keep_gpu_powered_ctxs;

        KBASE_DEBUG_ASSERT(kbdev);

        mutex_lock(&kbdev->pm.lock);
        KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
        kbdev->pm.suspending = MALI_TRUE;
        mutex_unlock(&kbdev->pm.lock);

        /* From now on, the active count will drop towards zero. Sometimes,
         * it'll go up briefly before going down again. However, once it
         * reaches zero it will stay there - guaranteeing that we've idled
         * all pm references */

        /* Suspend the job scheduler and associated components, so that it
         * releases all the PM active count references */
        kbasep_js_suspend(kbdev);

        /* Suspend any counter collection that might be happening */
        kbase_instr_hwcnt_suspend(kbdev);

        /* Cancel the keep_gpu_powered calls */
        for (nr_keep_gpu_powered_ctxs =
                        atomic_read(&kbdev->keep_gpu_powered_count);
             nr_keep_gpu_powered_ctxs > 0;
             --nr_keep_gpu_powered_ctxs) {
                kbase_pm_context_idle(kbdev);
        }

        /* Wait for the active count to reach zero. This is not the same as
         * waiting for a power down, since not all policies power down when
         * this reaches zero. */
        wait_event(kbdev->pm.zero_active_count_wait,
                        kbdev->pm.active_count == 0);

        /* Suspend the PM metrics timer on system suspend.
         * It is OK if kbase_pm_context_idle() is still running; it is safe
         * to still complete the last active time period - the PM stats will
         * get reset on resume anyway.
         */
        kbasep_pm_metrics_term(kbdev);

        /* NOTE: We synchronize with anything that was just finishing a
         * kbase_pm_context_idle() call by locking the pm.lock below */

        /* Force power off the GPU and all cores (regardless of policy), only
         * after the PM active count reaches zero (otherwise, we risk turning
         * it off prematurely) */
        mutex_lock(&kbdev->pm.lock);
        kbase_pm_cancel_deferred_poweroff(kbdev);
        kbase_pm_do_poweroff(kbdev, MALI_TRUE);
        mutex_unlock(&kbdev->pm.lock);
}

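/** Resume the GPU after a system suspend.
 *
 * Clears the suspending flag, powers the GPU back on, restarts the PM
 * metrics, restores the PM active references released by kbase_pm_suspend()
 * (including the keep_gpu_powered references), re-enables hardware counter
 * collection, and resumes the job scheduler and any suspended soft jobs.
 *
 * @param kbdev The kbase device structure for the device
 */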
void kbase_pm_resume(struct kbase_device *kbdev)
{
        int nr_keep_gpu_powered_ctxs;

        /* MUST happen before any pm_context_active calls occur */
        mutex_lock(&kbdev->pm.lock);
        kbdev->pm.suspending = MALI_FALSE;
        mutex_unlock(&kbdev->pm.lock);

        kbase_pm_do_poweron(kbdev, MALI_TRUE);

        /* Restart the PM metrics timer on resume */
        kbasep_pm_metrics_init(kbdev);
        kbasep_pm_record_gpu_idle(kbdev);

        /* Initial active call, to power on the GPU/cores if needed */
        kbase_pm_context_active(kbdev);

        /* Restore the keep_gpu_powered calls */
        for (nr_keep_gpu_powered_ctxs =
                        atomic_read(&kbdev->keep_gpu_powered_count);
             nr_keep_gpu_powered_ctxs > 0;
             --nr_keep_gpu_powered_ctxs) {
                kbase_pm_context_active(kbdev);
        }

        /* Re-enable instrumentation, if it was previously disabled */
        kbase_instr_hwcnt_resume(kbdev);

        /* Resume any blocked atoms (which may cause contexts to be scheduled
         * in and dependent atoms to run) */
        kbase_resume_suspended_soft_jobs(kbdev);

        /* Resume the Job Scheduler and associated components, and start
         * running atoms */
        kbasep_js_resume(kbdev);

        /* Matching idle call, to power off the GPU/cores if we didn't
         * actually need it and the policy doesn't want it on */
        kbase_pm_context_idle(kbdev);
}