3 * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
10 * A copy of the licence is included with the program, and can also be obtained
11 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12 * Boston, MA 02110-1301, USA.
21 * Metrics for power management
24 #include <mali_kbase.h>
25 #include <mali_kbase_pm.h>
26 #include <backend/gpu/mali_kbase_pm_internal.h>
27 #include <backend/gpu/mali_kbase_jm_rb.h>
29 /* When VSync is being hit aim for utilisation between 70-90% */
30 #define KBASE_PM_VSYNC_MIN_UTILISATION 70
31 #define KBASE_PM_VSYNC_MAX_UTILISATION 90
32 /* Otherwise aim for 10-40% */
33 #define KBASE_PM_NO_VSYNC_MIN_UTILISATION 10
34 #define KBASE_PM_NO_VSYNC_MAX_UTILISATION 40
36 /* Shift used for kbasep_pm_metrics_data.time_busy/idle - units of (1 << 8) ns
37 * This gives a maximum period between samples of 2^(32+8)/100 ns = slightly
38 * under 11s. Exceeding this will cause overflow */
39 #define KBASE_PM_TIME_SHIFT 8
41 /* Maximum time between sampling of utilization data, without resetting the
43 #define MALI_UTILIZATION_MAX_PERIOD 100000 /* ns = 100ms */
45 #ifdef CONFIG_MALI_MIDGARD_DVFS
46 static enum hrtimer_restart dvfs_callback(struct hrtimer *timer)
/* Periodic DVFS polling timer callback.
 *
 * Recovers the kbasep_pm_metrics_data that embeds @timer, triggers a
 * utilisation evaluation via kbase_pm_get_dvfs_action(), and then — while
 * metrics.timer_active is still set (checked under metrics->lock) — re-arms
 * the timer for another pm.dvfs_period milliseconds.  Always returns
 * HRTIMER_NORESTART: re-arming is done explicitly rather than by the
 * hrtimer core, so kbasep_pm_metrics_term() can stop it cleanly by
 * clearing timer_active first.
 *
 * NOTE(review): this listing has brace/declaration lines stripped; the
 * `flags` declaration and the hrtimer_start() call that consumes the
 * HR_TIMER_DELAY_MSEC() expression below are only partially visible.
 */
49 struct kbasep_pm_metrics_data *metrics;
51 KBASE_DEBUG_ASSERT(timer != NULL);
/* Map from the embedded timer back to the owning metrics block. */
53 metrics = container_of(timer, struct kbasep_pm_metrics_data, timer);
54 kbase_pm_get_dvfs_action(metrics->kbdev);
/* timer_active is protected by metrics->lock; only re-arm while the
 * metrics subsystem has not started tear-down. */
56 spin_lock_irqsave(&metrics->lock, flags);
58 if (metrics->timer_active)
60 HR_TIMER_DELAY_MSEC(metrics->kbdev->pm.dvfs_period),
63 spin_unlock_irqrestore(&metrics->lock, flags);
65 return HRTIMER_NORESTART;
67 #endif /* CONFIG_MALI_MIDGARD_DVFS */
69 int kbasep_pm_metrics_init(struct kbase_device *kbdev)
/* Initialise the PM metrics state for @kbdev.
 *
 * Zeroes every utilisation accumulator, records "now" as the start of the
 * first sampling period, initialises the metrics spinlock and — when
 * CONFIG_MALI_MIDGARD_DVFS is enabled — starts the periodic DVFS polling
 * hrtimer (dvfs_callback) with a pm.dvfs_period millisecond interval.
 *
 * time_busy/time_idle accumulate in units of (1 << KBASE_PM_TIME_SHIFT) ns
 * (see the comment on KBASE_PM_TIME_SHIFT above).  active_cl_ctx[]/
 * active_gl_ctx[] are the per-device-nr / per-job-slot "currently running"
 * flags maintained by kbase_pm_metrics_active_calc().
 *
 * NOTE(review): the return statement is not visible in this listing;
 * presumably returns 0 on success — confirm against the full source.
 */
71 KBASE_DEBUG_ASSERT(kbdev != NULL);
73 kbdev->pm.backend.metrics.kbdev = kbdev;
/* Start the first sampling period from "now". */
75 kbdev->pm.backend.metrics.time_period_start = ktime_get();
76 kbdev->pm.backend.metrics.time_busy = 0;
77 kbdev->pm.backend.metrics.time_idle = 0;
78 kbdev->pm.backend.metrics.prev_busy = 0;
79 kbdev->pm.backend.metrics.prev_idle = 0;
80 kbdev->pm.backend.metrics.gpu_active = false;
81 kbdev->pm.backend.metrics.active_cl_ctx[0] = 0;
82 kbdev->pm.backend.metrics.active_cl_ctx[1] = 0;
83 kbdev->pm.backend.metrics.active_gl_ctx[0] = 0;
84 kbdev->pm.backend.metrics.active_gl_ctx[1] = 0;
85 kbdev->pm.backend.metrics.busy_cl[0] = 0;
86 kbdev->pm.backend.metrics.busy_cl[1] = 0;
87 kbdev->pm.backend.metrics.busy_gl = 0;
89 spin_lock_init(&kbdev->pm.backend.metrics.lock);
91 #ifdef CONFIG_MALI_MIDGARD_DVFS
/* Mark the timer active before starting it so dvfs_callback() will keep
 * re-arming itself until kbasep_pm_metrics_term() clears this flag. */
92 kbdev->pm.backend.metrics.timer_active = true;
93 hrtimer_init(&kbdev->pm.backend.metrics.timer, CLOCK_MONOTONIC,
95 kbdev->pm.backend.metrics.timer.function = dvfs_callback;
97 hrtimer_start(&kbdev->pm.backend.metrics.timer,
98 HR_TIMER_DELAY_MSEC(kbdev->pm.dvfs_period),
100 #endif /* CONFIG_MALI_MIDGARD_DVFS */
105 KBASE_EXPORT_TEST_API(kbasep_pm_metrics_init);
107 void kbasep_pm_metrics_term(struct kbase_device *kbdev)
/* Tear down the PM metrics state initialised by kbasep_pm_metrics_init().
 *
 * With DVFS enabled: clear timer_active under metrics.lock first, so that
 * a dvfs_callback() already running on another CPU observes the flag and
 * does not re-arm the timer, then hrtimer_cancel() waits for any in-flight
 * callback to finish.  This ordering is what makes the shutdown race-free.
 */
109 #ifdef CONFIG_MALI_MIDGARD_DVFS
112 KBASE_DEBUG_ASSERT(kbdev != NULL);
114 spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
115 kbdev->pm.backend.metrics.timer_active = false;
116 spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
/* Safe to cancel now: the callback can no longer restart the timer. */
118 hrtimer_cancel(&kbdev->pm.backend.metrics.timer);
119 #endif /* CONFIG_MALI_MIDGARD_DVFS */
122 KBASE_EXPORT_TEST_API(kbasep_pm_metrics_term);
124 /* caller needs to hold kbdev->pm.backend.metrics.lock before calling this
127 static void kbase_pm_get_dvfs_utilisation_calc(struct kbase_device *kbdev,
/* Fold the elapsed time since metrics.time_period_start (up to @now) into
 * the running counters, then restart the period at @now.
 *
 * If the GPU was active for the interval, the elapsed time (scaled down to
 * (1 << KBASE_PM_TIME_SHIFT) ns units) is added to time_busy, and also
 * attributed to the CL (per device_nr) and GL buckets according to the
 * active_cl_ctx[]/active_gl_ctx[] flags; otherwise it is added to
 * time_idle.
 *
 * NOTE(review): the early-return statement that should follow the
 * negative-diff check below (clock went backwards / @now precedes the
 * period start) is not visible in this listing — confirm against the
 * full source.
 */
132 lockdep_assert_held(&kbdev->pm.backend.metrics.lock);
134 diff = ktime_sub(now, kbdev->pm.backend.metrics.time_period_start);
/* Guard against a non-positive interval. */
135 if (ktime_to_ns(diff) < 0)
138 if (kbdev->pm.backend.metrics.gpu_active) {
/* Scale ns down by KBASE_PM_TIME_SHIFT so the u32 counters cover
 * ~11 s between samples without overflow (see comment above). */
139 u32 ns_time = (u32) (ktime_to_ns(diff) >> KBASE_PM_TIME_SHIFT);
141 kbdev->pm.backend.metrics.time_busy += ns_time;
142 if (kbdev->pm.backend.metrics.active_cl_ctx[0])
143 kbdev->pm.backend.metrics.busy_cl[0] += ns_time;
144 if (kbdev->pm.backend.metrics.active_cl_ctx[1])
145 kbdev->pm.backend.metrics.busy_cl[1] += ns_time;
/* Both GL slots accumulate into the single busy_gl counter. */
146 if (kbdev->pm.backend.metrics.active_gl_ctx[0])
147 kbdev->pm.backend.metrics.busy_gl += ns_time;
148 if (kbdev->pm.backend.metrics.active_gl_ctx[1])
149 kbdev->pm.backend.metrics.busy_gl += ns_time;
151 kbdev->pm.backend.metrics.time_idle += (u32) (ktime_to_ns(diff)
152 >> KBASE_PM_TIME_SHIFT)
/* Begin the next sampling period from @now. */
155 kbdev->pm.backend.metrics.time_period_start = now;
158 #if defined(CONFIG_PM_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS)
159 /* Caller needs to hold kbdev->pm.backend.metrics.lock before calling this
162 static void kbase_pm_reset_dvfs_utilisation_unlocked(struct kbase_device *kbdev,
/* Roll the sampling window: save the just-finished period's busy/idle
 * totals into prev_busy/prev_idle (used by kbase_pm_get_dvfs_utilisation()
 * to smooth short samples), zero all current accumulators, and start a new
 * period at @now. */
165 /* Store previous value */
166 kbdev->pm.backend.metrics.prev_idle =
167 kbdev->pm.backend.metrics.time_idle;
168 kbdev->pm.backend.metrics.prev_busy =
169 kbdev->pm.backend.metrics.time_busy;
171 /* Reset current values */
172 kbdev->pm.backend.metrics.time_period_start = now;
173 kbdev->pm.backend.metrics.time_idle = 0;
174 kbdev->pm.backend.metrics.time_busy = 0;
175 kbdev->pm.backend.metrics.busy_cl[0] = 0;
176 kbdev->pm.backend.metrics.busy_cl[1] = 0;
177 kbdev->pm.backend.metrics.busy_gl = 0;
180 void kbase_pm_reset_dvfs_utilisation(struct kbase_device *kbdev)
/* Public wrapper: reset the utilisation window at the current time while
 * holding metrics.lock. */
184 spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
185 kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, ktime_get());
186 spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
189 void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev,
190 unsigned long *total_out, unsigned long *busy_out)
/* Report accumulated total and busy time (in (1 << KBASE_PM_TIME_SHIFT) ns
 * units) for the devfreq/DVFS governor.
 *
 * Brings the counters up to date as of "now", then manages the sampling
 * window: if the current period has reached MALI_UTILIZATION_MAX_PERIOD
 * the counters are reset (old data discarded); if it is shorter than half
 * that, the previous period's totals are folded in so very short samples
 * do not produce noisy ratios.
 *
 * NOTE(review): the stores through *total_out / *busy_out are not visible
 * in this listing (the lines between the if/else and the unlock were
 * stripped) — confirm against the full source.
 */
192 ktime_t now = ktime_get();
193 unsigned long flags, busy, total;
195 spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
196 kbase_pm_get_dvfs_utilisation_calc(kbdev, now);
198 busy = kbdev->pm.backend.metrics.time_busy;
199 total = busy + kbdev->pm.backend.metrics.time_idle;
201 /* Reset stats if older than MALI_UTILIZATION_MAX_PERIOD (default
203 if (total >= MALI_UTILIZATION_MAX_PERIOD) {
204 kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, now);
205 } else if (total < (MALI_UTILIZATION_MAX_PERIOD / 2)) {
/* Short sample: blend in the previous window for stability. */
206 total += kbdev->pm.backend.metrics.prev_idle +
207 kbdev->pm.backend.metrics.prev_busy;
208 busy += kbdev->pm.backend.metrics.prev_busy;
213 spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
217 #ifdef CONFIG_MALI_MIDGARD_DVFS
219 /* caller needs to hold kbdev->pm.backend.metrics.lock before calling this
222 int kbase_pm_get_dvfs_utilisation_old(struct kbase_device *kbdev,
224 int util_cl_share[2],
/* Legacy utilisation query used by kbase_pm_get_dvfs_action().
 *
 * Updates the counters up to "now", then computes:
 *  - overall utilisation as a 0-100 percentage of busy vs total time;
 *  - the GL share and the two per-device CL shares of the busy time,
 *    each as a percentage of the combined busy_gl + busy_cl[] total.
 * When no time at all has elapsed in the period, returns the NOP result
 * (-1 for utilisation and every share) so the caller can skip acting.
 *
 * NOTE(review): several lines are stripped from this listing — including
 * the `busy != 0` guard that must precede the share divisions, the
 * assignments of the share results, and the return statements.  Confirm
 * the divide-by-zero guard against the full source.
 */
230 kbase_pm_get_dvfs_utilisation_calc(kbdev, now);
/* NOP result when the sampling period is empty. */
232 if (kbdev->pm.backend.metrics.time_idle +
233 kbdev->pm.backend.metrics.time_busy == 0) {
234 /* No data - so we return NOP */
239 util_cl_share[0] = -1;
240 util_cl_share[1] = -1;
245 utilisation = (100 * kbdev->pm.backend.metrics.time_busy) /
246 (kbdev->pm.backend.metrics.time_idle +
247 kbdev->pm.backend.metrics.time_busy);
/* Total attributed busy time across GL and both CL devices. */
249 busy = kbdev->pm.backend.metrics.busy_gl +
250 kbdev->pm.backend.metrics.busy_cl[0] +
251 kbdev->pm.backend.metrics.busy_cl[1];
256 (100 * kbdev->pm.backend.metrics.busy_gl) /
260 (100 * kbdev->pm.backend.metrics.busy_cl[0]) /
263 (100 * kbdev->pm.backend.metrics.busy_cl[1]) /
270 util_cl_share[0] = -1;
271 util_cl_share[1] = -1;
279 void kbase_pm_get_dvfs_action(struct kbase_device *kbdev)
/* Evaluate utilisation for the DVFS policy (called from dvfs_callback()).
 *
 * Under metrics.lock: computes the overall utilisation and GL/CL shares
 * for the current window, clamps the NOP (-1) result to 0 so the platform
 * never sees negative values, forwards the figures to
 * kbase_platform_dvfs_event(), and finally resets the window so the next
 * evaluation starts fresh.
 */
282 int utilisation, util_gl_share;
283 int util_cl_share[2];
286 KBASE_DEBUG_ASSERT(kbdev != NULL);
288 spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
292 utilisation = kbase_pm_get_dvfs_utilisation_old(kbdev, &util_gl_share,
/* Any -1 component means "no data"; report 0% instead of a negative. */
295 if (utilisation < 0 || util_gl_share < 0 || util_cl_share[0] < 0 ||
296 util_cl_share[1] < 0) {
299 util_cl_share[0] = 0;
300 util_cl_share[1] = 0;
305 #ifdef CONFIG_MALI_MIDGARD_DVFS
306 kbase_platform_dvfs_event(kbdev, utilisation, util_gl_share,
308 #endif /*CONFIG_MALI_MIDGARD_DVFS */
310 kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, now);
312 spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
315 bool kbase_pm_metrics_is_active(struct kbase_device *kbdev)
/* Return whether the DVFS metrics polling timer is currently active.
 * Reads timer_active under metrics.lock for a consistent snapshot. */
320 KBASE_DEBUG_ASSERT(kbdev != NULL);
322 spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
323 isactive = kbdev->pm.backend.metrics.timer_active;
324 spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
328 KBASE_EXPORT_TEST_API(kbase_pm_metrics_is_active);
330 #endif /* CONFIG_MALI_MIDGARD_DVFS */
333 * kbase_pm_metrics_active_calc - Update PM active counts based on currently
335 * @kbdev: Device pointer
337 * The caller must hold kbdev->pm.backend.metrics.lock
339 static void kbase_pm_metrics_active_calc(struct kbase_device *kbdev)
/* Recompute the active_cl_ctx[]/active_gl_ctx[]/gpu_active flags from the
 * atoms currently submitted on the GPU job slots.
 *
 * For each slot, inspects the head atom in the ringbuffer; if the head has
 * just completed (no longer in KBASE_ATOM_GPU_RB_SUBMITTED), falls back to
 * the next atom.  A submitted compute-only atom marks its device_nr's CL
 * flag; any other submitted atom marks the slot's GL flag; either marks
 * the GPU as active overall.
 */
343 lockdep_assert_held(&kbdev->pm.backend.metrics.lock);
/* Clear all flags before rescanning the slots. */
345 kbdev->pm.backend.metrics.active_gl_ctx[0] = 0;
346 kbdev->pm.backend.metrics.active_gl_ctx[1] = 0;
347 kbdev->pm.backend.metrics.active_cl_ctx[0] = 0;
348 kbdev->pm.backend.metrics.active_cl_ctx[1] = 0;
349 kbdev->pm.backend.metrics.gpu_active = false;
351 for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
352 struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, 0);
354 /* Head atom may have just completed, so if it isn't running
355 * then try the next atom */
356 if (katom && katom->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED)
357 katom = kbase_gpu_inspect(kbdev, js, 1);
359 if (katom && katom->gpu_rb_state ==
360 KBASE_ATOM_GPU_RB_SUBMITTED) {
361 if (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
/* CL (compute) atoms are tracked per coherent-group
 * device number; only 0 and 1 are representable. */
362 int device_nr = (katom->core_req &
363 BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)
364 ? katom->device_nr : 0;
365 if (!WARN_ON(device_nr >= 2))
366 kbdev->pm.backend.metrics.
367 active_cl_ctx[device_nr] = 1;
369 /* Slot 2 should not be running non-compute
371 if (!WARN_ON(js >= 2))
372 kbdev->pm.backend.metrics.
373 active_gl_ctx[js] = 1;
375 kbdev->pm.backend.metrics.gpu_active = true;
380 /* called when job is submitted to or removed from a GPU slot */
381 void kbase_pm_metrics_update(struct kbase_device *kbdev, ktime_t *timestamp)
/* Transition point for the busy/idle tracking: first account the time up
 * to *timestamp under the OLD active flags, then recompute the flags from
 * the ringbuffers so subsequent time is attributed to the NEW state.
 * Caller holds hwaccess_lock; metrics.lock is taken here.
 *
 * NOTE(review): handling for a NULL @timestamp (lines 389-394 of the
 * original) is stripped from this listing — presumably it substitutes
 * ktime_get(); confirm against the full source.
 */
386 lockdep_assert_held(&kbdev->hwaccess_lock);
388 spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
395 /* Track how long CL and/or GL jobs have been busy for */
396 kbase_pm_get_dvfs_utilisation_calc(kbdev, *timestamp);
/* Refresh the active flags to reflect the slot change that triggered us. */
398 kbase_pm_metrics_active_calc(kbdev);
400 spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);