1 /*
2  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 and
6  * only version 2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  *
14  * Window Assisted Load Tracking (WALT) implementation credits:
15  * Srivatsa Vaddagiri, Steve Muckle, Syed Rameez Mustafa, Joonwoo Park,
16  * Pavan Kumar Kondeti, Olav Haugan
17  *
18  * 2016-03-06: Integration with EAS/refactoring by Vikram Mulukutla
19  *             and Todd Kjos
20  */
21
22 #include <linux/syscore_ops.h>
23 #include <linux/cpufreq.h>
24 #include <trace/events/sched.h>
25 #include <clocksource/arm_arch_timer.h>
26 #include "sched.h"
27 #include "walt.h"
28
29 #define WINDOW_STATS_RECENT             0
30 #define WINDOW_STATS_MAX                1
31 #define WINDOW_STATS_MAX_RECENT_AVG     2
32 #define WINDOW_STATS_AVG                3
33 #define WINDOW_STATS_INVALID_POLICY     4
34
35 #define EXITING_TASK_MARKER     0xdeaddead
36
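/*
 * Note on the tunables below (as used elsewhere in this file):
 * walt_ravg_hist_size is the number of recent windows kept per task,
 * walt_window_stats_policy selects how demand is derived from that history,
 * walt_account_wait_time includes runnable (waiting) time in task demand,
 * walt_freq_account_wait_time includes it in the rq busy counters used for
 * frequency guidance, and walt_io_is_busy lets outstanding iowait count as
 * busy time.
 */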
37 static __read_mostly unsigned int walt_ravg_hist_size = 5;
38 static __read_mostly unsigned int walt_window_stats_policy =
39         WINDOW_STATS_MAX_RECENT_AVG;
40 static __read_mostly unsigned int walt_account_wait_time = 1;
41 static __read_mostly unsigned int walt_freq_account_wait_time = 0;
42 static __read_mostly unsigned int walt_io_is_busy = 0;
43
44 unsigned int sysctl_sched_walt_init_task_load_pct = 15;
45
46 /* 1 -> use PELT based load stats, 0 -> use window-based load stats */
47 unsigned int __read_mostly walt_disabled = 0;
48
49 static unsigned int max_possible_efficiency = 1024;
50 static unsigned int min_possible_efficiency = 1024;
51
52 /*
53  * Maximum possible frequency across all cpus. Task demand and cpu
54  * capacity (cpu_power) metrics are scaled in reference to it.
55  */
56 static unsigned int max_possible_freq = 1;
57
58 /*
59  * Minimum possible max_freq across all cpus. This will be same as
60  * max_possible_freq on homogeneous systems and could be different from
61  * max_possible_freq on heterogeneous systems. min_max_freq is used to derive
62  * capacity (cpu_power) of cpus.
63  */
64 static unsigned int min_max_freq = 1;
65
66 static unsigned int max_capacity = 1024;
67 static unsigned int min_capacity = 1024;
68 static unsigned int max_load_scale_factor = 1024;
69 static unsigned int max_possible_capacity = 1024;
70
71 /* Mask of all CPUs that have max_possible_capacity */
72 static cpumask_t mpc_mask = CPU_MASK_ALL;
73
74 /* Window size (in ns) */
75 __read_mostly unsigned int walt_ravg_window = 20000000;
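/* The default of 20000000 ns is 20 ms. It can be overridden with the
 * "walt_ravg_window=" boot parameter, validated against the bounds below. */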
76
77 /* Min window size (in ns) = 10ms */
78 #define MIN_SCHED_RAVG_WINDOW 10000000
79
80 /* Max window size (in ns) = 1s */
81 #define MAX_SCHED_RAVG_WINDOW 1000000000
82
83 static unsigned int sync_cpu;
84 static ktime_t ktime_last;
85 static bool walt_ktime_suspended;
86
87 static unsigned int task_load(struct task_struct *p)
88 {
89         return p->ravg.demand;
90 }
91
92 void
93 walt_inc_cumulative_runnable_avg(struct rq *rq,
94                                  struct task_struct *p)
95 {
96         rq->cumulative_runnable_avg += p->ravg.demand;
97 }
98
99 void
100 walt_dec_cumulative_runnable_avg(struct rq *rq,
101                                  struct task_struct *p)
102 {
103         rq->cumulative_runnable_avg -= p->ravg.demand;
104         BUG_ON((s64)rq->cumulative_runnable_avg < 0);
105 }
106
107 static void
108 fixup_cumulative_runnable_avg(struct rq *rq,
109                               struct task_struct *p, s64 task_load_delta)
110 {
111         rq->cumulative_runnable_avg += task_load_delta;
112         if ((s64)rq->cumulative_runnable_avg < 0)
113                 panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
114                         task_load_delta, task_load(p));
115 }
116
117 u64 walt_ktime_clock(void)
118 {
119         if (unlikely(walt_ktime_suspended))
120                 return ktime_to_ns(ktime_last);
121         return ktime_get_ns();
122 }
123
124 static void walt_resume(void)
125 {
126         walt_ktime_suspended = false;
127 }
128
129 static int walt_suspend(void)
130 {
131         ktime_last = ktime_get();
132         walt_ktime_suspended = true;
133         return 0;
134 }
135
136 static struct syscore_ops walt_syscore_ops = {
137         .resume = walt_resume,
138         .suspend = walt_suspend
139 };
140
141 static int __init walt_init_ops(void)
142 {
143         register_syscore_ops(&walt_syscore_ops);
144         return 0;
145 }
146 late_initcall(walt_init_ops);
147
148 void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
149                 struct task_struct *p)
150 {
151         cfs_rq->cumulative_runnable_avg += p->ravg.demand;
152 }
153
154 void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
155                 struct task_struct *p)
156 {
157         cfs_rq->cumulative_runnable_avg -= p->ravg.demand;
158 }
159
160 static int exiting_task(struct task_struct *p)
161 {
162         if (p->flags & PF_EXITING) {
163                 if (p->ravg.sum_history[0] != EXITING_TASK_MARKER) {
164                         p->ravg.sum_history[0] = EXITING_TASK_MARKER;
165                 }
166                 return 1;
167         }
168         return 0;
169 }
170
171 static int __init set_walt_ravg_window(char *str)
172 {
173         get_option(&str, &walt_ravg_window);
174
175         walt_disabled = (walt_ravg_window < MIN_SCHED_RAVG_WINDOW ||
176                                 walt_ravg_window > MAX_SCHED_RAVG_WINDOW);
177         return 0;
178 }
179
180 early_param("walt_ravg_window", set_walt_ravg_window);
181
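/*
 * Advance rq->window_start in whole walt_ravg_window steps so that it always
 * marks the start of the window containing 'wallclock'. If less than one
 * full window has elapsed, window_start is left untouched.
 */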
182 static void
183 update_window_start(struct rq *rq, u64 wallclock)
184 {
185         s64 delta;
186         int nr_windows;
187
188         delta = wallclock - rq->window_start;
189         /* If the MPM global timer has been cleared, treat delta as 0 to avoid triggering the BUG_ON below. */
190         if (delta < 0) {
191                 if (arch_timer_read_counter() == 0)
192                         delta = 0;
193                 else
194                         BUG_ON(1);
195         }
196
197         if (delta < walt_ravg_window)
198                 return;
199
200         nr_windows = div64_u64(delta, walt_ravg_window);
201         rq->window_start += (u64)nr_windows * (u64)walt_ravg_window;
202 }
203
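/*
 * Scale raw execution time by the current frequency and the CPU's efficiency
 * so that demand is expressed relative to the fastest, most efficient CPU:
 *
 *   scaled = delta * (cur_freq / max_possible_freq)
 *                  * (efficiency / max_possible_efficiency)
 *
 * e.g. 1000000 ns run at half of max_possible_freq on a CPU of efficiency
 * 512 (with max_possible_efficiency 1024) contributes ~250000 ns of demand.
 */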
204 static u64 scale_exec_time(u64 delta, struct rq *rq)
205 {
206         unsigned int cur_freq = rq->cur_freq;
207         int sf;
208
209         if (unlikely(cur_freq > max_possible_freq))
210                 cur_freq = rq->max_possible_freq;
211
212         /* round up div64 */
213         delta = div64_u64(delta * cur_freq + max_possible_freq - 1,
214                           max_possible_freq);
215
216         sf = DIV_ROUND_UP(rq->efficiency * 1024, max_possible_efficiency);
217
218         delta *= sf;
219         delta >>= 10;
220
221         return delta;
222 }
223
224 static int cpu_is_waiting_on_io(struct rq *rq)
225 {
226         if (!walt_io_is_busy)
227                 return 0;
228
229         return atomic_read(&rq->nr_iowait);
230 }
231
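/*
 * Account 'delta' ns of irq handling time against 'cpu'. rq->cur_irqload
 * accumulates within the current jiffy; once at least one jiffy has passed,
 * rq->avg_irqload is decayed (or zeroed after 10 or more quiet jiffies) and
 * the accumulated cur_irqload is folded into it.
 */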
232 void walt_account_irqtime(int cpu, struct task_struct *curr,
233                                  u64 delta, u64 wallclock)
234 {
235         struct rq *rq = cpu_rq(cpu);
236         unsigned long flags, nr_windows;
237         u64 cur_jiffies_ts;
238
239         raw_spin_lock_irqsave(&rq->lock, flags);
240
241         /*
242          * cputime (wallclock) uses sched_clock so use the same here for
243          * consistency.
244          */
245         delta += sched_clock() - wallclock;
246         cur_jiffies_ts = get_jiffies_64();
247
248         if (is_idle_task(curr))
249                 walt_update_task_ravg(curr, rq, IRQ_UPDATE, walt_ktime_clock(),
250                                  delta);
251
252         nr_windows = cur_jiffies_ts - rq->irqload_ts;
253
254         if (nr_windows) {
255                 if (nr_windows < 10) {
256                         /* Decay CPU's irqload by 3/4 (the nr_windows
                             * factors cancel out, so this is a single decay
                             * regardless of nr_windows). */
257                         rq->avg_irqload *= (3 * nr_windows);
258                         rq->avg_irqload = div64_u64(rq->avg_irqload,
259                                                     4 * nr_windows);
260                 } else {
261                         rq->avg_irqload = 0;
262                 }
263                 rq->avg_irqload += rq->cur_irqload;
264                 rq->cur_irqload = 0;
265         }
266
267         rq->cur_irqload += delta;
268         rq->irqload_ts = cur_jiffies_ts;
269         raw_spin_unlock_irqrestore(&rq->lock, flags);
270 }
271
272
273 #define WALT_HIGH_IRQ_TIMEOUT 3
274
275 u64 walt_irqload(int cpu)
{
276         struct rq *rq = cpu_rq(cpu);
277         s64 delta;
278         delta = get_jiffies_64() - rq->irqload_ts;
279
280         /*
281          * Current context can be preempted by irq and rq->irqload_ts can be
282          * updated by irq context so that delta can be negative.
283          * But this is okay and we can safely return as this means there
284          * was recent irq occurrence.
285          */
286
287         if (delta < WALT_HIGH_IRQ_TIMEOUT)
288                 return rq->avg_irqload;
289         else
290                 return 0;
291 }
292
293 int walt_cpu_high_irqload(int cpu)
{
294         return walt_irqload(cpu) >= sysctl_sched_walt_cpu_high_irqload;
295 }
296
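/*
 * Decide whether the time since the last accounting event should be added to
 * the rq busy counters (and the task's window contribution). Returns 0 for
 * events that represent non-busy time, e.g. a task wakeup.
 */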
297 static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
298                                      u64 irqtime, int event)
299 {
300         if (is_idle_task(p)) {
301                 /* TASK_WAKE && TASK_MIGRATE is not possible on idle task! */
302                 if (event == PICK_NEXT_TASK)
303                         return 0;
304
305                 /* PUT_PREV_TASK, TASK_UPDATE && IRQ_UPDATE are left */
306                 return irqtime || cpu_is_waiting_on_io(rq);
307         }
308
309         if (event == TASK_WAKE)
310                 return 0;
311
312         if (event == PUT_PREV_TASK || event == IRQ_UPDATE ||
313                                          event == TASK_UPDATE)
314                 return 1;
315
316         /* Only TASK_MIGRATE && PICK_NEXT_TASK left */
317         return walt_freq_account_wait_time;
318 }
319
320 /*
321  * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
322  */
323 static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
324              int event, u64 wallclock, u64 irqtime)
325 {
326         int new_window, nr_full_windows = 0;
327         int p_is_curr_task = (p == rq->curr);
328         u64 mark_start = p->ravg.mark_start;
329         u64 window_start = rq->window_start;
330         u32 window_size = walt_ravg_window;
331         u64 delta;
332
333         new_window = mark_start < window_start;
334         if (new_window) {
335                 nr_full_windows = div64_u64((window_start - mark_start),
336                                                 window_size);
337                 if (p->ravg.active_windows < USHRT_MAX)
338                         p->ravg.active_windows++;
339         }
340
341         /* Handle per-task window rollover. We don't care about the idle
342          * task or exiting tasks. */
343         if (new_window && !is_idle_task(p) && !exiting_task(p)) {
344                 u32 curr_window = 0;
345
346                 if (!nr_full_windows)
347                         curr_window = p->ravg.curr_window;
348
349                 p->ravg.prev_window = curr_window;
350                 p->ravg.curr_window = 0;
351         }
352
353         if (!account_busy_for_cpu_time(rq, p, irqtime, event)) {
354                 /* account_busy_for_cpu_time() = 0, so no update to the
355                  * task's current window needs to be made. This could be
356                  * for example
357                  *
358                  *   - a wakeup event on a task within the current
359                  *     window (!new_window below, no action required),
360                  *   - switching to a new task from idle (PICK_NEXT_TASK)
361                  *     in a new window where irqtime is 0 and we aren't
362                  *     waiting on IO */
363
364                 if (!new_window)
365                         return;
366
367                 /* A new window has started. The RQ demand must be rolled
368                  * over if p is the current task. */
369                 if (p_is_curr_task) {
370                         u64 prev_sum = 0;
371
372                         /* p is either idle task or an exiting task */
373                         if (!nr_full_windows) {
374                                 prev_sum = rq->curr_runnable_sum;
375                         }
376
377                         rq->prev_runnable_sum = prev_sum;
378                         rq->curr_runnable_sum = 0;
379                 }
380
381                 return;
382         }
383
384         if (!new_window) {
385                 /* account_busy_for_cpu_time() = 1 so busy time needs
386                  * to be accounted to the current window. No rollover
387                  * since we didn't start a new window. An example of this is
388                  * when a task starts execution and then sleeps within the
389                  * same window. */
390
391                 if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq))
392                         delta = wallclock - mark_start;
393                 else
394                         delta = irqtime;
395                 delta = scale_exec_time(delta, rq);
396                 rq->curr_runnable_sum += delta;
397                 if (!is_idle_task(p) && !exiting_task(p))
398                         p->ravg.curr_window += delta;
399
400                 return;
401         }
402
403         if (!p_is_curr_task) {
404                 /* account_busy_for_cpu_time() = 1 so busy time needs
405                  * to be accounted to the current window. A new window
406                  * has also started, but p is not the current task, so the
407                  * window is not rolled over - just split up and account
408                  * as necessary into curr and prev. The window is only
409                  * rolled over when a new window is processed for the current
410                  * task.
411                  *
412                  * Irqtime can't be accounted by a task that isn't the
413                  * currently running task. */
414
415                 if (!nr_full_windows) {
416                         /* A full window hasn't elapsed, account partial
417                          * contribution to previous completed window. */
418                         delta = scale_exec_time(window_start - mark_start, rq);
419                         if (!exiting_task(p))
420                                 p->ravg.prev_window += delta;
421                 } else {
422                         /* Since at least one full window has elapsed,
423                          * the contribution to the previous window is the
424                          * full window (window_size). */
425                         delta = scale_exec_time(window_size, rq);
426                         if (!exiting_task(p))
427                                 p->ravg.prev_window = delta;
428                 }
429                 rq->prev_runnable_sum += delta;
430
431                 /* Account piece of busy time in the current window. */
432                 delta = scale_exec_time(wallclock - window_start, rq);
433                 rq->curr_runnable_sum += delta;
434                 if (!exiting_task(p))
435                         p->ravg.curr_window = delta;
436
437                 return;
438         }
439
440         if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) {
441                 /* account_busy_for_cpu_time() = 1 so busy time needs
442                  * to be accounted to the current window. A new window
443                  * has started and p is the current task so rollover is
444                  * needed. If any of these three above conditions are true
445                  * then this busy time can't be accounted as irqtime.
446                  *
447                  * Busy time for the idle task or exiting tasks need not
448                  * be accounted.
449                  *
450                  * An example of this would be a task that starts execution
451                  * and then sleeps once a new window has begun. */
452
453                 if (!nr_full_windows) {
454                         /* A full window hasn't elapsed, account partial
455                          * contribution to previous completed window. */
456                         delta = scale_exec_time(window_start - mark_start, rq);
457                         if (!is_idle_task(p) && !exiting_task(p))
458                                 p->ravg.prev_window += delta;
459
460                         delta += rq->curr_runnable_sum;
461                 } else {
462                         /* Since at least one full window has elapsed,
463                          * the contribution to the previous window is the
464                          * full window (window_size). */
465                         delta = scale_exec_time(window_size, rq);
466                         if (!is_idle_task(p) && !exiting_task(p))
467                                 p->ravg.prev_window = delta;
468
469                 }
470                 /*
471                  * Rollover for normal runnable sum is done here by overwriting
472                  * the values in prev_runnable_sum and curr_runnable_sum.
473                  * Rollover for new task runnable sum has completed by previous
474                  * if-else statement.
475                  */
476                 rq->prev_runnable_sum = delta;
477
478                 /* Account piece of busy time in the current window. */
479                 delta = scale_exec_time(wallclock - window_start, rq);
480                 rq->curr_runnable_sum = delta;
481                 if (!is_idle_task(p) && !exiting_task(p))
482                         p->ravg.curr_window = delta;
483
484                 return;
485         }
486
487         if (irqtime) {
488                 /* account_busy_for_cpu_time() = 1 so busy time needs
489                  * to be accounted to the current window. A new window
490                  * has started and p is the current task so rollover is
491                  * needed. The current task must be the idle task because
492                  * irqtime is not accounted for any other task.
493                  *
494                  * Irqtime will be accounted each time we process IRQ activity
495                  * after a period of idleness, so we know the IRQ busy time
496                  * started at wallclock - irqtime. */
497
498                 BUG_ON(!is_idle_task(p));
499                 mark_start = wallclock - irqtime;
500
501                 /* Roll window over. If IRQ busy time was just in the current
502                  * window then that is all that need be accounted. */
503                 rq->prev_runnable_sum = rq->curr_runnable_sum;
504                 if (mark_start > window_start) {
505                         rq->curr_runnable_sum = scale_exec_time(irqtime, rq);
506                         return;
507                 }
508
509                 /* The IRQ busy time spanned multiple windows. Process the
510                  * busy time preceding the current window start first. */
511                 delta = window_start - mark_start;
512                 if (delta > window_size)
513                         delta = window_size;
514                 delta = scale_exec_time(delta, rq);
515                 rq->prev_runnable_sum += delta;
516
517                 /* Process the remaining IRQ busy time in the current window. */
518                 delta = wallclock - window_start;
519                 rq->curr_runnable_sum = scale_exec_time(delta, rq);
520
521                 return;
522         }
523
524         BUG();
525 }
526
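/*
 * Decide whether the elapsed time should contribute to the task's own demand.
 * Wait (runnable) time is included only when walt_account_wait_time is set;
 * the idle task and exiting tasks are never accounted.
 */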
527 static int account_busy_for_task_demand(struct task_struct *p, int event)
528 {
529         /* No need to bother updating task demand for exiting tasks
530          * or the idle task. */
531         if (exiting_task(p) || is_idle_task(p))
532                 return 0;
533
534         /* When a task is waking up it is completing a segment of non-busy
535          * time. Likewise, if wait time is not treated as busy time, then
536          * when a task begins to run or is migrated, it is not running and
537          * is completing a segment of non-busy time. */
538         if (event == TASK_WAKE || (!walt_account_wait_time &&
539                          (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
540                 return 0;
541
542         return 1;
543 }
544
545 /*
546  * Called when new window is starting for a task, to record cpu usage over
547  * recently concluded window(s). Normally 'samples' should be 1. It can be > 1
548  * when, say, a real-time task runs without preemption for several windows at a
549  * stretch.
550  */
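/*
 * Example with walt_ravg_hist_size == 5 and WINDOW_STATS_MAX_RECENT_AVG:
 * a history of {6, 4, 5, 3, 2} ms and a new 6 ms sample give an updated
 * history of {6, 6, 4, 5, 3} ms, so demand = max(avg 4.8 ms, 6 ms) = 6 ms.
 */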
551 static void update_history(struct rq *rq, struct task_struct *p,
552                          u32 runtime, int samples, int event)
553 {
554         u32 *hist = &p->ravg.sum_history[0];
555         int ridx, widx;
556         u32 max = 0, avg, demand;
557         u64 sum = 0;
558
559         /* Ignore windows where task had no activity */
560         if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
561                 goto done;
562
563         /* Push new 'runtime' value onto stack */
564         widx = walt_ravg_hist_size - 1;
565         ridx = widx - samples;
566         for (; ridx >= 0; --widx, --ridx) {
567                 hist[widx] = hist[ridx];
568                 sum += hist[widx];
569                 if (hist[widx] > max)
570                         max = hist[widx];
571         }
572
573         for (widx = 0; widx < samples && widx < walt_ravg_hist_size; widx++) {
574                 hist[widx] = runtime;
575                 sum += hist[widx];
576                 if (hist[widx] > max)
577                         max = hist[widx];
578         }
579
580         p->ravg.sum = 0;
581
582         if (walt_window_stats_policy == WINDOW_STATS_RECENT) {
583                 demand = runtime;
584         } else if (walt_window_stats_policy == WINDOW_STATS_MAX) {
585                 demand = max;
586         } else {
587                 avg = div64_u64(sum, walt_ravg_hist_size);
588                 if (walt_window_stats_policy == WINDOW_STATS_AVG)
589                         demand = avg;
590                 else
591                         demand = max(avg, runtime);
592         }
593
594         /*
595          * A throttled deadline sched class task gets dequeued without
596          * changing p->on_rq. Since the dequeue decrements hmp stats
597          * avoid decrementing it here again.
598          */
599         if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
600                                                 !p->dl.dl_throttled))
601                 fixup_cumulative_runnable_avg(rq, p, demand);
602
603         p->ravg.demand = demand;
604
605 done:
606         trace_walt_update_history(rq, p, runtime, samples, event);
607         return;
608 }
609
610 static void add_to_task_demand(struct rq *rq, struct task_struct *p,
611                                 u64 delta)
612 {
613         delta = scale_exec_time(delta, rq);
614         p->ravg.sum += delta;
615         if (unlikely(p->ravg.sum > walt_ravg_window))
616                 p->ravg.sum = walt_ravg_window;
617 }
618
619 /*
620  * Account cpu demand of task and/or update task's cpu demand history
621  *
622  * ms = p->ravg.mark_start;
623  * wc = wallclock
624  * ws = rq->window_start
625  *
626  * Three possibilities:
627  *
628  *      a) Task event is contained within one window.
629  *              window_start < mark_start < wallclock
630  *
631  *              ws   ms  wc
632  *              |    |   |
633  *              V    V   V
634  *              |---------------|
635  *
636  *      In this case, p->ravg.sum is updated *iff* event is appropriate
637  *      (ex: event == PUT_PREV_TASK)
638  *
639  *      b) Task event spans two windows.
640  *              mark_start < window_start < wallclock
641  *
642  *              ms   ws   wc
643  *              |    |    |
644  *              V    V    V
645  *              -----|-------------------
646  *
647  *      In this case, p->ravg.sum is updated with (ws - ms) *iff* event
648  *      is appropriate, then a new window sample is recorded followed
649  *      by p->ravg.sum being set to (wc - ws) *iff* event is appropriate.
650  *
651  *      c) Task event spans more than two windows.
652  *
653  *              ms ws_tmp                          ws  wc
654  *              |  |                               |   |
655  *              V  V                               V   V
656  *              ---|-------|-------|-------|-------|------
657  *                 |                               |
658  *                 |<------ nr_full_windows ------>|
659  *
660  *      In this case, p->ravg.sum is updated with (ws_tmp - ms) first *iff*
661  *      event is appropriate, window sample of p->ravg.sum is recorded,
662  *      'nr_full_window' samples of window_size is also recorded *iff*
663  *      event is appropriate and finally p->ravg.sum is set to (wc - ws)
664  *      *iff* event is appropriate.
665  *
666  * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
667  * depends on it!
668  */
669 static void update_task_demand(struct task_struct *p, struct rq *rq,
670              int event, u64 wallclock)
671 {
672         u64 mark_start = p->ravg.mark_start;
673         u64 delta, window_start = rq->window_start;
674         int new_window, nr_full_windows;
675         u32 window_size = walt_ravg_window;
676
677         new_window = mark_start < window_start;
678         if (!account_busy_for_task_demand(p, event)) {
679                 if (new_window)
680                         /* If the time accounted isn't being accounted as
681                          * busy time, and a new window started, only the
682                          * previous window need be closed out with the
683                          * pre-existing demand. Multiple windows may have
684                          * elapsed, but since empty windows are dropped,
685                          * it is not necessary to account those. */
686                         update_history(rq, p, p->ravg.sum, 1, event);
687                 return;
688         }
689
690         if (!new_window) {
691                 /* The simple case - busy time contained within the existing
692                  * window. */
693                 add_to_task_demand(rq, p, wallclock - mark_start);
694                 return;
695         }
696
697         /* Busy time spans at least two windows. Temporarily rewind
698          * window_start to first window boundary after mark_start. */
699         delta = window_start - mark_start;
700         nr_full_windows = div64_u64(delta, window_size);
701         window_start -= (u64)nr_full_windows * (u64)window_size;
702
703         /* Process (window_start - mark_start) first */
704         add_to_task_demand(rq, p, window_start - mark_start);
705
706         /* Push new sample(s) into task's demand history */
707         update_history(rq, p, p->ravg.sum, 1, event);
708         if (nr_full_windows)
709                 update_history(rq, p, scale_exec_time(window_size, rq),
710                                nr_full_windows, event);
711
712         /* Roll window_start back to current to process any remainder
713          * in current window. */
714         window_start += (u64)nr_full_windows * (u64)window_size;
715
716         /* Process (wallclock - window_start) next */
717         mark_start = window_start;
718         add_to_task_demand(rq, p, wallclock - mark_start);
719 }
720
721 /* Reflect task activity on its demand and cpu's busy time statistics */
722 void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
723              int event, u64 wallclock, u64 irqtime)
724 {
725         if (walt_disabled || !rq->window_start)
726                 return;
727
728         lockdep_assert_held(&rq->lock);
729
730         update_window_start(rq, wallclock);
731
732         if (!p->ravg.mark_start)
733                 goto done;
734
735         update_task_demand(p, rq, event, wallclock);
736         update_cpu_busy_time(p, rq, event, wallclock, irqtime);
737
738 done:
739         trace_walt_update_task_ravg(p, rq, event, wallclock, irqtime);
740
741         p->ravg.mark_start = wallclock;
742 }
743
744 unsigned long __weak arch_get_cpu_efficiency(int cpu)
745 {
746         return SCHED_LOAD_SCALE;
747 }
748
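/*
 * Cache each CPU's relative efficiency as reported by
 * arch_get_cpu_efficiency() and record the system-wide extremes used by
 * scale_exec_time() and the capacity/load_scale computations below.
 */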
749 void walt_init_cpu_efficiency(void)
750 {
751         int i, efficiency;
752         unsigned int max = 0, min = UINT_MAX;
753
754         for_each_possible_cpu(i) {
755                 efficiency = arch_get_cpu_efficiency(i);
756                 cpu_rq(i)->efficiency = efficiency;
757
758                 if (efficiency > max)
759                         max = efficiency;
760                 if (efficiency < min)
761                         min = efficiency;
762         }
763
764         if (max)
765                 max_possible_efficiency = max;
766
767         if (min)
768                 min_possible_efficiency = min;
769 }
770
771 static void reset_task_stats(struct task_struct *p)
772 {
773         u32 sum = 0;
774
775         if (exiting_task(p))
776                 sum = EXITING_TASK_MARKER;
777
778         memset(&p->ravg, 0, sizeof(struct ravg));
779         /* Retain EXITING_TASK marker */
780         p->ravg.sum_history[0] = sum;
781 }
782
783 void walt_mark_task_starting(struct task_struct *p)
784 {
785         u64 wallclock;
786         struct rq *rq = task_rq(p);
787
788         if (!rq->window_start) {
789                 reset_task_stats(p);
790                 return;
791         }
792
793         wallclock = walt_ktime_clock();
794         p->ravg.mark_start = wallclock;
795 }
796
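/*
 * Align this rq's window_start with the sync cpu so that all CPUs roll their
 * windows over at the same boundaries.
 */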
797 void walt_set_window_start(struct rq *rq)
798 {
799         int cpu = cpu_of(rq);
800         struct rq *sync_rq = cpu_rq(sync_cpu);
801
802         if (rq->window_start)
803                 return;
804
805         if (cpu == sync_cpu) {
806                 rq->window_start = walt_ktime_clock();
807         } else {
808                 raw_spin_unlock(&rq->lock);
809                 double_rq_lock(rq, sync_rq);
810                 rq->window_start = cpu_rq(sync_cpu)->window_start;
811                 rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
812                 raw_spin_unlock(&sync_rq->lock);
813         }
814
815         rq->curr->ravg.mark_start = rq->window_start;
816 }
817
818 void walt_migrate_sync_cpu(int cpu)
819 {
820         if (cpu == sync_cpu)
821                 sync_cpu = smp_processor_id();
822 }
823
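/*
 * On migration, move the task's contribution to the current and previous
 * window sums from its old runqueue to new_cpu's runqueue so per-cpu busy
 * time stays consistent.
 */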
824 void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
825 {
826         struct rq *src_rq = task_rq(p);
827         struct rq *dest_rq = cpu_rq(new_cpu);
828         u64 wallclock;
829
830         if (!p->on_rq && p->state != TASK_WAKING)
831                 return;
832
833         if (exiting_task(p)) {
834                 return;
835         }
836
837         if (p->state == TASK_WAKING)
838                 double_rq_lock(src_rq, dest_rq);
839
840         wallclock = walt_ktime_clock();
841
842         walt_update_task_ravg(task_rq(p)->curr, task_rq(p),
843                         TASK_UPDATE, wallclock, 0);
844         walt_update_task_ravg(dest_rq->curr, dest_rq,
845                         TASK_UPDATE, wallclock, 0);
846
847         walt_update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);
848
849         if (p->ravg.curr_window) {
850                 src_rq->curr_runnable_sum -= p->ravg.curr_window;
851                 dest_rq->curr_runnable_sum += p->ravg.curr_window;
852         }
853
854         if (p->ravg.prev_window) {
855                 src_rq->prev_runnable_sum -= p->ravg.prev_window;
856                 dest_rq->prev_runnable_sum += p->ravg.prev_window;
857         }
858
859         if ((s64)src_rq->prev_runnable_sum < 0) {
860                 src_rq->prev_runnable_sum = 0;
861                 WARN_ON(1);
862         }
863         if ((s64)src_rq->curr_runnable_sum < 0) {
864                 src_rq->curr_runnable_sum = 0;
865                 WARN_ON(1);
866         }
867
868         trace_walt_migration_update_sum(src_rq, p);
869         trace_walt_migration_update_sum(dest_rq, p);
870
871         if (p->state == TASK_WAKING)
872                 double_rq_unlock(src_rq, dest_rq);
873 }
874
875 /* Keep track of max/min capacity possible across CPUs "currently" */
876 static void __update_min_max_capacity(void)
877 {
878         int i;
879         int max = 0, min = INT_MAX;
880
881         for_each_online_cpu(i) {
882                 if (cpu_rq(i)->capacity > max)
883                         max = cpu_rq(i)->capacity;
884                 if (cpu_rq(i)->capacity < min)
885                         min = cpu_rq(i)->capacity;
886         }
887
888         max_capacity = max;
889         min_capacity = min;
890 }
891
892 static void update_min_max_capacity(void)
893 {
894         unsigned long flags;
895         int i;
896
897         local_irq_save(flags);
898         for_each_possible_cpu(i)
899                 raw_spin_lock(&cpu_rq(i)->lock);
900
901         __update_min_max_capacity();
902
903         for_each_possible_cpu(i)
904                 raw_spin_unlock(&cpu_rq(i)->lock);
905         local_irq_restore(flags);
906 }
907
908 /*
909  * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
910  * least efficient cpu gets capacity of 1024
911  */
912 static unsigned long capacity_scale_cpu_efficiency(int cpu)
913 {
914         return (1024 * cpu_rq(cpu)->efficiency) / min_possible_efficiency;
915 }
916
917 /*
918  * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
919  * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
920  */
921 static unsigned long capacity_scale_cpu_freq(int cpu)
922 {
923         return (1024 * cpu_rq(cpu)->max_freq) / min_max_freq;
924 }
925
926 /*
927  * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
928  * that "most" efficient cpu gets a load_scale_factor of 1
929  */
930 static unsigned long load_scale_cpu_efficiency(int cpu)
931 {
932         return DIV_ROUND_UP(1024 * max_possible_efficiency,
933                             cpu_rq(cpu)->efficiency);
934 }
935
936 /*
937  * Return load_scale_factor of a cpu in reference to cpu with best max_freq
938  * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
939  * of 1.
940  */
941 static unsigned long load_scale_cpu_freq(int cpu)
942 {
943         return DIV_ROUND_UP(1024 * max_possible_freq, cpu_rq(cpu)->max_freq);
944 }
945
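/*
 * e.g. a CPU twice as efficient as the least efficient one, with a max_freq
 * of twice min_max_freq, ends up with capacity 1024 * 2 * 2 = 4096.
 */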
946 static int compute_capacity(int cpu)
947 {
948         int capacity = 1024;
949
950         capacity *= capacity_scale_cpu_efficiency(cpu);
951         capacity >>= 10;
952
953         capacity *= capacity_scale_cpu_freq(cpu);
954         capacity >>= 10;
955
956         return capacity;
957 }
958
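/*
 * e.g. a CPU half as efficient as the best one, with half of
 * max_possible_freq as its max_freq, gets a load_scale_factor of
 * 1024 * 2 * 2 = 4096.
 */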
959 static int compute_load_scale_factor(int cpu)
960 {
961         int load_scale = 1024;
962
963         /*
964          * load_scale_factor accounts for the fact that task load
965          * is in reference to "best" performing cpu. Task's load will need to be
966          * scaled (up) by a factor to determine suitability to be placed on a
967          * (little) cpu.
968          */
969         load_scale *= load_scale_cpu_efficiency(cpu);
970         load_scale >>= 10;
971
972         load_scale *= load_scale_cpu_freq(cpu);
973         load_scale >>= 10;
974
975         return load_scale;
976 }
977
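/*
 * cpufreq policy notifier: refresh the per-cpu frequency limits and, when a
 * policy's max frequency (or the system-wide min_max_freq/max_possible_freq)
 * changes, recompute capacity and load_scale_factor for the affected cpus.
 */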
978 static int cpufreq_notifier_policy(struct notifier_block *nb,
979                 unsigned long val, void *data)
980 {
981         struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
982         int i, update_max = 0;
983         u64 highest_mpc = 0, highest_mplsf = 0;
984         const struct cpumask *cpus = policy->related_cpus;
985         unsigned int orig_min_max_freq = min_max_freq;
986         unsigned int orig_max_possible_freq = max_possible_freq;
987         /* Initialized to policy->max in case policy->related_cpus is empty! */
988         unsigned int orig_max_freq = policy->max;
989
990         if (val != CPUFREQ_NOTIFY && val != CPUFREQ_REMOVE_POLICY &&
991                                                 val != CPUFREQ_CREATE_POLICY)
992                 return 0;
993
994         if (val == CPUFREQ_REMOVE_POLICY || val == CPUFREQ_CREATE_POLICY) {
995                 update_min_max_capacity();
996                 return 0;
997         }
998
999         for_each_cpu(i, policy->related_cpus) {
1000                 cpumask_copy(&cpu_rq(i)->freq_domain_cpumask,
1001                              policy->related_cpus);
1002                 orig_max_freq = cpu_rq(i)->max_freq;
1003                 cpu_rq(i)->min_freq = policy->min;
1004                 cpu_rq(i)->max_freq = policy->max;
1005                 cpu_rq(i)->cur_freq = policy->cur;
1006                 cpu_rq(i)->max_possible_freq = policy->cpuinfo.max_freq;
1007         }
1008
1009         max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
1010         if (min_max_freq == 1)
1011                 min_max_freq = UINT_MAX;
1012         min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq);
1013         BUG_ON(!min_max_freq);
1014         BUG_ON(!policy->max);
1015
1016         /* Changes to policy other than max_freq don't require any updates */
1017         if (orig_max_freq == policy->max)
1018                 return 0;
1019
1020         /*
1021          * A changed min_max_freq or max_possible_freq (possible during bootup)
1022          * needs to trigger re-computation of load_scale_factor and capacity for
1023          * all possible cpus (even those offline). It also needs to trigger
1024          * re-computation of nr_big_task count on all online cpus.
1025          *
1026          * A changed rq->max_freq otoh needs to trigger re-computation of
1027          * load_scale_factor and capacity for just the cluster of cpus involved.
1028          * Since small task definition depends on max_load_scale_factor, a
1029          * changed load_scale_factor of one cluster could influence
1030          * classification of tasks in another cluster. Hence a changed
1031          * rq->max_freq will need to trigger re-computation of nr_big_task
1032          * count on all online cpus.
1033          *
1034          * While it should be sufficient for nr_big_tasks to be
1035          * re-computed for only online cpus, we have inadequate context
1036          * information here (in policy notifier) with regard to hotplug-safety
1037          * context in which notification is issued. As a result, we can't use
1038          * get_online_cpus() here, as it can lead to deadlock. Until cpufreq is
1039          * fixed up to issue notification always in hotplug-safe context,
1040          * re-compute nr_big_task for all possible cpus.
1041          */
1042
1043         if (orig_min_max_freq != min_max_freq ||
1044                 orig_max_possible_freq != max_possible_freq) {
1045                 cpus = cpu_possible_mask;
1046                 update_max = 1;
1047         }
1048
1049         /*
1050          * Changed load_scale_factor can trigger reclassification of tasks as
1051          * big or small. Make this change "atomic" so that tasks are accounted
1052          * properly due to changed load_scale_factor
1053          */
1054         for_each_cpu(i, cpus) {
1055                 struct rq *rq = cpu_rq(i);
1056
1057                 rq->capacity = compute_capacity(i);
1058                 rq->load_scale_factor = compute_load_scale_factor(i);
1059
1060                 if (update_max) {
1061                         u64 mpc, mplsf;
1062
1063                         mpc = div_u64(((u64) rq->capacity) *
1064                                 rq->max_possible_freq, rq->max_freq);
1065                         rq->max_possible_capacity = (int) mpc;
1066
1067                         mplsf = div_u64(((u64) rq->load_scale_factor) *
1068                                 rq->max_possible_freq, rq->max_freq);
1069
1070                         if (mpc > highest_mpc) {
1071                                 highest_mpc = mpc;
1072                                 cpumask_clear(&mpc_mask);
1073                                 cpumask_set_cpu(i, &mpc_mask);
1074                         } else if (mpc == highest_mpc) {
1075                                 cpumask_set_cpu(i, &mpc_mask);
1076                         }
1077
1078                         if (mplsf > highest_mplsf)
1079                                 highest_mplsf = mplsf;
1080                 }
1081         }
1082
1083         if (update_max) {
1084                 max_possible_capacity = highest_mpc;
1085                 max_load_scale_factor = highest_mplsf;
1086         }
1087
1088         __update_min_max_capacity();
1089
1090         return 0;
1091 }
1092
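/*
 * cpufreq transition notifier: fold the time spent at the old frequency into
 * the busy accounting of every cpu in the frequency domain, then record the
 * new frequency for subsequent scale_exec_time() calls.
 */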
1093 static int cpufreq_notifier_trans(struct notifier_block *nb,
1094                 unsigned long val, void *data)
1095 {
1096         struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
1097         unsigned int cpu = freq->cpu, new_freq = freq->new;
1098         unsigned long flags;
1099         int i;
1100
1101         if (val != CPUFREQ_POSTCHANGE)
1102                 return 0;
1103
1104         BUG_ON(!new_freq);
1105
1106         if (cpu_rq(cpu)->cur_freq == new_freq)
1107                 return 0;
1108
1109         for_each_cpu(i, &cpu_rq(cpu)->freq_domain_cpumask) {
1110                 struct rq *rq = cpu_rq(i);
1111
1112                 raw_spin_lock_irqsave(&rq->lock, flags);
1113                 walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
1114                                       walt_ktime_clock(), 0);
1115                 rq->cur_freq = new_freq;
1116                 raw_spin_unlock_irqrestore(&rq->lock, flags);
1117         }
1118
1119         return 0;
1120 }
1121
1122 static struct notifier_block notifier_policy_block = {
1123         .notifier_call = cpufreq_notifier_policy
1124 };
1125
1126 static struct notifier_block notifier_trans_block = {
1127         .notifier_call = cpufreq_notifier_trans
1128 };
1129
1130 static int register_sched_callback(void)
1131 {
1132         int ret;
1133
1134         ret = cpufreq_register_notifier(&notifier_policy_block,
1135                                                 CPUFREQ_POLICY_NOTIFIER);
1136
1137         if (!ret)
1138                 ret = cpufreq_register_notifier(&notifier_trans_block,
1139                                                 CPUFREQ_TRANSITION_NOTIFIER);
1140
1141         return 0;
1142 }
1143
1144 /*
1145  * cpufreq callbacks can be registered at core_initcall or later time.
1146  * Any registration done prior to that is "forgotten" by cpufreq. See
1147  * initialization of variable init_cpufreq_transition_notifier_list_called
1148  * for further information.
1149  */
1150 core_initcall(register_sched_callback);
1151
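/*
 * Seed a new task's demand and history with a percentage of a full window:
 * sysctl_sched_walt_init_task_load_pct by default (15% of 20 ms == 3 ms),
 * or current->init_load_pct if the forking task set one.
 */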
1152 void walt_init_new_task_load(struct task_struct *p)
1153 {
1154         int i;
1155         u32 init_load_windows =
1156                         div64_u64((u64)sysctl_sched_walt_init_task_load_pct *
1157                           (u64)walt_ravg_window, 100);
1158         u32 init_load_pct = current->init_load_pct;
1159
1160         p->init_load_pct = 0;
1161         memset(&p->ravg, 0, sizeof(struct ravg));
1162
1163         if (init_load_pct) {
1164                 init_load_windows = div64_u64((u64)init_load_pct *
1165                           (u64)walt_ravg_window, 100);
1166         }
1167
1168         p->ravg.demand = init_load_windows;
1169         for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
1170                 p->ravg.sum_history[i] = init_load_windows;
1171 }