kernel/sched/walt.c
/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 *
 * Window Assisted Load Tracking (WALT) implementation credits:
 * Srivatsa Vaddagiri, Steve Muckle, Syed Rameez Mustafa, Joonwoo Park,
 * Pavan Kumar Kondeti, Olav Haugan
 *
 * 2016-03-06: Integration with EAS/refactoring by Vikram Mulukutla
 *             and Todd Kjos
 */

#include <linux/syscore_ops.h>
#include <linux/cpufreq.h>
#include <trace/events/sched.h>
#include "sched.h"
#include "walt.h"

#define WINDOW_STATS_RECENT             0
#define WINDOW_STATS_MAX                1
#define WINDOW_STATS_MAX_RECENT_AVG     2
#define WINDOW_STATS_AVG                3
#define WINDOW_STATS_INVALID_POLICY     4

#define EXITING_TASK_MARKER     0xdeaddead

static __read_mostly unsigned int walt_ravg_hist_size = 5;
static __read_mostly unsigned int walt_window_stats_policy =
        WINDOW_STATS_MAX_RECENT_AVG;
static __read_mostly unsigned int walt_account_wait_time = 1;
static __read_mostly unsigned int walt_freq_account_wait_time = 0;
static __read_mostly unsigned int walt_io_is_busy = 0;

unsigned int sysctl_sched_walt_init_task_load_pct = 15;

/* 1 -> use PELT based load stats, 0 -> use window-based load stats */
unsigned int __read_mostly walt_disabled = 0;

static unsigned int max_possible_efficiency = 1024;
static unsigned int min_possible_efficiency = 1024;

/*
 * Maximum possible frequency across all cpus. Task demand and cpu
 * capacity (cpu_power) metrics are scaled in reference to it.
 */
static unsigned int max_possible_freq = 1;
/*
 * Minimum possible max_freq across all cpus. This will be the same as
 * max_possible_freq on homogeneous systems and could be different from
 * max_possible_freq on heterogeneous systems. min_max_freq is used to derive
 * capacity (cpu_power) of cpus.
 */
static unsigned int min_max_freq = 1;

static unsigned int max_load_scale_factor = 1024;
static unsigned int max_possible_capacity = 1024;

/* Mask of all CPUs that have max_possible_capacity */
static cpumask_t mpc_mask = CPU_MASK_ALL;

/* Window size (in ns) */
__read_mostly unsigned int walt_ravg_window = 20000000;

/* Min window size (in ns) = 10ms */
#define MIN_SCHED_RAVG_WINDOW 10000000

/* Max window size (in ns) = 1s */
#define MAX_SCHED_RAVG_WINDOW 1000000000

static unsigned int sync_cpu;
static ktime_t ktime_last;
static bool walt_ktime_suspended;

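/* Most recently computed demand of @p. */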
static unsigned int task_load(struct task_struct *p)
{
        return p->ravg.demand;
}

void
walt_inc_cumulative_runnable_avg(struct rq *rq,
                                 struct task_struct *p)
{
        rq->cumulative_runnable_avg += p->ravg.demand;
}

void
walt_dec_cumulative_runnable_avg(struct rq *rq,
                                 struct task_struct *p)
{
        rq->cumulative_runnable_avg -= p->ravg.demand;
        BUG_ON((s64)rq->cumulative_runnable_avg < 0);
}

static void
fixup_cumulative_runnable_avg(struct rq *rq,
                              struct task_struct *p, s64 task_load_delta)
{
        rq->cumulative_runnable_avg += task_load_delta;
        if ((s64)rq->cumulative_runnable_avg < 0)
                panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
                        task_load_delta, task_load(p));
}

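/* WALT wallclock: ktime based, held at its last value while suspended. */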
u64 walt_ktime_clock(void)
{
        if (unlikely(walt_ktime_suspended))
                return ktime_to_ns(ktime_last);
        return ktime_get_ns();
}

static void walt_resume(void)
{
        walt_ktime_suspended = false;
}

static int walt_suspend(void)
{
        ktime_last = ktime_get();
        walt_ktime_suspended = true;
        return 0;
}

static struct syscore_ops walt_syscore_ops = {
        .resume = walt_resume,
        .suspend = walt_suspend
};

static int __init walt_init_ops(void)
{
        register_syscore_ops(&walt_syscore_ops);
        return 0;
}
late_initcall(walt_init_ops);

void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
                struct task_struct *p)
{
        cfs_rq->cumulative_runnable_avg += p->ravg.demand;
}

void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
                struct task_struct *p)
{
        cfs_rq->cumulative_runnable_avg -= p->ravg.demand;
}

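/* Has @p started exiting? If so, mark its history so later updates can tell. */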
static int exiting_task(struct task_struct *p)
{
        if (p->flags & PF_EXITING) {
                if (p->ravg.sum_history[0] != EXITING_TASK_MARKER)
                        p->ravg.sum_history[0] = EXITING_TASK_MARKER;
                return 1;
        }
        return 0;
}

static int __init set_walt_ravg_window(char *str)
{
        get_option(&str, &walt_ravg_window);

        walt_disabled = (walt_ravg_window < MIN_SCHED_RAVG_WINDOW ||
                                walt_ravg_window > MAX_SCHED_RAVG_WINDOW);
        return 0;
}

early_param("walt_ravg_window", set_walt_ravg_window);

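/* Advance rq->window_start to the most recent window boundary at or before @wallclock. */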
static void
update_window_start(struct rq *rq, u64 wallclock)
{
        s64 delta;
        int nr_windows;

        delta = wallclock - rq->window_start;
        /*
         * If the MPM global timer has been cleared, set delta to 0 to
         * avoid triggering a kernel BUG.
         */
        if (delta < 0) {
                delta = 0;
                WARN_ONCE(1, "WALT wallclock appears to have gone backwards or reset\n");
        }

        if (delta < walt_ravg_window)
                return;

        nr_windows = div64_u64(delta, walt_ravg_window);
        rq->window_start += (u64)nr_windows * (u64)walt_ravg_window;
}

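/*
 * Scale @delta by the cpu's current frequency (relative to
 * max_possible_freq) and by its efficiency (relative to
 * max_possible_efficiency).
 */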
static u64 scale_exec_time(u64 delta, struct rq *rq)
{
        unsigned int cur_freq = rq->cur_freq;
        int sf;

        if (unlikely(cur_freq > max_possible_freq))
                cur_freq = rq->max_possible_freq;

        /* round up div64 */
        delta = div64_u64(delta * cur_freq + max_possible_freq - 1,
                          max_possible_freq);

        sf = DIV_ROUND_UP(rq->efficiency * 1024, max_possible_efficiency);

        delta *= sf;
        delta >>= 10;

        return delta;
}

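/* With walt_io_is_busy set, a cpu with tasks blocked in iowait is treated as busy. */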
static int cpu_is_waiting_on_io(struct rq *rq)
{
        if (!walt_io_is_busy)
                return 0;

        return atomic_read(&rq->nr_iowait);
}

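/*
 * Account irq/softirq time @delta against @cpu and fold it into the
 * cpu's decaying average irqload.
 */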
void walt_account_irqtime(int cpu, struct task_struct *curr,
                                 u64 delta, u64 wallclock)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags, nr_windows;
        u64 cur_jiffies_ts;

        raw_spin_lock_irqsave(&rq->lock, flags);

        /*
         * cputime (wallclock) uses sched_clock so use the same here for
         * consistency.
         */
        delta += sched_clock() - wallclock;
        cur_jiffies_ts = get_jiffies_64();

        if (is_idle_task(curr))
                walt_update_task_ravg(curr, rq, IRQ_UPDATE, walt_ktime_clock(),
                                 delta);

        nr_windows = cur_jiffies_ts - rq->irqload_ts;

        if (nr_windows) {
                if (nr_windows < 10) {
                        /* Decay CPU's irqload by 3/4 for each window. */
                        rq->avg_irqload *= (3 * nr_windows);
                        rq->avg_irqload = div64_u64(rq->avg_irqload,
                                                    4 * nr_windows);
                } else {
                        rq->avg_irqload = 0;
                }
                rq->avg_irqload += rq->cur_irqload;
                rq->cur_irqload = 0;
        }

        rq->cur_irqload += delta;
        rq->irqload_ts = cur_jiffies_ts;
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#define WALT_HIGH_IRQ_TIMEOUT 3

u64 walt_irqload(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        s64 delta;

        delta = get_jiffies_64() - rq->irqload_ts;

        /*
         * The current context can be preempted by an irq, and rq->irqload_ts
         * can be updated from irq context, so delta can be negative. This is
         * fine: a negative delta simply means there was a recent irq
         * occurrence, and returning rq->avg_irqload is still correct.
         */
        if (delta < WALT_HIGH_IRQ_TIMEOUT)
                return rq->avg_irqload;
        else
                return 0;
}

int walt_cpu_high_irqload(int cpu)
{
        return walt_irqload(cpu) >= sysctl_sched_walt_cpu_high_irqload;
}

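/*
 * Decide whether the time since the last accounting event counts as cpu
 * busy time, i.e. whether it contributes to rq->curr/prev_runnable_sum.
 */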
static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
                                     u64 irqtime, int event)
{
        if (is_idle_task(p)) {
                /* TASK_WAKE && TASK_MIGRATE is not possible on idle task! */
                if (event == PICK_NEXT_TASK)
                        return 0;

                /* PUT_PREV_TASK, TASK_UPDATE && IRQ_UPDATE are left */
                return irqtime || cpu_is_waiting_on_io(rq);
        }

        if (event == TASK_WAKE)
                return 0;

        if (event == PUT_PREV_TASK || event == IRQ_UPDATE ||
                                         event == TASK_UPDATE)
                return 1;

        /* Only TASK_MIGRATE && PICK_NEXT_TASK left */
        return walt_freq_account_wait_time;
}

/*
 * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
 */
static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
             int event, u64 wallclock, u64 irqtime)
{
        int new_window, nr_full_windows = 0;
        int p_is_curr_task = (p == rq->curr);
        u64 mark_start = p->ravg.mark_start;
        u64 window_start = rq->window_start;
        u32 window_size = walt_ravg_window;
        u64 delta;

        new_window = mark_start < window_start;
        if (new_window) {
                nr_full_windows = div64_u64((window_start - mark_start),
                                                window_size);
                if (p->ravg.active_windows < USHRT_MAX)
                        p->ravg.active_windows++;
        }

        /* Handle per-task window rollover. We don't care about the idle
         * task or exiting tasks. */
        if (new_window && !is_idle_task(p) && !exiting_task(p)) {
                u32 curr_window = 0;

                if (!nr_full_windows)
                        curr_window = p->ravg.curr_window;

                p->ravg.prev_window = curr_window;
                p->ravg.curr_window = 0;
        }

        if (!account_busy_for_cpu_time(rq, p, irqtime, event)) {
                /* account_busy_for_cpu_time() = 0, so no update to the
                 * task's current window needs to be made. This could be
                 * for example
                 *
                 *   - a wakeup event on a task within the current
                 *     window (!new_window below, no action required),
                 *   - switching to a new task from idle (PICK_NEXT_TASK)
                 *     in a new window where irqtime is 0 and we aren't
                 *     waiting on IO */

                if (!new_window)
                        return;

                /* A new window has started. The RQ demand must be rolled
                 * over if p is the current task. */
                if (p_is_curr_task) {
                        u64 prev_sum = 0;

                        /* p is either idle task or an exiting task */
                        if (!nr_full_windows)
                                prev_sum = rq->curr_runnable_sum;

                        rq->prev_runnable_sum = prev_sum;
                        rq->curr_runnable_sum = 0;
                }

                return;
        }

        if (!new_window) {
                /* account_busy_for_cpu_time() = 1 so busy time needs
                 * to be accounted to the current window. No rollover
                 * since we didn't start a new window. An example of this is
                 * when a task starts execution and then sleeps within the
                 * same window. */

                if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq))
                        delta = wallclock - mark_start;
                else
                        delta = irqtime;
                delta = scale_exec_time(delta, rq);
                rq->curr_runnable_sum += delta;
                if (!is_idle_task(p) && !exiting_task(p))
                        p->ravg.curr_window += delta;

                return;
        }

        if (!p_is_curr_task) {
                /* account_busy_for_cpu_time() = 1 so busy time needs
                 * to be accounted to the current window. A new window
                 * has also started, but p is not the current task, so the
                 * window is not rolled over - just split up and account
                 * as necessary into curr and prev. The window is only
                 * rolled over when a new window is processed for the current
                 * task.
                 *
                 * Irqtime can't be accounted by a task that isn't the
                 * currently running task. */

                if (!nr_full_windows) {
                        /* A full window hasn't elapsed, account partial
                         * contribution to previous completed window. */
                        delta = scale_exec_time(window_start - mark_start, rq);
                        if (!exiting_task(p))
                                p->ravg.prev_window += delta;
                } else {
                        /* Since at least one full window has elapsed,
                         * the contribution to the previous window is the
                         * full window (window_size). */
                        delta = scale_exec_time(window_size, rq);
                        if (!exiting_task(p))
                                p->ravg.prev_window = delta;
                }
                rq->prev_runnable_sum += delta;

                /* Account piece of busy time in the current window. */
                delta = scale_exec_time(wallclock - window_start, rq);
                rq->curr_runnable_sum += delta;
                if (!exiting_task(p))
                        p->ravg.curr_window = delta;

                return;
        }

        if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) {
                /* account_busy_for_cpu_time() = 1 so busy time needs
                 * to be accounted to the current window. A new window
                 * has started and p is the current task so rollover is
                 * needed. If any of these three above conditions are true
                 * then this busy time can't be accounted as irqtime.
                 *
                 * Busy time for the idle task or exiting tasks need not
                 * be accounted.
                 *
                 * An example of this would be a task that starts execution
                 * and then sleeps once a new window has begun. */

                if (!nr_full_windows) {
                        /* A full window hasn't elapsed, account partial
                         * contribution to previous completed window. */
                        delta = scale_exec_time(window_start - mark_start, rq);
                        if (!is_idle_task(p) && !exiting_task(p))
                                p->ravg.prev_window += delta;

                        delta += rq->curr_runnable_sum;
                } else {
                        /* Since at least one full window has elapsed,
                         * the contribution to the previous window is the
                         * full window (window_size). */
                        delta = scale_exec_time(window_size, rq);
                        if (!is_idle_task(p) && !exiting_task(p))
                                p->ravg.prev_window = delta;
                }
                /*
                 * Rollover for normal runnable sum is done here by overwriting
                 * the values in prev_runnable_sum and curr_runnable_sum.
                 * Rollover for new task runnable sum has completed by previous
                 * if-else statement.
                 */
                rq->prev_runnable_sum = delta;

                /* Account piece of busy time in the current window. */
                delta = scale_exec_time(wallclock - window_start, rq);
                rq->curr_runnable_sum = delta;
                if (!is_idle_task(p) && !exiting_task(p))
                        p->ravg.curr_window = delta;

                return;
        }

        if (irqtime) {
                /* account_busy_for_cpu_time() = 1 so busy time needs
                 * to be accounted to the current window. A new window
                 * has started and p is the current task so rollover is
                 * needed. The current task must be the idle task because
                 * irqtime is not accounted for any other task.
                 *
                 * Irqtime will be accounted each time we process IRQ activity
                 * after a period of idleness, so we know the IRQ busy time
                 * started at wallclock - irqtime. */

                BUG_ON(!is_idle_task(p));
                mark_start = wallclock - irqtime;

                /* Roll window over. If IRQ busy time was just in the current
                 * window then that is all that need be accounted. */
                rq->prev_runnable_sum = rq->curr_runnable_sum;
                if (mark_start > window_start) {
                        rq->curr_runnable_sum = scale_exec_time(irqtime, rq);
                        return;
                }

                /* The IRQ busy time spanned multiple windows. Process the
                 * busy time preceding the current window start first. */
                delta = window_start - mark_start;
                if (delta > window_size)
                        delta = window_size;
                delta = scale_exec_time(delta, rq);
                rq->prev_runnable_sum += delta;

                /* Process the remaining IRQ busy time in the current window. */
                delta = wallclock - window_start;
                rq->curr_runnable_sum = scale_exec_time(delta, rq);

                return;
        }

        BUG();
}

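/* Decide whether the elapsed time should be added to @p's demand (p->ravg.sum). */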
static int account_busy_for_task_demand(struct task_struct *p, int event)
{
        /* No need to bother updating task demand for exiting tasks
         * or the idle task. */
        if (exiting_task(p) || is_idle_task(p))
                return 0;

        /* When a task is waking up it is completing a segment of non-busy
         * time. Likewise, if wait time is not treated as busy time, then
         * when a task begins to run or is migrated, it is not running and
         * is completing a segment of non-busy time. */
        if (event == TASK_WAKE || (!walt_account_wait_time &&
                         (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
                return 0;

        return 1;
}

/*
 * Called when new window is starting for a task, to record cpu usage over
 * recently concluded window(s). Normally 'samples' should be 1. It can be > 1
 * when, say, a real-time task runs without preemption for several windows at a
 * stretch.
 */
static void update_history(struct rq *rq, struct task_struct *p,
                         u32 runtime, int samples, int event)
{
        u32 *hist = &p->ravg.sum_history[0];
        int ridx, widx;
        u32 max = 0, avg, demand;
        u64 sum = 0;

        /* Ignore windows where task had no activity */
        if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
                goto done;

        /* Push new 'runtime' value onto stack */
        widx = walt_ravg_hist_size - 1;
        ridx = widx - samples;
        for (; ridx >= 0; --widx, --ridx) {
                hist[widx] = hist[ridx];
                sum += hist[widx];
                if (hist[widx] > max)
                        max = hist[widx];
        }

        for (widx = 0; widx < samples && widx < walt_ravg_hist_size; widx++) {
                hist[widx] = runtime;
                sum += hist[widx];
                if (hist[widx] > max)
                        max = hist[widx];
        }

        p->ravg.sum = 0;

        if (walt_window_stats_policy == WINDOW_STATS_RECENT) {
                demand = runtime;
        } else if (walt_window_stats_policy == WINDOW_STATS_MAX) {
                demand = max;
        } else {
                avg = div64_u64(sum, walt_ravg_hist_size);
                if (walt_window_stats_policy == WINDOW_STATS_AVG)
                        demand = avg;
                else
                        demand = max(avg, runtime);
        }

        /*
         * A throttled deadline sched class task gets dequeued without
         * changing p->on_rq. Since the dequeue decrements hmp stats
         * avoid decrementing it here again.
         */
        if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
                                                !p->dl.dl_throttled))
                fixup_cumulative_runnable_avg(rq, p, demand);

        p->ravg.demand = demand;

done:
        trace_walt_update_history(rq, p, runtime, samples, event);
        return;
}

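/* Add scaled execution time @delta to @p's current window sum, capped at one full window. */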
static void add_to_task_demand(struct rq *rq, struct task_struct *p,
                                u64 delta)
{
        delta = scale_exec_time(delta, rq);
        p->ravg.sum += delta;
        if (unlikely(p->ravg.sum > walt_ravg_window))
                p->ravg.sum = walt_ravg_window;
}

/*
 * Account cpu demand of task and/or update task's cpu demand history
 *
 * ms = p->ravg.mark_start;
 * wc = wallclock
 * ws = rq->window_start
 *
 * Three possibilities:
 *
 *      a) Task event is contained within one window.
 *              window_start < mark_start < wallclock
 *
 *              ws   ms  wc
 *              |    |   |
 *              V    V   V
 *              |---------------|
 *
 *      In this case, p->ravg.sum is updated *iff* event is appropriate
 *      (ex: event == PUT_PREV_TASK)
 *
 *      b) Task event spans two windows.
 *              mark_start < window_start < wallclock
 *
 *              ms   ws   wc
 *              |    |    |
 *              V    V    V
 *              -----|-------------------
 *
 *      In this case, p->ravg.sum is updated with (ws - ms) *iff* event
 *      is appropriate, then a new window sample is recorded followed
 *      by p->ravg.sum being set to (wc - ws) *iff* event is appropriate.
 *
 *      c) Task event spans more than two windows.
 *
 *              ms ws_tmp                          ws  wc
 *              |  |                               |   |
 *              V  V                               V   V
 *              ---|-------|-------|-------|-------|------
 *                 |                               |
 *                 |<------ nr_full_windows ------>|
 *
 *      In this case, p->ravg.sum is updated with (ws_tmp - ms) first *iff*
 *      event is appropriate, window sample of p->ravg.sum is recorded,
 *      'nr_full_window' samples of window_size is also recorded *iff*
 *      event is appropriate and finally p->ravg.sum is set to (wc - ws)
 *      *iff* event is appropriate.
 *
 * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
 * depends on it!
 */
static void update_task_demand(struct task_struct *p, struct rq *rq,
             int event, u64 wallclock)
{
        u64 mark_start = p->ravg.mark_start;
        u64 delta, window_start = rq->window_start;
        int new_window, nr_full_windows;
        u32 window_size = walt_ravg_window;

        new_window = mark_start < window_start;
        if (!account_busy_for_task_demand(p, event)) {
                if (new_window)
                        /* If the time accounted isn't being accounted as
                         * busy time, and a new window started, only the
                         * previous window need be closed out with the
                         * pre-existing demand. Multiple windows may have
                         * elapsed, but since empty windows are dropped,
                         * it is not necessary to account those. */
                        update_history(rq, p, p->ravg.sum, 1, event);
                return;
        }

        if (!new_window) {
                /* The simple case - busy time contained within the existing
                 * window. */
                add_to_task_demand(rq, p, wallclock - mark_start);
                return;
        }

        /* Busy time spans at least two windows. Temporarily rewind
         * window_start to first window boundary after mark_start. */
        delta = window_start - mark_start;
        nr_full_windows = div64_u64(delta, window_size);
        window_start -= (u64)nr_full_windows * (u64)window_size;

        /* Process (window_start - mark_start) first */
        add_to_task_demand(rq, p, window_start - mark_start);

        /* Push new sample(s) into task's demand history */
        update_history(rq, p, p->ravg.sum, 1, event);
        if (nr_full_windows)
                update_history(rq, p, scale_exec_time(window_size, rq),
                               nr_full_windows, event);

        /* Roll window_start back to current to process any remainder
         * in current window. */
        window_start += (u64)nr_full_windows * (u64)window_size;

        /* Process (wallclock - window_start) next */
        mark_start = window_start;
        add_to_task_demand(rq, p, wallclock - mark_start);
}

/* Reflect task activity on its demand and cpu's busy time statistics */
void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
             int event, u64 wallclock, u64 irqtime)
{
        if (walt_disabled || !rq->window_start)
                return;

        lockdep_assert_held(&rq->lock);

        update_window_start(rq, wallclock);

        if (!p->ravg.mark_start)
                goto done;

        update_task_demand(p, rq, event, wallclock);
        update_cpu_busy_time(p, rq, event, wallclock, irqtime);

done:
        trace_walt_update_task_ravg(p, rq, event, wallclock, irqtime);

        p->ravg.mark_start = wallclock;
}

unsigned long __weak arch_get_cpu_efficiency(int cpu)
{
        return SCHED_LOAD_SCALE;
}

void walt_init_cpu_efficiency(void)
{
        int i, efficiency;
        unsigned int max = 0, min = UINT_MAX;

        for_each_possible_cpu(i) {
                efficiency = arch_get_cpu_efficiency(i);
                cpu_rq(i)->efficiency = efficiency;

                if (efficiency > max)
                        max = efficiency;
                if (efficiency < min)
                        min = efficiency;
        }

        if (max)
                max_possible_efficiency = max;

        if (min)
                min_possible_efficiency = min;
}

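/* Reset @p's WALT statistics, preserving the EXITING_TASK marker for exiting tasks. */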
static void reset_task_stats(struct task_struct *p)
{
        u32 sum = 0;

        if (exiting_task(p))
                sum = EXITING_TASK_MARKER;

        memset(&p->ravg, 0, sizeof(struct ravg));
        /* Retain EXITING_TASK marker */
        p->ravg.sum_history[0] = sum;
}

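/* Called when a freshly created task starts; stamp its mark_start with the current WALT wallclock. */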
void walt_mark_task_starting(struct task_struct *p)
{
        u64 wallclock;
        struct rq *rq = task_rq(p);

        if (!rq->window_start) {
                reset_task_stats(p);
                return;
        }

        wallclock = walt_ktime_clock();
        p->ravg.mark_start = wallclock;
}

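/* Initialize this rq's window_start, synchronized with the sync cpu's window. */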
void walt_set_window_start(struct rq *rq)
{
        int cpu = cpu_of(rq);
        struct rq *sync_rq = cpu_rq(sync_cpu);

        if (rq->window_start)
                return;

        if (cpu == sync_cpu) {
                rq->window_start = walt_ktime_clock();
        } else {
                raw_spin_unlock(&rq->lock);
                double_rq_lock(rq, sync_rq);
                rq->window_start = cpu_rq(sync_cpu)->window_start;
                rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
                raw_spin_unlock(&sync_rq->lock);
        }

        rq->curr->ravg.mark_start = rq->window_start;
}

void walt_migrate_sync_cpu(int cpu)
{
        if (cpu == sync_cpu)
                sync_cpu = smp_processor_id();
}

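/*
 * On migration, move @p's current and previous window contributions from
 * the source rq to the destination rq.
 */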
void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
{
        struct rq *src_rq = task_rq(p);
        struct rq *dest_rq = cpu_rq(new_cpu);
        u64 wallclock;

        if (!p->on_rq && p->state != TASK_WAKING)
                return;

        if (exiting_task(p))
                return;

        if (p->state == TASK_WAKING)
                double_rq_lock(src_rq, dest_rq);

        wallclock = walt_ktime_clock();

        walt_update_task_ravg(task_rq(p)->curr, task_rq(p),
                        TASK_UPDATE, wallclock, 0);
        walt_update_task_ravg(dest_rq->curr, dest_rq,
                        TASK_UPDATE, wallclock, 0);

        walt_update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);

        if (p->ravg.curr_window) {
                src_rq->curr_runnable_sum -= p->ravg.curr_window;
                dest_rq->curr_runnable_sum += p->ravg.curr_window;
        }

        if (p->ravg.prev_window) {
                src_rq->prev_runnable_sum -= p->ravg.prev_window;
                dest_rq->prev_runnable_sum += p->ravg.prev_window;
        }

        if ((s64)src_rq->prev_runnable_sum < 0) {
                src_rq->prev_runnable_sum = 0;
                WARN_ON(1);
        }
        if ((s64)src_rq->curr_runnable_sum < 0) {
                src_rq->curr_runnable_sum = 0;
                WARN_ON(1);
        }

        trace_walt_migration_update_sum(src_rq, p);
        trace_walt_migration_update_sum(dest_rq, p);

        if (p->state == TASK_WAKING)
                double_rq_unlock(src_rq, dest_rq);
}

/*
 * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
 * least efficient cpu gets capacity of 1024
 */
static unsigned long capacity_scale_cpu_efficiency(int cpu)
{
        return (1024 * cpu_rq(cpu)->efficiency) / min_possible_efficiency;
}

/*
 * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
 * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
 */
static unsigned long capacity_scale_cpu_freq(int cpu)
{
        return (1024 * cpu_rq(cpu)->max_freq) / min_max_freq;
}

/*
 * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
 * that "most" efficient cpu gets a load_scale_factor of 1
 */
static unsigned long load_scale_cpu_efficiency(int cpu)
{
        return DIV_ROUND_UP(1024 * max_possible_efficiency,
                            cpu_rq(cpu)->efficiency);
}

/*
 * Return load_scale_factor of a cpu in reference to cpu with best max_freq
 * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
 * of 1.
 */
static unsigned long load_scale_cpu_freq(int cpu)
{
        return DIV_ROUND_UP(1024 * max_possible_freq, cpu_rq(cpu)->max_freq);
}

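/* Capacity of @cpu, scaled such that a cpu with min_possible_efficiency running at min_max_freq gets 1024. */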
static int compute_capacity(int cpu)
{
        int capacity = 1024;

        capacity *= capacity_scale_cpu_efficiency(cpu);
        capacity >>= 10;

        capacity *= capacity_scale_cpu_freq(cpu);
        capacity >>= 10;

        return capacity;
}

static int compute_load_scale_factor(int cpu)
{
        int load_scale = 1024;

        /*
         * load_scale_factor accounts for the fact that task load
         * is in reference to "best" performing cpu. Task's load will need to be
         * scaled (up) by a factor to determine suitability to be placed on a
         * (little) cpu.
         */
        load_scale *= load_scale_cpu_efficiency(cpu);
        load_scale >>= 10;

        load_scale *= load_scale_cpu_freq(cpu);
        load_scale >>= 10;

        return load_scale;
}

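/*
 * cpufreq policy notifier: record per-cpu frequency limits and, when a
 * policy's max frequency changes, recompute capacity and load_scale_factor
 * for the affected cpus.
 */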
static int cpufreq_notifier_policy(struct notifier_block *nb,
                unsigned long val, void *data)
{
        struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
        int i, update_max = 0;
        u64 highest_mpc = 0, highest_mplsf = 0;
        const struct cpumask *cpus = policy->related_cpus;
        unsigned int orig_min_max_freq = min_max_freq;
        unsigned int orig_max_possible_freq = max_possible_freq;
        /* Initialized to policy->max in case policy->related_cpus is empty! */
        unsigned int orig_max_freq = policy->max;

        if (val != CPUFREQ_NOTIFY)
                return 0;

        for_each_cpu(i, policy->related_cpus) {
                cpumask_copy(&cpu_rq(i)->freq_domain_cpumask,
                             policy->related_cpus);
                orig_max_freq = cpu_rq(i)->max_freq;
                cpu_rq(i)->min_freq = policy->min;
                cpu_rq(i)->max_freq = policy->max;
                cpu_rq(i)->cur_freq = policy->cur;
                cpu_rq(i)->max_possible_freq = policy->cpuinfo.max_freq;
        }

        max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
        if (min_max_freq == 1)
                min_max_freq = UINT_MAX;
        min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq);
        BUG_ON(!min_max_freq);
        BUG_ON(!policy->max);

        /* Changes to policy other than max_freq don't require any updates */
        if (orig_max_freq == policy->max)
                return 0;

        /*
         * A changed min_max_freq or max_possible_freq (possible during bootup)
         * needs to trigger re-computation of load_scale_factor and capacity for
         * all possible cpus (even those offline). It also needs to trigger
         * re-computation of nr_big_task count on all online cpus.
         *
         * A changed rq->max_freq otoh needs to trigger re-computation of
         * load_scale_factor and capacity for just the cluster of cpus involved.
         * Since small task definition depends on max_load_scale_factor, a
         * changed load_scale_factor of one cluster could influence
         * classification of tasks in another cluster. Hence a changed
         * rq->max_freq will need to trigger re-computation of nr_big_task
         * count on all online cpus.
         *
         * While it should be sufficient for nr_big_tasks to be
         * re-computed for only online cpus, we have inadequate context
         * information here (in policy notifier) with regard to hotplug-safety
         * context in which notification is issued. As a result, we can't use
         * get_online_cpus() here, as it can lead to deadlock. Until cpufreq is
         * fixed up to issue notification always in hotplug-safe context,
         * re-compute nr_big_task for all possible cpus.
         */

        if (orig_min_max_freq != min_max_freq ||
                orig_max_possible_freq != max_possible_freq) {
                        cpus = cpu_possible_mask;
                        update_max = 1;
        }

        /*
         * Changed load_scale_factor can trigger reclassification of tasks as
         * big or small. Make this change "atomic" so that tasks are accounted
         * properly due to changed load_scale_factor
         */
        for_each_cpu(i, cpus) {
                struct rq *rq = cpu_rq(i);

                rq->capacity = compute_capacity(i);
                rq->load_scale_factor = compute_load_scale_factor(i);

                if (update_max) {
                        u64 mpc, mplsf;

                        mpc = div_u64(((u64) rq->capacity) *
                                rq->max_possible_freq, rq->max_freq);
                        rq->max_possible_capacity = (int) mpc;

                        mplsf = div_u64(((u64) rq->load_scale_factor) *
                                rq->max_possible_freq, rq->max_freq);

                        if (mpc > highest_mpc) {
                                highest_mpc = mpc;
                                cpumask_clear(&mpc_mask);
                                cpumask_set_cpu(i, &mpc_mask);
                        } else if (mpc == highest_mpc) {
                                cpumask_set_cpu(i, &mpc_mask);
                        }

                        if (mplsf > highest_mplsf)
                                highest_mplsf = mplsf;
                }
        }

        if (update_max) {
                max_possible_capacity = highest_mpc;
                max_load_scale_factor = highest_mplsf;
        }

        return 0;
}

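/*
 * cpufreq transition notifier: fold the time spent at the old frequency
 * into the busy accounting of every cpu in the frequency domain, then
 * record the new frequency.
 */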
static int cpufreq_notifier_trans(struct notifier_block *nb,
                unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
        unsigned int cpu = freq->cpu, new_freq = freq->new;
        unsigned long flags;
        int i;

        if (val != CPUFREQ_POSTCHANGE)
                return 0;

        BUG_ON(!new_freq);

        if (cpu_rq(cpu)->cur_freq == new_freq)
                return 0;

        for_each_cpu(i, &cpu_rq(cpu)->freq_domain_cpumask) {
                struct rq *rq = cpu_rq(i);

                raw_spin_lock_irqsave(&rq->lock, flags);
                walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
                                      walt_ktime_clock(), 0);
                rq->cur_freq = new_freq;
                raw_spin_unlock_irqrestore(&rq->lock, flags);
        }

        return 0;
}

static struct notifier_block notifier_policy_block = {
        .notifier_call = cpufreq_notifier_policy
};

static struct notifier_block notifier_trans_block = {
        .notifier_call = cpufreq_notifier_trans
};

static int register_sched_callback(void)
{
        int ret;

        ret = cpufreq_register_notifier(&notifier_policy_block,
                                                CPUFREQ_POLICY_NOTIFIER);

        if (!ret)
                ret = cpufreq_register_notifier(&notifier_trans_block,
                                                CPUFREQ_TRANSITION_NOTIFIER);

        return 0;
}

/*
 * cpufreq callbacks can be registered at core_initcall or later time.
 * Any registration done prior to that is "forgotten" by cpufreq. See
 * initialization of variable init_cpufreq_transition_notifier_list_called
 * for further information.
 */
core_initcall(register_sched_callback);

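/* Seed a new task's demand and demand history from the configured initial task load percentage. */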
void walt_init_new_task_load(struct task_struct *p)
{
        int i;
        u32 init_load_windows =
                        div64_u64((u64)sysctl_sched_walt_init_task_load_pct *
                          (u64)walt_ravg_window, 100);
        u32 init_load_pct = current->init_load_pct;

        p->init_load_pct = 0;
        memset(&p->ravg, 0, sizeof(struct ravg));

        if (init_load_pct) {
                init_load_windows = div64_u64((u64)init_load_pct *
                          (u64)walt_ravg_window, 100);
        }

        p->ravg.demand = init_load_windows;
        for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
                p->ravg.sum_history[i] = init_load_windows;
}