kernel/sched/walt.c (firefly-linux-kernel-4.4.55.git)
/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 *
 * Window Assisted Load Tracking (WALT) implementation credits:
 * Srivatsa Vaddagiri, Steve Muckle, Syed Rameez Mustafa, Joonwoo Park,
 * Pavan Kumar Kondeti, Olav Haugan
 *
 * 2016-03-06: Integration with EAS/refactoring by Vikram Mulukutla
 *             and Todd Kjos
 */

#include <linux/syscore_ops.h>
#include <linux/cpufreq.h>
#include <trace/events/sched.h>
#include "sched.h"
#include "walt.h"

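/*
 * Demand policy used by update_history() when a window closes:
 *   WINDOW_STATS_RECENT          - demand follows the most recent window
 *   WINDOW_STATS_MAX             - demand is the max over the recorded history
 *   WINDOW_STATS_MAX_RECENT_AVG  - max(average of history, most recent window)
 *   WINDOW_STATS_AVG             - demand is the average of the history
 */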
#define WINDOW_STATS_RECENT             0
#define WINDOW_STATS_MAX                1
#define WINDOW_STATS_MAX_RECENT_AVG     2
#define WINDOW_STATS_AVG                3
#define WINDOW_STATS_INVALID_POLICY     4

#define EXITING_TASK_MARKER     0xdeaddead

static __read_mostly unsigned int walt_ravg_hist_size = 5;
static __read_mostly unsigned int walt_window_stats_policy =
        WINDOW_STATS_MAX_RECENT_AVG;
static __read_mostly unsigned int walt_account_wait_time = 1;
static __read_mostly unsigned int walt_freq_account_wait_time = 0;
static __read_mostly unsigned int walt_io_is_busy = 0;

unsigned int sysctl_sched_walt_init_task_load_pct = 15;

/* 1 -> use PELT based load stats, 0 -> use window-based load stats */
unsigned int __read_mostly walt_disabled = 0;

static unsigned int max_possible_efficiency = 1024;
static unsigned int min_possible_efficiency = 1024;

/*
 * Maximum possible frequency across all cpus. Task demand and cpu
 * capacity (cpu_power) metrics are scaled in reference to it.
 */
static unsigned int max_possible_freq = 1;

/*
 * Minimum possible max_freq across all cpus. This will be the same as
 * max_possible_freq on homogeneous systems and could be different from
 * max_possible_freq on heterogeneous systems. min_max_freq is used to derive
 * capacity (cpu_power) of cpus.
 */
static unsigned int min_max_freq = 1;

static unsigned int max_capacity = 1024;
static unsigned int min_capacity = 1024;
static unsigned int max_load_scale_factor = 1024;
static unsigned int max_possible_capacity = 1024;

/* Mask of all CPUs that have max_possible_capacity */
static cpumask_t mpc_mask = CPU_MASK_ALL;

/* Window size (in ns) */
__read_mostly unsigned int walt_ravg_window = 20000000;

/* Min window size (in ns) = 10ms */
#define MIN_SCHED_RAVG_WINDOW 10000000

/* Max window size (in ns) = 1s */
#define MAX_SCHED_RAVG_WINDOW 1000000000

static unsigned int sync_cpu;
static ktime_t ktime_last;
static bool walt_ktime_suspended;

static unsigned int task_load(struct task_struct *p)
{
        return p->ravg.demand;
}

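/*
 * rq->cumulative_runnable_avg is the sum of the WALT demand of all runnable
 * tasks on the runqueue; the inc/dec helpers below keep it in sync as tasks
 * are enqueued and dequeued, and fixup_cumulative_runnable_avg() adjusts it
 * when a queued task's demand changes.
 */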
void
walt_inc_cumulative_runnable_avg(struct rq *rq,
                                 struct task_struct *p)
{
        rq->cumulative_runnable_avg += p->ravg.demand;
}

void
walt_dec_cumulative_runnable_avg(struct rq *rq,
                                 struct task_struct *p)
{
        rq->cumulative_runnable_avg -= p->ravg.demand;
        BUG_ON((s64)rq->cumulative_runnable_avg < 0);
}

static void
fixup_cumulative_runnable_avg(struct rq *rq,
                              struct task_struct *p, s64 task_load_delta)
{
        rq->cumulative_runnable_avg += task_load_delta;
        if ((s64)rq->cumulative_runnable_avg < 0)
                panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
                        task_load_delta, task_load(p));
}

u64 walt_ktime_clock(void)
{
        if (unlikely(walt_ktime_suspended))
                return ktime_to_ns(ktime_last);
        return ktime_get_ns();
}

static void walt_resume(void)
{
        walt_ktime_suspended = false;
}

static int walt_suspend(void)
{
        ktime_last = ktime_get();
        walt_ktime_suspended = true;
        return 0;
}

static struct syscore_ops walt_syscore_ops = {
        .resume = walt_resume,
        .suspend = walt_suspend
};

static int __init walt_init_ops(void)
{
        register_syscore_ops(&walt_syscore_ops);
        return 0;
}
late_initcall(walt_init_ops);

void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
                struct task_struct *p)
{
        cfs_rq->cumulative_runnable_avg += p->ravg.demand;
}

void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
                struct task_struct *p)
{
        cfs_rq->cumulative_runnable_avg -= p->ravg.demand;
}

static int exiting_task(struct task_struct *p)
{
        if (p->flags & PF_EXITING) {
                if (p->ravg.sum_history[0] != EXITING_TASK_MARKER) {
                        p->ravg.sum_history[0] = EXITING_TASK_MARKER;
                }
                return 1;
        }
        return 0;
}

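/*
 * "walt_ravg_window=<ns>" on the kernel command line overrides the default
 * window size. Out-of-range values disable WALT entirely rather than being
 * clamped.
 */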
static int __init set_walt_ravg_window(char *str)
{
        get_option(&str, &walt_ravg_window);

        walt_disabled = (walt_ravg_window < MIN_SCHED_RAVG_WINDOW ||
                                walt_ravg_window > MAX_SCHED_RAVG_WINDOW);
        return 0;
}

early_param("walt_ravg_window", set_walt_ravg_window);

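/*
 * Advance rq->window_start to the latest window boundary at or before
 * 'wallclock'. Only whole windows are consumed, so window_start always
 * stays aligned to walt_ravg_window.
 */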
static void
update_window_start(struct rq *rq, u64 wallclock)
{
        s64 delta;
        int nr_windows;

        delta = wallclock - rq->window_start;
        /* If the MPM global timer is cleared, set delta to 0 to avoid a kernel BUG */
        if (delta < 0) {
                delta = 0;
                WARN_ONCE(1, "WALT wallclock appears to have gone backwards or reset\n");
        }

        if (delta < walt_ravg_window)
                return;

        nr_windows = div64_u64(delta, walt_ravg_window);
        rq->window_start += (u64)nr_windows * (u64)walt_ravg_window;
}

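/*
 * Scale a raw execution time into the frequency- and efficiency-invariant
 * domain used by WALT:
 *
 *   scaled = delta * (cur_freq / max_possible_freq)
 *                  * (efficiency / max_possible_efficiency)
 *
 * Illustrative example (made-up numbers): 8ms of runtime on a CPU clocked
 * at half of max_possible_freq, with efficiency 512 against a maximum of
 * 1024, contributes roughly 8ms * 1/2 * 1/2 = 2ms of scaled busy time.
 */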
static u64 scale_exec_time(u64 delta, struct rq *rq)
{
        unsigned int cur_freq = rq->cur_freq;
        int sf;

        if (unlikely(cur_freq > max_possible_freq))
                cur_freq = rq->max_possible_freq;

        /* round up div64 */
        delta = div64_u64(delta * cur_freq + max_possible_freq - 1,
                          max_possible_freq);

        sf = DIV_ROUND_UP(rq->efficiency * 1024, max_possible_efficiency);

        delta *= sf;
        delta >>= 10;

        return delta;
}

static int cpu_is_waiting_on_io(struct rq *rq)
{
        if (!walt_io_is_busy)
                return 0;

        return atomic_read(&rq->nr_iowait);
}

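/*
 * Account 'delta' ns of IRQ time against this CPU. cur_irqload accumulates
 * IRQ time within the current jiffy; when a new jiffy is observed it is
 * folded into avg_irqload, which is first scaled by 3/4 (and simply reset
 * after 10 or more quiet jiffies). walt_irqload() and
 * walt_cpu_high_irqload() below report avg_irqload.
 */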
void walt_account_irqtime(int cpu, struct task_struct *curr,
                                 u64 delta, u64 wallclock)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags, nr_windows;
        u64 cur_jiffies_ts;

        raw_spin_lock_irqsave(&rq->lock, flags);

        /*
         * cputime (wallclock) uses sched_clock so use the same here for
         * consistency.
         */
        delta += sched_clock() - wallclock;
        cur_jiffies_ts = get_jiffies_64();

        if (is_idle_task(curr))
                walt_update_task_ravg(curr, rq, IRQ_UPDATE, walt_ktime_clock(),
                                 delta);

        nr_windows = cur_jiffies_ts - rq->irqload_ts;

        if (nr_windows) {
                if (nr_windows < 10) {
                        /* Decay CPU's irqload by 3/4 for each window. */
                        rq->avg_irqload *= (3 * nr_windows);
                        rq->avg_irqload = div64_u64(rq->avg_irqload,
                                                    4 * nr_windows);
                } else {
                        rq->avg_irqload = 0;
                }
                rq->avg_irqload += rq->cur_irqload;
                rq->cur_irqload = 0;
        }

        rq->cur_irqload += delta;
        rq->irqload_ts = cur_jiffies_ts;
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#define WALT_HIGH_IRQ_TIMEOUT 3

u64 walt_irqload(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        s64 delta;

        delta = get_jiffies_64() - rq->irqload_ts;

        /*
         * Current context can be preempted by irq and rq->irqload_ts can be
         * updated by irq context so that delta can be negative.
         * But this is okay and we can safely return as this means there
         * was a recent irq occurrence.
         */

        if (delta < WALT_HIGH_IRQ_TIMEOUT)
                return rq->avg_irqload;
        else
                return 0;
}

int walt_cpu_high_irqload(int cpu)
{
        return walt_irqload(cpu) >= sysctl_sched_walt_cpu_high_irqload;
}

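/*
 * Decide whether the time since mark_start should be charged to the CPU
 * busy counters for this event. Wakeups never count. Time a task spent
 * waiting on the runqueue (TASK_MIGRATE/PICK_NEXT_TASK) only counts when
 * walt_freq_account_wait_time is set. For the idle task, only irqtime or
 * time spent waiting on IO is counted.
 */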
static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
                                     u64 irqtime, int event)
{
        if (is_idle_task(p)) {
                /* TASK_WAKE and TASK_MIGRATE are not possible for the idle task! */
                if (event == PICK_NEXT_TASK)
                        return 0;

                /* PUT_PREV_TASK, TASK_UPDATE and IRQ_UPDATE are left */
                return irqtime || cpu_is_waiting_on_io(rq);
        }

        if (event == TASK_WAKE)
                return 0;

        if (event == PUT_PREV_TASK || event == IRQ_UPDATE ||
                                         event == TASK_UPDATE)
                return 1;

        /* Only TASK_MIGRATE and PICK_NEXT_TASK left */
        return walt_freq_account_wait_time;
}

/*
 * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
 */
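/*
 * The cases handled below, in order:
 *  1. Busy time does not count for this event: just roll the rq counters
 *     over if a new window started and p is the current task.
 *  2. No new window: add the delta to curr_runnable_sum and, for normal
 *     (non-idle, non-exiting) tasks, to p->ravg.curr_window.
 *  3. New window but p is not rq->curr: split the delta between the task's
 *     prev/curr windows and add to the rq sums without rolling them over.
 *  4. New window and p is rq->curr: roll curr_runnable_sum into
 *     prev_runnable_sum and start accounting the new window.
 *  5. New window on an idle CPU with pending irqtime: split the IRQ busy
 *     time across the window boundary.
 */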
static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
             int event, u64 wallclock, u64 irqtime)
{
        int new_window, nr_full_windows = 0;
        int p_is_curr_task = (p == rq->curr);
        u64 mark_start = p->ravg.mark_start;
        u64 window_start = rq->window_start;
        u32 window_size = walt_ravg_window;
        u64 delta;

        new_window = mark_start < window_start;
        if (new_window) {
                nr_full_windows = div64_u64((window_start - mark_start),
                                                window_size);
                if (p->ravg.active_windows < USHRT_MAX)
                        p->ravg.active_windows++;
        }

        /* Handle per-task window rollover. We don't care about the idle
         * task or exiting tasks. */
        if (new_window && !is_idle_task(p) && !exiting_task(p)) {
                u32 curr_window = 0;

                if (!nr_full_windows)
                        curr_window = p->ravg.curr_window;

                p->ravg.prev_window = curr_window;
                p->ravg.curr_window = 0;
        }

        if (!account_busy_for_cpu_time(rq, p, irqtime, event)) {
                /* account_busy_for_cpu_time() = 0, so no update to the
                 * task's current window needs to be made. This could be
                 * for example
                 *
                 *   - a wakeup event on a task within the current
                 *     window (!new_window below, no action required),
                 *   - switching to a new task from idle (PICK_NEXT_TASK)
                 *     in a new window where irqtime is 0 and we aren't
                 *     waiting on IO */

                if (!new_window)
                        return;

                /* A new window has started. The RQ demand must be rolled
                 * over if p is the current task. */
                if (p_is_curr_task) {
                        u64 prev_sum = 0;

                        /* p is either idle task or an exiting task */
                        if (!nr_full_windows) {
                                prev_sum = rq->curr_runnable_sum;
                        }

                        rq->prev_runnable_sum = prev_sum;
                        rq->curr_runnable_sum = 0;
                }

                return;
        }

        if (!new_window) {
                /* account_busy_for_cpu_time() = 1 so busy time needs
                 * to be accounted to the current window. No rollover
                 * since we didn't start a new window. An example of this is
                 * when a task starts execution and then sleeps within the
                 * same window. */

                if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq))
                        delta = wallclock - mark_start;
                else
                        delta = irqtime;
                delta = scale_exec_time(delta, rq);
                rq->curr_runnable_sum += delta;
                if (!is_idle_task(p) && !exiting_task(p))
                        p->ravg.curr_window += delta;

                return;
        }

        if (!p_is_curr_task) {
                /* account_busy_for_cpu_time() = 1 so busy time needs
                 * to be accounted to the current window. A new window
                 * has also started, but p is not the current task, so the
                 * window is not rolled over - just split up and account
                 * as necessary into curr and prev. The window is only
                 * rolled over when a new window is processed for the current
                 * task.
                 *
                 * Irqtime can't be accounted by a task that isn't the
                 * currently running task. */

                if (!nr_full_windows) {
                        /* A full window hasn't elapsed, account partial
                         * contribution to previous completed window. */
                        delta = scale_exec_time(window_start - mark_start, rq);
                        if (!exiting_task(p))
                                p->ravg.prev_window += delta;
                } else {
                        /* Since at least one full window has elapsed,
                         * the contribution to the previous window is the
                         * full window (window_size). */
                        delta = scale_exec_time(window_size, rq);
                        if (!exiting_task(p))
                                p->ravg.prev_window = delta;
                }
                rq->prev_runnable_sum += delta;

                /* Account piece of busy time in the current window. */
                delta = scale_exec_time(wallclock - window_start, rq);
                rq->curr_runnable_sum += delta;
                if (!exiting_task(p))
                        p->ravg.curr_window = delta;

                return;
        }

        if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) {
                /* account_busy_for_cpu_time() = 1 so busy time needs
                 * to be accounted to the current window. A new window
                 * has started and p is the current task so rollover is
                 * needed. If any of these three above conditions are true
                 * then this busy time can't be accounted as irqtime.
                 *
                 * Busy time for the idle task or exiting tasks need not
                 * be accounted.
                 *
                 * An example of this would be a task that starts execution
                 * and then sleeps once a new window has begun. */

                if (!nr_full_windows) {
                        /* A full window hasn't elapsed, account partial
                         * contribution to previous completed window. */
                        delta = scale_exec_time(window_start - mark_start, rq);
                        if (!is_idle_task(p) && !exiting_task(p))
                                p->ravg.prev_window += delta;

                        delta += rq->curr_runnable_sum;
                } else {
                        /* Since at least one full window has elapsed,
                         * the contribution to the previous window is the
                         * full window (window_size). */
                        delta = scale_exec_time(window_size, rq);
                        if (!is_idle_task(p) && !exiting_task(p))
                                p->ravg.prev_window = delta;
                }
                /*
                 * Rollover for normal runnable sum is done here by overwriting
                 * the values in prev_runnable_sum and curr_runnable_sum.
                 * Rollover for new task runnable sum has completed by previous
                 * if-else statement.
                 */
                rq->prev_runnable_sum = delta;

                /* Account piece of busy time in the current window. */
                delta = scale_exec_time(wallclock - window_start, rq);
                rq->curr_runnable_sum = delta;
                if (!is_idle_task(p) && !exiting_task(p))
                        p->ravg.curr_window = delta;

                return;
        }

        if (irqtime) {
                /* account_busy_for_cpu_time() = 1 so busy time needs
                 * to be accounted to the current window. A new window
                 * has started and p is the current task so rollover is
                 * needed. The current task must be the idle task because
                 * irqtime is not accounted for any other task.
                 *
                 * Irqtime will be accounted each time we process IRQ activity
                 * after a period of idleness, so we know the IRQ busy time
                 * started at wallclock - irqtime. */

                BUG_ON(!is_idle_task(p));
                mark_start = wallclock - irqtime;

                /* Roll window over. If IRQ busy time was just in the current
                 * window then that is all that need be accounted. */
                rq->prev_runnable_sum = rq->curr_runnable_sum;
                if (mark_start > window_start) {
                        rq->curr_runnable_sum = scale_exec_time(irqtime, rq);
                        return;
                }

                /* The IRQ busy time spanned multiple windows. Process the
                 * busy time preceding the current window start first. */
                delta = window_start - mark_start;
                if (delta > window_size)
                        delta = window_size;
                delta = scale_exec_time(delta, rq);
                rq->prev_runnable_sum += delta;

                /* Process the remaining IRQ busy time in the current window. */
                delta = wallclock - window_start;
                rq->curr_runnable_sum = scale_exec_time(delta, rq);

                return;
        }

        BUG();
}

static int account_busy_for_task_demand(struct task_struct *p, int event)
{
        /* No need to bother updating task demand for exiting tasks
         * or the idle task. */
        if (exiting_task(p) || is_idle_task(p))
                return 0;

        /* When a task is waking up it is completing a segment of non-busy
         * time. Likewise, if wait time is not treated as busy time, then
         * when a task begins to run or is migrated, it is not running and
         * is completing a segment of non-busy time. */
        if (event == TASK_WAKE || (!walt_account_wait_time &&
                         (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
                return 0;

        return 1;
}

/*
 * Called when new window is starting for a task, to record cpu usage over
 * recently concluded window(s). Normally 'samples' should be 1. It can be > 1
 * when, say, a real-time task runs without preemption for several windows at a
 * stretch.
 */
static void update_history(struct rq *rq, struct task_struct *p,
                         u32 runtime, int samples, int event)
{
        u32 *hist = &p->ravg.sum_history[0];
        int ridx, widx;
        u32 max = 0, avg, demand;
        u64 sum = 0;

        /* Ignore windows where task had no activity */
        if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
                goto done;

        /* Push new 'runtime' value onto stack */
        widx = walt_ravg_hist_size - 1;
        ridx = widx - samples;
        for (; ridx >= 0; --widx, --ridx) {
                hist[widx] = hist[ridx];
                sum += hist[widx];
                if (hist[widx] > max)
                        max = hist[widx];
        }

        for (widx = 0; widx < samples && widx < walt_ravg_hist_size; widx++) {
                hist[widx] = runtime;
                sum += hist[widx];
                if (hist[widx] > max)
                        max = hist[widx];
        }

        p->ravg.sum = 0;

        if (walt_window_stats_policy == WINDOW_STATS_RECENT) {
                demand = runtime;
        } else if (walt_window_stats_policy == WINDOW_STATS_MAX) {
                demand = max;
        } else {
                avg = div64_u64(sum, walt_ravg_hist_size);
                if (walt_window_stats_policy == WINDOW_STATS_AVG)
                        demand = avg;
                else
                        demand = max(avg, runtime);
        }

        /*
         * A throttled deadline sched class task gets dequeued without
         * changing p->on_rq. Since the dequeue decrements hmp stats
         * avoid decrementing it here again.
         */
        if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
                                                !p->dl.dl_throttled))
                fixup_cumulative_runnable_avg(rq, p, demand);

        p->ravg.demand = demand;

done:
        trace_walt_update_history(rq, p, runtime, samples, event);
        return;
}

static void add_to_task_demand(struct rq *rq, struct task_struct *p,
                                u64 delta)
{
        delta = scale_exec_time(delta, rq);
        p->ravg.sum += delta;
        if (unlikely(p->ravg.sum > walt_ravg_window))
                p->ravg.sum = walt_ravg_window;
}

/*
 * Account cpu demand of task and/or update task's cpu demand history
 *
 * ms = p->ravg.mark_start;
 * wc = wallclock
 * ws = rq->window_start
 *
 * Three possibilities:
 *
 *      a) Task event is contained within one window.
 *              window_start < mark_start < wallclock
 *
 *              ws   ms  wc
 *              |    |   |
 *              V    V   V
 *              |---------------|
 *
 *      In this case, p->ravg.sum is updated *iff* event is appropriate
 *      (ex: event == PUT_PREV_TASK)
 *
 *      b) Task event spans two windows.
 *              mark_start < window_start < wallclock
 *
 *              ms   ws   wc
 *              |    |    |
 *              V    V    V
 *              -----|-------------------
 *
 *      In this case, p->ravg.sum is updated with (ws - ms) *iff* event
 *      is appropriate, then a new window sample is recorded followed
 *      by p->ravg.sum being set to (wc - ws) *iff* event is appropriate.
 *
 *      c) Task event spans more than two windows.
 *
 *              ms ws_tmp                          ws  wc
 *              |  |                               |   |
 *              V  V                               V   V
 *              ---|-------|-------|-------|-------|------
 *                 |                               |
 *                 |<------ nr_full_windows ------>|
 *
 *      In this case, p->ravg.sum is updated with (ws_tmp - ms) first *iff*
 *      event is appropriate, a window sample of p->ravg.sum is recorded,
 *      'nr_full_windows' samples of window_size are also recorded *iff*
 *      event is appropriate and finally p->ravg.sum is set to (wc - ws)
 *      *iff* event is appropriate.
 *
 * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
 * depends on it!
 */
static void update_task_demand(struct task_struct *p, struct rq *rq,
             int event, u64 wallclock)
{
        u64 mark_start = p->ravg.mark_start;
        u64 delta, window_start = rq->window_start;
        int new_window, nr_full_windows;
        u32 window_size = walt_ravg_window;

        new_window = mark_start < window_start;
        if (!account_busy_for_task_demand(p, event)) {
                if (new_window)
                        /* If the time accounted isn't being accounted as
                         * busy time, and a new window started, only the
                         * previous window need be closed out with the
                         * pre-existing demand. Multiple windows may have
                         * elapsed, but since empty windows are dropped,
                         * it is not necessary to account those. */
                        update_history(rq, p, p->ravg.sum, 1, event);
                return;
        }

        if (!new_window) {
                /* The simple case - busy time contained within the existing
                 * window. */
                add_to_task_demand(rq, p, wallclock - mark_start);
                return;
        }

        /* Busy time spans at least two windows. Temporarily rewind
         * window_start to first window boundary after mark_start. */
        delta = window_start - mark_start;
        nr_full_windows = div64_u64(delta, window_size);
        window_start -= (u64)nr_full_windows * (u64)window_size;

        /* Process (window_start - mark_start) first */
        add_to_task_demand(rq, p, window_start - mark_start);

        /* Push new sample(s) into task's demand history */
        update_history(rq, p, p->ravg.sum, 1, event);
        if (nr_full_windows)
                update_history(rq, p, scale_exec_time(window_size, rq),
                               nr_full_windows, event);

        /* Roll window_start back to current to process any remainder
         * in current window. */
        window_start += (u64)nr_full_windows * (u64)window_size;

        /* Process (wallclock - window_start) next */
        mark_start = window_start;
        add_to_task_demand(rq, p, wallclock - mark_start);
}

/* Reflect task activity on its demand and cpu's busy time statistics */
void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
             int event, u64 wallclock, u64 irqtime)
{
        if (walt_disabled || !rq->window_start)
                return;

        lockdep_assert_held(&rq->lock);

        update_window_start(rq, wallclock);

        if (!p->ravg.mark_start)
                goto done;

        update_task_demand(p, rq, event, wallclock);
        update_cpu_busy_time(p, rq, event, wallclock, irqtime);

done:
        trace_walt_update_task_ravg(p, rq, event, wallclock, irqtime);

        p->ravg.mark_start = wallclock;
}

unsigned long __weak arch_get_cpu_efficiency(int cpu)
{
        return SCHED_LOAD_SCALE;
}

void walt_init_cpu_efficiency(void)
{
        int i, efficiency;
        unsigned int max = 0, min = UINT_MAX;

        for_each_possible_cpu(i) {
                efficiency = arch_get_cpu_efficiency(i);
                cpu_rq(i)->efficiency = efficiency;

                if (efficiency > max)
                        max = efficiency;
                if (efficiency < min)
                        min = efficiency;
        }

        if (max)
                max_possible_efficiency = max;

        if (min)
                min_possible_efficiency = min;
}

static void reset_task_stats(struct task_struct *p)
{
        u32 sum = 0;

        if (exiting_task(p))
                sum = EXITING_TASK_MARKER;

        memset(&p->ravg, 0, sizeof(struct ravg));
        /* Retain EXITING_TASK marker */
        p->ravg.sum_history[0] = sum;
}

void walt_mark_task_starting(struct task_struct *p)
{
        u64 wallclock;
        struct rq *rq = task_rq(p);

        if (!rq->window_start) {
                reset_task_stats(p);
                return;
        }

        wallclock = walt_ktime_clock();
        p->ravg.mark_start = wallclock;
}

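/*
 * CPUs share sync_cpu's window_start so that window boundaries line up
 * across the system: sync_cpu establishes it from walt_ktime_clock(), and
 * any other CPU whose window_start is still zero copies it here under both
 * runqueue locks.
 */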
void walt_set_window_start(struct rq *rq)
{
        int cpu = cpu_of(rq);
        struct rq *sync_rq = cpu_rq(sync_cpu);

        if (rq->window_start)
                return;

        if (cpu == sync_cpu) {
                rq->window_start = walt_ktime_clock();
        } else {
                raw_spin_unlock(&rq->lock);
                double_rq_lock(rq, sync_rq);
                rq->window_start = cpu_rq(sync_cpu)->window_start;
                rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
                raw_spin_unlock(&sync_rq->lock);
        }

        rq->curr->ravg.mark_start = rq->window_start;
}

void walt_migrate_sync_cpu(int cpu)
{
        if (cpu == sync_cpu)
                sync_cpu = smp_processor_id();
}

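/*
 * On migration, move the task's current- and previous-window contributions
 * from the source runqueue's runnable sums to the destination's, after
 * bringing both runqueues and the task up to date at the same wallclock.
 */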
void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
{
        struct rq *src_rq = task_rq(p);
        struct rq *dest_rq = cpu_rq(new_cpu);
        u64 wallclock;

        if (!p->on_rq && p->state != TASK_WAKING)
                return;

        if (exiting_task(p))
                return;

        if (p->state == TASK_WAKING)
                double_rq_lock(src_rq, dest_rq);

        wallclock = walt_ktime_clock();

        walt_update_task_ravg(task_rq(p)->curr, task_rq(p),
                        TASK_UPDATE, wallclock, 0);
        walt_update_task_ravg(dest_rq->curr, dest_rq,
                        TASK_UPDATE, wallclock, 0);

        walt_update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);

        if (p->ravg.curr_window) {
                src_rq->curr_runnable_sum -= p->ravg.curr_window;
                dest_rq->curr_runnable_sum += p->ravg.curr_window;
        }

        if (p->ravg.prev_window) {
                src_rq->prev_runnable_sum -= p->ravg.prev_window;
                dest_rq->prev_runnable_sum += p->ravg.prev_window;
        }

        if ((s64)src_rq->prev_runnable_sum < 0) {
                src_rq->prev_runnable_sum = 0;
                WARN_ON(1);
        }
        if ((s64)src_rq->curr_runnable_sum < 0) {
                src_rq->curr_runnable_sum = 0;
                WARN_ON(1);
        }

        trace_walt_migration_update_sum(src_rq, p);
        trace_walt_migration_update_sum(dest_rq, p);

        if (p->state == TASK_WAKING)
                double_rq_unlock(src_rq, dest_rq);
}

/* Keep track of max/min capacity possible across CPUs "currently" */
static void __update_min_max_capacity(void)
{
        int i;
        int max = 0, min = INT_MAX;

        for_each_online_cpu(i) {
                if (cpu_rq(i)->capacity > max)
                        max = cpu_rq(i)->capacity;
                if (cpu_rq(i)->capacity < min)
                        min = cpu_rq(i)->capacity;
        }

        max_capacity = max;
        min_capacity = min;
}

static void update_min_max_capacity(void)
{
        unsigned long flags;
        int i;

        local_irq_save(flags);
        for_each_possible_cpu(i)
                raw_spin_lock(&cpu_rq(i)->lock);

        __update_min_max_capacity();

        for_each_possible_cpu(i)
                raw_spin_unlock(&cpu_rq(i)->lock);
        local_irq_restore(flags);
}

/*
 * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
 * least efficient cpu gets capacity of 1024
 */
static unsigned long capacity_scale_cpu_efficiency(int cpu)
{
        return (1024 * cpu_rq(cpu)->efficiency) / min_possible_efficiency;
}

/*
 * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
 * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
 */
static unsigned long capacity_scale_cpu_freq(int cpu)
{
        return (1024 * cpu_rq(cpu)->max_freq) / min_max_freq;
}

/*
 * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
 * that "most" efficient cpu gets a load_scale_factor of 1
 */
static unsigned long load_scale_cpu_efficiency(int cpu)
{
        return DIV_ROUND_UP(1024 * max_possible_efficiency,
                            cpu_rq(cpu)->efficiency);
}

/*
 * Return load_scale_factor of a cpu in reference to cpu with best max_freq
 * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
 * of 1.
 */
static unsigned long load_scale_cpu_freq(int cpu)
{
        return DIV_ROUND_UP(1024 * max_possible_freq, cpu_rq(cpu)->max_freq);
}

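/*
 * capacity = 1024 * (efficiency / min_possible_efficiency)
 *                 * (max_freq / min_max_freq)
 *
 * Illustrative example (made-up numbers): a big CPU with twice the
 * efficiency of the littles and twice their max_freq ends up with a
 * capacity of 1024 * 2 * 2 = 4096, while a little CPU keeps 1024.
 */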
static int compute_capacity(int cpu)
{
        int capacity = 1024;

        capacity *= capacity_scale_cpu_efficiency(cpu);
        capacity >>= 10;

        capacity *= capacity_scale_cpu_freq(cpu);
        capacity >>= 10;

        return capacity;
}

static int compute_load_scale_factor(int cpu)
{
        int load_scale = 1024;

        /*
         * load_scale_factor accounts for the fact that task load
         * is in reference to "best" performing cpu. Task's load will need to be
         * scaled (up) by a factor to determine suitability to be placed on a
         * (little) cpu.
         */
        load_scale *= load_scale_cpu_efficiency(cpu);
        load_scale >>= 10;

        load_scale *= load_scale_cpu_freq(cpu);
        load_scale >>= 10;

        return load_scale;
}

static int cpufreq_notifier_policy(struct notifier_block *nb,
                unsigned long val, void *data)
{
        struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
        int i, update_max = 0;
        u64 highest_mpc = 0, highest_mplsf = 0;
        const struct cpumask *cpus = policy->related_cpus;
        unsigned int orig_min_max_freq = min_max_freq;
        unsigned int orig_max_possible_freq = max_possible_freq;
        /* Initialized to policy->max in case policy->related_cpus is empty! */
        unsigned int orig_max_freq = policy->max;

        if (val != CPUFREQ_NOTIFY && val != CPUFREQ_REMOVE_POLICY &&
                                                val != CPUFREQ_CREATE_POLICY)
                return 0;

        if (val == CPUFREQ_REMOVE_POLICY || val == CPUFREQ_CREATE_POLICY) {
                update_min_max_capacity();
                return 0;
        }

        for_each_cpu(i, policy->related_cpus) {
                cpumask_copy(&cpu_rq(i)->freq_domain_cpumask,
                             policy->related_cpus);
                orig_max_freq = cpu_rq(i)->max_freq;
                cpu_rq(i)->min_freq = policy->min;
                cpu_rq(i)->max_freq = policy->max;
                cpu_rq(i)->cur_freq = policy->cur;
                cpu_rq(i)->max_possible_freq = policy->cpuinfo.max_freq;
        }

        max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
        if (min_max_freq == 1)
                min_max_freq = UINT_MAX;
        min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq);
        BUG_ON(!min_max_freq);
        BUG_ON(!policy->max);

        /* Changes to policy other than max_freq don't require any updates */
        if (orig_max_freq == policy->max)
                return 0;

        /*
         * A changed min_max_freq or max_possible_freq (possible during bootup)
         * needs to trigger re-computation of load_scale_factor and capacity for
         * all possible cpus (even those offline). It also needs to trigger
         * re-computation of nr_big_task count on all online cpus.
         *
         * A changed rq->max_freq otoh needs to trigger re-computation of
         * load_scale_factor and capacity for just the cluster of cpus involved.
         * Since small task definition depends on max_load_scale_factor, a
         * changed load_scale_factor of one cluster could influence
         * classification of tasks in another cluster. Hence a changed
         * rq->max_freq will need to trigger re-computation of nr_big_task
         * count on all online cpus.
         *
         * While it should be sufficient for nr_big_tasks to be
         * re-computed for only online cpus, we have inadequate context
         * information here (in policy notifier) with regard to hotplug-safety
         * context in which notification is issued. As a result, we can't use
         * get_online_cpus() here, as it can lead to deadlock. Until cpufreq is
         * fixed up to issue notification always in hotplug-safe context,
         * re-compute nr_big_task for all possible cpus.
         */

        if (orig_min_max_freq != min_max_freq ||
                orig_max_possible_freq != max_possible_freq) {
                cpus = cpu_possible_mask;
                update_max = 1;
        }

        /*
         * Changed load_scale_factor can trigger reclassification of tasks as
         * big or small. Make this change "atomic" so that tasks are accounted
         * properly due to changed load_scale_factor
         */
        for_each_cpu(i, cpus) {
                struct rq *rq = cpu_rq(i);

                rq->capacity = compute_capacity(i);
                rq->load_scale_factor = compute_load_scale_factor(i);

                if (update_max) {
                        u64 mpc, mplsf;

                        mpc = div_u64(((u64) rq->capacity) *
                                rq->max_possible_freq, rq->max_freq);
                        rq->max_possible_capacity = (int) mpc;

                        mplsf = div_u64(((u64) rq->load_scale_factor) *
                                rq->max_possible_freq, rq->max_freq);

                        if (mpc > highest_mpc) {
                                highest_mpc = mpc;
                                cpumask_clear(&mpc_mask);
                                cpumask_set_cpu(i, &mpc_mask);
                        } else if (mpc == highest_mpc) {
                                cpumask_set_cpu(i, &mpc_mask);
                        }

                        if (mplsf > highest_mplsf)
                                highest_mplsf = mplsf;
                }
        }

        if (update_max) {
                max_possible_capacity = highest_mpc;
                max_load_scale_factor = highest_mplsf;
        }

        __update_min_max_capacity();

        return 0;
}

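/*
 * Frequency transition notifier: fold the time spent at the old frequency
 * into the WALT counters of every CPU in the frequency domain before
 * recording the new cur_freq, which scale_exec_time() uses from then on.
 */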
static int cpufreq_notifier_trans(struct notifier_block *nb,
                unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
        unsigned int cpu = freq->cpu, new_freq = freq->new;
        unsigned long flags;
        int i;

        if (val != CPUFREQ_POSTCHANGE)
                return 0;

        BUG_ON(!new_freq);

        if (cpu_rq(cpu)->cur_freq == new_freq)
                return 0;

        for_each_cpu(i, &cpu_rq(cpu)->freq_domain_cpumask) {
                struct rq *rq = cpu_rq(i);

                raw_spin_lock_irqsave(&rq->lock, flags);
                walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
                                      walt_ktime_clock(), 0);
                rq->cur_freq = new_freq;
                raw_spin_unlock_irqrestore(&rq->lock, flags);
        }

        return 0;
}

static struct notifier_block notifier_policy_block = {
        .notifier_call = cpufreq_notifier_policy
};

static struct notifier_block notifier_trans_block = {
        .notifier_call = cpufreq_notifier_trans
};

static int register_sched_callback(void)
{
        int ret;

        ret = cpufreq_register_notifier(&notifier_policy_block,
                                                CPUFREQ_POLICY_NOTIFIER);

        if (!ret)
                ret = cpufreq_register_notifier(&notifier_trans_block,
                                                CPUFREQ_TRANSITION_NOTIFIER);

        return 0;
}

/*
 * cpufreq callbacks can be registered at core_initcall or later time.
 * Any registration done prior to that is "forgotten" by cpufreq. See
 * initialization of variable init_cpufreq_transition_notifier_list_called
 * for further information.
 */
core_initcall(register_sched_callback);

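/*
 * A newly forked task starts with a demand of
 * sysctl_sched_walt_init_task_load_pct percent of a full window (or the
 * parent's init_load_pct, if the parent set one), so fresh tasks are
 * treated neither as idle nor as fully busy until real history builds up.
 */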
void walt_init_new_task_load(struct task_struct *p)
{
        int i;
        u32 init_load_windows =
                        div64_u64((u64)sysctl_sched_walt_init_task_load_pct *
                          (u64)walt_ravg_window, 100);
        u32 init_load_pct = current->init_load_pct;

        p->init_load_pct = 0;
        memset(&p->ravg, 0, sizeof(struct ravg));

        if (init_load_pct) {
                init_load_windows = div64_u64((u64)init_load_pct *
                          (u64)walt_ravg_window, 100);
        }

        p->ravg.demand = init_load_windows;
        for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
                p->ravg.sum_history[i] = init_load_windows;
}