arch_timer: add error handling when the MPM global timer is cleared
[firefly-linux-kernel-4.4.55.git] / kernel / sched / walt.c
1 /*
2  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 and
6  * only version 2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  *
14  * Window Assisted Load Tracking (WALT) implementation credits:
15  * Srivatsa Vaddagiri, Steve Muckle, Syed Rameez Mustafa, Joonwoo Park,
16  * Pavan Kumar Kondeti, Olav Haugan
17  *
18  * 2016-03-06: Integration with EAS/refactoring by Vikram Mulukutla
19  *             and Todd Kjos
20  */
21
22 #include <linux/syscore_ops.h>
23 #include <linux/cpufreq.h>
#include <clocksource/arm_arch_timer.h> /* arch_timer_read_counter() */
24 #include <trace/events/sched.h>
25 #include "sched.h"
26 #include "walt.h"
27
28 #define WINDOW_STATS_RECENT             0
29 #define WINDOW_STATS_MAX                1
30 #define WINDOW_STATS_MAX_RECENT_AVG     2
31 #define WINDOW_STATS_AVG                3
32 #define WINDOW_STATS_INVALID_POLICY     4
33
34 #define EXITING_TASK_MARKER     0xdeaddead
35
36 static __read_mostly unsigned int walt_ravg_hist_size = 5;
37 static __read_mostly unsigned int walt_window_stats_policy =
38         WINDOW_STATS_MAX_RECENT_AVG;
39 static __read_mostly unsigned int walt_account_wait_time = 1;
40 static __read_mostly unsigned int walt_freq_account_wait_time = 0;
41 static __read_mostly unsigned int walt_io_is_busy = 0;
42
43 unsigned int sysctl_sched_walt_init_task_load_pct = 15;
44
45 /* 1 -> use PELT based load stats, 0 -> use window-based load stats */
46 unsigned int __read_mostly walt_disabled = 0;
47
48 static unsigned int max_possible_efficiency = 1024;
49 static unsigned int min_possible_efficiency = 1024;
50
51 /*
52  * Maximum possible frequency across all cpus. Task demand and cpu
53  * capacity (cpu_power) metrics are scaled in reference to it.
54  */
55 static unsigned int max_possible_freq = 1;
56
57 /*
58  * Minimum possible max_freq across all cpus. This will be the same as
59  * max_possible_freq on homogeneous systems and could be different from
60  * max_possible_freq on heterogeneous systems. min_max_freq is used to derive
61  * capacity (cpu_power) of cpus.
62  */
63 static unsigned int min_max_freq = 1;
64
65 static unsigned int max_capacity = 1024;
66 static unsigned int min_capacity = 1024;
67 static unsigned int max_load_scale_factor = 1024;
68 static unsigned int max_possible_capacity = 1024;
69
70 /* Mask of all CPUs that have max_possible_capacity */
71 static cpumask_t mpc_mask = CPU_MASK_ALL;
72
73 /* Window size (in ns) */
74 __read_mostly unsigned int walt_ravg_window = 20000000;
75
76 /* Min window size (in ns) = 10ms */
77 #define MIN_SCHED_RAVG_WINDOW 10000000
78
79 /* Max window size (in ns) = 1s */
80 #define MAX_SCHED_RAVG_WINDOW 1000000000
81
82 static unsigned int sync_cpu;
83 static ktime_t ktime_last;
84 static bool walt_ktime_suspended;
85
86 static unsigned int task_load(struct task_struct *p)
87 {
88         return p->ravg.demand;
89 }
90
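/*
 * rq->cumulative_runnable_avg aggregates the windowed demand of all
 * runnable WALT-tracked tasks on this rq. The helpers below add/remove a
 * task's demand on enqueue/dequeue; fixup_cumulative_runnable_avg()
 * adjusts the aggregate in place when a queued task's demand changes.
 */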
91 void
92 walt_inc_cumulative_runnable_avg(struct rq *rq,
93                                  struct task_struct *p)
94 {
95         rq->cumulative_runnable_avg += p->ravg.demand;
96 }
97
98 void
99 walt_dec_cumulative_runnable_avg(struct rq *rq,
100                                  struct task_struct *p)
101 {
102         rq->cumulative_runnable_avg -= p->ravg.demand;
103         BUG_ON((s64)rq->cumulative_runnable_avg < 0);
104 }
105
106 static void
107 fixup_cumulative_runnable_avg(struct rq *rq,
108                               struct task_struct *p, s64 task_load_delta)
109 {
110         rq->cumulative_runnable_avg += task_load_delta;
111         if ((s64)rq->cumulative_runnable_avg < 0)
112                 panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
113                         task_load_delta, task_load(p));
114 }
115
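/*
 * WALT wallclock: normally ktime_get_ns(); while the system is suspended
 * it returns the timestamp captured in walt_suspend() instead, so the
 * clock is never read while timekeeping is suspended.
 */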
116 u64 walt_ktime_clock(void)
117 {
118         if (unlikely(walt_ktime_suspended))
119                 return ktime_to_ns(ktime_last);
120         return ktime_get_ns();
121 }
122
123 static void walt_resume(void)
124 {
125         walt_ktime_suspended = false;
126 }
127
128 static int walt_suspend(void)
129 {
130         ktime_last = ktime_get();
131         walt_ktime_suspended = true;
132         return 0;
133 }
134
135 static struct syscore_ops walt_syscore_ops = {
136         .resume = walt_resume,
137         .suspend = walt_suspend
138 };
139
140 static int __init walt_init_ops(void)
141 {
142         register_syscore_ops(&walt_syscore_ops);
143         return 0;
144 }
145 late_initcall(walt_init_ops);
146
147 void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
148                 struct task_struct *p)
149 {
150         cfs_rq->cumulative_runnable_avg += p->ravg.demand;
151 }
152
153 void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
154                 struct task_struct *p)
155 {
156         cfs_rq->cumulative_runnable_avg -= p->ravg.demand;
157 }
158
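/*
 * A task with PF_EXITING set is treated as "exiting": its demand is no
 * longer accounted, and sum_history[0] is tagged with EXITING_TASK_MARKER
 * so that the marker survives reset_task_stats().
 */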
159 static int exiting_task(struct task_struct *p)
160 {
161         if (p->flags & PF_EXITING) {
162                 if (p->ravg.sum_history[0] != EXITING_TASK_MARKER) {
163                         p->ravg.sum_history[0] = EXITING_TASK_MARKER;
164                 }
165                 return 1;
166         }
167         return 0;
168 }
169
170 static int __init set_walt_ravg_window(char *str)
171 {
172         get_option(&str, &walt_ravg_window);
173
174         walt_disabled = (walt_ravg_window < MIN_SCHED_RAVG_WINDOW ||
175                                 walt_ravg_window > MAX_SCHED_RAVG_WINDOW);
176         return 0;
177 }
178
179 early_param("walt_ravg_window", set_walt_ravg_window);
180
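/*
 * Advance rq->window_start in whole-window steps up to wallclock. It is
 * left untouched if less than a full window has elapsed.
 */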
181 static void
182 update_window_start(struct rq *rq, u64 wallclock)
183 {
184         s64 delta;
185         int nr_windows;
186
187         delta = wallclock - rq->window_start;
188         /* If the MPM global timer has been cleared, force delta to 0 to avoid a kernel BUG */
189         if (delta < 0) {
190                 if (arch_timer_read_counter() == 0)
191                         delta = 0;
192                 else
193                         BUG();
194         }
195
196         if (delta < walt_ravg_window)
197                 return;
198
199         nr_windows = div64_u64(delta, walt_ravg_window);
200         rq->window_start += (u64)nr_windows * (u64)walt_ravg_window;
201 }
202
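/*
 * Scale a raw execution-time delta by the CPU's current frequency
 * (relative to max_possible_freq) and by its efficiency (relative to
 * max_possible_efficiency), so that demand is comparable across CPUs and
 * frequencies. For example, on a CPU of maximum efficiency running at
 * half of max_possible_freq, 10ms of wall time is accounted as ~5ms.
 */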
203 static u64 scale_exec_time(u64 delta, struct rq *rq)
204 {
205         unsigned int cur_freq = rq->cur_freq;
206         int sf;
207
208         if (unlikely(cur_freq > max_possible_freq))
209                 cur_freq = rq->max_possible_freq;
210
211         /* round up div64 */
212         delta = div64_u64(delta * cur_freq + max_possible_freq - 1,
213                           max_possible_freq);
214
215         sf = DIV_ROUND_UP(rq->efficiency * 1024, max_possible_efficiency);
216
217         delta *= sf;
218         delta >>= 10;
219
220         return delta;
221 }
222
223 static int cpu_is_waiting_on_io(struct rq *rq)
224 {
225         if (!walt_io_is_busy)
226                 return 0;
227
228         return atomic_read(&rq->nr_iowait);
229 }
230
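/*
 * Account 'delta' ns of irq time against 'cpu'. cur_irqload collects irq
 * time within the current jiffy; once one or more jiffies have passed,
 * the running average avg_irqload is decayed and the collected time is
 * folded in. walt_irqload()/walt_cpu_high_irqload() read the result.
 */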
231 void walt_account_irqtime(int cpu, struct task_struct *curr,
232                                  u64 delta, u64 wallclock)
233 {
234         struct rq *rq = cpu_rq(cpu);
235         unsigned long flags, nr_windows;
236         u64 cur_jiffies_ts;
237
238         raw_spin_lock_irqsave(&rq->lock, flags);
239
240         /*
241          * cputime (wallclock) uses sched_clock so use the same here for
242          * consistency.
243          */
244         delta += sched_clock() - wallclock;
245         cur_jiffies_ts = get_jiffies_64();
246
247         if (is_idle_task(curr))
248                 walt_update_task_ravg(curr, rq, IRQ_UPDATE, walt_ktime_clock(),
249                                  delta);
250
251         nr_windows = cur_jiffies_ts - rq->irqload_ts;
252
253         if (nr_windows) {
254                 if (nr_windows < 10) {
255                         /* Decay CPU's irqload by 3/4 for each window. */
256                         rq->avg_irqload *= (3 * nr_windows);
257                         rq->avg_irqload = div64_u64(rq->avg_irqload,
258                                                     4 * nr_windows);
259                 } else {
260                         rq->avg_irqload = 0;
261                 }
262                 rq->avg_irqload += rq->cur_irqload;
263                 rq->cur_irqload = 0;
264         }
265
266         rq->cur_irqload += delta;
267         rq->irqload_ts = cur_jiffies_ts;
268         raw_spin_unlock_irqrestore(&rq->lock, flags);
269 }
270
271
272 #define WALT_HIGH_IRQ_TIMEOUT 3
273
274 u64 walt_irqload(int cpu) {
275         struct rq *rq = cpu_rq(cpu);
276         s64 delta;
277         delta = get_jiffies_64() - rq->irqload_ts;
278
279         /*
280          * The current context can be preempted by an irq, and rq->irqload_ts
281          * can be updated from irq context, so delta can be negative. This is
282          * okay and we can safely return, since it means an irq occurred
283          * recently.
284          */
285
286         if (delta < WALT_HIGH_IRQ_TIMEOUT)
287                 return rq->avg_irqload;
288         else
289                 return 0;
290 }
291
292 int walt_cpu_high_irqload(int cpu) {
293         return walt_irqload(cpu) >= sysctl_sched_walt_cpu_high_irqload;
294 }
295
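/*
 * Decide whether the time since the task's mark_start should be charged
 * to the CPU's busy time counters (rq->curr/prev_runnable_sum) for this
 * event. For the idle task, time counts only when it was spent handling
 * irqs or waiting on I/O.
 */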
296 static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
297                                      u64 irqtime, int event)
298 {
299         if (is_idle_task(p)) {
300                 /* TASK_WAKE && TASK_MIGRATE are not possible on the idle task! */
301                 if (event == PICK_NEXT_TASK)
302                         return 0;
303
304                 /* PUT_PREV_TASK, TASK_UPDATE && IRQ_UPDATE are left */
305                 return irqtime || cpu_is_waiting_on_io(rq);
306         }
307
308         if (event == TASK_WAKE)
309                 return 0;
310
311         if (event == PUT_PREV_TASK || event == IRQ_UPDATE ||
312                                          event == TASK_UPDATE)
313                 return 1;
314
315         /* Only TASK_MIGRATE && PICK_NEXT_TASK left */
316         return walt_freq_account_wait_time;
317 }
318
319 /*
320  * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
321  */
322 static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
323              int event, u64 wallclock, u64 irqtime)
324 {
325         int new_window, nr_full_windows = 0;
326         int p_is_curr_task = (p == rq->curr);
327         u64 mark_start = p->ravg.mark_start;
328         u64 window_start = rq->window_start;
329         u32 window_size = walt_ravg_window;
330         u64 delta;
331
332         new_window = mark_start < window_start;
333         if (new_window) {
334                 nr_full_windows = div64_u64((window_start - mark_start),
335                                                 window_size);
336                 if (p->ravg.active_windows < USHRT_MAX)
337                         p->ravg.active_windows++;
338         }
339
340         /* Handle per-task window rollover. We don't care about the idle
341          * task or exiting tasks. */
342         if (new_window && !is_idle_task(p) && !exiting_task(p)) {
343                 u32 curr_window = 0;
344
345                 if (!nr_full_windows)
346                         curr_window = p->ravg.curr_window;
347
348                 p->ravg.prev_window = curr_window;
349                 p->ravg.curr_window = 0;
350         }
351
352         if (!account_busy_for_cpu_time(rq, p, irqtime, event)) {
353                 /* account_busy_for_cpu_time() = 0, so no update to the
354                  * task's current window needs to be made. This could be
355                  * for example
356                  *
357                  *   - a wakeup event on a task within the current
358                  *     window (!new_window below, no action required),
359                  *   - switching to a new task from idle (PICK_NEXT_TASK)
360                  *     in a new window where irqtime is 0 and we aren't
361                  *     waiting on IO */
362
363                 if (!new_window)
364                         return;
365
366                 /* A new window has started. The RQ demand must be rolled
367                  * over if p is the current task. */
368                 if (p_is_curr_task) {
369                         u64 prev_sum = 0;
370
371                         /* p is either idle task or an exiting task */
372                         if (!nr_full_windows) {
373                                 prev_sum = rq->curr_runnable_sum;
374                         }
375
376                         rq->prev_runnable_sum = prev_sum;
377                         rq->curr_runnable_sum = 0;
378                 }
379
380                 return;
381         }
382
383         if (!new_window) {
384                 /* account_busy_for_cpu_time() = 1 so busy time needs
385                  * to be accounted to the current window. No rollover
386                  * since we didn't start a new window. An example of this is
387                  * when a task starts execution and then sleeps within the
388                  * same window. */
389
390                 if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq))
391                         delta = wallclock - mark_start;
392                 else
393                         delta = irqtime;
394                 delta = scale_exec_time(delta, rq);
395                 rq->curr_runnable_sum += delta;
396                 if (!is_idle_task(p) && !exiting_task(p))
397                         p->ravg.curr_window += delta;
398
399                 return;
400         }
401
402         if (!p_is_curr_task) {
403                 /* account_busy_for_cpu_time() = 1 so busy time needs
404                  * to be accounted to the current window. A new window
405                  * has also started, but p is not the current task, so the
406                  * window is not rolled over - just split up and account
407                  * as necessary into curr and prev. The window is only
408                  * rolled over when a new window is processed for the current
409                  * task.
410                  *
411                  * Irqtime can't be accounted by a task that isn't the
412                  * currently running task. */
413
414                 if (!nr_full_windows) {
415                         /* A full window hasn't elapsed, account partial
416                          * contribution to previous completed window. */
417                         delta = scale_exec_time(window_start - mark_start, rq);
418                         if (!exiting_task(p))
419                                 p->ravg.prev_window += delta;
420                 } else {
421                         /* Since at least one full window has elapsed,
422                          * the contribution to the previous window is the
423                          * full window (window_size). */
424                         delta = scale_exec_time(window_size, rq);
425                         if (!exiting_task(p))
426                                 p->ravg.prev_window = delta;
427                 }
428                 rq->prev_runnable_sum += delta;
429
430                 /* Account piece of busy time in the current window. */
431                 delta = scale_exec_time(wallclock - window_start, rq);
432                 rq->curr_runnable_sum += delta;
433                 if (!exiting_task(p))
434                         p->ravg.curr_window = delta;
435
436                 return;
437         }
438
439         if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) {
440                 /* account_busy_for_cpu_time() = 1 so busy time needs
441                  * to be accounted to the current window. A new window
442                  * has started and p is the current task so rollover is
443                  * needed. If any of these three above conditions are true
444                  * then this busy time can't be accounted as irqtime.
445                  *
446                  * Busy time for the idle task or exiting tasks need not
447                  * be accounted.
448                  *
449                  * An example of this would be a task that starts execution
450                  * and then sleeps once a new window has begun. */
451
452                 if (!nr_full_windows) {
453                         /* A full window hasn't elapsed, account partial
454                          * contribution to previous completed window. */
455                         delta = scale_exec_time(window_start - mark_start, rq);
456                         if (!is_idle_task(p) && !exiting_task(p))
457                                 p->ravg.prev_window += delta;
458
459                         delta += rq->curr_runnable_sum;
460                 } else {
461                         /* Since at least one full window has elapsed,
462                          * the contribution to the previous window is the
463                          * full window (window_size). */
464                         delta = scale_exec_time(window_size, rq);
465                         if (!is_idle_task(p) && !exiting_task(p))
466                                 p->ravg.prev_window = delta;
467
468                 }
469                 /*
470                  * Rollover for normal runnable sum is done here by overwriting
471                  * the values in prev_runnable_sum and curr_runnable_sum.
472                  * Rollover for new task runnable sum has completed by previous
473                  * if-else statement.
474                  */
475                 rq->prev_runnable_sum = delta;
476
477                 /* Account piece of busy time in the current window. */
478                 delta = scale_exec_time(wallclock - window_start, rq);
479                 rq->curr_runnable_sum = delta;
480                 if (!is_idle_task(p) && !exiting_task(p))
481                         p->ravg.curr_window = delta;
482
483                 return;
484         }
485
486         if (irqtime) {
487                 /* account_busy_for_cpu_time() = 1 so busy time needs
488                  * to be accounted to the current window. A new window
489                  * has started and p is the current task so rollover is
490                  * needed. The current task must be the idle task because
491                  * irqtime is not accounted for any other task.
492                  *
493                  * Irqtime will be accounted each time we process IRQ activity
494                  * after a period of idleness, so we know the IRQ busy time
495                  * started at wallclock - irqtime. */
496
497                 BUG_ON(!is_idle_task(p));
498                 mark_start = wallclock - irqtime;
499
500                 /* Roll window over. If IRQ busy time was just in the current
501                  * window then that is all that need be accounted. */
502                 rq->prev_runnable_sum = rq->curr_runnable_sum;
503                 if (mark_start > window_start) {
504                         rq->curr_runnable_sum = scale_exec_time(irqtime, rq);
505                         return;
506                 }
507
508                 /* The IRQ busy time spanned multiple windows. Process the
509                  * busy time preceding the current window start first. */
510                 delta = window_start - mark_start;
511                 if (delta > window_size)
512                         delta = window_size;
513                 delta = scale_exec_time(delta, rq);
514                 rq->prev_runnable_sum += delta;
515
516                 /* Process the remaining IRQ busy time in the current window. */
517                 delta = wallclock - window_start;
518                 rq->curr_runnable_sum = scale_exec_time(delta, rq);
519
520                 return;
521         }
522
523         BUG();
524 }
525
526 static int account_busy_for_task_demand(struct task_struct *p, int event)
527 {
528         /* No need to bother updating task demand for exiting tasks
529          * or the idle task. */
530         if (exiting_task(p) || is_idle_task(p))
531                 return 0;
532
533         /* When a task is waking up it is completing a segment of non-busy
534          * time. Likewise, if wait time is not treated as busy time, then
535          * when a task begins to run or is migrated, it is not running and
536          * is completing a segment of non-busy time. */
537         if (event == TASK_WAKE || (!walt_account_wait_time &&
538                          (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
539                 return 0;
540
541         return 1;
542 }
543
544 /*
545  * Called when new window is starting for a task, to record cpu usage over
546  * recently concluded window(s). Normally 'samples' should be 1. It can be > 1
547  * when, say, a real-time task runs without preemption for several windows at a
548  * stretch.
549  */
550 static void update_history(struct rq *rq, struct task_struct *p,
551                          u32 runtime, int samples, int event)
552 {
553         u32 *hist = &p->ravg.sum_history[0];
554         int ridx, widx;
555         u32 max = 0, avg, demand;
556         u64 sum = 0;
557
558         /* Ignore windows where task had no activity */
559         if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
560                         goto done;
561
562         /* Push new 'runtime' value onto stack */
563         widx = walt_ravg_hist_size - 1;
564         ridx = widx - samples;
565         for (; ridx >= 0; --widx, --ridx) {
566                 hist[widx] = hist[ridx];
567                 sum += hist[widx];
568                 if (hist[widx] > max)
569                         max = hist[widx];
570         }
571
572         for (widx = 0; widx < samples && widx < walt_ravg_hist_size; widx++) {
573                 hist[widx] = runtime;
574                 sum += hist[widx];
575                 if (hist[widx] > max)
576                         max = hist[widx];
577         }
578
579         p->ravg.sum = 0;
580
581         if (walt_window_stats_policy == WINDOW_STATS_RECENT) {
582                 demand = runtime;
583         } else if (walt_window_stats_policy == WINDOW_STATS_MAX) {
584                 demand = max;
585         } else {
586                 avg = div64_u64(sum, walt_ravg_hist_size);
587                 if (walt_window_stats_policy == WINDOW_STATS_AVG)
588                         demand = avg;
589                 else
590                         demand = max(avg, runtime);
591         }
592
593         /*
594          * A throttled deadline sched class task gets dequeued without
595  * changing p->on_rq. Since the dequeue decrements hmp stats,
596  * avoid decrementing them here again.
597          */
598         if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
599                                                 !p->dl.dl_throttled))
600                 fixup_cumulative_runnable_avg(rq, p, demand);
601
602         p->ravg.demand = demand;
603
604 done:
605         trace_walt_update_history(rq, p, runtime, samples, event);
606         return;
607 }
608
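/*
 * Add a frequency- and efficiency-scaled slice of busy time to the task's
 * demand sum for the current window, capped at one full window.
 */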
609 static void add_to_task_demand(struct rq *rq, struct task_struct *p,
610                                 u64 delta)
611 {
612         delta = scale_exec_time(delta, rq);
613         p->ravg.sum += delta;
614         if (unlikely(p->ravg.sum > walt_ravg_window))
615                 p->ravg.sum = walt_ravg_window;
616 }
617
618 /*
619  * Account cpu demand of task and/or update task's cpu demand history
620  *
621  * ms = p->ravg.mark_start;
622  * wc = wallclock
623  * ws = rq->window_start
624  *
625  * Three possibilities:
626  *
627  *      a) Task event is contained within one window.
628  *              window_start < mark_start < wallclock
629  *
630  *              ws   ms  wc
631  *              |    |   |
632  *              V    V   V
633  *              |---------------|
634  *
635  *      In this case, p->ravg.sum is updated *iff* event is appropriate
636  *      (ex: event == PUT_PREV_TASK)
637  *
638  *      b) Task event spans two windows.
639  *              mark_start < window_start < wallclock
640  *
641  *              ms   ws   wc
642  *              |    |    |
643  *              V    V    V
644  *              -----|-------------------
645  *
646  *      In this case, p->ravg.sum is updated with (ws - ms) *iff* event
647  *      is appropriate, then a new window sample is recorded followed
648  *      by p->ravg.sum being set to (wc - ws) *iff* event is appropriate.
649  *
650  *      c) Task event spans more than two windows.
651  *
652  *              ms ws_tmp                          ws  wc
653  *              |  |                               |   |
654  *              V  V                               V   V
655  *              ---|-------|-------|-------|-------|------
656  *                 |                               |
657  *                 |<------ nr_full_windows ------>|
658  *
659  *      In this case, p->ravg.sum is updated with (ws_tmp - ms) first *iff*
660  *      event is appropriate, window sample of p->ravg.sum is recorded,
661  *      'nr_full_window' samples of window_size is also recorded *iff*
662  *      event is appropriate and finally p->ravg.sum is set to (wc - ws)
663  *      *iff* event is appropriate.
664  *
665  * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
666  * depends on it!
667  */
668 static void update_task_demand(struct task_struct *p, struct rq *rq,
669              int event, u64 wallclock)
670 {
671         u64 mark_start = p->ravg.mark_start;
672         u64 delta, window_start = rq->window_start;
673         int new_window, nr_full_windows;
674         u32 window_size = walt_ravg_window;
675
676         new_window = mark_start < window_start;
677         if (!account_busy_for_task_demand(p, event)) {
678                 if (new_window)
679                         /* If the time accounted isn't being accounted as
680                          * busy time, and a new window started, only the
681                          * previous window need be closed out with the
682                          * pre-existing demand. Multiple windows may have
683                          * elapsed, but since empty windows are dropped,
684                          * it is not necessary to account those. */
685                         update_history(rq, p, p->ravg.sum, 1, event);
686                 return;
687         }
688
689         if (!new_window) {
690                 /* The simple case - busy time contained within the existing
691                  * window. */
692                 add_to_task_demand(rq, p, wallclock - mark_start);
693                 return;
694         }
695
696         /* Busy time spans at least two windows. Temporarily rewind
697          * window_start to first window boundary after mark_start. */
698         delta = window_start - mark_start;
699         nr_full_windows = div64_u64(delta, window_size);
700         window_start -= (u64)nr_full_windows * (u64)window_size;
701
702         /* Process (window_start - mark_start) first */
703         add_to_task_demand(rq, p, window_start - mark_start);
704
705         /* Push new sample(s) into task's demand history */
706         update_history(rq, p, p->ravg.sum, 1, event);
707         if (nr_full_windows)
708                 update_history(rq, p, scale_exec_time(window_size, rq),
709                                nr_full_windows, event);
710
711         /* Roll window_start back to current to process any remainder
712          * in current window. */
713         window_start += (u64)nr_full_windows * (u64)window_size;
714
715         /* Process (wallclock - window_start) next */
716         mark_start = window_start;
717         add_to_task_demand(rq, p, wallclock - mark_start);
718 }
719
720 /* Reflect task activity on its demand and cpu's busy time statistics */
721 void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
722              int event, u64 wallclock, u64 irqtime)
723 {
724         if (walt_disabled || !rq->window_start)
725                 return;
726
727         lockdep_assert_held(&rq->lock);
728
729         update_window_start(rq, wallclock);
730
731         if (!p->ravg.mark_start)
732                 goto done;
733
734         update_task_demand(p, rq, event, wallclock);
735         update_cpu_busy_time(p, rq, event, wallclock, irqtime);
736
737 done:
738         trace_walt_update_task_ravg(p, rq, event, wallclock, irqtime);
739
740         p->ravg.mark_start = wallclock;
741 }
742
743 unsigned long __weak arch_get_cpu_efficiency(int cpu)
744 {
745         return SCHED_LOAD_SCALE;
746 }
747
748 void walt_init_cpu_efficiency(void)
749 {
750         int i, efficiency;
751         unsigned int max = 0, min = UINT_MAX;
752
753         for_each_possible_cpu(i) {
754                 efficiency = arch_get_cpu_efficiency(i);
755                 cpu_rq(i)->efficiency = efficiency;
756
757                 if (efficiency > max)
758                         max = efficiency;
759                 if (efficiency < min)
760                         min = efficiency;
761         }
762
763         if (max)
764                 max_possible_efficiency = max;
765
766         if (min)
767                 min_possible_efficiency = min;
768 }
769
770 static void reset_task_stats(struct task_struct *p)
771 {
772         u32 sum = 0;
773
774         if (exiting_task(p))
775                 sum = EXITING_TASK_MARKER;
776
777         memset(&p->ravg, 0, sizeof(struct ravg));
778         /* Retain EXITING_TASK marker */
779         p->ravg.sum_history[0] = sum;
780 }
781
782 void walt_mark_task_starting(struct task_struct *p)
783 {
784         u64 wallclock;
785         struct rq *rq = task_rq(p);
786
787         if (!rq->window_start) {
788                 reset_task_stats(p);
789                 return;
790         }
791
792         wallclock = walt_ktime_clock();
793         p->ravg.mark_start = wallclock;
794 }
795
796 void walt_set_window_start(struct rq *rq)
797 {
798         int cpu = cpu_of(rq);
799         struct rq *sync_rq = cpu_rq(sync_cpu);
800
801         if (rq->window_start)
802                 return;
803
804         if (cpu == sync_cpu) {
805                 rq->window_start = walt_ktime_clock();
806         } else {
807                 raw_spin_unlock(&rq->lock);
808                 double_rq_lock(rq, sync_rq);
809                 rq->window_start = cpu_rq(sync_cpu)->window_start;
810                 rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
811                 raw_spin_unlock(&sync_rq->lock);
812         }
813
814         rq->curr->ravg.mark_start = rq->window_start;
815 }
816
817 void walt_migrate_sync_cpu(int cpu)
818 {
819         if (cpu == sync_cpu)
820                 sync_cpu = smp_processor_id();
821 }
822
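/*
 * On inter-CPU migration, move the task's contribution to the current and
 * previous window busy sums from the source rq to the destination rq,
 * after bringing both rqs' and the task's accounting up to date.
 */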
823 void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
824 {
825         struct rq *src_rq = task_rq(p);
826         struct rq *dest_rq = cpu_rq(new_cpu);
827         u64 wallclock;
828
829         if (!p->on_rq && p->state != TASK_WAKING)
830                 return;
831
832         if (exiting_task(p)) {
833                 return;
834         }
835
836         if (p->state == TASK_WAKING)
837                 double_rq_lock(src_rq, dest_rq);
838
839         wallclock = walt_ktime_clock();
840
841         walt_update_task_ravg(task_rq(p)->curr, task_rq(p),
842                         TASK_UPDATE, wallclock, 0);
843         walt_update_task_ravg(dest_rq->curr, dest_rq,
844                         TASK_UPDATE, wallclock, 0);
845
846         walt_update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);
847
848         if (p->ravg.curr_window) {
849                 src_rq->curr_runnable_sum -= p->ravg.curr_window;
850                 dest_rq->curr_runnable_sum += p->ravg.curr_window;
851         }
852
853         if (p->ravg.prev_window) {
854                 src_rq->prev_runnable_sum -= p->ravg.prev_window;
855                 dest_rq->prev_runnable_sum += p->ravg.prev_window;
856         }
857
858         if ((s64)src_rq->prev_runnable_sum < 0) {
859                 src_rq->prev_runnable_sum = 0;
860                 WARN_ON(1);
861         }
862         if ((s64)src_rq->curr_runnable_sum < 0) {
863                 src_rq->curr_runnable_sum = 0;
864                 WARN_ON(1);
865         }
866
867         trace_walt_migration_update_sum(src_rq, p);
868         trace_walt_migration_update_sum(dest_rq, p);
869
870         if (p->state == TASK_WAKING)
871                 double_rq_unlock(src_rq, dest_rq);
872 }
873
874 /* Keep track of max/min capacity possible across CPUs "currently" */
875 static void __update_min_max_capacity(void)
876 {
877         int i;
878         int max = 0, min = INT_MAX;
879
880         for_each_online_cpu(i) {
881                 if (cpu_rq(i)->capacity > max)
882                         max = cpu_rq(i)->capacity;
883                 if (cpu_rq(i)->capacity < min)
884                         min = cpu_rq(i)->capacity;
885         }
886
887         max_capacity = max;
888         min_capacity = min;
889 }
890
891 static void update_min_max_capacity(void)
892 {
893         unsigned long flags;
894         int i;
895
896         local_irq_save(flags);
897         for_each_possible_cpu(i)
898                 raw_spin_lock(&cpu_rq(i)->lock);
899
900         __update_min_max_capacity();
901
902         for_each_possible_cpu(i)
903                 raw_spin_unlock(&cpu_rq(i)->lock);
904         local_irq_restore(flags);
905 }
906
907 /*
908  * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
909  * least efficient cpu gets capacity of 1024
910  */
911 static unsigned long capacity_scale_cpu_efficiency(int cpu)
912 {
913         return (1024 * cpu_rq(cpu)->efficiency) / min_possible_efficiency;
914 }
915
916 /*
917  * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
918  * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
919  */
920 static unsigned long capacity_scale_cpu_freq(int cpu)
921 {
922         return (1024 * cpu_rq(cpu)->max_freq) / min_max_freq;
923 }
924
925 /*
926  * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
927  * that "most" efficient cpu gets a load_scale_factor of 1
928  */
929 static unsigned long load_scale_cpu_efficiency(int cpu)
930 {
931         return DIV_ROUND_UP(1024 * max_possible_efficiency,
932                             cpu_rq(cpu)->efficiency);
933 }
934
935 /*
936  * Return load_scale_factor of a cpu in reference to cpu with best max_freq
937  * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
938  * of 1.
939  */
940 static unsigned long load_scale_cpu_freq(int cpu)
941 {
942         return DIV_ROUND_UP(1024 * max_possible_freq, cpu_rq(cpu)->max_freq);
943 }
944
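/*
 * Capacity of a cpu relative to the least capable cpu (which gets 1024),
 * combining the efficiency and max_freq scaling factors above.
 */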
945 static int compute_capacity(int cpu)
946 {
947         int capacity = 1024;
948
949         capacity *= capacity_scale_cpu_efficiency(cpu);
950         capacity >>= 10;
951
952         capacity *= capacity_scale_cpu_freq(cpu);
953         capacity >>= 10;
954
955         return capacity;
956 }
957
958 static int compute_load_scale_factor(int cpu)
959 {
960         int load_scale = 1024;
961
962         /*
963          * load_scale_factor accounts for the fact that task load
964          * is in reference to "best" performing cpu. Task's load will need to be
965          * scaled (up) by a factor to determine suitability to be placed on a
966          * (little) cpu.
967          */
968         load_scale *= load_scale_cpu_efficiency(cpu);
969         load_scale >>= 10;
970
971         load_scale *= load_scale_cpu_freq(cpu);
972         load_scale >>= 10;
973
974         return load_scale;
975 }
976
977 static int cpufreq_notifier_policy(struct notifier_block *nb,
978                 unsigned long val, void *data)
979 {
980         struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
981         int i, update_max = 0;
982         u64 highest_mpc = 0, highest_mplsf = 0;
983         const struct cpumask *cpus = policy->related_cpus;
984         unsigned int orig_min_max_freq = min_max_freq;
985         unsigned int orig_max_possible_freq = max_possible_freq;
986         /* Initialized to policy->max in case policy->related_cpus is empty! */
987         unsigned int orig_max_freq = policy->max;
988
989         if (val != CPUFREQ_NOTIFY && val != CPUFREQ_REMOVE_POLICY &&
990                                                 val != CPUFREQ_CREATE_POLICY)
991                 return 0;
992
993         if (val == CPUFREQ_REMOVE_POLICY || val == CPUFREQ_CREATE_POLICY) {
994                 update_min_max_capacity();
995                 return 0;
996         }
997
998         for_each_cpu(i, policy->related_cpus) {
999                 cpumask_copy(&cpu_rq(i)->freq_domain_cpumask,
1000                              policy->related_cpus);
1001                 orig_max_freq = cpu_rq(i)->max_freq;
1002                 cpu_rq(i)->min_freq = policy->min;
1003                 cpu_rq(i)->max_freq = policy->max;
1004                 cpu_rq(i)->cur_freq = policy->cur;
1005                 cpu_rq(i)->max_possible_freq = policy->cpuinfo.max_freq;
1006         }
1007
1008         max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
1009         if (min_max_freq == 1)
1010                 min_max_freq = UINT_MAX;
1011         min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq);
1012         BUG_ON(!min_max_freq);
1013         BUG_ON(!policy->max);
1014
1015         /* Changes to policy other than max_freq don't require any updates */
1016         if (orig_max_freq == policy->max)
1017                 return 0;
1018
1019         /*
1020          * A changed min_max_freq or max_possible_freq (possible during bootup)
1021          * needs to trigger re-computation of load_scale_factor and capacity for
1022          * all possible cpus (even those offline). It also needs to trigger
1023          * re-computation of nr_big_task count on all online cpus.
1024          *
1025          * A changed rq->max_freq, however, needs to trigger re-computation of
1026          * load_scale_factor and capacity for just the cluster of cpus involved.
1027          * Since small task definition depends on max_load_scale_factor, a
1028          * changed load_scale_factor of one cluster could influence
1029          * classification of tasks in another cluster. Hence a changed
1030          * rq->max_freq will need to trigger re-computation of nr_big_task
1031          * count on all online cpus.
1032          *
1033          * While it should be sufficient for nr_big_tasks to be
1034          * re-computed for only online cpus, we have inadequate context
1035          * information here (in policy notifier) with regard to hotplug-safety
1036          * context in which notification is issued. As a result, we can't use
1037          * get_online_cpus() here, as it can lead to deadlock. Until cpufreq is
1038          * fixed up to issue notification always in hotplug-safe context,
1039          * re-compute nr_big_task for all possible cpus.
1040          */
1041
1042         if (orig_min_max_freq != min_max_freq ||
1043                 orig_max_possible_freq != max_possible_freq) {
1044                         cpus = cpu_possible_mask;
1045                         update_max = 1;
1046         }
1047
1048         /*
1049          * Changed load_scale_factor can trigger reclassification of tasks as
1050          * big or small. Make this change "atomic" so that tasks are accounted
1051          * properly due to changed load_scale_factor
1052          */
1053         for_each_cpu(i, cpus) {
1054                 struct rq *rq = cpu_rq(i);
1055
1056                 rq->capacity = compute_capacity(i);
1057                 rq->load_scale_factor = compute_load_scale_factor(i);
1058
1059                 if (update_max) {
1060                         u64 mpc, mplsf;
1061
1062                         mpc = div_u64(((u64) rq->capacity) *
1063                                 rq->max_possible_freq, rq->max_freq);
1064                         rq->max_possible_capacity = (int) mpc;
1065
1066                         mplsf = div_u64(((u64) rq->load_scale_factor) *
1067                                 rq->max_possible_freq, rq->max_freq);
1068
1069                         if (mpc > highest_mpc) {
1070                                 highest_mpc = mpc;
1071                                 cpumask_clear(&mpc_mask);
1072                                 cpumask_set_cpu(i, &mpc_mask);
1073                         } else if (mpc == highest_mpc) {
1074                                 cpumask_set_cpu(i, &mpc_mask);
1075                         }
1076
1077                         if (mplsf > highest_mplsf)
1078                                 highest_mplsf = mplsf;
1079                 }
1080         }
1081
1082         if (update_max) {
1083                 max_possible_capacity = highest_mpc;
1084                 max_load_scale_factor = highest_mplsf;
1085         }
1086
1087         __update_min_max_capacity();
1088
1089         return 0;
1090 }
1091
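/*
 * Frequency-transition notifier: on POSTCHANGE, charge the busy time
 * accumulated so far at the old frequency for every cpu in the frequency
 * domain, then record the new cur_freq used by scale_exec_time().
 */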
1092 static int cpufreq_notifier_trans(struct notifier_block *nb,
1093                 unsigned long val, void *data)
1094 {
1095         struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
1096         unsigned int cpu = freq->cpu, new_freq = freq->new;
1097         unsigned long flags;
1098         int i;
1099
1100         if (val != CPUFREQ_POSTCHANGE)
1101                 return 0;
1102
1103         BUG_ON(!new_freq);
1104
1105         if (cpu_rq(cpu)->cur_freq == new_freq)
1106                 return 0;
1107
1108         for_each_cpu(i, &cpu_rq(cpu)->freq_domain_cpumask) {
1109                 struct rq *rq = cpu_rq(i);
1110
1111                 raw_spin_lock_irqsave(&rq->lock, flags);
1112                 walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
1113                                       walt_ktime_clock(), 0);
1114                 rq->cur_freq = new_freq;
1115                 raw_spin_unlock_irqrestore(&rq->lock, flags);
1116         }
1117
1118         return 0;
1119 }
1120
1121 static struct notifier_block notifier_policy_block = {
1122         .notifier_call = cpufreq_notifier_policy
1123 };
1124
1125 static struct notifier_block notifier_trans_block = {
1126         .notifier_call = cpufreq_notifier_trans
1127 };
1128
1129 static int register_sched_callback(void)
1130 {
1131         int ret;
1132
1133         ret = cpufreq_register_notifier(&notifier_policy_block,
1134                                                 CPUFREQ_POLICY_NOTIFIER);
1135
1136         if (!ret)
1137                 ret = cpufreq_register_notifier(&notifier_trans_block,
1138                                                 CPUFREQ_TRANSITION_NOTIFIER);
1139
1140         return 0;
1141 }
1142
1143 /*
1144  * cpufreq callbacks can be registered at core_initcall or later time.
1145  * Any registration done prior to that is "forgotten" by cpufreq. See
1146  * initialization of variable init_cpufreq_transition_notifier_list_called
1147  * for further information.
1148  */
1149 core_initcall(register_sched_callback);
1150
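/*
 * Seed a new task's demand and demand history with
 * sysctl_sched_walt_init_task_load_pct percent of a window, or with
 * current's init_load_pct if one was set.
 */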
1151 void walt_init_new_task_load(struct task_struct *p)
1152 {
1153         int i;
1154         u32 init_load_windows =
1155                         div64_u64((u64)sysctl_sched_walt_init_task_load_pct *
1156                           (u64)walt_ravg_window, 100);
1157         u32 init_load_pct = current->init_load_pct;
1158
1159         p->init_load_pct = 0;
1160         memset(&p->ravg, 0, sizeof(struct ravg));
1161
1162         if (init_load_pct) {
1163                 init_load_windows = div64_u64((u64)init_load_pct *
1164                           (u64)walt_ravg_window, 100);
1165         }
1166
1167         p->ravg.demand = init_load_windows;
1168         for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
1169                 p->ravg.sum_history[i] = init_load_windows;
1170 }