/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Window Assisted Load Tracking (WALT) implementation credits:
 * Srivatsa Vaddagiri, Steve Muckle, Syed Rameez Mustafa, Joonwoo Park,
 * Pavan Kumar Kondeti, Olav Haugan
 *
 * 2016-03-06: Integration with EAS/refactoring by Vikram Mulukutla
 */

#include <linux/syscore_ops.h>
#include <linux/cpufreq.h>
#include <trace/events/sched.h>
#include <clocksource/arm_arch_timer.h>
#include "sched.h"
#include "walt.h"
#define WINDOW_STATS_RECENT		0
#define WINDOW_STATS_MAX		1
#define WINDOW_STATS_MAX_RECENT_AVG	2
#define WINDOW_STATS_AVG		3
#define WINDOW_STATS_INVALID_POLICY	4

#define EXITING_TASK_MARKER	0xdeaddead
static __read_mostly unsigned int walt_ravg_hist_size = 5;
static __read_mostly unsigned int walt_window_stats_policy =
	WINDOW_STATS_MAX_RECENT_AVG;
static __read_mostly unsigned int walt_account_wait_time = 1;
static __read_mostly unsigned int walt_freq_account_wait_time = 0;
static __read_mostly unsigned int walt_io_is_busy = 0;

unsigned int sysctl_sched_walt_init_task_load_pct = 15;

/* 1 -> use PELT based load stats, 0 -> use window-based load stats */
unsigned int __read_mostly walt_disabled = 0;

static unsigned int max_possible_efficiency = 1024;
static unsigned int min_possible_efficiency = 1024;
/*
 * Maximum possible frequency across all cpus. Task demand and cpu
 * capacity (cpu_power) metrics are scaled in reference to it.
 */
static unsigned int max_possible_freq = 1;

/*
 * Minimum possible max_freq across all cpus. This will be the same as
 * max_possible_freq on homogeneous systems and could be different from
 * max_possible_freq on heterogeneous systems. min_max_freq is used to derive
 * capacity (cpu_power) of cpus.
 */
static unsigned int min_max_freq = 1;

static unsigned int max_capacity = 1024;
static unsigned int min_capacity = 1024;
static unsigned int max_load_scale_factor = 1024;
static unsigned int max_possible_capacity = 1024;
/* Mask of all CPUs that have max_possible_capacity */
static cpumask_t mpc_mask = CPU_MASK_ALL;

/* Window size (in ns) */
__read_mostly unsigned int walt_ravg_window = 20000000;

/* Min window size (in ns) = 10ms */
#define MIN_SCHED_RAVG_WINDOW 10000000

/* Max window size (in ns) = 1s */
#define MAX_SCHED_RAVG_WINDOW 1000000000

static unsigned int sync_cpu;
static ktime_t ktime_last;
static bool walt_ktime_suspended;
static unsigned int task_load(struct task_struct *p)
{
	return p->ravg.demand;
}
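/*
 * rq->cumulative_runnable_avg is the summed WALT demand of the tasks
 * currently accounted as runnable on this runqueue. The helpers below add a
 * task's demand, remove it, and (in fixup_cumulative_runnable_avg()) adjust
 * it in place when update_history() computes a new demand for a queued task.
 */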
void
walt_inc_cumulative_runnable_avg(struct rq *rq,
				 struct task_struct *p)
{
	rq->cumulative_runnable_avg += p->ravg.demand;
}

void
walt_dec_cumulative_runnable_avg(struct rq *rq,
				 struct task_struct *p)
{
	rq->cumulative_runnable_avg -= p->ravg.demand;
	BUG_ON((s64)rq->cumulative_runnable_avg < 0);
}
static void
fixup_cumulative_runnable_avg(struct rq *rq,
			      struct task_struct *p, u64 new_task_load)
{
	s64 task_load_delta = (s64)new_task_load - task_load(p);

	rq->cumulative_runnable_avg += task_load_delta;
	if ((s64)rq->cumulative_runnable_avg < 0)
		panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
			task_load_delta, task_load(p));
}
u64 walt_ktime_clock(void)
{
	if (unlikely(walt_ktime_suspended))
		return ktime_to_ns(ktime_last);
	return ktime_get_ns();
}

static void walt_resume(void)
{
	walt_ktime_suspended = false;
}

static int walt_suspend(void)
{
	ktime_last = ktime_get();
	walt_ktime_suspended = true;
	return 0;
}

static struct syscore_ops walt_syscore_ops = {
	.resume = walt_resume,
	.suspend = walt_suspend
};

static int __init walt_init_ops(void)
{
	register_syscore_ops(&walt_syscore_ops);
	return 0;
}
late_initcall(walt_init_ops);
void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
		struct task_struct *p)
{
	cfs_rq->cumulative_runnable_avg += p->ravg.demand;
}

void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
		struct task_struct *p)
{
	cfs_rq->cumulative_runnable_avg -= p->ravg.demand;
}
static int exiting_task(struct task_struct *p)
{
	if (p->flags & PF_EXITING) {
		if (p->ravg.sum_history[0] != EXITING_TASK_MARKER) {
			p->ravg.sum_history[0] = EXITING_TASK_MARKER;
		}
		return 1;
	}
	return 0;
}
static int __init set_walt_ravg_window(char *str)
{
	get_option(&str, &walt_ravg_window);

	walt_disabled = (walt_ravg_window < MIN_SCHED_RAVG_WINDOW ||
			 walt_ravg_window > MAX_SCHED_RAVG_WINDOW);
	return 0;
}

early_param("walt_ravg_window", set_walt_ravg_window);
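/*
 * Usage note: the window length can be overridden from the kernel command
 * line, e.g. "walt_ravg_window=10000000" for 10ms windows. A value outside
 * [MIN_SCHED_RAVG_WINDOW, MAX_SCHED_RAVG_WINDOW] disables WALT entirely.
 */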
static void
update_window_start(struct rq *rq, u64 wallclock)
{
	s64 delta;
	int nr_windows;

	delta = wallclock - rq->window_start;
	/* If the MPM global timer is cleared, set delta to 0 to avoid a kernel BUG */
	if (delta < 0) {
		if (arch_timer_read_counter() == 0)
			delta = 0;
		else
			BUG_ON(1);
	}

	if (delta < walt_ravg_window)
		return;

	nr_windows = div64_u64(delta, walt_ravg_window);
	rq->window_start += (u64)nr_windows * (u64)walt_ravg_window;
}
static u64 scale_exec_time(u64 delta, struct rq *rq)
{
	unsigned int cur_freq = rq->cur_freq;
	int sf;

	if (unlikely(cur_freq > max_possible_freq))
		cur_freq = rq->max_possible_freq;

	/* Scale by cur_freq / max_possible_freq, rounding up. */
	delta = div64_u64(delta * cur_freq + max_possible_freq - 1,
			  max_possible_freq);

	/* Then scale by cpu efficiency relative to the most efficient cpu. */
	sf = DIV_ROUND_UP(rq->efficiency * 1024, max_possible_efficiency);

	delta *= sf;
	delta >>= 10;

	return delta;
}
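/*
 * Worked example of the scaling above (illustrative numbers): on a CPU
 * running at half of max_possible_freq with an efficiency of 512 (half of
 * max_possible_efficiency), 8ms of wallclock execution is credited as
 * 8ms * 1/2 * 1/2 = 2ms of demand, so slower or less efficient CPUs report
 * proportionally smaller busy time for the same wallclock interval.
 */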
static int cpu_is_waiting_on_io(struct rq *rq)
{
	if (!walt_io_is_busy)
		return 0;

	return atomic_read(&rq->nr_iowait);
}
void walt_account_irqtime(int cpu, struct task_struct *curr,
				 u64 delta, u64 wallclock)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags, nr_windows;
	u64 cur_jiffies_ts;

	raw_spin_lock_irqsave(&rq->lock, flags);

	/*
	 * cputime (wallclock) uses sched_clock so use the same here for
	 * consistency.
	 */
	delta += sched_clock() - wallclock;
	cur_jiffies_ts = get_jiffies_64();

	if (is_idle_task(curr))
		walt_update_task_ravg(curr, rq, IRQ_UPDATE, walt_ktime_clock(),
				 delta);

	nr_windows = cur_jiffies_ts - rq->irqload_ts;

	if (nr_windows) {
		if (nr_windows < 10) {
			/* Decay CPU's irqload by 3/4 for each window. */
			rq->avg_irqload *= (3 * nr_windows);
			rq->avg_irqload = div64_u64(rq->avg_irqload,
						    4 * nr_windows);
		} else {
			rq->avg_irqload = 0;
		}
		rq->avg_irqload += rq->cur_irqload;
		rq->cur_irqload = 0;
	}

	rq->cur_irqload += delta;
	rq->irqload_ts = cur_jiffies_ts;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
#define WALT_HIGH_IRQ_TIMEOUT 3

u64 walt_irqload(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	s64 delta;

	delta = get_jiffies_64() - rq->irqload_ts;

	/*
	 * The current context can be preempted by an irq, and rq->irqload_ts
	 * can be updated by the irq context, so delta can be negative.
	 * This is okay and we can safely return, since it means there was a
	 * recent irq occurrence.
	 */

	if (delta < WALT_HIGH_IRQ_TIMEOUT)
		return rq->avg_irqload;
	else
		return 0;
}

int walt_cpu_high_irqload(int cpu)
{
	return walt_irqload(cpu) >= sysctl_sched_walt_cpu_high_irqload;
}
static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
				     u64 irqtime, int event)
{
	if (is_idle_task(p)) {
		/* TASK_WAKE && TASK_MIGRATE is not possible on idle task! */
		if (event == PICK_NEXT_TASK)
			return 0;

		/* PUT_PREV_TASK, TASK_UPDATE && IRQ_UPDATE are left */
		return irqtime || cpu_is_waiting_on_io(rq);
	}

	if (event == TASK_WAKE)
		return 0;

	if (event == PUT_PREV_TASK || event == IRQ_UPDATE ||
					 event == TASK_UPDATE)
		return 1;

	/* Only TASK_MIGRATE && PICK_NEXT_TASK left */
	return walt_freq_account_wait_time;
}
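/*
 * Summary of the decisions above, derived from the code: the idle task only
 * contributes busy time when there is irqtime to account or the CPU is
 * waiting on IO; TASK_WAKE never contributes; PUT_PREV_TASK, TASK_UPDATE and
 * IRQ_UPDATE always contribute; TASK_MIGRATE and PICK_NEXT_TASK contribute
 * only when walt_freq_account_wait_time is set.
 */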
/*
 * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
 */
static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
	     int event, u64 wallclock, u64 irqtime)
{
	int new_window, nr_full_windows = 0;
	int p_is_curr_task = (p == rq->curr);
	u64 mark_start = p->ravg.mark_start;
	u64 window_start = rq->window_start;
	u32 window_size = walt_ravg_window;
	u64 delta;

	new_window = mark_start < window_start;
	if (new_window) {
		nr_full_windows = div64_u64((window_start - mark_start),
						window_size);
		if (p->ravg.active_windows < USHRT_MAX)
			p->ravg.active_windows++;
	}

	/* Handle per-task window rollover. We don't care about the idle
	 * task or exiting tasks. */
	if (new_window && !is_idle_task(p) && !exiting_task(p)) {
		u32 curr_window = 0;

		if (!nr_full_windows)
			curr_window = p->ravg.curr_window;

		p->ravg.prev_window = curr_window;
		p->ravg.curr_window = 0;
	}

	if (!account_busy_for_cpu_time(rq, p, irqtime, event)) {
		/* account_busy_for_cpu_time() = 0, so no update to the
		 * task's current window needs to be made. This could be,
		 * for example:
		 *
		 * - a wakeup event on a task within the current
		 *   window (!new_window below, no action required),
		 * - switching to a new task from idle (PICK_NEXT_TASK)
		 *   in a new window where irqtime is 0 and we aren't
		 *   waiting on IO. */

		if (!new_window)
			return;

		/* A new window has started. The RQ demand must be rolled
		 * over if p is the current task. */
		if (p_is_curr_task) {
			u64 prev_sum = 0;

			/* p is either idle task or an exiting task */
			if (!nr_full_windows) {
				prev_sum = rq->curr_runnable_sum;
			}

			rq->prev_runnable_sum = prev_sum;
			rq->curr_runnable_sum = 0;
		}

		return;
	}

	if (!new_window) {
		/* account_busy_for_cpu_time() = 1 so busy time needs
		 * to be accounted to the current window. No rollover
		 * since we didn't start a new window. An example of this is
		 * when a task starts execution and then sleeps within the
		 * same window. */

		if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq))
			delta = wallclock - mark_start;
		else
			delta = irqtime;
		delta = scale_exec_time(delta, rq);
		rq->curr_runnable_sum += delta;
		if (!is_idle_task(p) && !exiting_task(p))
			p->ravg.curr_window += delta;

		return;
	}

	if (!p_is_curr_task) {
		/* account_busy_for_cpu_time() = 1 so busy time needs
		 * to be accounted to the current window. A new window
		 * has also started, but p is not the current task, so the
		 * window is not rolled over - just split up and account
		 * as necessary into curr and prev. The window is only
		 * rolled over when a new window is processed for the current
		 * task.
		 *
		 * Irqtime can't be accounted by a task that isn't the
		 * currently running task. */

		if (!nr_full_windows) {
			/* A full window hasn't elapsed, account partial
			 * contribution to previous completed window. */
			delta = scale_exec_time(window_start - mark_start, rq);
			if (!exiting_task(p))
				p->ravg.prev_window += delta;
		} else {
			/* Since at least one full window has elapsed,
			 * the contribution to the previous window is the
			 * full window (window_size). */
			delta = scale_exec_time(window_size, rq);
			if (!exiting_task(p))
				p->ravg.prev_window = delta;
		}
		rq->prev_runnable_sum += delta;

		/* Account piece of busy time in the current window. */
		delta = scale_exec_time(wallclock - window_start, rq);
		rq->curr_runnable_sum += delta;
		if (!exiting_task(p))
			p->ravg.curr_window = delta;

		return;
	}

	if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) {
		/* account_busy_for_cpu_time() = 1 so busy time needs
		 * to be accounted to the current window. A new window
		 * has started and p is the current task so rollover is
		 * needed. If any of these three above conditions are true
		 * then this busy time can't be accounted as irqtime.
		 *
		 * Busy time for the idle task or exiting tasks need not
		 * be accounted.
		 *
		 * An example of this would be a task that starts execution
		 * and then sleeps once a new window has begun. */

		if (!nr_full_windows) {
			/* A full window hasn't elapsed, account partial
			 * contribution to previous completed window. */
			delta = scale_exec_time(window_start - mark_start, rq);
			if (!is_idle_task(p) && !exiting_task(p))
				p->ravg.prev_window += delta;

			delta += rq->curr_runnable_sum;
		} else {
			/* Since at least one full window has elapsed,
			 * the contribution to the previous window is the
			 * full window (window_size). */
			delta = scale_exec_time(window_size, rq);
			if (!is_idle_task(p) && !exiting_task(p))
				p->ravg.prev_window = delta;
		}
		/*
		 * Rollover for normal runnable sum is done here by overwriting
		 * the values in prev_runnable_sum and curr_runnable_sum.
		 * Rollover for new task runnable sum has completed by the
		 * previous if-else statement.
		 */
		rq->prev_runnable_sum = delta;

		/* Account piece of busy time in the current window. */
		delta = scale_exec_time(wallclock - window_start, rq);
		rq->curr_runnable_sum = delta;
		if (!is_idle_task(p) && !exiting_task(p))
			p->ravg.curr_window = delta;

		return;
	}

	/* account_busy_for_cpu_time() = 1 so busy time needs
	 * to be accounted to the current window. A new window
	 * has started and p is the current task so rollover is
	 * needed. The current task must be the idle task because
	 * irqtime is not accounted for any other task.
	 *
	 * Irqtime will be accounted each time we process IRQ activity
	 * after a period of idleness, so we know the IRQ busy time
	 * started at wallclock - irqtime. */

	BUG_ON(!is_idle_task(p));
	mark_start = wallclock - irqtime;

	/* Roll window over. If IRQ busy time was just in the current
	 * window then that is all that need be accounted. */
	rq->prev_runnable_sum = rq->curr_runnable_sum;
	if (mark_start > window_start) {
		rq->curr_runnable_sum = scale_exec_time(irqtime, rq);
		return;
	}

	/* The IRQ busy time spanned multiple windows. Process the
	 * busy time preceding the current window start first. */
	delta = window_start - mark_start;
	if (delta > window_size)
		delta = window_size;
	delta = scale_exec_time(delta, rq);
	rq->prev_runnable_sum += delta;

	/* Process the remaining IRQ busy time in the current window. */
	delta = wallclock - window_start;
	rq->curr_runnable_sum = scale_exec_time(delta, rq);
}
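/*
 * Note on the counters touched above: rq->curr_runnable_sum accumulates
 * scaled busy time for the window in progress, while rq->prev_runnable_sum
 * holds the total for the most recently completed window; the rollover above
 * is what keeps the "previous window" figure stable while the current window
 * is still filling.
 */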
static int account_busy_for_task_demand(struct task_struct *p, int event)
{
	/* No need to bother updating task demand for exiting tasks
	 * or the idle task. */
	if (exiting_task(p) || is_idle_task(p))
		return 0;

	/* When a task is waking up it is completing a segment of non-busy
	 * time. Likewise, if wait time is not treated as busy time, then
	 * when a task begins to run or is migrated, it is not running and
	 * is completing a segment of non-busy time. */
	if (event == TASK_WAKE || (!walt_account_wait_time &&
			 (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
		return 0;

	return 1;
}
/*
 * Called when a new window is starting for a task, to record cpu usage over
 * recently concluded window(s). Normally 'samples' should be 1. It can be > 1
 * when, say, a real-time task runs without preemption for several windows at a
 * stretch.
 */
static void update_history(struct rq *rq, struct task_struct *p,
			 u32 runtime, int samples, int event)
{
	u32 *hist = &p->ravg.sum_history[0];
	int ridx, widx;
	u32 max = 0, avg, demand;
	u64 sum = 0;

	/* Ignore windows where task had no activity */
	if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
		goto done;

	/* Push new 'runtime' value onto stack */
	widx = walt_ravg_hist_size - 1;
	ridx = widx - samples;
	for (; ridx >= 0; --widx, --ridx) {
		hist[widx] = hist[ridx];
		sum += hist[widx];
		if (hist[widx] > max)
			max = hist[widx];
	}

	for (widx = 0; widx < samples && widx < walt_ravg_hist_size; widx++) {
		hist[widx] = runtime;
		sum += hist[widx];
		if (hist[widx] > max)
			max = hist[widx];
	}

	p->ravg.sum = 0;

	if (walt_window_stats_policy == WINDOW_STATS_RECENT) {
		demand = runtime;
	} else if (walt_window_stats_policy == WINDOW_STATS_MAX) {
		demand = max;
	} else {
		avg = div64_u64(sum, walt_ravg_hist_size);
		if (walt_window_stats_policy == WINDOW_STATS_AVG)
			demand = avg;
		else
			demand = max(avg, runtime);
	}

	/*
	 * A throttled deadline sched class task gets dequeued without
	 * changing p->on_rq. Since the dequeue decrements hmp stats
	 * avoid decrementing it here again.
	 */
	if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
						!p->dl.dl_throttled))
		fixup_cumulative_runnable_avg(rq, p, demand);

	p->ravg.demand = demand;

done:
	trace_walt_update_history(rq, p, runtime, samples, event);
}
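/*
 * Worked example of the demand policies above (illustrative numbers): suppose
 * that after a new 10ms sample is pushed the history reads {10, 8, 6, 4, 2} ms
 * (newest first). WINDOW_STATS_RECENT then yields 10ms, WINDOW_STATS_MAX
 * yields 10ms, WINDOW_STATS_AVG yields the five-sample average of 6ms, and the
 * default WINDOW_STATS_MAX_RECENT_AVG yields max(average, recent) = 10ms.
 */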
static void add_to_task_demand(struct rq *rq, struct task_struct *p,
				u64 delta)
{
	delta = scale_exec_time(delta, rq);
	p->ravg.sum += delta;
	if (unlikely(p->ravg.sum > walt_ravg_window))
		p->ravg.sum = walt_ravg_window;
}
/*
 * Account cpu demand of task and/or update task's cpu demand history
 *
 * ms = p->ravg.mark_start;
 * wc = wallclock
 * ws = rq->window_start
 *
 * Three possibilities:
 *
 *	a) Task event is contained within one window.
 *		window_start < mark_start < wallclock
 *
 *		ws   ms  wc
 *		|    |   |
 *		V    V   V
 *		|---------------|
 *
 *	In this case, p->ravg.sum is updated *iff* event is appropriate
 *	(ex: event == PUT_PREV_TASK)
 *
 *	b) Task event spans two windows.
 *		mark_start < window_start < wallclock
 *
 *		ms   ws   wc
 *		|    |    |
 *		V    V    V
 *		-----|-------------------
 *
 *	In this case, p->ravg.sum is updated with (ws - ms) *iff* event
 *	is appropriate, then a new window sample is recorded followed
 *	by p->ravg.sum being set to (wc - ws) *iff* event is appropriate.
 *
 *	c) Task event spans more than two windows.
 *
 *		ms ws_tmp			   ws  wc
 *		|  |				   |   |
 *		V  V				   V   V
 *		---|-------|-------|-------|-------|------
 *		   |				   |
 *		   |<------ nr_full_windows ------>|
 *
 *	In this case, p->ravg.sum is updated with (ws_tmp - ms) first *iff*
 *	event is appropriate, window sample of p->ravg.sum is recorded,
 *	'nr_full_window' samples of window_size are also recorded *iff*
 *	event is appropriate and finally p->ravg.sum is set to (wc - ws)
 *	*iff* event is appropriate.
 *
 * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
 * depends on it!
 */
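/*
 * Numeric illustration of case (c), assuming the default 20ms window: if
 * mark_start lies 50ms before wallclock and the current window began 5ms ago,
 * then nr_full_windows = 2 and ws_tmp sits 5ms after mark_start. The 5ms up
 * to ws_tmp tops up p->ravg.sum and is recorded as one history sample, two
 * full 20ms samples are recorded for the elapsed windows, and the final 5ms
 * (wc - ws) seeds p->ravg.sum for the newly started window; each step happens
 * only if the event is one that accounts busy time.
 */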
static void update_task_demand(struct task_struct *p, struct rq *rq,
	     int event, u64 wallclock)
{
	u64 mark_start = p->ravg.mark_start;
	u64 delta, window_start = rq->window_start;
	int new_window, nr_full_windows;
	u32 window_size = walt_ravg_window;

	new_window = mark_start < window_start;
	if (!account_busy_for_task_demand(p, event)) {
		if (new_window)
			/* If the time isn't being accounted as busy time,
			 * and a new window started, only the previous window
			 * need be closed out with the pre-existing demand.
			 * Multiple windows may have elapsed, but since empty
			 * windows are dropped, it is not necessary to account
			 * those. */
			update_history(rq, p, p->ravg.sum, 1, event);
		return;
	}

	if (!new_window) {
		/* The simple case - busy time contained within the existing
		 * window. */
		add_to_task_demand(rq, p, wallclock - mark_start);
		return;
	}

	/* Busy time spans at least two windows. Temporarily rewind
	 * window_start to first window boundary after mark_start. */
	delta = window_start - mark_start;
	nr_full_windows = div64_u64(delta, window_size);
	window_start -= (u64)nr_full_windows * (u64)window_size;

	/* Process (window_start - mark_start) first */
	add_to_task_demand(rq, p, window_start - mark_start);

	/* Push new sample(s) into task's demand history */
	update_history(rq, p, p->ravg.sum, 1, event);
	if (nr_full_windows)
		update_history(rq, p, scale_exec_time(window_size, rq),
			       nr_full_windows, event);

	/* Roll window_start back to current to process any remainder
	 * in current window. */
	window_start += (u64)nr_full_windows * (u64)window_size;

	/* Process (wallclock - window_start) next */
	mark_start = window_start;
	add_to_task_demand(rq, p, wallclock - mark_start);
}
/* Reflect task activity on its demand and cpu's busy time statistics */
void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
	     int event, u64 wallclock, u64 irqtime)
{
	if (walt_disabled || !rq->window_start)
		return;

	lockdep_assert_held(&rq->lock);

	update_window_start(rq, wallclock);

	if (!p->ravg.mark_start)
		goto done;

	update_task_demand(p, rq, event, wallclock);
	update_cpu_busy_time(p, rq, event, wallclock, irqtime);

done:
	trace_walt_update_task_ravg(p, rq, event, wallclock, irqtime);

	p->ravg.mark_start = wallclock;
}
unsigned long __weak arch_get_cpu_efficiency(int cpu)
{
	return SCHED_LOAD_SCALE;
}

void walt_init_cpu_efficiency(void)
{
	int i, efficiency;
	unsigned int max = 0, min = UINT_MAX;

	for_each_possible_cpu(i) {
		efficiency = arch_get_cpu_efficiency(i);
		cpu_rq(i)->efficiency = efficiency;

		if (efficiency > max)
			max = efficiency;
		if (efficiency < min)
			min = efficiency;
	}

	if (max)
		max_possible_efficiency = max;

	if (min)
		min_possible_efficiency = min;
}
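/*
 * Note: with the __weak arch_get_cpu_efficiency() above, every CPU reports
 * SCHED_LOAD_SCALE, so max_possible_efficiency == min_possible_efficiency ==
 * 1024 and the efficiency terms in scale_exec_time(), compute_capacity() and
 * compute_load_scale_factor() all reduce to no-ops unless the architecture
 * overrides the weak hook with real per-cpu efficiency values.
 */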
static void reset_task_stats(struct task_struct *p)
{
	u32 sum = 0;

	if (exiting_task(p))
		sum = EXITING_TASK_MARKER;

	memset(&p->ravg, 0, sizeof(struct ravg));
	/* Retain EXITING_TASK marker */
	p->ravg.sum_history[0] = sum;
}
void walt_mark_task_starting(struct task_struct *p)
{
	u64 wallclock;
	struct rq *rq = task_rq(p);

	if (!rq->window_start) {
		reset_task_stats(p);
		return;
	}

	wallclock = walt_ktime_clock();
	p->ravg.mark_start = wallclock;
}
void walt_set_window_start(struct rq *rq)
{
	int cpu = cpu_of(rq);
	struct rq *sync_rq = cpu_rq(sync_cpu);

	if (rq->window_start)
		return;

	if (cpu == sync_cpu) {
		rq->window_start = walt_ktime_clock();
	} else {
		raw_spin_unlock(&rq->lock);
		double_rq_lock(rq, sync_rq);
		rq->window_start = cpu_rq(sync_cpu)->window_start;
		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
		raw_spin_unlock(&sync_rq->lock);
	}

	rq->curr->ravg.mark_start = rq->window_start;
}
void walt_migrate_sync_cpu(int cpu)
{
	if (cpu == sync_cpu)
		sync_cpu = smp_processor_id();
}
void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
{
	struct rq *src_rq = task_rq(p);
	struct rq *dest_rq = cpu_rq(new_cpu);
	u64 wallclock;

	if (!p->on_rq && p->state != TASK_WAKING)
		return;

	if (exiting_task(p)) {
		return;
	}

	if (p->state == TASK_WAKING)
		double_rq_lock(src_rq, dest_rq);

	wallclock = walt_ktime_clock();

	walt_update_task_ravg(task_rq(p)->curr, task_rq(p),
			TASK_UPDATE, wallclock, 0);
	walt_update_task_ravg(dest_rq->curr, dest_rq,
			TASK_UPDATE, wallclock, 0);

	walt_update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);

	if (p->ravg.curr_window) {
		src_rq->curr_runnable_sum -= p->ravg.curr_window;
		dest_rq->curr_runnable_sum += p->ravg.curr_window;
	}

	if (p->ravg.prev_window) {
		src_rq->prev_runnable_sum -= p->ravg.prev_window;
		dest_rq->prev_runnable_sum += p->ravg.prev_window;
	}

	if ((s64)src_rq->prev_runnable_sum < 0) {
		src_rq->prev_runnable_sum = 0;
		WARN_ON(1);
	}
	if ((s64)src_rq->curr_runnable_sum < 0) {
		src_rq->curr_runnable_sum = 0;
		WARN_ON(1);
	}

	trace_walt_migration_update_sum(src_rq, p);
	trace_walt_migration_update_sum(dest_rq, p);

	if (p->state == TASK_WAKING)
		double_rq_unlock(src_rq, dest_rq);
}
/* Keep track of max/min capacity possible across CPUs "currently" */
static void __update_min_max_capacity(void)
{
	int i;
	int max = 0, min = INT_MAX;

	for_each_online_cpu(i) {
		if (cpu_rq(i)->capacity > max)
			max = cpu_rq(i)->capacity;
		if (cpu_rq(i)->capacity < min)
			min = cpu_rq(i)->capacity;
	}

	max_capacity = max;
	min_capacity = min;
}

static void update_min_max_capacity(void)
{
	unsigned long flags;
	int i;

	local_irq_save(flags);
	for_each_possible_cpu(i)
		raw_spin_lock(&cpu_rq(i)->lock);

	__update_min_max_capacity();

	for_each_possible_cpu(i)
		raw_spin_unlock(&cpu_rq(i)->lock);
	local_irq_restore(flags);
}
/*
 * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
 * least efficient cpu gets capacity of 1024
 */
static unsigned long capacity_scale_cpu_efficiency(int cpu)
{
	return (1024 * cpu_rq(cpu)->efficiency) / min_possible_efficiency;
}

/*
 * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
 * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
 */
static unsigned long capacity_scale_cpu_freq(int cpu)
{
	return (1024 * cpu_rq(cpu)->max_freq) / min_max_freq;
}

/*
 * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
 * that "most" efficient cpu gets a load_scale_factor of 1
 */
static unsigned long load_scale_cpu_efficiency(int cpu)
{
	return DIV_ROUND_UP(1024 * max_possible_efficiency,
			    cpu_rq(cpu)->efficiency);
}

/*
 * Return load_scale_factor of a cpu in reference to cpu with best max_freq
 * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
 * of 1.
 */
static unsigned long load_scale_cpu_freq(int cpu)
{
	return DIV_ROUND_UP(1024 * max_possible_freq, cpu_rq(cpu)->max_freq);
}
static int compute_capacity(int cpu)
{
	int capacity = 1024;

	capacity *= capacity_scale_cpu_efficiency(cpu);
	capacity >>= 10;

	capacity *= capacity_scale_cpu_freq(cpu);
	capacity >>= 10;

	return capacity;
}

static int compute_load_scale_factor(int cpu)
{
	int load_scale = 1024;

	/*
	 * load_scale_factor accounts for the fact that task load is in
	 * reference to the "best" performing cpu. A task's load will need to
	 * be scaled (up) by this factor to determine its suitability to be
	 * placed on a (little) cpu.
	 */
	load_scale *= load_scale_cpu_efficiency(cpu);
	load_scale >>= 10;

	load_scale *= load_scale_cpu_freq(cpu);
	load_scale >>= 10;

	return load_scale;
}
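/*
 * Illustrative example (hypothetical CPUs): with equal efficiencies and a
 * little cluster whose max_freq is 1GHz next to a big cluster at 2GHz,
 * min_max_freq is 1GHz and max_possible_freq is 2GHz, so compute_capacity()
 * yields 1024 for a little cpu and 2048 for a big cpu, while
 * compute_load_scale_factor() yields 2048 for a little cpu and 1024 for a
 * big cpu; task demand is scaled up by this factor when judging fit on the
 * slower cpus.
 */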
static int cpufreq_notifier_policy(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
	int i, update_max = 0;
	u64 highest_mpc = 0, highest_mplsf = 0;
	const struct cpumask *cpus = policy->related_cpus;
	unsigned int orig_min_max_freq = min_max_freq;
	unsigned int orig_max_possible_freq = max_possible_freq;
	/* Initialized to policy->max in case policy->related_cpus is empty! */
	unsigned int orig_max_freq = policy->max;

	if (val != CPUFREQ_NOTIFY && val != CPUFREQ_REMOVE_POLICY &&
						val != CPUFREQ_CREATE_POLICY)
		return 0;

	if (val == CPUFREQ_REMOVE_POLICY || val == CPUFREQ_CREATE_POLICY) {
		update_min_max_capacity();
		return 0;
	}

	for_each_cpu(i, policy->related_cpus) {
		cpumask_copy(&cpu_rq(i)->freq_domain_cpumask,
			     policy->related_cpus);
		orig_max_freq = cpu_rq(i)->max_freq;
		cpu_rq(i)->min_freq = policy->min;
		cpu_rq(i)->max_freq = policy->max;
		cpu_rq(i)->cur_freq = policy->cur;
		cpu_rq(i)->max_possible_freq = policy->cpuinfo.max_freq;
	}

	max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
	if (min_max_freq == 1)
		min_max_freq = UINT_MAX;
	min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq);
	BUG_ON(!min_max_freq);
	BUG_ON(!policy->max);

	/* Changes to policy other than max_freq don't require any updates */
	if (orig_max_freq == policy->max)
		return 0;

	/*
	 * A changed min_max_freq or max_possible_freq (possible during bootup)
	 * needs to trigger re-computation of load_scale_factor and capacity
	 * for all possible cpus (even those offline). It also needs to trigger
	 * re-computation of nr_big_task count on all online cpus.
	 *
	 * A changed rq->max_freq, on the other hand, needs to trigger
	 * re-computation of load_scale_factor and capacity for just the
	 * cluster of cpus involved. Since the small task definition depends on
	 * max_load_scale_factor, a changed load_scale_factor of one cluster
	 * could influence classification of tasks in another cluster. Hence a
	 * changed rq->max_freq will need to trigger re-computation of
	 * nr_big_task count on all online cpus.
	 *
	 * While it should be sufficient for nr_big_tasks to be
	 * re-computed for only online cpus, we have inadequate context
	 * information here (in policy notifier) with regard to hotplug-safety
	 * context in which notification is issued. As a result, we can't use
	 * get_online_cpus() here, as it can lead to deadlock. Until cpufreq is
	 * fixed up to issue notification always in hotplug-safe context,
	 * re-compute nr_big_task for all possible cpus.
	 */

	if (orig_min_max_freq != min_max_freq ||
		orig_max_possible_freq != max_possible_freq) {
			cpus = cpu_possible_mask;
			update_max = 1;
	}

	/*
	 * Changed load_scale_factor can trigger reclassification of tasks as
	 * big or small. Make this change "atomic" so that tasks are accounted
	 * properly due to changed load_scale_factor
	 */
	for_each_cpu(i, cpus) {
		struct rq *rq = cpu_rq(i);

		rq->capacity = compute_capacity(i);
		rq->load_scale_factor = compute_load_scale_factor(i);

		if (update_max) {
			u64 mpc, mplsf;

			mpc = div_u64(((u64) rq->capacity) *
				rq->max_possible_freq, rq->max_freq);
			rq->max_possible_capacity = (int) mpc;

			mplsf = div_u64(((u64) rq->load_scale_factor) *
				rq->max_possible_freq, rq->max_freq);

			if (mpc > highest_mpc) {
				highest_mpc = mpc;
				cpumask_clear(&mpc_mask);
				cpumask_set_cpu(i, &mpc_mask);
			} else if (mpc == highest_mpc) {
				cpumask_set_cpu(i, &mpc_mask);
			}

			if (mplsf > highest_mplsf)
				highest_mplsf = mplsf;
		}
	}

	if (update_max) {
		max_possible_capacity = highest_mpc;
		max_load_scale_factor = highest_mplsf;
	}

	__update_min_max_capacity();

	return 0;
}
static int cpufreq_notifier_trans(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
	unsigned int cpu = freq->cpu, new_freq = freq->new;
	unsigned long flags;
	int i;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;

	BUG_ON(!new_freq);

	if (cpu_rq(cpu)->cur_freq == new_freq)
		return 0;

	for_each_cpu(i, &cpu_rq(cpu)->freq_domain_cpumask) {
		struct rq *rq = cpu_rq(i);

		raw_spin_lock_irqsave(&rq->lock, flags);
		walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
				      walt_ktime_clock(), 0);
		rq->cur_freq = new_freq;
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

	return 0;
}
static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_notifier_policy
};

static struct notifier_block notifier_trans_block = {
	.notifier_call = cpufreq_notifier_trans
};

static int register_sched_callback(void)
{
	int ret;

	ret = cpufreq_register_notifier(&notifier_policy_block,
						CPUFREQ_POLICY_NOTIFIER);

	if (!ret)
		ret = cpufreq_register_notifier(&notifier_trans_block,
						CPUFREQ_TRANSITION_NOTIFIER);

	return 0;
}

/*
 * cpufreq callbacks can be registered at core_initcall or later.
 * Any registration done prior to that is "forgotten" by cpufreq. See
 * initialization of variable init_cpufreq_transition_notifier_list_called
 * for further information.
 */
core_initcall(register_sched_callback);
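/*
 * New tasks start with a seeded demand rather than zero: with the defaults
 * above (sysctl_sched_walt_init_task_load_pct = 15 and a 20ms
 * walt_ravg_window), walt_init_new_task_load() below gives a new task a
 * demand and demand history of 3ms, unless the forking task requested a
 * different percentage via its init_load_pct.
 */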
void walt_init_new_task_load(struct task_struct *p)
{
	int i;
	u32 init_load_windows =
			div64_u64((u64)sysctl_sched_walt_init_task_load_pct *
				  (u64)walt_ravg_window, 100);
	u32 init_load_pct = current->init_load_pct;

	p->init_load_pct = 0;
	memset(&p->ravg, 0, sizeof(struct ravg));

	if (init_load_pct) {
		init_load_windows = div64_u64((u64)init_load_pct *
					      (u64)walt_ravg_window, 100);
	}

	p->ravg.demand = init_load_windows;
	for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
		p->ravg.sum_history[i] = init_load_windows;
}