Merge branch 'linux-linaro-lsk-v4.4' into linux-linaro-lsk-v4.4-android
[firefly-linux-kernel-4.4.55.git] kernel/sched/cputime.c
index 05de80b48586e9fa3241c708c7e6fd7c9b6fb24a..acde1d7c763ceeafdcb6cd0f009c95adccda6332 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/static_key.h>
 #include <linux/context_tracking.h>
 #include "sched.h"
+#include "walt.h"
 
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -49,6 +50,10 @@ void irqtime_account_irq(struct task_struct *curr)
        unsigned long flags;
        s64 delta;
        int cpu;
+#ifdef CONFIG_SCHED_WALT
+       u64 wallclock;
+       bool account = true;
+#endif
 
        if (!sched_clock_irqtime)
                return;
@@ -56,6 +61,9 @@ void irqtime_account_irq(struct task_struct *curr)
        local_irq_save(flags);
 
        cpu = smp_processor_id();
+#ifdef CONFIG_SCHED_WALT
+       wallclock = sched_clock_cpu(cpu);
+#endif
        delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
        __this_cpu_add(irq_start_time, delta);
 
@@ -70,8 +78,16 @@ void irqtime_account_irq(struct task_struct *curr)
                __this_cpu_add(cpu_hardirq_time, delta);
        else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
                __this_cpu_add(cpu_softirq_time, delta);
+#ifdef CONFIG_SCHED_WALT
+       else
+               account = false;
+#endif
 
        irq_time_write_end();
+#ifdef CONFIG_SCHED_WALT
+       if (account)
+               walt_account_irqtime(cpu, curr, delta, wallclock);
+#endif
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(irqtime_account_irq);
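These hunks snapshot the wallclock before the irq-time delta is computed, note inside the irq-time write section whether the delta was actually charged as hardirq or softirq time, and only call walt_account_irqtime() once irq_time_write_end() has run (interrupts stay disabled until local_irq_restore()). Below is a minimal userspace sketch of that defer-the-hook pattern; every helper in it is a made-up stand-in, not the real kernel API.

/*
 * Sketch only: stand-in stubs for sched_clock_cpu(), the irq-time write
 * section and walt_account_irqtime(), wired together the same way the
 * hunks above wire the real ones.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_clock = 1000;          /* sched_clock_cpu() stand-in       */
static uint64_t irq_start_time = 1000;
static uint64_t charged_irq_time;

static void write_section_begin(void) { }   /* irq-time write-section stand-in  */
static void write_section_end(void)   { }   /* irq_time_write_end() stand-in    */

static void walt_hook(uint64_t delta, uint64_t wallclock)  /* walt_account_irqtime() stand-in */
{
        printf("hook: delta=%llu at %llu\n",
               (unsigned long long)delta, (unsigned long long)wallclock);
}

static void account_irq(bool charged_as_irq)
{
        uint64_t wallclock = fake_clock;    /* snapshot taken before the delta   */
        uint64_t delta = fake_clock - irq_start_time;
        bool account = true;

        irq_start_time += delta;

        write_section_begin();
        if (charged_as_irq)
                charged_irq_time += delta;  /* hardirq/softirq classification    */
        else
                account = false;            /* delta not charged: skip the hook  */
        write_section_end();

        if (account)                        /* hook runs outside the write section */
                walt_hook(delta, wallclock);
}

int main(void)
{
        fake_clock = 1500;
        account_irq(true);                  /* charged: hook sees delta=500      */
        fake_clock = 1800;
        account_irq(false);                 /* not charged: hook is skipped      */
        return 0;
}

Deferring the call until after the write section keeps the seqcount-protected region short, while the account flag makes the "only when the delta was actually charged" condition explicit.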
@@ -259,21 +275,21 @@ static __always_inline bool steal_account_process_tick(void)
 #ifdef CONFIG_PARAVIRT
        if (static_key_false(&paravirt_steal_enabled)) {
                u64 steal;
-               cputime_t steal_ct;
+               unsigned long steal_jiffies;
 
                steal = paravirt_steal_clock(smp_processor_id());
                steal -= this_rq()->prev_steal_time;
 
                /*
-                * cputime_t may be less precise than nsecs (eg: if it's
-                * based on jiffies). Lets cast the result to cputime
+                * steal is in nsecs but our caller is expecting steal
+                * time in jiffies. Let's cast the result to jiffies
                 * granularity and account the rest on the next rounds.
                 */
-               steal_ct = nsecs_to_cputime(steal);
-               this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);
+               steal_jiffies = nsecs_to_jiffies(steal);
+               this_rq()->prev_steal_time += jiffies_to_nsecs(steal_jiffies);
 
-               account_steal_time(steal_ct);
-               return steal_ct;
+               account_steal_time(jiffies_to_cputime(steal_jiffies));
+               return steal_jiffies;
        }
 #endif
        return false;
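The reworded comment in this hunk is about granularity: the paravirt steal clock counts nanoseconds, but only whole jiffies can be accounted per tick, so prev_steal_time is advanced by exactly the amount that was accounted and the sub-jiffy remainder is picked up on a later tick. A small standalone sketch of that carry arithmetic, assuming HZ=250 (4 ms per jiffy) and made-up steal values:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_JIFFY 4000000ULL      /* assumes HZ=250, i.e. 4 ms per jiffy */

static uint64_t prev_steal_time;       /* nsecs already folded into jiffies   */

/* Mirrors the carry logic in steal_account_process_tick() above. */
static unsigned long account_steal(uint64_t steal_clock_now)
{
        uint64_t steal = steal_clock_now - prev_steal_time;
        unsigned long steal_jiffies = steal / NSEC_PER_JIFFY;  /* floor, like nsecs_to_jiffies() */

        /*
         * Only advance prev_steal_time by whole jiffies; the sub-jiffy
         * remainder stays pending and is accounted on a later tick.
         */
        prev_steal_time += steal_jiffies * NSEC_PER_JIFFY;
        return steal_jiffies;
}

int main(void)
{
        printf("%lu jiffies\n", account_steal(10500000ULL));   /* 10.5 ms -> 2 jiffies, 2.5 ms carried */
        printf("%lu jiffies\n", account_steal(12500000ULL));   /* +2 ms -> 4.5 ms pending -> 1 jiffy   */
        return 0;
}

Flooring to whole jiffies and carrying the remainder keeps long-run steal accounting exact, even though each individual tick can only report jiffy-sized chunks.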
@@ -600,19 +616,25 @@ static void cputime_adjust(struct task_cputime *curr,
        stime = curr->stime;
        utime = curr->utime;
 
-       if (utime == 0) {
-               stime = rtime;
+       /*
+        * If either stime or both stime and utime are 0, assume all runtime is
+        * userspace. Once a task gets some ticks, the monotonicity code at
+        * 'update' will ensure things converge to the observed ratio.
+        */
+       if (stime == 0) {
+               utime = rtime;
                goto update;
        }
 
-       if (stime == 0) {
-               utime = rtime;
+       if (utime == 0) {
+               stime = rtime;
                goto update;
        }
 
        stime = scale_stime((__force u64)stime, (__force u64)rtime,
                            (__force u64)(stime + utime));
 
+update:
        /*
         * Make sure stime doesn't go backwards; this preserves monotonicity
         * for utime because rtime is monotonic.
@@ -635,7 +657,6 @@ static void cputime_adjust(struct task_cputime *curr,
                stime = rtime - utime;
        }
 
-update:
        prev->stime = stime;
        prev->utime = utime;
 out:
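The last two hunks swap the order of the zero-time shortcuts and move the update: label above the clamping code, so even the stime == 0 / utime == 0 fast paths pass through the monotonicity checks before prev->stime and prev->utime are written back. A standalone sketch of the resulting split-and-clamp behaviour follows; it uses plain 64-bit multiplication where the kernel uses scale_stime() to avoid overflow, and prev_stime/prev_utime are stand-ins for the task's prev_cputime fields.

#include <stdint.h>
#include <stdio.h>

static uint64_t prev_stime, prev_utime;     /* stand-ins for prev->stime/utime  */

static void adjust(uint64_t stime, uint64_t utime, uint64_t rtime)
{
        if (stime == 0)                 /* no system ticks yet: all userspace    */
                utime = rtime;
        else if (utime == 0)            /* no user ticks yet: all system         */
                stime = rtime;
        else                            /* proportional split of rtime           */
                stime = rtime * stime / (stime + utime);

        /* Monotonicity: never report less system time than before ...          */
        if (stime < prev_stime)
                stime = prev_stime;
        utime = rtime - stime;
        /* ... and never less user time either.                                  */
        if (utime < prev_utime) {
                utime = prev_utime;
                stime = rtime - utime;
        }

        prev_stime = stime;
        prev_utime = utime;
        printf("stime=%llu utime=%llu\n",
               (unsigned long long)stime, (unsigned long long)utime);
}

int main(void)
{
        adjust(0, 0, 100);      /* no ticks yet: stime=0, utime=100              */
        adjust(30, 10, 400);    /* 3:1 tick ratio: stime=300, utime=100          */
        adjust(10, 30, 440);    /* ratio flips, but stime stays clamped at 300   */
        return 0;
}

The third call shows why the label was moved: with more user ticks than system ticks the raw split would shrink stime, but the clamp keeps the previously reported value and gives the excess to utime.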