#include <linux/cpufreq.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and
 * racing with irq/vtime_account on this CPU. We would either get old
 * or new value with a side effect of accounting a slice of irq time to wrong
 * task when irq is in progress while we read rq->clock. That is a worthy
 * compromise in place of having locks on each irq in account_system_time.
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
        sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
        sched_clock_irqtime = 0;
}

#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */
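
/*
 * On 32-bit, reads of the u64 per-cpu counters above can tear, so the
 * read side pairs with this seqcount (irq_time_write_begin()/end() on
 * the write side; see the irq time accessors in kernel/sched/sched.h).
 * On 64-bit the loads are atomic and no seqcount is needed.
 */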

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
        unsigned long flags;
        s64 delta;
        int cpu;

        if (!sched_clock_irqtime)
                return;

        local_irq_save(flags);

        cpu = smp_processor_id();
        delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
        __this_cpu_add(irq_start_time, delta);

        irq_time_write_begin();
        /*
         * We do not account for softirq time from ksoftirqd here.
         * We want to continue accounting softirq time to the ksoftirqd
         * thread in that case, so as not to confuse the scheduler with
         * a special task that does not consume any time, but still
         * wants to run.
         */
        if (hardirq_count())
                __this_cpu_add(cpu_hardirq_time, delta);
        else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
                __this_cpu_add(cpu_softirq_time, delta);

        irq_time_write_end();
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
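
/*
 * The two helpers below check whether the per-cpu irq time accumulated
 * in irqtime_account_irq() has advanced past what is already folded
 * into kcpustat, i.e. whether a pending hardirq/softirq slice still
 * needs to be accounted by the tick.
 */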
static int irqtime_account_hi_update(void)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        unsigned long flags;
        u64 latest_ns;
        int ret = 0;

        local_irq_save(flags);
        latest_ns = this_cpu_read(cpu_hardirq_time);
        if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
                ret = 1;
        local_irq_restore(flags);
        return ret;
}

static int irqtime_account_si_update(void)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        unsigned long flags;
        u64 latest_ns;
        int ret = 0;

        local_irq_save(flags);
        latest_ns = this_cpu_read(cpu_softirq_time);
        if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
                ret = 1;
        local_irq_restore(flags);
        return ret;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime     (0)

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
                                            u64 tmp)
{
        /*
         * Since all updates are sure to touch the root cgroup, we
         * get ourselves ahead and touch it first. If the root cgroup
         * is the only cgroup, then nothing else should be necessary.
         */
        __get_cpu_var(kernel_cpustat).cpustat[index] += tmp;

        cpuacct_account_field(p, index, tmp);
}

/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
                       cputime_t cputime_scaled)
{
        int index;

        /* Add user time to process. */
        p->utime += cputime;
        p->utimescaled += cputime_scaled;
        account_group_user_time(p, cputime);

        index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

        /* Add user time to cpustat. */
        task_group_account_field(p, index, (__force u64) cputime);

        /* Account for user time used */
        acct_account_cputime(p);

#ifdef CONFIG_CPU_FREQ_STAT
        /* Account power usage for user time */
        acct_update_power(p, cputime);
#endif
}

/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in the virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
                               cputime_t cputime_scaled)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        /* Add guest time to process. */
        p->utime += cputime;
        p->utimescaled += cputime_scaled;
        account_group_user_time(p, cputime);
        p->gtime += cputime;

        /* Add guest time to cpustat. */
        if (TASK_NICE(p) > 0) {
                cpustat[CPUTIME_NICE] += (__force u64) cputime;
                cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
        } else {
                cpustat[CPUTIME_USER] += (__force u64) cputime;
                cpustat[CPUTIME_GUEST] += (__force u64) cputime;
        }
}
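
/*
 * Note that guest time is charged to both the user/nice and the
 * guest/guest_nice fields above, so the guest columns in /proc/stat
 * are a subset of the user columns rather than a separate bucket.
 */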

/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: index of the cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
                           cputime_t cputime_scaled, int index)
{
        /* Add system time to process. */
        p->stime += cputime;
        p->stimescaled += cputime_scaled;
        account_group_system_time(p, cputime);

        /* Add system time to cpustat. */
        task_group_account_field(p, index, (__force u64) cputime);

        /* Account for system time used */
        acct_account_cputime(p);

#ifdef CONFIG_CPU_FREQ_STAT
        /* Account power usage for system time */
        acct_update_power(p, cputime);
#endif
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
                         cputime_t cputime, cputime_t cputime_scaled)
{
        int index;

        if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
                account_guest_time(p, cputime, cputime_scaled);
                return;
        }

        if (hardirq_count() - hardirq_offset)
                index = CPUTIME_IRQ;
        else if (in_serving_softirq())
                index = CPUTIME_SOFTIRQ;
        else
                index = CPUTIME_SYSTEM;

        __account_system_time(p, cputime, cputime_scaled, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}

/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        struct rq *rq = this_rq();

        if (atomic_read(&rq->nr_iowait) > 0)
                cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
        else
                cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}

static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
        if (static_key_false(&paravirt_steal_enabled)) {
                u64 steal, st = 0;

                steal = paravirt_steal_clock(smp_processor_id());
                steal -= this_rq()->prev_steal_time;

                st = steal_ticks(steal);
                this_rq()->prev_steal_time += st * TICK_NSEC;

                account_steal_time(st);
                return st;
        }
#endif
        return false;
}
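
/*
 * steal_ticks() only converts whole ticks of the raw steal-clock
 * delta; prev_steal_time advances by st * TICK_NSEC, so any sub-tick
 * remainder stays pending and is picked up on a later tick.
 */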

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
        struct signal_struct *sig = tsk->signal;
        cputime_t utime, stime;
        struct task_struct *t;

        times->utime = sig->utime;
        times->stime = sig->stime;
        times->sum_exec_runtime = sig->sum_sched_runtime;

        rcu_read_lock();
        /* make sure we can trust tsk->thread_group list */
        if (!likely(pid_alive(tsk)))
                goto out;

        t = tsk;
        do {
                task_cputime(t, &utime, &stime);
                times->utime += utime;
                times->stime += stime;
                times->sum_exec_runtime += task_sched_runtime(t);
        } while_each_thread(tsk, t);
out:
        rcu_read_unlock();
}
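
/*
 * The rcu_read_lock()/pid_alive() pair above guards the
 * while_each_thread() walk: once the group leader has exited,
 * tsk->thread_group can no longer be trusted.
 */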

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 * @ticks: number of elapsed ticks to account
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * Check for hardirq is done both for system and user time as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq
 * softirq as those do not count in task exec_runtime any more.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                         struct rq *rq, int ticks)
{
        cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
        u64 cputime = (__force u64) cputime_one_jiffy;
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        if (steal_account_process_tick())
                return;

        cputime *= ticks;
        scaled *= ticks;

        if (irqtime_account_hi_update()) {
                cpustat[CPUTIME_IRQ] += cputime;
        } else if (irqtime_account_si_update()) {
                cpustat[CPUTIME_SOFTIRQ] += cputime;
        } else if (this_cpu_ksoftirqd() == p) {
                /*
                 * ksoftirqd time does not get accounted in cpu_softirq_time.
                 * So, we have to handle it separately here.
                 * Also, p->stime needs to be updated for ksoftirqd.
                 */
                __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
        } else if (user_tick) {
                account_user_time(p, cputime, scaled);
        } else if (p == rq->idle) {
                account_idle_time(cputime);
        } else if (p->flags & PF_VCPU) { /* System time or guest time */
                account_guest_time(p, cputime, scaled);
        } else {
                __account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
        }
}

static void irqtime_account_idle_ticks(int ticks)
{
        struct rq *rq = this_rq();

        irqtime_account_process_tick(current, 0, rq, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                                struct rq *rq, int nr_ticks) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
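
/*
 * With CONFIG_IRQ_TIME_ACCOUNTING off, sched_clock_irqtime is a
 * compile-time (0), so the "if (sched_clock_irqtime)" branches below
 * are compiled out along with calls to these empty stubs.
 */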

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_task_switch(struct task_struct *prev)
{
        if (!vtime_accounting_enabled())
                return;

        if (is_idle_task(prev))
                vtime_account_idle(prev);
        else
                vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        vtime_account_user(prev);
#endif
        arch_vtime_task_switch(prev);
}
#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */

/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * have other meaning of the idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_account_irq_enter(struct task_struct *tsk)
{
        if (!vtime_accounting_enabled())
                return;

        if (!in_interrupt()) {
                /*
                 * If we interrupted user, context_tracking_in_user()
                 * is 1 because the context tracking doesn't hook
                 * on irq entry/exit. This way we know if
                 * we need to flush user time on kernel entry.
                 */
                if (context_tracking_in_user()) {
                        vtime_account_user(tsk);
                        return;
                }

                if (is_idle_task(tsk)) {
                        vtime_account_idle(tsk);
                        return;
                }
        }
        vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        *ut = p->utime;
        *st = p->stime;
}

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);

        *ut = cputime.utime;
        *st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
        cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
        struct rq *rq = this_rq();

        if (vtime_accounting_enabled())
                return;

        if (sched_clock_irqtime) {
                irqtime_account_process_tick(p, user_tick, rq, 1);
                return;
        }

        if (steal_account_process_tick())
                return;

        if (user_tick)
                account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
        else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
                account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
                                    one_jiffy_scaled);
        else
                account_idle_time(cputime_one_jiffy);
}
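
/*
 * Note on the idle check above: at tick time we always run in hardirq
 * context, so irq_count() == HARDIRQ_OFFSET means the tick interrupted
 * the idle task proper; anything above that means the idle task was
 * itself inside a nested hardirq or softirq, which is accounted as
 * system time instead.
 */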

/*
 * Account multiple ticks of steal time.
 * @p: the process from which the cpu time has been stolen
 * @ticks: number of stolen ticks
 */
void account_steal_ticks(unsigned long ticks)
{
        account_steal_time(jiffies_to_cputime(ticks));
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of ticks the cpu was idle
 */
void account_idle_ticks(unsigned long ticks)
{
        if (sched_clock_irqtime) {
                irqtime_account_idle_ticks(ticks);
                return;
        }

        account_idle_time(jiffies_to_cputime(ticks));
}

/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow
 * by losing precision when the numbers are big.
 */
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
        u64 scaled;

        for (;;) {
                /* Make sure "rtime" is the bigger of stime/rtime */
                if (stime > rtime) {
                        u64 tmp = rtime; rtime = stime; stime = tmp;
                }

                /* Make sure 'total' fits in 32 bits */
                if (total >> 32)
                        goto drop_precision;

                /* Does rtime (and thus stime) fit in 32 bits? */
                if (!(rtime >> 32))
                        break;

                /* Can we just balance rtime/stime rather than dropping bits? */
                if (stime >> 31)
                        goto drop_precision;

                /* We can grow stime and shrink rtime and try to make them both fit */
                stime <<= 1;
                rtime >>= 1;
                continue;

drop_precision:
                /* We drop from rtime, it has more bits than stime */
                rtime >>= 1;
                total >>= 1;
        }

        /*
         * Make sure gcc understands that this is a 32x32->64 multiply,
         * followed by a 64/32->64 divide.
         */
        scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
        return (__force cputime_t) scaled;
}
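
/*
 * Two invariants make the loop above work: the balancing step
 * (stime <<= 1, rtime >>= 1) keeps the product stime * rtime unchanged
 * up to the low bit shifted out of rtime, and the drop_precision step
 * halves rtime and total together, which keeps the quotient
 * (stime * rtime) / total unchanged up to truncation. The final
 * 32x32->64 multiply and divide therefore still approximate the
 * exact ratio.
 */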

/*
 * Adjust tick based cputime random precision against scheduler
 * runtime accounting.
 */
static void cputime_adjust(struct task_cputime *curr,
                           struct cputime *prev,
                           cputime_t *ut, cputime_t *st)
{
        cputime_t rtime, stime, utime;

        if (vtime_accounting_enabled()) {
                *ut = curr->utime;
                *st = curr->stime;
                return;
        }

        /*
         * Tick based cputime accounting depends on the random scheduling
         * timeslices of a task being interrupted or not by the timer.
         * Depending on these circumstances, the number of these interrupts
         * may be over or under-optimistic, matching the real user and system
         * cputime with a variable precision.
         *
         * Fix this by scaling these tick based values against the total
         * runtime accounted by the CFS scheduler.
         */
        rtime = nsecs_to_cputime(curr->sum_exec_runtime);

        /*
         * Update userspace visible utime/stime values only if actual execution
         * time is bigger than already exported. Note that it can happen that we
         * provided bigger values due to scaling inaccuracy on big numbers.
         */
        if (prev->stime + prev->utime >= rtime)
                goto out;

        stime = curr->stime;
        utime = curr->utime;

        if (utime == 0) {
                stime = rtime;
        } else if (stime == 0) {
                utime = rtime;
        } else {
                cputime_t total = stime + utime;

                stime = scale_stime((__force u64)stime,
                                    (__force u64)rtime, (__force u64)total);
                utime = rtime - stime;
        }

        /*
         * If the tick based count grows faster than the scheduler one,
         * the result of the scaling may go backward.
         * Let's enforce monotonicity.
         */
        prev->stime = max(prev->stime, stime);
        prev->utime = max(prev->utime, utime);

out:
        *ut = prev->utime;
        *st = prev->stime;
}

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime = {
                .sum_exec_runtime = p->se.sum_exec_runtime,
        };

        task_cputime(p, &cputime.utime, &cputime.stime);
        cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}

/*
 * Must be called with siglock held.
 */
void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);
        cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
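
/*
 * The two adjusted getters above keep separate prev_cputime state (per
 * task vs per signal struct), so the monotonicity enforced by
 * cputime_adjust() holds independently for each view.
 */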
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static unsigned long long vtime_delta(struct task_struct *tsk)
{
        unsigned long long clock;

        clock = local_clock();
        if (clock < tsk->vtime_snap)
                return 0;

        return clock - tsk->vtime_snap;
}

static cputime_t get_vtime_delta(struct task_struct *tsk)
{
        unsigned long long delta = vtime_delta(tsk);

        WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
        tsk->vtime_snap += delta;

        /* CHECKME: always safe to convert nsecs to cputime? */
        return nsecs_to_cputime(delta);
}

static void __vtime_account_system(struct task_struct *tsk)
{
        cputime_t delta_cpu = get_vtime_delta(tsk);

        account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}

void vtime_account_system(struct task_struct *tsk)
{
        if (!vtime_accounting_enabled())
                return;

        write_seqlock(&tsk->vtime_seqlock);
        __vtime_account_system(tsk);
        write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_irq_exit(struct task_struct *tsk)
{
        if (!vtime_accounting_enabled())
                return;

        write_seqlock(&tsk->vtime_seqlock);
        if (context_tracking_in_user())
                tsk->vtime_snap_whence = VTIME_USER;
        __vtime_account_system(tsk);
        write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_user(struct task_struct *tsk)
{
        cputime_t delta_cpu;

        if (!vtime_accounting_enabled())
                return;

        delta_cpu = get_vtime_delta(tsk);

        write_seqlock(&tsk->vtime_seqlock);
        tsk->vtime_snap_whence = VTIME_SYS;
        account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
        write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_user_enter(struct task_struct *tsk)
{
        if (!vtime_accounting_enabled())
                return;

        write_seqlock(&tsk->vtime_seqlock);
        tsk->vtime_snap_whence = VTIME_USER;
        __vtime_account_system(tsk);
        write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_guest_enter(struct task_struct *tsk)
{
        write_seqlock(&tsk->vtime_seqlock);
        __vtime_account_system(tsk);
        current->flags |= PF_VCPU;
        write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_guest_exit(struct task_struct *tsk)
{
        write_seqlock(&tsk->vtime_seqlock);
        __vtime_account_system(tsk);
        current->flags &= ~PF_VCPU;
        write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_idle(struct task_struct *tsk)
{
        cputime_t delta_cpu = get_vtime_delta(tsk);

        account_idle_time(delta_cpu);
}

bool vtime_accounting_enabled(void)
{
        return context_tracking_active();
}

void arch_vtime_task_switch(struct task_struct *prev)
{
        write_seqlock(&prev->vtime_seqlock);
        prev->vtime_snap_whence = VTIME_SLEEPING;
        write_sequnlock(&prev->vtime_seqlock);

        write_seqlock(&current->vtime_seqlock);
        current->vtime_snap_whence = VTIME_SYS;
        current->vtime_snap = sched_clock_cpu(smp_processor_id());
        write_sequnlock(&current->vtime_seqlock);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
        unsigned long flags;

        write_seqlock_irqsave(&t->vtime_seqlock, flags);
        t->vtime_snap_whence = VTIME_SYS;
        t->vtime_snap = sched_clock_cpu(cpu);
        write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
}

cputime_t task_gtime(struct task_struct *t)
{
        unsigned int seq;
        cputime_t gtime;

        do {
                seq = read_seqbegin(&t->vtime_seqlock);

                gtime = t->gtime;
                if (t->flags & PF_VCPU)
                        gtime += vtime_delta(t);

        } while (read_seqretry(&t->vtime_seqlock, seq));

        return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
                   cputime_t *u_dst, cputime_t *s_dst,
                   cputime_t *u_src, cputime_t *s_src,
                   cputime_t *udelta, cputime_t *sdelta)
{
        unsigned int seq;
        unsigned long long delta;

        do {
                *udelta = 0;
                *sdelta = 0;

                seq = read_seqbegin(&t->vtime_seqlock);

                if (u_dst)
                        *u_dst = *u_src;
                if (s_dst)
                        *s_dst = *s_src;

                /* Task is sleeping, nothing to add */
                if (t->vtime_snap_whence == VTIME_SLEEPING ||
                    is_idle_task(t))
                        continue;

                delta = vtime_delta(t);

                /*
                 * Task runs either in user or kernel space, add pending nohz
                 * time to the right place.
                 */
                if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
                        *udelta = delta;
                } else {
                        if (t->vtime_snap_whence == VTIME_SYS)
                                *sdelta = delta;
                }
        } while (read_seqretry(&t->vtime_seqlock, seq));
}
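
/*
 * Note: the "continue" for a sleeping task above jumps straight to the
 * read_seqretry() check of the do-while loop, so the snapshot values
 * are returned with zero deltas rather than skipped.
 */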

void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
        cputime_t udelta, sdelta;

        fetch_task_cputime(t, utime, stime, &t->utime,
                           &t->stime, &udelta, &sdelta);
        if (utime)
                *utime += udelta;
        if (stime)
                *stime += sdelta;
}

void task_cputime_scaled(struct task_struct *t,
                         cputime_t *utimescaled, cputime_t *stimescaled)
{
        cputime_t udelta, sdelta;

        fetch_task_cputime(t, utimescaled, stimescaled,
                           &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
        if (utimescaled)
                *utimescaled += cputime_to_scaled(udelta);
        if (stimescaled)
                *stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */