4 * Kernel scheduler and related syscalls
6 * Copyright (C) 1991-2002 Linus Torvalds
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
13 * hybrid priority-list and round-robin design with
14 * an array-switch method of distributing timeslices
15 * and per-CPU runqueues. Cleanups and useful suggestions
16 * by Davide Libenzi, preemptible kernel bits by Robert Love.
17 * 2003-09-03 Interactivity tuning by Con Kolivas.
18 * 2004-04-02 Scheduler domains code by Nick Piggin
22 #include <linux/module.h>
23 #include <linux/nmi.h>
24 #include <linux/init.h>
25 #include <linux/uaccess.h>
26 #include <linux/highmem.h>
27 #include <linux/smp_lock.h>
28 #include <asm/mmu_context.h>
29 #include <linux/interrupt.h>
30 #include <linux/capability.h>
31 #include <linux/completion.h>
32 #include <linux/kernel_stat.h>
33 #include <linux/debug_locks.h>
34 #include <linux/security.h>
35 #include <linux/notifier.h>
36 #include <linux/profile.h>
37 #include <linux/freezer.h>
38 #include <linux/vmalloc.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/smp.h>
42 #include <linux/threads.h>
43 #include <linux/timer.h>
44 #include <linux/rcupdate.h>
45 #include <linux/cpu.h>
46 #include <linux/cpuset.h>
47 #include <linux/percpu.h>
48 #include <linux/kthread.h>
49 #include <linux/seq_file.h>
50 #include <linux/syscalls.h>
51 #include <linux/times.h>
52 #include <linux/tsacct_kern.h>
53 #include <linux/kprobes.h>
54 #include <linux/delayacct.h>
55 #include <linux/reciprocal_div.h>
56 #include <linux/unistd.h>
61 * Scheduler clock - returns current time in nanosec units.
62 * This is the default implementation.
63 * Architectures and sub-architectures can override this.
65 unsigned long long __attribute__((weak)) sched_clock(void)
67 return (unsigned long long)jiffies * (1000000000 / HZ);
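/*
 * Worked example (illustrative, not from the original source): with
 * HZ == 1000 each jiffy is 1000000000 / 1000 = 1,000,000 ns, so this
 * fallback clock only advances in 1 ms steps; with HZ == 250 it is
 * 4 ms per step. Architectures with a finer clocksource override
 * sched_clock() to get sub-tick resolution.
 */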
71 * Convert user-nice values [ -20 ... 0 ... 19 ]
72 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], and back.
75 #define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
76 #define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
77 #define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
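/*
 * Example expansions (illustrative; assumes the usual MAX_RT_PRIO of 100
 * and MAX_PRIO of 140):
 *   NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0) == 120, NICE_TO_PRIO(19) == 139
 *   PRIO_TO_NICE(120) == 0, and TASK_NICE(p) recovers the nice value
 *   from p->static_prio.
 */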
80 * 'User priority' is the nice value converted to something we
81 * can work with better when scaling various scheduler parameters;
82 * it's a [ 0 ... 39 ] range.
84 #define USER_PRIO(p) ((p)-MAX_RT_PRIO)
85 #define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
86 #define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
89 * Some helpers for converting nanosecond timing to jiffy resolution
91 #define NS_TO_JIFFIES(TIME) ((TIME) / (1000000000 / HZ))
92 #define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ))
94 #define NICE_0_LOAD SCHED_LOAD_SCALE
95 #define NICE_0_SHIFT SCHED_LOAD_SHIFT
98 * These are the 'tuning knobs' of the scheduler:
100 * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
101 * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
102 * Timeslices get refilled after they expire.
104 #define MIN_TIMESLICE max(5 * HZ / 1000, 1)
105 #define DEF_TIMESLICE (100 * HZ / 1000)
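/*
 * Illustrative values (not part of the original source): with HZ == 1000,
 * MIN_TIMESLICE is max(5, 1) == 5 jiffies (5 ms) and DEF_TIMESLICE is
 * 100 jiffies (100 ms); with HZ == 100, 5 * HZ / 1000 truncates to 0, so
 * MIN_TIMESLICE falls back to 1 jiffy (10 ms) while DEF_TIMESLICE is
 * 10 jiffies (still 100 ms).
 */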
109 * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
110 * Since cpu_power is a 'constant', we can use a reciprocal divide.
112 static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
114 return reciprocal_divide(load, sg->reciprocal_cpu_power);
118 * Each time a sched group cpu_power is changed,
119 * we must compute its reciprocal value
121 static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
123 sg->__cpu_power += val;
124 sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
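/*
 * Sketch of the reciprocal-divide trick (summary of lib/reciprocal_div.c;
 * see that file for the exact rounding): reciprocal_value(d) precomputes
 * roughly (2^32 + d - 1) / d, and reciprocal_divide(a, r) then evaluates
 * ((u64)a * r) >> 32, which approximates a / d with a multiply and a
 * shift instead of a divide. Recomputing the reciprocal here, on the
 * rare cpu_power updates, keeps sg_div_cpu_power() cheap on hot paths,
 * where it effectively returns load / sg->__cpu_power.
 */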
128 #define SCALE_PRIO(x, prio) \
129 max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
132 * static_prio_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
133 * to time slice values: [800ms ... 100ms ... 5ms]
135 static unsigned int static_prio_timeslice(int static_prio)
137 if (static_prio == NICE_TO_PRIO(19))
140 if (static_prio < NICE_TO_PRIO(0))
141 return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
143 return SCALE_PRIO(DEF_TIMESLICE, static_prio);
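/*
 * Worked examples (illustrative, values shown in ms for readability;
 * assumes the usual MAX_RT_PRIO == 100 and MAX_PRIO == 140, so
 * MAX_USER_PRIO / 2 == 20):
 *   nice -20 (static_prio 100): SCALE_PRIO(400, 100) = 400 * 40 / 20 = 800 ms
 *   nice   0 (static_prio 120): SCALE_PRIO(100, 120) = 100 * 20 / 20 = 100 ms
 *   nice  19 (static_prio 139): special-cased above; in the full source
 *                               that branch returns MIN_TIMESLICE (5 ms).
 */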
146 static inline int rt_policy(int policy)
148 if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
153 static inline int task_has_rt_policy(struct task_struct *p)
155 return rt_policy(p->policy);
159 * This is the priority-queue data structure of the RT scheduling class:
161 struct rt_prio_array {
162 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
163 struct list_head queue[MAX_RT_PRIO];
167 struct load_weight load;
168 u64 load_update_start, load_update_last;
169 unsigned long delta_fair, delta_exec, delta_stat;
172 /* CFS-related fields in a runqueue */
174 struct load_weight load;
175 unsigned long nr_running;
181 unsigned long wait_runtime_overruns, wait_runtime_underruns;
183 struct rb_root tasks_timeline;
184 struct rb_node *rb_leftmost;
185 struct rb_node *rb_load_balance_curr;
186 #ifdef CONFIG_FAIR_GROUP_SCHED
187 /* 'curr' points to currently running entity on this cfs_rq.
188 * It is set to NULL otherwise (i.e. when none are currently running).
190 struct sched_entity *curr;
191 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
193 /* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
194 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
195 * (like users, containers etc.)
197 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
198 * list is used during load balance.
200 struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
204 /* Real-Time classes' related field in a runqueue: */
206 struct rt_prio_array active;
207 int rt_load_balance_idx;
208 struct list_head *rt_load_balance_head, *rt_load_balance_curr;
212 * This is the main, per-CPU runqueue data structure.
214 * Locking rule: code that needs to lock multiple runqueues (such as
215 * the load balancing or the thread migration code) must acquire the
216 * locks in ascending &runqueue (runqueue address) order.
219 spinlock_t lock; /* runqueue lock */
222 * nr_running and cpu_load should be in the same cacheline because
223 * remote CPUs use both these fields when doing load calculation.
225 unsigned long nr_running;
226 #define CPU_LOAD_IDX_MAX 5
227 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
228 unsigned char idle_at_tick;
230 unsigned char in_nohz_recently;
232 struct load_stat ls; /* capture load from *all* tasks on this cpu */
233 unsigned long nr_load_updates;
237 #ifdef CONFIG_FAIR_GROUP_SCHED
238 struct list_head leaf_cfs_rq_list; /* list of leaf cfs_rq on this cpu */
243 * This is part of a global counter where only the total sum
244 * over all CPUs matters. A task can increase this counter on
245 * one CPU and if it got migrated afterwards it may decrease
246 * it on another CPU. Always updated under the runqueue lock:
248 unsigned long nr_uninterruptible;
250 struct task_struct *curr, *idle;
251 unsigned long next_balance;
252 struct mm_struct *prev_mm;
254 u64 clock, prev_clock_raw;
257 unsigned int clock_warps, clock_overflows;
258 unsigned int clock_unstable_events;
260 struct sched_class *load_balance_class;
265 struct sched_domain *sd;
267 /* For active balancing */
270 int cpu; /* cpu of this runqueue */
272 struct task_struct *migration_thread;
273 struct list_head migration_queue;
276 #ifdef CONFIG_SCHEDSTATS
278 struct sched_info rq_sched_info;
280 /* sys_sched_yield() stats */
281 unsigned long yld_exp_empty;
282 unsigned long yld_act_empty;
283 unsigned long yld_both_empty;
284 unsigned long yld_cnt;
286 /* schedule() stats */
287 unsigned long sched_switch;
288 unsigned long sched_cnt;
289 unsigned long sched_goidle;
291 /* try_to_wake_up() stats */
292 unsigned long ttwu_cnt;
293 unsigned long ttwu_local;
295 struct lock_class_key rq_lock_key;
298 static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
299 static DEFINE_MUTEX(sched_hotcpu_mutex);
301 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
303 rq->curr->sched_class->check_preempt_curr(rq, p);
306 static inline int cpu_of(struct rq *rq)
316 * Per-runqueue clock, as fine-grained as the platform can give us:
318 static unsigned long long __rq_clock(struct rq *rq)
320 u64 prev_raw = rq->prev_clock_raw;
321 u64 now = sched_clock();
322 s64 delta = now - prev_raw;
323 u64 clock = rq->clock;
326 * Protect against sched_clock() occasionally going backwards:
328 if (unlikely(delta < 0)) {
333 * Catch too large forward jumps too:
335 if (unlikely(delta > 2*TICK_NSEC)) {
337 rq->clock_overflows++;
339 if (unlikely(delta > rq->clock_max_delta))
340 rq->clock_max_delta = delta;
345 rq->prev_clock_raw = now;
351 static inline unsigned long long rq_clock(struct rq *rq)
353 int this_cpu = smp_processor_id();
355 if (this_cpu == cpu_of(rq))
356 return __rq_clock(rq);
362 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
363 * See detach_destroy_domains: synchronize_sched for details.
365 * The domain tree of any CPU may only be accessed from within
366 * preempt-disabled sections.
368 #define for_each_domain(cpu, __sd) \
369 for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
371 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
372 #define this_rq() (&__get_cpu_var(runqueues))
373 #define task_rq(p) cpu_rq(task_cpu(p))
374 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
376 #ifdef CONFIG_FAIR_GROUP_SCHED
377 /* Change a task's ->cfs_rq if it moves across CPUs */
378 static inline void set_task_cfs_rq(struct task_struct *p)
380 p->se.cfs_rq = &task_rq(p)->cfs;
383 static inline void set_task_cfs_rq(struct task_struct *p)
388 #ifndef prepare_arch_switch
389 # define prepare_arch_switch(next) do { } while (0)
391 #ifndef finish_arch_switch
392 # define finish_arch_switch(prev) do { } while (0)
395 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
396 static inline int task_running(struct rq *rq, struct task_struct *p)
398 return rq->curr == p;
401 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
405 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
407 #ifdef CONFIG_DEBUG_SPINLOCK
408 /* this is a valid case when another task releases the spinlock */
409 rq->lock.owner = current;
412 * If we are tracking spinlock dependencies then we have to
413 * fix up the runqueue lock - which gets 'carried over' from
416 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
418 spin_unlock_irq(&rq->lock);
421 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
422 static inline int task_running(struct rq *rq, struct task_struct *p)
427 return rq->curr == p;
431 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
435 * We can optimise this out completely for !SMP, because the
436 * SMP rebalancing from interrupt is the only thing that cares here.
441 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
442 spin_unlock_irq(&rq->lock);
444 spin_unlock(&rq->lock);
448 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
452 * After ->oncpu is cleared, the task can be moved to a different CPU.
453 * We must ensure this doesn't happen until the switch is completely finished.
459 #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
463 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
466 * __task_rq_lock - lock the runqueue a given task resides on.
467 * Must be called with interrupts disabled.
469 static inline struct rq *__task_rq_lock(struct task_struct *p)
476 spin_lock(&rq->lock);
477 if (unlikely(rq != task_rq(p))) {
478 spin_unlock(&rq->lock);
479 goto repeat_lock_task;
485 * task_rq_lock - lock the runqueue a given task resides on and disable
486 * interrupts. Note the ordering: we can safely lookup the task_rq without
487 * explicitly disabling preemption.
489 static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
495 local_irq_save(*flags);
497 spin_lock(&rq->lock);
498 if (unlikely(rq != task_rq(p))) {
499 spin_unlock_irqrestore(&rq->lock, *flags);
500 goto repeat_lock_task;
505 static inline void __task_rq_unlock(struct rq *rq)
508 spin_unlock(&rq->lock);
511 static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
514 spin_unlock_irqrestore(&rq->lock, *flags);
518 * this_rq_lock - lock this runqueue and disable interrupts.
520 static inline struct rq *this_rq_lock(void)
527 spin_lock(&rq->lock);
533 * CPU frequency is/was unstable - start afresh by setting prev_clock_raw:
535 void sched_clock_unstable_event(void)
540 rq = task_rq_lock(current, &flags);
541 rq->prev_clock_raw = sched_clock();
542 rq->clock_unstable_events++;
543 task_rq_unlock(rq, &flags);
547 * resched_task - mark a task 'to be rescheduled now'.
549 * On UP this means the setting of the need_resched flag, on SMP it
550 * might also involve a cross-CPU call to trigger the scheduler on the target CPU.
555 #ifndef tsk_is_polling
556 #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
559 static void resched_task(struct task_struct *p)
563 assert_spin_locked(&task_rq(p)->lock);
565 if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
568 set_tsk_thread_flag(p, TIF_NEED_RESCHED);
571 if (cpu == smp_processor_id())
574 /* NEED_RESCHED must be visible before we test polling */
576 if (!tsk_is_polling(p))
577 smp_send_reschedule(cpu);
580 static void resched_cpu(int cpu)
582 struct rq *rq = cpu_rq(cpu);
585 if (!spin_trylock_irqsave(&rq->lock, flags))
587 resched_task(cpu_curr(cpu));
588 spin_unlock_irqrestore(&rq->lock, flags);
591 static inline void resched_task(struct task_struct *p)
593 assert_spin_locked(&task_rq(p)->lock);
594 set_tsk_need_resched(p);
598 static u64 div64_likely32(u64 dividend, unsigned long divisor)
600 #if BITS_PER_LONG == 32
601 if (likely(dividend <= 0xffffffffULL))
602 return (u32)dividend / divisor;
603 do_div(dividend, divisor);
607 return dividend / divisor;
611 #if BITS_PER_LONG == 32
612 # define WMULT_CONST (~0UL)
614 # define WMULT_CONST (1UL << 32)
617 #define WMULT_SHIFT 32
619 static inline unsigned long
620 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
621 struct load_weight *lw)
625 if (unlikely(!lw->inv_weight))
626 lw->inv_weight = WMULT_CONST / lw->weight;
628 tmp = (u64)delta_exec * weight;
630 * Check whether we'd overflow the 64-bit multiplication:
632 if (unlikely(tmp > WMULT_CONST)) {
633 tmp = ((tmp >> WMULT_SHIFT/2) * lw->inv_weight)
636 tmp = (tmp * lw->inv_weight) >> WMULT_SHIFT;
639 return (unsigned long)min(tmp, (u64)sysctl_sched_runtime_limit);
642 static inline unsigned long
643 calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
645 return calc_delta_mine(delta_exec, NICE_0_LOAD, lw);
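/*
 * Worked example (illustrative, assuming NICE_0_LOAD is the usual 1024
 * and ignoring the sysctl_sched_runtime_limit clamp): calc_delta_mine()
 * computes roughly delta_exec * weight / lw->weight via the precomputed
 * inverse. With two nice-0 tasks queued (lw->weight == 2048),
 * calc_delta_fair(1000000, lw) ~= 1000000 * 1024 / 2048 = 500000, i.e.
 * the fair clock advances at half wall-clock speed when the CPU is
 * shared two ways.
 */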
648 static void update_load_add(struct load_weight *lw, unsigned long inc)
654 static void update_load_sub(struct load_weight *lw, unsigned long dec)
660 static void __update_curr_load(struct rq *rq, struct load_stat *ls)
662 if (rq->curr != rq->idle && ls->load.weight) {
663 ls->delta_exec += ls->delta_stat;
664 ls->delta_fair += calc_delta_fair(ls->delta_stat, &ls->load);
670 * Update delta_exec, delta_fair fields for rq.
672 * delta_fair clock advances at a rate inversely proportional to
673 * total load (rq->ls.load.weight) on the runqueue, while
674 * delta_exec advances at the same rate as wall-clock (provided
677 * delta_exec / delta_fair is a measure of the (smoothed) load on this
678 * runqueue over any given interval. This (smoothed) load is used
679 * during load balance.
681 * This function is called /before/ updating rq->ls.load
682 * and when switching tasks.
684 static void update_curr_load(struct rq *rq, u64 now)
686 struct load_stat *ls = &rq->ls;
689 start = ls->load_update_start;
690 ls->load_update_start = now;
691 ls->delta_stat += now - start;
693 * Stagger updates to ls->delta_fair. Very frequent updates can be expensive.
696 if (ls->delta_stat >= sysctl_sched_stat_granularity)
697 __update_curr_load(rq, ls);
701 * To help avoid the subversion of "niceness" due to an uneven distribution
702 * of tasks with abnormal "nice" values across CPUs, the contribution that
703 * each task makes to its run queue's load is weighted according to its
704 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
705 * scaled version of the new time slice allocation that they receive on time slice expiry.
710 * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
711 * If static_prio_timeslice() is ever changed to break this assumption then
712 * this code will need modification
714 #define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
715 #define load_weight(lp) \
716 (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
717 #define PRIO_TO_LOAD_WEIGHT(prio) \
718 load_weight(static_prio_timeslice(prio))
719 #define RTPRIO_TO_LOAD_WEIGHT(rp) \
720 (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + load_weight(rp))
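/*
 * Sanity check (illustrative, assuming SCHED_LOAD_SCALE == 1024): a
 * nice-0 task has static_prio_timeslice() == DEF_TIMESLICE, so
 * PRIO_TO_LOAD_WEIGHT(NICE_TO_PRIO(0)) == DEF_TIMESLICE * 1024 /
 * TIME_SLICE_NICE_ZERO == SCHED_LOAD_SCALE, i.e. one nice-0 task
 * contributes exactly one unit of scaled load.
 */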
722 #define WEIGHT_IDLEPRIO 2
723 #define WMULT_IDLEPRIO (1 << 31)
726 * Nice levels are multiplicative, with a gentle 10% change for every
727 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
728 * nice 1, it will get ~10% less CPU time than another CPU-bound task
729 * that remained on nice 0.
731 * The "10% effect" is relative and cumulative: from _any_ nice level,
732 * if you go up 1 level, it's -10% CPU usage; if you go down 1 level,
733 * it's +10% CPU usage.
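/*
 * Worked example using the table below (illustrative): a nice-0 task has
 * weight 1024 and a nice-1 task weight 819; running together they get
 * 1024/1843 ~= 55.6% and 819/1843 ~= 44.4% of the CPU, i.e. roughly the
 * 10% (absolute) swing per nice level described above.
 */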
735 static const int prio_to_weight[40] = {
736 /* -20 */ 88818, 71054, 56843, 45475, 36380, 29104, 23283, 18626, 14901, 11921,
737 /* -10 */ 9537, 7629, 6103, 4883, 3906, 3125, 2500, 2000, 1600, 1280,
738 /* 0 */ NICE_0_LOAD /* 1024 */,
739 /* 1 */ 819, 655, 524, 419, 336, 268, 215, 172, 137,
740 /* 10 */ 110, 87, 70, 56, 45, 36, 29, 23, 18, 15,
743 static const u32 prio_to_wmult[40] = {
744 48356, 60446, 75558, 94446, 118058, 147573,
745 184467, 230589, 288233, 360285, 450347,
746 562979, 703746, 879575, 1099582, 1374389,
747 1717986, 2147483, 2684354, 3355443, 4194304,
748 5244160, 6557201, 8196502, 10250518, 12782640,
749 16025997, 19976592, 24970740, 31350126, 39045157,
750 49367440, 61356675, 76695844, 95443717, 119304647,
751 148102320, 186737708, 238609294, 286331153,
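/*
 * The prio_to_wmult[] entries are the precomputed inverses used by
 * calc_delta_mine(): each one is approximately 2^32 divided by the
 * corresponding prio_to_weight[] entry. E.g. for nice 0,
 * 2^32 / 1024 == 4194304, which is exactly the value listed above.
 */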
755 inc_load(struct rq *rq, const struct task_struct *p, u64 now)
757 update_curr_load(rq, now);
758 update_load_add(&rq->ls.load, p->se.load.weight);
762 dec_load(struct rq *rq, const struct task_struct *p, u64 now)
764 update_curr_load(rq, now);
765 update_load_sub(&rq->ls.load, p->se.load.weight);
768 static inline void inc_nr_running(struct task_struct *p, struct rq *rq, u64 now)
771 inc_load(rq, p, now);
774 static inline void dec_nr_running(struct task_struct *p, struct rq *rq, u64 now)
777 dec_load(rq, p, now);
780 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup);
783 * runqueue iterator, to support SMP load-balancing between different
784 * scheduling classes, without having to expose their internal data
785 * structures to the load-balancing proper:
789 struct task_struct *(*start)(void *);
790 struct task_struct *(*next)(void *);
793 static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
794 unsigned long max_nr_move, unsigned long max_load_move,
795 struct sched_domain *sd, enum cpu_idle_type idle,
796 int *all_pinned, unsigned long *load_moved,
797 int this_best_prio, int best_prio, int best_prio_seen,
798 struct rq_iterator *iterator);
800 #include "sched_stats.h"
801 #include "sched_rt.c"
802 #include "sched_fair.c"
803 #include "sched_idletask.c"
804 #ifdef CONFIG_SCHED_DEBUG
805 # include "sched_debug.c"
808 #define sched_class_highest (&rt_sched_class)
810 static void set_load_weight(struct task_struct *p)
812 task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime;
813 p->se.wait_runtime = 0;
815 if (task_has_rt_policy(p)) {
816 p->se.load.weight = prio_to_weight[0] * 2;
817 p->se.load.inv_weight = prio_to_wmult[0] >> 1;
822 * SCHED_IDLE tasks get minimal weight:
824 if (p->policy == SCHED_IDLE) {
825 p->se.load.weight = WEIGHT_IDLEPRIO;
826 p->se.load.inv_weight = WMULT_IDLEPRIO;
830 p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
831 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
835 enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, u64 now)
837 sched_info_queued(p);
838 p->sched_class->enqueue_task(rq, p, wakeup, now);
843 dequeue_task(struct rq *rq, struct task_struct *p, int sleep, u64 now)
845 p->sched_class->dequeue_task(rq, p, sleep, now);
850 * __normal_prio - return the priority that is based on the static prio
852 static inline int __normal_prio(struct task_struct *p)
854 return p->static_prio;
858 * Calculate the expected normal priority: i.e. priority
859 * without taking RT-inheritance into account. Might be
860 * boosted by interactivity modifiers. Changes upon fork,
861 * setprio syscalls, and whenever the interactivity
862 * estimator recalculates.
864 static inline int normal_prio(struct task_struct *p)
868 if (task_has_rt_policy(p))
869 prio = MAX_RT_PRIO-1 - p->rt_priority;
871 prio = __normal_prio(p);
876 * Calculate the current priority, i.e. the priority
877 * taken into account by the scheduler. This value might
878 * be boosted by RT tasks, or might be boosted by
879 * interactivity modifiers. Will be RT if the task got
880 * RT-boosted. If not then it returns p->normal_prio.
882 static int effective_prio(struct task_struct *p)
884 p->normal_prio = normal_prio(p);
886 * If we are RT tasks or we were boosted to RT priority,
887 * keep the priority unchanged. Otherwise, update priority
888 * to the normal priority:
890 if (!rt_prio(p->prio))
891 return p->normal_prio;
896 * activate_task - move a task to the runqueue.
898 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
900 u64 now = rq_clock(rq);
902 if (p->state == TASK_UNINTERRUPTIBLE)
903 rq->nr_uninterruptible--;
905 enqueue_task(rq, p, wakeup, now);
906 inc_nr_running(p, rq, now);
910 * activate_idle_task - move idle task to the _front_ of runqueue.
912 static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
914 u64 now = rq_clock(rq);
916 if (p->state == TASK_UNINTERRUPTIBLE)
917 rq->nr_uninterruptible--;
919 enqueue_task(rq, p, 0, now);
920 inc_nr_running(p, rq, now);
924 * deactivate_task - remove a task from the runqueue.
926 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
928 u64 now = rq_clock(rq);
930 if (p->state == TASK_UNINTERRUPTIBLE)
931 rq->nr_uninterruptible++;
933 dequeue_task(rq, p, sleep, now);
934 dec_nr_running(p, rq, now);
938 * task_curr - is this task currently executing on a CPU?
939 * @p: the task in question.
941 inline int task_curr(const struct task_struct *p)
943 return cpu_curr(task_cpu(p)) == p;
946 /* Used instead of source_load when we know the type == 0 */
947 unsigned long weighted_cpuload(const int cpu)
949 return cpu_rq(cpu)->ls.load.weight;
952 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
955 task_thread_info(p)->cpu = cpu;
962 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
964 int old_cpu = task_cpu(p);
965 struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
966 u64 clock_offset, fair_clock_offset;
968 clock_offset = old_rq->clock - new_rq->clock;
969 fair_clock_offset = old_rq->cfs.fair_clock -
970 new_rq->cfs.fair_clock;
971 if (p->se.wait_start)
972 p->se.wait_start -= clock_offset;
973 if (p->se.wait_start_fair)
974 p->se.wait_start_fair -= fair_clock_offset;
975 if (p->se.sleep_start)
976 p->se.sleep_start -= clock_offset;
977 if (p->se.block_start)
978 p->se.block_start -= clock_offset;
979 if (p->se.sleep_start_fair)
980 p->se.sleep_start_fair -= fair_clock_offset;
982 __set_task_cpu(p, new_cpu);
985 struct migration_req {
986 struct list_head list;
988 struct task_struct *task;
991 struct completion done;
995 * The task's runqueue lock must be held.
996 * Returns true if you have to wait for migration thread.
999 migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
1001 struct rq *rq = task_rq(p);
1004 * If the task is not on a runqueue (and not running), then
1005 * it is sufficient to simply update the task's cpu field.
1007 if (!p->se.on_rq && !task_running(rq, p)) {
1008 set_task_cpu(p, dest_cpu);
1012 init_completion(&req->done);
1014 req->dest_cpu = dest_cpu;
1015 list_add(&req->list, &rq->migration_queue);
1021 * wait_task_inactive - wait for a thread to unschedule.
1023 * The caller must ensure that the task *will* unschedule sometime soon,
1024 * else this function might spin for a *long* time. This function can't
1025 * be called with interrupts off, or it may introduce deadlock with
1026 * smp_call_function() if an IPI is sent by the same process we are
1027 * waiting to become inactive.
1029 void wait_task_inactive(struct task_struct *p)
1031 unsigned long flags;
1037 * We do the initial early heuristics without holding
1038 * any task-queue locks at all. We'll only try to get
1039 * the runqueue lock when things look like they will
1045 * If the task is actively running on another CPU
1046 * still, just relax and busy-wait without holding any locks.
1049 * NOTE! Since we don't hold any locks, it's not
1050 * even sure that "rq" stays as the right runqueue!
1051 * But we don't care, since "task_running()" will
1052 * return false if the runqueue has changed and p
1053 * is actually now running somewhere else!
1055 while (task_running(rq, p))
1059 * Ok, time to look more closely! We need the rq
1060 * lock now, to be *sure*. If we're wrong, we'll
1061 * just go back and repeat.
1063 rq = task_rq_lock(p, &flags);
1064 running = task_running(rq, p);
1065 on_rq = p->se.on_rq;
1066 task_rq_unlock(rq, &flags);
1069 * Was it really running after all now that we
1070 * checked with the proper locks actually held?
1072 * Oops. Go back and try again..
1074 if (unlikely(running)) {
1080 * It's not enough that it's not actively running,
1081 * it must be off the runqueue _entirely_, and not
1084 * So if it was still runnable (but just not actively
1085 * running right now), it's preempted, and we should
1086 * yield - it could be a while.
1088 if (unlikely(on_rq)) {
1094 * Ahh, all good. It wasn't running, and it wasn't
1095 * runnable, which means that it will never become
1096 * running in the future either. We're all done!
1101 * kick_process - kick a running thread to enter/exit the kernel
1102 * @p: the to-be-kicked thread
1104 * Cause a process which is running on another CPU to enter
1105 * kernel-mode, without any delay. (to get signals handled.)
1107 * NOTE: this function doesn't have to take the runqueue lock,
1108 * because all it wants to ensure is that the remote task enters
1109 * the kernel. If the IPI races and the task has been migrated
1110 * to another CPU then no harm is done and the purpose has been
1113 void kick_process(struct task_struct *p)
1119 if ((cpu != smp_processor_id()) && task_curr(p))
1120 smp_send_reschedule(cpu);
1125 * Return a low guess at the load of a migration-source cpu weighted
1126 * according to the scheduling class and "nice" value.
1128 * We want to under-estimate the load of migration sources, to
1129 * balance conservatively.
1131 static inline unsigned long source_load(int cpu, int type)
1133 struct rq *rq = cpu_rq(cpu);
1134 unsigned long total = weighted_cpuload(cpu);
1139 return min(rq->cpu_load[type-1], total);
1143 * Return a high guess at the load of a migration-target cpu weighted
1144 * according to the scheduling class and "nice" value.
1146 static inline unsigned long target_load(int cpu, int type)
1148 struct rq *rq = cpu_rq(cpu);
1149 unsigned long total = weighted_cpuload(cpu);
1154 return max(rq->cpu_load[type-1], total);
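/*
 * Illustrative example: if cpu_load[type-1] has decayed to 2048 while
 * the instantaneous weighted load is only 1024, source_load() reports
 * 1024 (the lower, optimistic figure) and target_load() reports 2048
 * (the higher, pessimistic figure). Using the low estimate for sources
 * and the high estimate for targets biases the balancer towards leaving
 * things alone, i.e. towards conservative balancing.
 */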
1158 * Return the average load per task on the cpu's run queue
1160 static inline unsigned long cpu_avg_load_per_task(int cpu)
1162 struct rq *rq = cpu_rq(cpu);
1163 unsigned long total = weighted_cpuload(cpu);
1164 unsigned long n = rq->nr_running;
1166 return n ? total / n : SCHED_LOAD_SCALE;
1170 * find_idlest_group finds and returns the least busy CPU group within the domain.
1173 static struct sched_group *
1174 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
1176 struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
1177 unsigned long min_load = ULONG_MAX, this_load = 0;
1178 int load_idx = sd->forkexec_idx;
1179 int imbalance = 100 + (sd->imbalance_pct-100)/2;
1182 unsigned long load, avg_load;
1186 /* Skip over this group if it has no CPUs allowed */
1187 if (!cpus_intersects(group->cpumask, p->cpus_allowed))
1190 local_group = cpu_isset(this_cpu, group->cpumask);
1192 /* Tally up the load of all CPUs in the group */
1195 for_each_cpu_mask(i, group->cpumask) {
1196 /* Bias balancing toward cpus of our domain */
1198 load = source_load(i, load_idx);
1200 load = target_load(i, load_idx);
1205 /* Adjust by relative CPU power of the group */
1206 avg_load = sg_div_cpu_power(group,
1207 avg_load * SCHED_LOAD_SCALE);
1210 this_load = avg_load;
1212 } else if (avg_load < min_load) {
1213 min_load = avg_load;
1217 group = group->next;
1218 } while (group != sd->groups);
1220 if (!idlest || 100*this_load < imbalance*min_load)
1226 * find_idlest_cpu - find the idlest cpu among the cpus in group.
1229 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
1232 unsigned long load, min_load = ULONG_MAX;
1236 /* Traverse only the allowed CPUs */
1237 cpus_and(tmp, group->cpumask, p->cpus_allowed);
1239 for_each_cpu_mask(i, tmp) {
1240 load = weighted_cpuload(i);
1242 if (load < min_load || (load == min_load && i == this_cpu)) {
1252 * sched_balance_self: balance the current task (running on cpu) in domains
1253 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and SD_BALANCE_EXEC.
1256 * Balance, ie. select the least loaded group.
1258 * Returns the target CPU number, or the same CPU if no balancing is needed.
1260 * preempt must be disabled.
1262 static int sched_balance_self(int cpu, int flag)
1264 struct task_struct *t = current;
1265 struct sched_domain *tmp, *sd = NULL;
1267 for_each_domain(cpu, tmp) {
1269 * If power savings logic is enabled for a domain, stop there.
1271 if (tmp->flags & SD_POWERSAVINGS_BALANCE)
1273 if (tmp->flags & flag)
1279 struct sched_group *group;
1280 int new_cpu, weight;
1282 if (!(sd->flags & flag)) {
1288 group = find_idlest_group(sd, t, cpu);
1294 new_cpu = find_idlest_cpu(group, t, cpu);
1295 if (new_cpu == -1 || new_cpu == cpu) {
1296 /* Now try balancing at a lower domain level of cpu */
1301 /* Now try balancing at a lower domain level of new_cpu */
1304 weight = cpus_weight(span);
1305 for_each_domain(cpu, tmp) {
1306 if (weight <= cpus_weight(tmp->span))
1308 if (tmp->flags & flag)
1311 /* while loop will break here if sd == NULL */
1317 #endif /* CONFIG_SMP */
1320 * wake_idle() will wake a task on an idle cpu if task->cpu is
1321 * not idle and an idle cpu is available. The span of cpus to
1322 * search starts with cpus closest then further out as needed,
1323 * so we always favor a closer, idle cpu.
1325 * Returns the CPU we should wake onto.
1327 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
1328 static int wake_idle(int cpu, struct task_struct *p)
1331 struct sched_domain *sd;
1335 * If it is idle, then it is the best cpu to run this task.
1337 * This cpu is also the best, if it has more than one task already.
1338 * Siblings must also be busy (in most cases) as they didn't already
1339 * pickup the extra load from this cpu and hence we need not check
1340 * sibling runqueue info. This will avoid the checks and cache miss
1341 * penalties associated with that.
1343 if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1)
1346 for_each_domain(cpu, sd) {
1347 if (sd->flags & SD_WAKE_IDLE) {
1348 cpus_and(tmp, sd->span, p->cpus_allowed);
1349 for_each_cpu_mask(i, tmp) {
1360 static inline int wake_idle(int cpu, struct task_struct *p)
1367 * try_to_wake_up - wake up a thread
1368 * @p: the to-be-woken-up thread
1369 * @state: the mask of task states that can be woken
1370 * @sync: do a synchronous wakeup?
1372 * Put it on the run-queue if it's not already there. The "current"
1373 * thread is always on the run-queue (except when the actual
1374 * re-schedule is in progress), and as such you're allowed to do
1375 * the simpler "current->state = TASK_RUNNING" to mark yourself
1376 * runnable without the overhead of this.
1378 * returns failure only if the task is already active.
1380 static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
1382 int cpu, this_cpu, success = 0;
1383 unsigned long flags;
1387 struct sched_domain *sd, *this_sd = NULL;
1388 unsigned long load, this_load;
1392 rq = task_rq_lock(p, &flags);
1393 old_state = p->state;
1394 if (!(old_state & state))
1401 this_cpu = smp_processor_id();
1404 if (unlikely(task_running(rq, p)))
1409 schedstat_inc(rq, ttwu_cnt);
1410 if (cpu == this_cpu) {
1411 schedstat_inc(rq, ttwu_local);
1415 for_each_domain(this_cpu, sd) {
1416 if (cpu_isset(cpu, sd->span)) {
1417 schedstat_inc(sd, ttwu_wake_remote);
1423 if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
1427 * Check for affine wakeup and passive balancing possibilities.
1430 int idx = this_sd->wake_idx;
1431 unsigned int imbalance;
1433 imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
1435 load = source_load(cpu, idx);
1436 this_load = target_load(this_cpu, idx);
1438 new_cpu = this_cpu; /* Wake to this CPU if we can */
1440 if (this_sd->flags & SD_WAKE_AFFINE) {
1441 unsigned long tl = this_load;
1442 unsigned long tl_per_task;
1444 tl_per_task = cpu_avg_load_per_task(this_cpu);
1447 * If sync wakeup then subtract the (maximum possible)
1448 * effect of the currently running task from the load
1449 * of the current CPU:
1452 tl -= current->se.load.weight;
1455 tl + target_load(cpu, idx) <= tl_per_task) ||
1456 100*(tl + p->se.load.weight) <= imbalance*load) {
1458 * This domain has SD_WAKE_AFFINE and
1459 * p is cache cold in this domain, and
1460 * there is no bad imbalance.
1462 schedstat_inc(this_sd, ttwu_move_affine);
1468 * Start passive balancing when half the imbalance_pct limit is reached.
1471 if (this_sd->flags & SD_WAKE_BALANCE) {
1472 if (imbalance*this_load <= 100*load) {
1473 schedstat_inc(this_sd, ttwu_move_balance);
1479 new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
1481 new_cpu = wake_idle(new_cpu, p);
1482 if (new_cpu != cpu) {
1483 set_task_cpu(p, new_cpu);
1484 task_rq_unlock(rq, &flags);
1485 /* might preempt at this point */
1486 rq = task_rq_lock(p, &flags);
1487 old_state = p->state;
1488 if (!(old_state & state))
1493 this_cpu = smp_processor_id();
1498 #endif /* CONFIG_SMP */
1499 activate_task(rq, p, 1);
1501 * Sync wakeups (i.e. those types of wakeups where the waker
1502 * has indicated that it will leave the CPU in short order)
1503 * don't trigger a preemption, if the woken up task will run on
1504 * this cpu. (in this case the 'I will reschedule' promise of
1505 * the waker guarantees that the freshly woken up task is going
1506 * to be considered on this CPU.)
1508 if (!sync || cpu != this_cpu)
1509 check_preempt_curr(rq, p);
1513 p->state = TASK_RUNNING;
1515 task_rq_unlock(rq, &flags);
1520 int fastcall wake_up_process(struct task_struct *p)
1522 return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
1523 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
1525 EXPORT_SYMBOL(wake_up_process);
1527 int fastcall wake_up_state(struct task_struct *p, unsigned int state)
1529 return try_to_wake_up(p, state, 0);
1533 * Perform scheduler related setup for a newly forked process p.
1534 * p is forked by current.
1536 * __sched_fork() is basic setup used by init_idle() too:
1538 static void __sched_fork(struct task_struct *p)
1540 p->se.wait_start_fair = 0;
1541 p->se.wait_start = 0;
1542 p->se.exec_start = 0;
1543 p->se.sum_exec_runtime = 0;
1544 p->se.delta_exec = 0;
1545 p->se.delta_fair_run = 0;
1546 p->se.delta_fair_sleep = 0;
1547 p->se.wait_runtime = 0;
1548 p->se.sum_wait_runtime = 0;
1549 p->se.sum_sleep_runtime = 0;
1550 p->se.sleep_start = 0;
1551 p->se.sleep_start_fair = 0;
1552 p->se.block_start = 0;
1553 p->se.sleep_max = 0;
1554 p->se.block_max = 0;
1557 p->se.wait_runtime_overruns = 0;
1558 p->se.wait_runtime_underruns = 0;
1560 INIT_LIST_HEAD(&p->run_list);
1564 * We mark the process as running here, but have not actually
1565 * inserted it onto the runqueue yet. This guarantees that
1566 * nobody will actually run it, and a signal or other external
1567 * event cannot wake it up and insert it on the runqueue either.
1569 p->state = TASK_RUNNING;
1573 * fork()/clone()-time setup:
1575 void sched_fork(struct task_struct *p, int clone_flags)
1577 int cpu = get_cpu();
1582 cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
1584 __set_task_cpu(p, cpu);
1587 * Make sure we do not leak PI boosting priority to the child:
1589 p->prio = current->normal_prio;
1591 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1592 if (likely(sched_info_on()))
1593 memset(&p->sched_info, 0, sizeof(p->sched_info));
1595 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
1598 #ifdef CONFIG_PREEMPT
1599 /* Want to start with kernel preemption disabled. */
1600 task_thread_info(p)->preempt_count = 1;
1606 * After fork, the child runs first by default. If set to 0 then the
1607 * parent will (try to) run first.
1609 unsigned int __read_mostly sysctl_sched_child_runs_first = 1;
1612 * wake_up_new_task - wake up a newly created task for the first time.
1614 * This function will do some initial scheduler statistics housekeeping
1615 * that must be done for every newly created context, then puts the task
1616 * on the runqueue and wakes it.
1618 void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
1620 unsigned long flags;
1624 rq = task_rq_lock(p, &flags);
1625 BUG_ON(p->state != TASK_RUNNING);
1626 this_cpu = smp_processor_id(); /* parent's CPU */
1628 p->prio = effective_prio(p);
1630 if (!sysctl_sched_child_runs_first || (clone_flags & CLONE_VM) ||
1631 task_cpu(p) != this_cpu || !current->se.on_rq) {
1632 activate_task(rq, p, 0);
1635 * Let the scheduling class do new task startup
1636 * management (if any):
1638 p->sched_class->task_new(rq, p);
1640 check_preempt_curr(rq, p);
1641 task_rq_unlock(rq, &flags);
1645 * prepare_task_switch - prepare to switch tasks
1646 * @rq: the runqueue preparing to switch
1647 * @next: the task we are going to switch to.
1649 * This is called with the rq lock held and interrupts off. It must
1650 * be paired with a subsequent finish_task_switch after the context switch.
1653 * prepare_task_switch sets up locking and calls architecture specific hooks.
1656 static inline void prepare_task_switch(struct rq *rq, struct task_struct *next)
1658 prepare_lock_switch(rq, next);
1659 prepare_arch_switch(next);
1663 * finish_task_switch - clean up after a task-switch
1664 * @rq: runqueue associated with task-switch
1665 * @prev: the thread we just switched away from.
1667 * finish_task_switch must be called after the context switch, paired
1668 * with a prepare_task_switch call before the context switch.
1669 * finish_task_switch will reconcile locking set up by prepare_task_switch,
1670 * and do any other architecture-specific cleanup actions.
1672 * Note that we may have delayed dropping an mm in context_switch(). If
1673 * so, we finish that here outside of the runqueue lock. (Doing it
1674 * with the lock held can cause deadlocks; see schedule() for details.)
1677 static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
1678 __releases(rq->lock)
1680 struct mm_struct *mm = rq->prev_mm;
1686 * A task struct has one reference for the use as "current".
1687 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
1688 * schedule one last time. The schedule call will never return, and
1689 * the scheduled task must drop that reference.
1690 * The test for TASK_DEAD must occur while the runqueue locks are
1691 * still held, otherwise prev could be scheduled on another cpu, die
1692 * there before we look at prev->state, and then the reference would be dropped twice.
1694 * Manfred Spraul <manfred@colorfullife.com>
1696 prev_state = prev->state;
1697 finish_arch_switch(prev);
1698 finish_lock_switch(rq, prev);
1701 if (unlikely(prev_state == TASK_DEAD)) {
1703 * Remove function-return probe instances associated with this
1704 * task and put them back on the free list.
1706 kprobe_flush_task(prev);
1707 put_task_struct(prev);
1712 * schedule_tail - first thing a freshly forked thread must call.
1713 * @prev: the thread we just switched away from.
1715 asmlinkage void schedule_tail(struct task_struct *prev)
1716 __releases(rq->lock)
1718 struct rq *rq = this_rq();
1720 finish_task_switch(rq, prev);
1721 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
1722 /* In this case, finish_task_switch does not reenable preemption */
1725 if (current->set_child_tid)
1726 put_user(current->pid, current->set_child_tid);
1730 * context_switch - switch to the new MM and the new
1731 * thread's register state.
1734 context_switch(struct rq *rq, struct task_struct *prev,
1735 struct task_struct *next)
1737 struct mm_struct *mm, *oldmm;
1739 prepare_task_switch(rq, next);
1741 oldmm = prev->active_mm;
1743 * For paravirt, this is coupled with an exit in switch_to to
1744 * combine the page table reload and the switch backend into one hypercall.
1747 arch_enter_lazy_cpu_mode();
1749 if (unlikely(!mm)) {
1750 next->active_mm = oldmm;
1751 atomic_inc(&oldmm->mm_count);
1752 enter_lazy_tlb(oldmm, next);
1754 switch_mm(oldmm, mm, next);
1756 if (unlikely(!prev->mm)) {
1757 prev->active_mm = NULL;
1758 rq->prev_mm = oldmm;
1761 * The runqueue lock will be released by the next
1762 * task (which is an invalid locking op but in the case
1763 * of the scheduler it's an obvious special-case), so we
1764 * do an early lockdep release here:
1766 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
1767 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
1770 /* Here we just switch the register state and the stack. */
1771 switch_to(prev, next, prev);
1775 * this_rq must be evaluated again because prev may have moved
1776 * CPUs since it called schedule(), thus the 'rq' on its stack
1777 * frame will be invalid.
1779 finish_task_switch(this_rq(), prev);
1783 * nr_running, nr_uninterruptible and nr_context_switches:
1785 * externally visible scheduler statistics: current number of runnable
1786 * threads, current number of uninterruptible-sleeping threads, total
1787 * number of context switches performed since bootup.
1789 unsigned long nr_running(void)
1791 unsigned long i, sum = 0;
1793 for_each_online_cpu(i)
1794 sum += cpu_rq(i)->nr_running;
1799 unsigned long nr_uninterruptible(void)
1801 unsigned long i, sum = 0;
1803 for_each_possible_cpu(i)
1804 sum += cpu_rq(i)->nr_uninterruptible;
1807 * Since we read the counters lockless, it might be slightly
1808 * inaccurate. Do not allow it to go below zero though:
1810 if (unlikely((long)sum < 0))
1816 unsigned long long nr_context_switches(void)
1819 unsigned long long sum = 0;
1821 for_each_possible_cpu(i)
1822 sum += cpu_rq(i)->nr_switches;
1827 unsigned long nr_iowait(void)
1829 unsigned long i, sum = 0;
1831 for_each_possible_cpu(i)
1832 sum += atomic_read(&cpu_rq(i)->nr_iowait);
1837 unsigned long nr_active(void)
1839 unsigned long i, running = 0, uninterruptible = 0;
1841 for_each_online_cpu(i) {
1842 running += cpu_rq(i)->nr_running;
1843 uninterruptible += cpu_rq(i)->nr_uninterruptible;
1846 if (unlikely((long)uninterruptible < 0))
1847 uninterruptible = 0;
1849 return running + uninterruptible;
1853 * Update rq->cpu_load[] statistics. This function is usually called every
1854 * scheduler tick (TICK_NSEC).
1856 static void update_cpu_load(struct rq *this_rq)
1858 u64 fair_delta64, exec_delta64, idle_delta64, sample_interval64, tmp64;
1859 unsigned long total_load = this_rq->ls.load.weight;
1860 unsigned long this_load = total_load;
1861 struct load_stat *ls = &this_rq->ls;
1862 u64 now = __rq_clock(this_rq);
1865 this_rq->nr_load_updates++;
1866 if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
1869 /* Update delta_fair/delta_exec fields first */
1870 update_curr_load(this_rq, now);
1872 fair_delta64 = ls->delta_fair + 1;
1875 exec_delta64 = ls->delta_exec + 1;
1878 sample_interval64 = now - ls->load_update_last;
1879 ls->load_update_last = now;
1881 if ((s64)sample_interval64 < (s64)TICK_NSEC)
1882 sample_interval64 = TICK_NSEC;
1884 if (exec_delta64 > sample_interval64)
1885 exec_delta64 = sample_interval64;
1887 idle_delta64 = sample_interval64 - exec_delta64;
1889 tmp64 = div64_64(SCHED_LOAD_SCALE * exec_delta64, fair_delta64);
1890 tmp64 = div64_64(tmp64 * exec_delta64, sample_interval64);
1892 this_load = (unsigned long)tmp64;
1896 /* Update our load: */
1897 for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
1898 unsigned long old_load, new_load;
1900 /* scale is effectively 1 << i now, and >> i divides by scale */
1902 old_load = this_rq->cpu_load[i];
1903 new_load = this_load;
1905 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
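/*
 * This is a per-index exponential moving average: index 0 tracks the
 * instantaneous load (scale == 1, so old_load drops out), index 1 gives
 * the new sample a weight of 1/2, index 2 a weight of 1/4, and so on -
 * higher indices react more slowly. E.g. with cpu_load[2] == 4096 and a
 * new load of 0, the next value is (4096 * 3 + 0) >> 2 == 3072.
 */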
1912 * double_rq_lock - safely lock two runqueues
1914 * Note this does not disable interrupts like task_rq_lock,
1915 * you need to do so manually before calling.
1917 static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1918 __acquires(rq1->lock)
1919 __acquires(rq2->lock)
1921 BUG_ON(!irqs_disabled());
1923 spin_lock(&rq1->lock);
1924 __acquire(rq2->lock); /* Fake it out ;) */
1927 spin_lock(&rq1->lock);
1928 spin_lock(&rq2->lock);
1930 spin_lock(&rq2->lock);
1931 spin_lock(&rq1->lock);
1937 * double_rq_unlock - safely unlock two runqueues
1939 * Note this does not restore interrupts like task_rq_unlock,
1940 * you need to do so manually after calling.
1942 static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1943 __releases(rq1->lock)
1944 __releases(rq2->lock)
1946 spin_unlock(&rq1->lock);
1948 spin_unlock(&rq2->lock);
1950 __release(rq2->lock);
1954 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1956 static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
1957 __releases(this_rq->lock)
1958 __acquires(busiest->lock)
1959 __acquires(this_rq->lock)
1961 if (unlikely(!irqs_disabled())) {
1962 /* printk() doesn't work well under rq->lock */
1963 spin_unlock(&this_rq->lock);
1966 if (unlikely(!spin_trylock(&busiest->lock))) {
1967 if (busiest < this_rq) {
1968 spin_unlock(&this_rq->lock);
1969 spin_lock(&busiest->lock);
1970 spin_lock(&this_rq->lock);
1972 spin_lock(&busiest->lock);
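/*
 * Note on the ordering above: when the trylock fails, the runqueue with
 * the lower address is always locked first (dropping and re-taking
 * this_rq->lock when busiest has the lower address). This matches the
 * global rule that multiple runqueue locks must be acquired in ascending
 * &runqueue order, and so avoids an AB-BA deadlock between two CPUs
 * balancing against each other.
 */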
1977 * If dest_cpu is allowed for this process, migrate the task to it.
1978 * This is accomplished by forcing the cpu_allowed mask to only
1979 * allow dest_cpu, which will force the task onto dest_cpu. Then
1980 * the cpu_allowed mask is restored.
1982 static void sched_migrate_task(struct task_struct *p, int dest_cpu)
1984 struct migration_req req;
1985 unsigned long flags;
1988 rq = task_rq_lock(p, &flags);
1989 if (!cpu_isset(dest_cpu, p->cpus_allowed)
1990 || unlikely(cpu_is_offline(dest_cpu)))
1993 /* force the process onto the specified CPU */
1994 if (migrate_task(p, dest_cpu, &req)) {
1995 /* Need to wait for migration thread (might exit: take ref). */
1996 struct task_struct *mt = rq->migration_thread;
1998 get_task_struct(mt);
1999 task_rq_unlock(rq, &flags);
2000 wake_up_process(mt);
2001 put_task_struct(mt);
2002 wait_for_completion(&req.done);
2007 task_rq_unlock(rq, &flags);
2011 * sched_exec - execve() is a valuable balancing opportunity, because at
2012 * this point the task has the smallest effective memory and cache footprint.
2014 void sched_exec(void)
2016 int new_cpu, this_cpu = get_cpu();
2017 new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
2019 if (new_cpu != this_cpu)
2020 sched_migrate_task(current, new_cpu);
2024 * pull_task - move a task from a remote runqueue to the local runqueue.
2025 * Both runqueues must be locked.
2027 static void pull_task(struct rq *src_rq, struct task_struct *p,
2028 struct rq *this_rq, int this_cpu)
2030 deactivate_task(src_rq, p, 0);
2031 set_task_cpu(p, this_cpu);
2032 activate_task(this_rq, p, 0);
2034 * Note that idle threads have a prio of MAX_PRIO, so this test
2035 * is always true for them.
2037 check_preempt_curr(this_rq, p);
2041 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
2044 int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2045 struct sched_domain *sd, enum cpu_idle_type idle,
2049 * We do not migrate tasks that are:
2050 * 1) running (obviously), or
2051 * 2) cannot be migrated to this CPU due to cpus_allowed, or
2052 * 3) are cache-hot on their current CPU.
2054 if (!cpu_isset(this_cpu, p->cpus_allowed))
2058 if (task_running(rq, p))
2062 * Aggressive migration if too many balance attempts have failed:
2064 if (sd->nr_balance_failed > sd->cache_nice_tries)
2070 static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2071 unsigned long max_nr_move, unsigned long max_load_move,
2072 struct sched_domain *sd, enum cpu_idle_type idle,
2073 int *all_pinned, unsigned long *load_moved,
2074 int this_best_prio, int best_prio, int best_prio_seen,
2075 struct rq_iterator *iterator)
2077 int pulled = 0, pinned = 0, skip_for_load;
2078 struct task_struct *p;
2079 long rem_load_move = max_load_move;
2081 if (max_nr_move == 0 || max_load_move == 0)
2087 * Start the load-balancing iterator:
2089 p = iterator->start(iterator->arg);
2094 * To help distribute high priority tasks across CPUs we don't
2095 * skip a task if it will be the highest priority task (i.e. smallest
2096 * prio value) on its new queue regardless of its load weight
2098 skip_for_load = (p->se.load.weight >> 1) > rem_load_move +
2099 SCHED_LOAD_SCALE_FUZZ;
2100 if (skip_for_load && p->prio < this_best_prio)
2101 skip_for_load = !best_prio_seen && p->prio == best_prio;
2102 if (skip_for_load ||
2103 !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
2105 best_prio_seen |= p->prio == best_prio;
2106 p = iterator->next(iterator->arg);
2110 pull_task(busiest, p, this_rq, this_cpu);
2112 rem_load_move -= p->se.load.weight;
2115 * We only want to steal up to the prescribed number of tasks
2116 * and the prescribed amount of weighted load.
2118 if (pulled < max_nr_move && rem_load_move > 0) {
2119 if (p->prio < this_best_prio)
2120 this_best_prio = p->prio;
2121 p = iterator->next(iterator->arg);
2126 * Right now, this is the only place pull_task() is called,
2127 * so we can safely collect pull_task() stats here rather than
2128 * inside pull_task().
2130 schedstat_add(sd, lb_gained[idle], pulled);
2133 *all_pinned = pinned;
2134 *load_moved = max_load_move - rem_load_move;
2139 * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted
2140 * load from busiest to this_rq, as part of a balancing operation within
2141 * "domain". Returns the number of tasks moved.
2143 * Called with both runqueues locked.
2145 static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2146 unsigned long max_nr_move, unsigned long max_load_move,
2147 struct sched_domain *sd, enum cpu_idle_type idle,
2150 struct sched_class *class = sched_class_highest;
2151 unsigned long load_moved, total_nr_moved = 0, nr_moved;
2152 long rem_load_move = max_load_move;
2155 nr_moved = class->load_balance(this_rq, this_cpu, busiest,
2156 max_nr_move, (unsigned long)rem_load_move,
2157 sd, idle, all_pinned, &load_moved);
2158 total_nr_moved += nr_moved;
2159 max_nr_move -= nr_moved;
2160 rem_load_move -= load_moved;
2161 class = class->next;
2162 } while (class && max_nr_move && rem_load_move > 0);
2164 return total_nr_moved;
2168 * find_busiest_group finds and returns the busiest CPU group within the
2169 * domain. It calculates and returns the amount of weighted load which
2170 * should be moved to restore balance via the imbalance parameter.
2172 static struct sched_group *
2173 find_busiest_group(struct sched_domain *sd, int this_cpu,
2174 unsigned long *imbalance, enum cpu_idle_type idle,
2175 int *sd_idle, cpumask_t *cpus, int *balance)
2177 struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
2178 unsigned long max_load, avg_load, total_load, this_load, total_pwr;
2179 unsigned long max_pull;
2180 unsigned long busiest_load_per_task, busiest_nr_running;
2181 unsigned long this_load_per_task, this_nr_running;
2183 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2184 int power_savings_balance = 1;
2185 unsigned long leader_nr_running = 0, min_load_per_task = 0;
2186 unsigned long min_nr_running = ULONG_MAX;
2187 struct sched_group *group_min = NULL, *group_leader = NULL;
2190 max_load = this_load = total_load = total_pwr = 0;
2191 busiest_load_per_task = busiest_nr_running = 0;
2192 this_load_per_task = this_nr_running = 0;
2193 if (idle == CPU_NOT_IDLE)
2194 load_idx = sd->busy_idx;
2195 else if (idle == CPU_NEWLY_IDLE)
2196 load_idx = sd->newidle_idx;
2198 load_idx = sd->idle_idx;
2201 unsigned long load, group_capacity;
2204 unsigned int balance_cpu = -1, first_idle_cpu = 0;
2205 unsigned long sum_nr_running, sum_weighted_load;
2207 local_group = cpu_isset(this_cpu, group->cpumask);
2210 balance_cpu = first_cpu(group->cpumask);
2212 /* Tally up the load of all CPUs in the group */
2213 sum_weighted_load = sum_nr_running = avg_load = 0;
2215 for_each_cpu_mask(i, group->cpumask) {
2218 if (!cpu_isset(i, *cpus))
2223 if (*sd_idle && !idle_cpu(i))
2226 /* Bias balancing toward cpus of our domain */
2228 if (idle_cpu(i) && !first_idle_cpu) {
2233 load = target_load(i, load_idx);
2235 load = source_load(i, load_idx);
2238 sum_nr_running += rq->nr_running;
2239 sum_weighted_load += weighted_cpuload(i);
2243 * First idle cpu or the first cpu(busiest) in this sched group
2244 * is eligible for doing load balancing at this and above domains.
2247 if (local_group && balance_cpu != this_cpu && balance) {
2252 total_load += avg_load;
2253 total_pwr += group->__cpu_power;
2255 /* Adjust by relative CPU power of the group */
2256 avg_load = sg_div_cpu_power(group,
2257 avg_load * SCHED_LOAD_SCALE);
2259 group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
2262 this_load = avg_load;
2264 this_nr_running = sum_nr_running;
2265 this_load_per_task = sum_weighted_load;
2266 } else if (avg_load > max_load &&
2267 sum_nr_running > group_capacity) {
2268 max_load = avg_load;
2270 busiest_nr_running = sum_nr_running;
2271 busiest_load_per_task = sum_weighted_load;
2274 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2276 * Busy processors will not participate in power savings balance.
2279 if (idle == CPU_NOT_IDLE ||
2280 !(sd->flags & SD_POWERSAVINGS_BALANCE))
2284 * If the local group is idle or completely loaded
2285 * no need to do power savings balance at this domain
2287 if (local_group && (this_nr_running >= group_capacity ||
2289 power_savings_balance = 0;
2292 * If a group is already running at full capacity or idle,
2293 * don't include that group in power savings calculations
2295 if (!power_savings_balance || sum_nr_running >= group_capacity
2300 * Calculate the group which has the least non-idle load.
2301 * This is the group from where we need to pick up the load
2304 if ((sum_nr_running < min_nr_running) ||
2305 (sum_nr_running == min_nr_running &&
2306 first_cpu(group->cpumask) <
2307 first_cpu(group_min->cpumask))) {
2309 min_nr_running = sum_nr_running;
2310 min_load_per_task = sum_weighted_load /
2315 * Calculate the group which is running closest to its
2316 * capacity but still has some space to pick up some load
2317 * from another group and save more power.
2319 if (sum_nr_running <= group_capacity - 1) {
2320 if (sum_nr_running > leader_nr_running ||
2321 (sum_nr_running == leader_nr_running &&
2322 first_cpu(group->cpumask) >
2323 first_cpu(group_leader->cpumask))) {
2324 group_leader = group;
2325 leader_nr_running = sum_nr_running;
2330 group = group->next;
2331 } while (group != sd->groups);
2333 if (!busiest || this_load >= max_load || busiest_nr_running == 0)
2336 avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
2338 if (this_load >= avg_load ||
2339 100*max_load <= sd->imbalance_pct*this_load)
2342 busiest_load_per_task /= busiest_nr_running;
2344 * We're trying to get all the cpus to the average_load, so we don't
2345 * want to push ourselves above the average load, nor do we wish to
2346 * reduce the max loaded cpu below the average load, as either of these
2347 * actions would just result in more rebalancing later, and ping-pong
2348 * tasks around. Thus we look for the minimum possible imbalance.
2349 * Negative imbalances (*we* are more loaded than anyone else) will
2350 * be counted as no imbalance for these purposes -- we can't fix that
2351 * by pulling tasks to us. Be careful of negative numbers as they'll
2352 * appear as very large values with unsigned longs.
2354 if (max_load <= busiest_load_per_task)
2358 * In the presence of smp nice balancing, certain scenarios can have
2359 * max load less than avg load (as we skip the groups at or below
2360 * their cpu_power while calculating max_load.)
2362 if (max_load < avg_load) {
2364 goto small_imbalance;
2367 /* Don't want to pull so many tasks that a group would go idle */
2368 max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
2370 /* How much load to actually move to equalise the imbalance */
2371 *imbalance = min(max_pull * busiest->__cpu_power,
2372 (avg_load - this_load) * this->__cpu_power)
2376 * if *imbalance is less than the average load per runnable task
2377 * there is no guarantee that any tasks will be moved so we'll have
2378 * a think about bumping its value to force at least one task to be
2381 if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task/2) {
2382 unsigned long tmp, pwr_now, pwr_move;
2386 pwr_move = pwr_now = 0;
2388 if (this_nr_running) {
2389 this_load_per_task /= this_nr_running;
2390 if (busiest_load_per_task > this_load_per_task)
2393 this_load_per_task = SCHED_LOAD_SCALE;
2395 if (max_load - this_load + SCHED_LOAD_SCALE_FUZZ >=
2396 busiest_load_per_task * imbn) {
2397 *imbalance = busiest_load_per_task;
2402 * OK, we don't have enough imbalance to justify moving tasks,
2403 * however we may be able to increase total CPU power used by
2407 pwr_now += busiest->__cpu_power *
2408 min(busiest_load_per_task, max_load);
2409 pwr_now += this->__cpu_power *
2410 min(this_load_per_task, this_load);
2411 pwr_now /= SCHED_LOAD_SCALE;
2413 /* Amount of load we'd subtract */
2414 tmp = sg_div_cpu_power(busiest,
2415 busiest_load_per_task * SCHED_LOAD_SCALE);
2417 pwr_move += busiest->__cpu_power *
2418 min(busiest_load_per_task, max_load - tmp);
2420 /* Amount of load we'd add */
2421 if (max_load * busiest->__cpu_power <
2422 busiest_load_per_task * SCHED_LOAD_SCALE)
2423 tmp = sg_div_cpu_power(this,
2424 max_load * busiest->__cpu_power);
2426 tmp = sg_div_cpu_power(this,
2427 busiest_load_per_task * SCHED_LOAD_SCALE);
2428 pwr_move += this->__cpu_power *
2429 min(this_load_per_task, this_load + tmp);
2430 pwr_move /= SCHED_LOAD_SCALE;
2432 /* Move if we gain throughput */
2433 if (pwr_move <= pwr_now)
2436 *imbalance = busiest_load_per_task;
2442 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2443 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
2446 if (this == group_leader && group_leader != group_min) {
2447 *imbalance = min_load_per_task;
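/*
 * Illustrative sketch (not part of the original file): how the imbalance
 * computed above behaves for a simple two-group case.  The numbers are
 * made up and both groups are assumed to have __cpu_power equal to
 * SCHED_LOAD_SCALE, so the power scaling cancels out.
 */
static inline unsigned long example_imbalance(void)
{
	unsigned long this_load = 1024;		/* our group: one nice-0 task */
	unsigned long max_load  = 3072;		/* busiest group: three tasks */
	unsigned long avg_load  = 2048;		/* domain-wide average load   */
	unsigned long busiest_load_per_task = 1024;
	unsigned long max_pull;

	/* don't pull so much that the busiest group would go idle */
	max_pull = min(max_load - avg_load,
		       max_load - busiest_load_per_task);	/* = 1024 */

	/* move the smaller of "what busiest can give" and "what we can take" */
	return min(max_pull, avg_load - this_load);		/* = 1024 */
}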
2457 * find_busiest_queue - find the busiest runqueue among the cpus in group.
2460 find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
2461 unsigned long imbalance, cpumask_t *cpus)
2463 struct rq *busiest = NULL, *rq;
2464 unsigned long max_load = 0;
2467 for_each_cpu_mask(i, group->cpumask) {
2470 if (!cpu_isset(i, *cpus))
2474 wl = weighted_cpuload(i);
2476 if (rq->nr_running == 1 && wl > imbalance)
2479 if (wl > max_load) {
2489 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
2490 * so long as it is large enough.
2492 #define MAX_PINNED_INTERVAL 512
2494 static inline unsigned long minus_1_or_zero(unsigned long n)
2496 return n > 0 ? n - 1 : 0;
2500 * Check this_cpu to ensure it is balanced within domain. Attempt to move
2501 * tasks if there is an imbalance.
2503 static int load_balance(int this_cpu, struct rq *this_rq,
2504 struct sched_domain *sd, enum cpu_idle_type idle,
2507 int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
2508 struct sched_group *group;
2509 unsigned long imbalance;
2511 cpumask_t cpus = CPU_MASK_ALL;
2512 unsigned long flags;
2515 * When power savings policy is enabled for the parent domain, idle
2516 * sibling can pick up load irrespective of busy siblings. In this case,
2517 * let the state of idle sibling percolate up as CPU_IDLE, instead of
2518 * portraying it as CPU_NOT_IDLE.
2520 if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
2521 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2524 schedstat_inc(sd, lb_cnt[idle]);
2527 group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
2534 schedstat_inc(sd, lb_nobusyg[idle]);
2538 busiest = find_busiest_queue(group, idle, imbalance, &cpus);
2540 schedstat_inc(sd, lb_nobusyq[idle]);
2544 BUG_ON(busiest == this_rq);
2546 schedstat_add(sd, lb_imbalance[idle], imbalance);
2549 if (busiest->nr_running > 1) {
2551 * Attempt to move tasks. If find_busiest_group has found
2552 * an imbalance but busiest->nr_running <= 1, the group is
2553 * still unbalanced. nr_moved simply stays zero, so it is
2554 * correctly treated as an imbalance.
2556 local_irq_save(flags);
2557 double_rq_lock(this_rq, busiest);
2558 nr_moved = move_tasks(this_rq, this_cpu, busiest,
2559 minus_1_or_zero(busiest->nr_running),
2560 imbalance, sd, idle, &all_pinned);
2561 double_rq_unlock(this_rq, busiest);
2562 local_irq_restore(flags);
2565 * some other cpu did the load balance for us.
2567 if (nr_moved && this_cpu != smp_processor_id())
2568 resched_cpu(this_cpu);
2570 /* All tasks on this runqueue were pinned by CPU affinity */
2571 if (unlikely(all_pinned)) {
2572 cpu_clear(cpu_of(busiest), cpus);
2573 if (!cpus_empty(cpus))
2580 schedstat_inc(sd, lb_failed[idle]);
2581 sd->nr_balance_failed++;
2583 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
2585 spin_lock_irqsave(&busiest->lock, flags);
2587 /* don't kick the migration_thread, if the curr
2588 * task on busiest cpu can't be moved to this_cpu
2590 if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
2591 spin_unlock_irqrestore(&busiest->lock, flags);
2593 goto out_one_pinned;
2596 if (!busiest->active_balance) {
2597 busiest->active_balance = 1;
2598 busiest->push_cpu = this_cpu;
2601 spin_unlock_irqrestore(&busiest->lock, flags);
2603 wake_up_process(busiest->migration_thread);
2606 * We've kicked active balancing, reset the failure
2609 sd->nr_balance_failed = sd->cache_nice_tries+1;
2612 sd->nr_balance_failed = 0;
2614 if (likely(!active_balance)) {
2615 /* We were unbalanced, so reset the balancing interval */
2616 sd->balance_interval = sd->min_interval;
2619 * If we've begun active balancing, start to back off. This
2620 * case may not be covered by the all_pinned logic if there
2621 * is only 1 task on the busy runqueue (because we don't call
2624 if (sd->balance_interval < sd->max_interval)
2625 sd->balance_interval *= 2;
2628 if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
2629 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2634 schedstat_inc(sd, lb_balanced[idle]);
2636 sd->nr_balance_failed = 0;
2639 /* tune up the balancing interval */
2640 if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
2641 (sd->balance_interval < sd->max_interval))
2642 sd->balance_interval *= 2;
2644 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
2645 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2651 * Check this_cpu to ensure it is balanced within domain. Attempt to move
2652 * tasks if there is an imbalance.
2654 * Called from schedule when this_rq is about to become idle (CPU_NEWLY_IDLE).
2655 * this_rq is locked.
2658 load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
2660 struct sched_group *group;
2661 struct rq *busiest = NULL;
2662 unsigned long imbalance;
2665 cpumask_t cpus = CPU_MASK_ALL;
2668 * When power savings policy is enabled for the parent domain, idle
2669 * sibling can pick up load irrespective of busy siblings. In this case,
2670 * let the state of idle sibling percolate up as CPU_IDLE, instead of
2671 * portraying it as CPU_NOT_IDLE.
2673 if (sd->flags & SD_SHARE_CPUPOWER &&
2674 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2677 schedstat_inc(sd, lb_cnt[CPU_NEWLY_IDLE]);
2679 group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
2680 &sd_idle, &cpus, NULL);
2682 schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
2686 busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance,
2689 schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]);
2693 BUG_ON(busiest == this_rq);
2695 schedstat_add(sd, lb_imbalance[CPU_NEWLY_IDLE], imbalance);
2698 if (busiest->nr_running > 1) {
2699 /* Attempt to move tasks */
2700 double_lock_balance(this_rq, busiest);
2701 nr_moved = move_tasks(this_rq, this_cpu, busiest,
2702 minus_1_or_zero(busiest->nr_running),
2703 imbalance, sd, CPU_NEWLY_IDLE, NULL);
2704 spin_unlock(&busiest->lock);
2707 cpu_clear(cpu_of(busiest), cpus);
2708 if (!cpus_empty(cpus))
2714 schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
2715 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
2716 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2719 sd->nr_balance_failed = 0;
2724 schedstat_inc(sd, lb_balanced[CPU_NEWLY_IDLE]);
2725 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
2726 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2728 sd->nr_balance_failed = 0;
2734 * idle_balance is called by schedule() if this_cpu is about to become
2735 * idle. Attempts to pull tasks from other CPUs.
2737 static void idle_balance(int this_cpu, struct rq *this_rq)
2739 struct sched_domain *sd;
2740 int pulled_task = -1;
2741 unsigned long next_balance = jiffies + HZ;
2743 for_each_domain(this_cpu, sd) {
2744 unsigned long interval;
2746 if (!(sd->flags & SD_LOAD_BALANCE))
2749 if (sd->flags & SD_BALANCE_NEWIDLE)
2750 /* If we've pulled tasks over stop searching: */
2751 pulled_task = load_balance_newidle(this_cpu,
2754 interval = msecs_to_jiffies(sd->balance_interval);
2755 if (time_after(next_balance, sd->last_balance + interval))
2756 next_balance = sd->last_balance + interval;
2760 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
2762 * We are going idle. next_balance may be set based on
2763 * a busy processor. So reset next_balance.
2765 this_rq->next_balance = next_balance;
2770 * active_load_balance is run by migration threads. It pushes running tasks
2771 * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
2772 * running on each physical CPU where possible, and avoids physical /
2773 * logical imbalances.
2775 * Called with busiest_rq locked.
2777 static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
2779 int target_cpu = busiest_rq->push_cpu;
2780 struct sched_domain *sd;
2781 struct rq *target_rq;
2783 /* Is there any task to move? */
2784 if (busiest_rq->nr_running <= 1)
2787 target_rq = cpu_rq(target_cpu);
2790 * This condition is "impossible"; if it occurs
2791 * we need to fix it. Originally reported by
2792 * Bjorn Helgaas on a 128-cpu setup.
2794 BUG_ON(busiest_rq == target_rq);
2796 /* move a task from busiest_rq to target_rq */
2797 double_lock_balance(busiest_rq, target_rq);
2799 /* Search for an sd spanning us and the target CPU. */
2800 for_each_domain(target_cpu, sd) {
2801 if ((sd->flags & SD_LOAD_BALANCE) &&
2802 cpu_isset(busiest_cpu, sd->span))
2807 schedstat_inc(sd, alb_cnt);
2809 if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
2810 RTPRIO_TO_LOAD_WEIGHT(100), sd, CPU_IDLE,
2812 schedstat_inc(sd, alb_pushed);
2814 schedstat_inc(sd, alb_failed);
2816 spin_unlock(&target_rq->lock);
2821 atomic_t load_balancer;
2823 } nohz ____cacheline_aligned = {
2824 .load_balancer = ATOMIC_INIT(-1),
2825 .cpu_mask = CPU_MASK_NONE,
2829 * This routine will try to nominate the ilb (idle load balancing)
2830 * owner among the cpus whose ticks are stopped. ilb owner will do the idle
2831 * load balancing on behalf of all those cpus. If all the cpus in the system
2832 * go into this tickless mode, then there will be no ilb owner (as there is
2833 * no need for one) and all the cpus will sleep till the next wakeup event
2836 * For the ilb owner, tick is not stopped. And this tick will be used
2837 * for idle load balancing. ilb owner will still be part of
2840 * While stopping the tick, this cpu will become the ilb owner if there
2841 * is no other owner. And will be the owner till that cpu becomes busy
2842 * or if all cpus in the system stop their ticks at which point
2843 * there is no need for ilb owner.
2845 * When the ilb owner becomes busy, it nominates another owner, during the
2846 * next busy scheduler_tick()
2848 int select_nohz_load_balancer(int stop_tick)
2850 int cpu = smp_processor_id();
2853 cpu_set(cpu, nohz.cpu_mask);
2854 cpu_rq(cpu)->in_nohz_recently = 1;
2857 * If we are going offline and still the leader, give up!
2859 if (cpu_is_offline(cpu) &&
2860 atomic_read(&nohz.load_balancer) == cpu) {
2861 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
2866 /* time for ilb owner also to sleep */
2867 if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
2868 if (atomic_read(&nohz.load_balancer) == cpu)
2869 atomic_set(&nohz.load_balancer, -1);
2873 if (atomic_read(&nohz.load_balancer) == -1) {
2874 /* make me the ilb owner */
2875 if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
2877 } else if (atomic_read(&nohz.load_balancer) == cpu)
2880 if (!cpu_isset(cpu, nohz.cpu_mask))
2883 cpu_clear(cpu, nohz.cpu_mask);
2885 if (atomic_read(&nohz.load_balancer) == cpu)
2886 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
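/*
 * Illustrative sketch (not in the original file): the heart of the ilb
 * handoff above is a lock-free claim/release of an atomic_t that holds
 * either -1 (no owner) or the owning cpu number.
 */
static int example_claim_ilb(atomic_t *owner, int cpu)
{
	/* become the owner only if there is none right now */
	return atomic_cmpxchg(owner, -1, cpu) == -1;
}

static void example_release_ilb(atomic_t *owner, int cpu)
{
	/* give up ownership only if we still hold it */
	if (atomic_cmpxchg(owner, cpu, -1) != cpu)
		BUG();		/* mirrors the sanity check above */
}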
2893 static DEFINE_SPINLOCK(balancing);
2896 * It checks each scheduling domain to see if it is due to be balanced,
2897 * and initiates a balancing operation if so.
2899 * Balancing parameters are set up in arch_init_sched_domains.
2901 static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
2904 struct rq *rq = cpu_rq(cpu);
2905 unsigned long interval;
2906 struct sched_domain *sd;
2907 /* Earliest time when we have to do rebalance again */
2908 unsigned long next_balance = jiffies + 60*HZ;
2910 for_each_domain(cpu, sd) {
2911 if (!(sd->flags & SD_LOAD_BALANCE))
2914 interval = sd->balance_interval;
2915 if (idle != CPU_IDLE)
2916 interval *= sd->busy_factor;
2918 /* scale ms to jiffies */
2919 interval = msecs_to_jiffies(interval);
2920 if (unlikely(!interval))
2922 if (interval > HZ*NR_CPUS/10)
2923 interval = HZ*NR_CPUS/10;
2926 if (sd->flags & SD_SERIALIZE) {
2927 if (!spin_trylock(&balancing))
2931 if (time_after_eq(jiffies, sd->last_balance + interval)) {
2932 if (load_balance(cpu, rq, sd, idle, &balance)) {
2934 * We've pulled tasks over so either we're no
2935 * longer idle, or one of our SMT siblings is
2938 idle = CPU_NOT_IDLE;
2940 sd->last_balance = jiffies;
2942 if (sd->flags & SD_SERIALIZE)
2943 spin_unlock(&balancing);
2945 if (time_after(next_balance, sd->last_balance + interval))
2946 next_balance = sd->last_balance + interval;
2949 * Stop the load balance at this level. There is another
2950 * CPU in our sched group which is doing load balancing more
2956 rq->next_balance = next_balance;
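/*
 * Worked example (illustrative, not in the original file): with HZ == 1000
 * and a domain whose balance_interval is 64ms, an idle CPU rebalances it
 * every 64 jiffies; a busy CPU with busy_factor == 32 rebalances it only
 * every 64 * 32 = 2048 jiffies, clamped to HZ*NR_CPUS/10 at the top end.
 */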
2960 * run_rebalance_domains is triggered when needed from the scheduler tick.
2961 * In CONFIG_NO_HZ case, the idle load balance owner will do the
2962 * rebalancing for all the cpus for whom scheduler ticks are stopped.
2964 static void run_rebalance_domains(struct softirq_action *h)
2966 int this_cpu = smp_processor_id();
2967 struct rq *this_rq = cpu_rq(this_cpu);
2968 enum cpu_idle_type idle = this_rq->idle_at_tick ?
2969 CPU_IDLE : CPU_NOT_IDLE;
2971 rebalance_domains(this_cpu, idle);
2975 * If this cpu is the owner for idle load balancing, then do the
2976 * balancing on behalf of the other idle cpus whose ticks are
2979 if (this_rq->idle_at_tick &&
2980 atomic_read(&nohz.load_balancer) == this_cpu) {
2981 cpumask_t cpus = nohz.cpu_mask;
2985 cpu_clear(this_cpu, cpus);
2986 for_each_cpu_mask(balance_cpu, cpus) {
2988 * If this cpu gets work to do, stop the load balancing
2989 * work being done for other cpus. Next load
2990 * balancing owner will pick it up.
2995 rebalance_domains(balance_cpu, CPU_IDLE);
2997 rq = cpu_rq(balance_cpu);
2998 if (time_after(this_rq->next_balance, rq->next_balance))
2999 this_rq->next_balance = rq->next_balance;
3006 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
3008 * In case of CONFIG_NO_HZ, this is the place where we nominate a new
3009 * idle load balancing owner or decide to stop the periodic load balancing,
3010 * if the whole system is idle.
3012 static inline void trigger_load_balance(struct rq *rq, int cpu)
3016 * If we were in the nohz mode recently and busy at the current
3017 * scheduler tick, then check if we need to nominate new idle
3020 if (rq->in_nohz_recently && !rq->idle_at_tick) {
3021 rq->in_nohz_recently = 0;
3023 if (atomic_read(&nohz.load_balancer) == cpu) {
3024 cpu_clear(cpu, nohz.cpu_mask);
3025 atomic_set(&nohz.load_balancer, -1);
3028 if (atomic_read(&nohz.load_balancer) == -1) {
3030 * simple selection for now: Nominate the
3031 * first cpu in the nohz list to be the next
3034 * TBD: Traverse the sched domains and nominate
3035 * the nearest cpu in the nohz.cpu_mask.
3037 int ilb = first_cpu(nohz.cpu_mask);
3045 * If this cpu is idle and doing idle load balancing for all the
3046 * cpus with ticks stopped, is it time for that to stop?
3048 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
3049 cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
3055 * If this cpu is idle and the idle load balancing is done by
3056 * someone else, then there is no need to raise the SCHED_SOFTIRQ
3058 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
3059 cpu_isset(cpu, nohz.cpu_mask))
3062 if (time_after_eq(jiffies, rq->next_balance))
3063 raise_softirq(SCHED_SOFTIRQ);
3066 #else /* CONFIG_SMP */
3069 * on UP we do not need to balance between CPUs:
3071 static inline void idle_balance(int cpu, struct rq *rq)
3075 /* Avoid "used but not defined" warning on UP */
3076 static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
3077 unsigned long max_nr_move, unsigned long max_load_move,
3078 struct sched_domain *sd, enum cpu_idle_type idle,
3079 int *all_pinned, unsigned long *load_moved,
3080 int this_best_prio, int best_prio, int best_prio_seen,
3081 struct rq_iterator *iterator)
3090 DEFINE_PER_CPU(struct kernel_stat, kstat);
3092 EXPORT_PER_CPU_SYMBOL(kstat);
3095 * Return p->sum_exec_runtime plus any more ns on the sched_clock
3096 * that have not yet been banked in case the task is currently running.
3098 unsigned long long task_sched_runtime(struct task_struct *p)
3100 unsigned long flags;
3104 rq = task_rq_lock(p, &flags);
3105 ns = p->se.sum_exec_runtime;
3106 if (rq->curr == p) {
3107 delta_exec = rq_clock(rq) - p->se.exec_start;
3108 if ((s64)delta_exec > 0)
3111 task_rq_unlock(rq, &flags);
3117 * Account user cpu time to a process.
3118 * @p: the process that the cpu time gets accounted to
3119 * @hardirq_offset: the offset to subtract from hardirq_count()
3120 * @cputime: the cpu time spent in user space since the last update
3122 void account_user_time(struct task_struct *p, cputime_t cputime)
3124 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3127 p->utime = cputime_add(p->utime, cputime);
3129 /* Add user time to cpustat. */
3130 tmp = cputime_to_cputime64(cputime);
3131 if (TASK_NICE(p) > 0)
3132 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3134 cpustat->user = cputime64_add(cpustat->user, tmp);
3138 * Account system cpu time to a process.
3139 * @p: the process that the cpu time gets accounted to
3140 * @hardirq_offset: the offset to subtract from hardirq_count()
3141 * @cputime: the cpu time spent in kernel space since the last update
3143 void account_system_time(struct task_struct *p, int hardirq_offset,
3146 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3147 struct rq *rq = this_rq();
3150 p->stime = cputime_add(p->stime, cputime);
3152 /* Add system time to cpustat. */
3153 tmp = cputime_to_cputime64(cputime);
3154 if (hardirq_count() - hardirq_offset)
3155 cpustat->irq = cputime64_add(cpustat->irq, tmp);
3156 else if (softirq_count())
3157 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
3158 else if (p != rq->idle)
3159 cpustat->system = cputime64_add(cpustat->system, tmp);
3160 else if (atomic_read(&rq->nr_iowait) > 0)
3161 cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
3163 cpustat->idle = cputime64_add(cpustat->idle, tmp);
3164 /* Account for system time used */
3165 acct_update_integrals(p);
3169 * Account for involuntary wait time.
3170 * @p: the process from which the cpu time has been stolen
3171 * @steal: the cpu time spent in involuntary wait
3173 void account_steal_time(struct task_struct *p, cputime_t steal)
3175 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3176 cputime64_t tmp = cputime_to_cputime64(steal);
3177 struct rq *rq = this_rq();
3179 if (p == rq->idle) {
3180 p->stime = cputime_add(p->stime, steal);
3181 if (atomic_read(&rq->nr_iowait) > 0)
3182 cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
3184 cpustat->idle = cputime64_add(cpustat->idle, tmp);
3186 cpustat->steal = cputime64_add(cpustat->steal, tmp);
3190 * This function gets called by the timer code, with HZ frequency.
3191 * We call it with interrupts disabled.
3193 * It also gets called by the fork code, when changing the parent's
3196 void scheduler_tick(void)
3198 int cpu = smp_processor_id();
3199 struct rq *rq = cpu_rq(cpu);
3200 struct task_struct *curr = rq->curr;
3202 spin_lock(&rq->lock);
3203 if (curr != rq->idle) /* FIXME: needed? */
3204 curr->sched_class->task_tick(rq, curr);
3205 update_cpu_load(rq);
3206 spin_unlock(&rq->lock);
3209 rq->idle_at_tick = idle_cpu(cpu);
3210 trigger_load_balance(rq, cpu);
3214 #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
3216 void fastcall add_preempt_count(int val)
3221 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3223 preempt_count() += val;
3225 * Spinlock count overflowing soon?
3227 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3230 EXPORT_SYMBOL(add_preempt_count);
3232 void fastcall sub_preempt_count(int val)
3237 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
3240 * Is the spinlock portion underflowing?
3242 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3243 !(preempt_count() & PREEMPT_MASK)))
3246 preempt_count() -= val;
3248 EXPORT_SYMBOL(sub_preempt_count);
3253 * Print scheduling while atomic bug:
3255 static noinline void __schedule_bug(struct task_struct *prev)
3257 printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n",
3258 prev->comm, preempt_count(), prev->pid);
3259 debug_show_held_locks(prev);
3260 if (irqs_disabled())
3261 print_irqtrace_events(prev);
3266 * Various schedule()-time debugging checks and statistics:
3268 static inline void schedule_debug(struct task_struct *prev)
3271 * Test if we are atomic. Since do_exit() needs to call into
3272 * schedule() atomically, we ignore that path for now.
3273 * Otherwise, whine if we are scheduling when we should not be.
3275 if (unlikely(in_atomic_preempt_off()) && unlikely(!prev->exit_state))
3276 __schedule_bug(prev);
3278 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3280 schedstat_inc(this_rq(), sched_cnt);
3284 * Pick up the highest-prio task:
3286 static inline struct task_struct *
3287 pick_next_task(struct rq *rq, struct task_struct *prev, u64 now)
3289 struct sched_class *class;
3290 struct task_struct *p;
3293 * Optimization: we know that if all tasks are in
3294 * the fair class we can call that function directly:
3296 if (likely(rq->nr_running == rq->cfs.nr_running)) {
3297 p = fair_sched_class.pick_next_task(rq, now);
3302 class = sched_class_highest;
3304 p = class->pick_next_task(rq, now);
3308 * Will never be NULL as the idle class always
3309 * returns a non-NULL p:
3311 class = class->next;
3316 * schedule() is the main scheduler function.
3318 asmlinkage void __sched schedule(void)
3320 struct task_struct *prev, *next;
3328 cpu = smp_processor_id();
3332 switch_count = &prev->nivcsw;
3334 release_kernel_lock(prev);
3335 need_resched_nonpreemptible:
3337 schedule_debug(prev);
3339 spin_lock_irq(&rq->lock);
3340 clear_tsk_need_resched(prev);
3342 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
3343 if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
3344 unlikely(signal_pending(prev)))) {
3345 prev->state = TASK_RUNNING;
3347 deactivate_task(rq, prev, 1);
3349 switch_count = &prev->nvcsw;
3352 if (unlikely(!rq->nr_running))
3353 idle_balance(cpu, rq);
3355 now = __rq_clock(rq);
3356 prev->sched_class->put_prev_task(rq, prev, now);
3357 next = pick_next_task(rq, prev, now);
3359 sched_info_switch(prev, next);
3361 if (likely(prev != next)) {
3366 context_switch(rq, prev, next); /* unlocks the rq */
3368 spin_unlock_irq(&rq->lock);
3370 if (unlikely(reacquire_kernel_lock(current) < 0)) {
3371 cpu = smp_processor_id();
3373 goto need_resched_nonpreemptible;
3375 preempt_enable_no_resched();
3376 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
3379 EXPORT_SYMBOL(schedule);
3381 #ifdef CONFIG_PREEMPT
3383 * this is the entry point to schedule() from in-kernel preemption
3384 * off of preempt_enable. Kernel preemptions off return from interrupt
3385 * occur there and call schedule directly.
3387 asmlinkage void __sched preempt_schedule(void)
3389 struct thread_info *ti = current_thread_info();
3390 #ifdef CONFIG_PREEMPT_BKL
3391 struct task_struct *task = current;
3392 int saved_lock_depth;
3395 * If there is a non-zero preempt_count or interrupts are disabled,
3396 * we do not want to preempt the current task. Just return..
3398 if (likely(ti->preempt_count || irqs_disabled()))
3402 add_preempt_count(PREEMPT_ACTIVE);
3404 * We keep the big kernel semaphore locked, but we
3405 * clear ->lock_depth so that schedule() doesn't
3406 * auto-release the semaphore:
3408 #ifdef CONFIG_PREEMPT_BKL
3409 saved_lock_depth = task->lock_depth;
3410 task->lock_depth = -1;
3413 #ifdef CONFIG_PREEMPT_BKL
3414 task->lock_depth = saved_lock_depth;
3416 sub_preempt_count(PREEMPT_ACTIVE);
3418 /* we could miss a preemption opportunity between schedule and now */
3420 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
3423 EXPORT_SYMBOL(preempt_schedule);
3426 * this is the entry point to schedule() from kernel preemption
3427 * off of irq context.
3428 * Note, that this is called and return with irqs disabled. This will
3429 * protect us against recursive calling from irq.
3431 asmlinkage void __sched preempt_schedule_irq(void)
3433 struct thread_info *ti = current_thread_info();
3434 #ifdef CONFIG_PREEMPT_BKL
3435 struct task_struct *task = current;
3436 int saved_lock_depth;
3438 /* Catch callers which need to be fixed */
3439 BUG_ON(ti->preempt_count || !irqs_disabled());
3442 add_preempt_count(PREEMPT_ACTIVE);
3444 * We keep the big kernel semaphore locked, but we
3445 * clear ->lock_depth so that schedule() doesn't
3446 * auto-release the semaphore:
3448 #ifdef CONFIG_PREEMPT_BKL
3449 saved_lock_depth = task->lock_depth;
3450 task->lock_depth = -1;
3454 local_irq_disable();
3455 #ifdef CONFIG_PREEMPT_BKL
3456 task->lock_depth = saved_lock_depth;
3458 sub_preempt_count(PREEMPT_ACTIVE);
3460 /* we could miss a preemption opportunity between schedule and now */
3462 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
3466 #endif /* CONFIG_PREEMPT */
3468 int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
3471 return try_to_wake_up(curr->private, mode, sync);
3473 EXPORT_SYMBOL(default_wake_function);
3476 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
3477 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
3478 * number) then we wake all the non-exclusive tasks and one exclusive task.
3480 * There are circumstances in which we can try to wake a task which has already
3481 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
3482 * zero in this (rare) case, and we handle it by continuing to scan the queue.
3484 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
3485 int nr_exclusive, int sync, void *key)
3487 struct list_head *tmp, *next;
3489 list_for_each_safe(tmp, next, &q->task_list) {
3490 wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
3491 unsigned flags = curr->flags;
3493 if (curr->func(curr, mode, sync, key) &&
3494 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
3500 * __wake_up - wake up threads blocked on a waitqueue.
3502 * @mode: which threads
3503 * @nr_exclusive: how many wake-one or wake-many threads to wake up
3504 * @key: is directly passed to the wakeup function
3506 void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
3507 int nr_exclusive, void *key)
3509 unsigned long flags;
3511 spin_lock_irqsave(&q->lock, flags);
3512 __wake_up_common(q, mode, nr_exclusive, 0, key);
3513 spin_unlock_irqrestore(&q->lock, flags);
3515 EXPORT_SYMBOL(__wake_up);
3518 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
3520 void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
3522 __wake_up_common(q, mode, 1, 0, NULL);
3526 * __wake_up_sync - wake up threads blocked on a waitqueue.
3528 * @mode: which threads
3529 * @nr_exclusive: how many wake-one or wake-many threads to wake up
3531 * The sync wakeup differs that the waker knows that it will schedule
3532 * away soon, so while the target thread will be woken up, it will not
3533 * be migrated to another CPU - i.e. the two threads are 'synchronized'
3534 * with each other. This can prevent needless bouncing between CPUs.
3536 * On UP it can prevent extra preemption.
3539 __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
3541 unsigned long flags;
3547 if (unlikely(!nr_exclusive))
3550 spin_lock_irqsave(&q->lock, flags);
3551 __wake_up_common(q, mode, nr_exclusive, sync, NULL);
3552 spin_unlock_irqrestore(&q->lock, flags);
3554 EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
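/*
 * Illustrative sketch (not in the original file): how exclusive waiters
 * interact with the wakeup loop above.  prepare_to_wait() adds a
 * non-exclusive entry, prepare_to_wait_exclusive() an exclusive one; a
 * wake_up() then wakes every non-exclusive waiter but at most one
 * exclusive waiter, which is what keeps e.g. accept() queues from
 * thundering.  The flag being waited on is made up for the example.
 */
static void example_wait_for_flag(wait_queue_head_t *wq, int *flag)
{
	DEFINE_WAIT(wait);

	for (;;) {
		/* exclusive: only one such waiter is woken per wake_up() */
		prepare_to_wait_exclusive(wq, &wait, TASK_UNINTERRUPTIBLE);
		if (*flag)
			break;
		schedule();
	}
	finish_wait(wq, &wait);
}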
3556 void fastcall complete(struct completion *x)
3558 unsigned long flags;
3560 spin_lock_irqsave(&x->wait.lock, flags);
3562 __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
3564 spin_unlock_irqrestore(&x->wait.lock, flags);
3566 EXPORT_SYMBOL(complete);
3568 void fastcall complete_all(struct completion *x)
3570 unsigned long flags;
3572 spin_lock_irqsave(&x->wait.lock, flags);
3573 x->done += UINT_MAX/2;
3574 __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
3576 spin_unlock_irqrestore(&x->wait.lock, flags);
3578 EXPORT_SYMBOL(complete_all);
3580 void fastcall __sched wait_for_completion(struct completion *x)
3584 spin_lock_irq(&x->wait.lock);
3586 DECLARE_WAITQUEUE(wait, current);
3588 wait.flags |= WQ_FLAG_EXCLUSIVE;
3589 __add_wait_queue_tail(&x->wait, &wait);
3591 __set_current_state(TASK_UNINTERRUPTIBLE);
3592 spin_unlock_irq(&x->wait.lock);
3594 spin_lock_irq(&x->wait.lock);
3596 __remove_wait_queue(&x->wait, &wait);
3599 spin_unlock_irq(&x->wait.lock);
3601 EXPORT_SYMBOL(wait_for_completion);
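/*
 * Illustrative usage sketch (not in the original file): the canonical
 * pattern for the completion API exported above.  One thread signals
 * with complete(), another blocks in wait_for_completion().  The names
 * here are made up for the example.
 */
static DECLARE_COMPLETION(example_done);

static int example_worker(void *unused)
{
	/* ... perform the actual work ... */
	complete(&example_done);		/* wakes exactly one waiter */
	return 0;
}

static void example_start_and_wait(void)
{
	kthread_run(example_worker, NULL, "example_worker");
	wait_for_completion(&example_done);	/* sleeps until complete() */
}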
3603 unsigned long fastcall __sched
3604 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
3608 spin_lock_irq(&x->wait.lock);
3610 DECLARE_WAITQUEUE(wait, current);
3612 wait.flags |= WQ_FLAG_EXCLUSIVE;
3613 __add_wait_queue_tail(&x->wait, &wait);
3615 __set_current_state(TASK_UNINTERRUPTIBLE);
3616 spin_unlock_irq(&x->wait.lock);
3617 timeout = schedule_timeout(timeout);
3618 spin_lock_irq(&x->wait.lock);
3620 __remove_wait_queue(&x->wait, &wait);
3624 __remove_wait_queue(&x->wait, &wait);
3628 spin_unlock_irq(&x->wait.lock);
3631 EXPORT_SYMBOL(wait_for_completion_timeout);
3633 int fastcall __sched wait_for_completion_interruptible(struct completion *x)
3639 spin_lock_irq(&x->wait.lock);
3641 DECLARE_WAITQUEUE(wait, current);
3643 wait.flags |= WQ_FLAG_EXCLUSIVE;
3644 __add_wait_queue_tail(&x->wait, &wait);
3646 if (signal_pending(current)) {
3648 __remove_wait_queue(&x->wait, &wait);
3651 __set_current_state(TASK_INTERRUPTIBLE);
3652 spin_unlock_irq(&x->wait.lock);
3654 spin_lock_irq(&x->wait.lock);
3656 __remove_wait_queue(&x->wait, &wait);
3660 spin_unlock_irq(&x->wait.lock);
3664 EXPORT_SYMBOL(wait_for_completion_interruptible);
3666 unsigned long fastcall __sched
3667 wait_for_completion_interruptible_timeout(struct completion *x,
3668 unsigned long timeout)
3672 spin_lock_irq(&x->wait.lock);
3674 DECLARE_WAITQUEUE(wait, current);
3676 wait.flags |= WQ_FLAG_EXCLUSIVE;
3677 __add_wait_queue_tail(&x->wait, &wait);
3679 if (signal_pending(current)) {
3680 timeout = -ERESTARTSYS;
3681 __remove_wait_queue(&x->wait, &wait);
3684 __set_current_state(TASK_INTERRUPTIBLE);
3685 spin_unlock_irq(&x->wait.lock);
3686 timeout = schedule_timeout(timeout);
3687 spin_lock_irq(&x->wait.lock);
3689 __remove_wait_queue(&x->wait, &wait);
3693 __remove_wait_queue(&x->wait, &wait);
3697 spin_unlock_irq(&x->wait.lock);
3700 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
3703 #define SLEEP_ON_VAR \
3704 unsigned long flags; \
3705 wait_queue_t wait; \
3706 init_waitqueue_entry(&wait, current);
3708 #define SLEEP_ON_HEAD \
3709 spin_lock_irqsave(&q->lock,flags); \
3710 __add_wait_queue(q, &wait); \
3711 spin_unlock(&q->lock);
3713 #define SLEEP_ON_TAIL \
3714 spin_lock_irq(&q->lock); \
3715 __remove_wait_queue(q, &wait); \
3716 spin_unlock_irqrestore(&q->lock, flags);
3718 void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
3722 current->state = TASK_INTERRUPTIBLE;
3728 EXPORT_SYMBOL(interruptible_sleep_on);
3730 long fastcall __sched
3731 interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
3735 current->state = TASK_INTERRUPTIBLE;
3738 timeout = schedule_timeout(timeout);
3743 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
3745 void fastcall __sched sleep_on(wait_queue_head_t *q)
3749 current->state = TASK_UNINTERRUPTIBLE;
3755 EXPORT_SYMBOL(sleep_on);
3757 long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
3761 current->state = TASK_UNINTERRUPTIBLE;
3764 timeout = schedule_timeout(timeout);
3770 EXPORT_SYMBOL(sleep_on_timeout);
3772 #ifdef CONFIG_RT_MUTEXES
3775 * rt_mutex_setprio - set the current priority of a task
3777 * @prio: prio value (kernel-internal form)
3779 * This function changes the 'effective' priority of a task. It does
3780 * not touch ->normal_prio like __setscheduler().
3782 * Used by the rt_mutex code to implement priority inheritance logic.
3784 void rt_mutex_setprio(struct task_struct *p, int prio)
3786 unsigned long flags;
3791 BUG_ON(prio < 0 || prio > MAX_PRIO);
3793 rq = task_rq_lock(p, &flags);
3797 on_rq = p->se.on_rq;
3799 dequeue_task(rq, p, 0, now);
3802 p->sched_class = &rt_sched_class;
3804 p->sched_class = &fair_sched_class;
3809 enqueue_task(rq, p, 0, now);
3811 * Reschedule if we are currently running on this runqueue and
3812 * our priority decreased, or if we are not currently running on
3813 * this runqueue and our priority is higher than the current's
3815 if (task_running(rq, p)) {
3816 if (p->prio > oldprio)
3817 resched_task(rq->curr);
3819 check_preempt_curr(rq, p);
3822 task_rq_unlock(rq, &flags);
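/*
 * Worked example (illustrative): a nice-0 task (prio 120) holding an
 * rt_mutex that a SCHED_FIFO priority-50 task (kernel prio 49) blocks on
 * is boosted with rt_mutex_setprio(p, 49); once the mutex is released,
 * the same call restores the task's normal priority of 120.
 */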
3827 void set_user_nice(struct task_struct *p, long nice)
3829 int old_prio, delta, on_rq;
3830 unsigned long flags;
3834 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
3837 * We have to be careful, if called from sys_setpriority(),
3838 * the task might be in the middle of scheduling on another CPU.
3840 rq = task_rq_lock(p, &flags);
3843 * The RT priorities are set via sched_setscheduler(), but we still
3844 * allow the 'normal' nice value to be set - but as expected
3845 * it won't have any effect on scheduling until the task is
3846 * SCHED_FIFO/SCHED_RR:
3848 if (task_has_rt_policy(p)) {
3849 p->static_prio = NICE_TO_PRIO(nice);
3852 on_rq = p->se.on_rq;
3854 dequeue_task(rq, p, 0, now);
3855 dec_load(rq, p, now);
3858 p->static_prio = NICE_TO_PRIO(nice);
3861 p->prio = effective_prio(p);
3862 delta = p->prio - old_prio;
3865 enqueue_task(rq, p, 0, now);
3866 inc_load(rq, p, now);
3868 * If the task increased its priority or is running and
3869 * lowered its priority, then reschedule its CPU:
3871 if (delta < 0 || (delta > 0 && task_running(rq, p)))
3872 resched_task(rq->curr);
3875 task_rq_unlock(rq, &flags);
3877 EXPORT_SYMBOL(set_user_nice);
3880 * can_nice - check if a task can reduce its nice value
3884 int can_nice(const struct task_struct *p, const int nice)
3886 /* convert nice value [19,-20] to rlimit style value [1,40] */
3887 int nice_rlim = 20 - nice;
3889 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
3890 capable(CAP_SYS_NICE));
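/*
 * Worked example (illustrative): with RLIMIT_NICE set to 30, a request
 * for nice -10 maps to nice_rlim = 20 - (-10) = 30, which is within the
 * limit and allowed; a request for nice -15 maps to 35 and is refused
 * unless the caller has CAP_SYS_NICE.
 */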
3893 #ifdef __ARCH_WANT_SYS_NICE
3896 * sys_nice - change the priority of the current process.
3897 * @increment: priority increment
3899 * sys_setpriority is a more generic, but much slower function that
3900 * does similar things.
3902 asmlinkage long sys_nice(int increment)
3907 * Setpriority might change our priority at the same moment.
3908 * We don't have to worry. Conceptually one call occurs first
3909 * and we have a single winner.
3911 if (increment < -40)
3916 nice = PRIO_TO_NICE(current->static_prio) + increment;
3922 if (increment < 0 && !can_nice(current, nice))
3925 retval = security_task_setnice(current, nice);
3929 set_user_nice(current, nice);
3936 * task_prio - return the priority value of a given task.
3937 * @p: the task in question.
3939 * This is the priority value as seen by users in /proc.
3940 * RT tasks are offset by -200. Normal tasks are centered
3941 * around 0, value goes from -16 to +15.
3943 int task_prio(const struct task_struct *p)
3945 return p->prio - MAX_RT_PRIO;
3949 * task_nice - return the nice value of a given task.
3950 * @p: the task in question.
3952 int task_nice(const struct task_struct *p)
3954 return TASK_NICE(p);
3956 EXPORT_SYMBOL_GPL(task_nice);
3959 * idle_cpu - is a given cpu idle currently?
3960 * @cpu: the processor in question.
3962 int idle_cpu(int cpu)
3964 return cpu_curr(cpu) == cpu_rq(cpu)->idle;
3968 * idle_task - return the idle task for a given cpu.
3969 * @cpu: the processor in question.
3971 struct task_struct *idle_task(int cpu)
3973 return cpu_rq(cpu)->idle;
3977 * find_process_by_pid - find a process with a matching PID value.
3978 * @pid: the pid in question.
3980 static inline struct task_struct *find_process_by_pid(pid_t pid)
3982 return pid ? find_task_by_pid(pid) : current;
3985 /* Actually do priority change: must hold rq lock. */
3987 __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
3989 BUG_ON(p->se.on_rq);
3992 switch (p->policy) {
3996 p->sched_class = &fair_sched_class;
4000 p->sched_class = &rt_sched_class;
4004 p->rt_priority = prio;
4005 p->normal_prio = normal_prio(p);
4006 /* we are holding p->pi_lock already */
4007 p->prio = rt_mutex_getprio(p);
4012 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4013 * @p: the task in question.
4014 * @policy: new policy.
4015 * @param: structure containing the new RT priority.
4017 * NOTE that the task may be already dead.
4019 int sched_setscheduler(struct task_struct *p, int policy,
4020 struct sched_param *param)
4022 int retval, oldprio, oldpolicy = -1, on_rq;
4023 unsigned long flags;
4026 /* may grab non-irq protected spin_locks */
4027 BUG_ON(in_interrupt());
4029 /* double check policy once rq lock held */
4031 policy = oldpolicy = p->policy;
4032 else if (policy != SCHED_FIFO && policy != SCHED_RR &&
4033 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
4034 policy != SCHED_IDLE)
4037 * Valid priorities for SCHED_FIFO and SCHED_RR are
4038 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4039 * SCHED_BATCH and SCHED_IDLE is 0.
4041 if (param->sched_priority < 0 ||
4042 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
4043 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
4045 if (rt_policy(policy) != (param->sched_priority != 0))
4049 * Allow unprivileged RT tasks to decrease priority:
4051 if (!capable(CAP_SYS_NICE)) {
4052 if (rt_policy(policy)) {
4053 unsigned long rlim_rtprio;
4055 if (!lock_task_sighand(p, &flags))
4057 rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
4058 unlock_task_sighand(p, &flags);
4060 /* can't set/change the rt policy */
4061 if (policy != p->policy && !rlim_rtprio)
4064 /* can't increase priority */
4065 if (param->sched_priority > p->rt_priority &&
4066 param->sched_priority > rlim_rtprio)
4070 * Like positive nice levels, don't allow tasks to
4071 * move out of SCHED_IDLE either:
4073 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
4076 /* can't change other user's priorities */
4077 if ((current->euid != p->euid) &&
4078 (current->euid != p->uid))
4082 retval = security_task_setscheduler(p, policy, param);
4086 * make sure no PI-waiters arrive (or leave) while we are
4087 * changing the priority of the task:
4089 spin_lock_irqsave(&p->pi_lock, flags);
4091 * To be able to change p->policy safely, the appropriate
4092 * runqueue lock must be held.
4094 rq = __task_rq_lock(p);
4095 /* recheck policy now with rq lock held */
4096 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4097 policy = oldpolicy = -1;
4098 __task_rq_unlock(rq);
4099 spin_unlock_irqrestore(&p->pi_lock, flags);
4102 on_rq = p->se.on_rq;
4104 deactivate_task(rq, p, 0);
4106 __setscheduler(rq, p, policy, param->sched_priority);
4108 activate_task(rq, p, 0);
4110 * Reschedule if we are currently running on this runqueue and
4111 * our priority decreased, or if we are not currently running on
4112 * this runqueue and our priority is higher than the current's
4114 if (task_running(rq, p)) {
4115 if (p->prio > oldprio)
4116 resched_task(rq->curr);
4118 check_preempt_curr(rq, p);
4121 __task_rq_unlock(rq);
4122 spin_unlock_irqrestore(&p->pi_lock, flags);
4124 rt_mutex_adjust_pi(p);
4128 EXPORT_SYMBOL_GPL(sched_setscheduler);
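/*
 * Illustrative usage sketch (not in the original file): in-kernel users
 * of the export above typically push a kernel thread into the realtime
 * classes.  The helper name and the choice of priority are made up.
 */
static void example_make_fifo(struct task_struct *p)
{
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO - 1 };

	if (sched_setscheduler(p, SCHED_FIFO, &param))
		printk(KERN_WARNING "example: %s could not be made SCHED_FIFO\n",
		       p->comm);
}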
4131 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4133 struct sched_param lparam;
4134 struct task_struct *p;
4137 if (!param || pid < 0)
4139 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4144 p = find_process_by_pid(pid);
4146 retval = sched_setscheduler(p, policy, &lparam);
4153 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4154 * @pid: the pid in question.
4155 * @policy: new policy.
4156 * @param: structure containing the new RT priority.
4158 asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
4159 struct sched_param __user *param)
4161 /* negative values for policy are not valid */
4165 return do_sched_setscheduler(pid, policy, param);
4169 * sys_sched_setparam - set/change the RT priority of a thread
4170 * @pid: the pid in question.
4171 * @param: structure containing the new RT priority.
4173 asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
4175 return do_sched_setscheduler(pid, -1, param);
4179 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4180 * @pid: the pid in question.
4182 asmlinkage long sys_sched_getscheduler(pid_t pid)
4184 struct task_struct *p;
4185 int retval = -EINVAL;
4191 read_lock(&tasklist_lock);
4192 p = find_process_by_pid(pid);
4194 retval = security_task_getscheduler(p);
4198 read_unlock(&tasklist_lock);
4205 * sys_sched_getparam - get the RT priority of a thread
4206 * @pid: the pid in question.
4207 * @param: structure containing the RT priority.
4209 asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
4211 struct sched_param lp;
4212 struct task_struct *p;
4213 int retval = -EINVAL;
4215 if (!param || pid < 0)
4218 read_lock(&tasklist_lock);
4219 p = find_process_by_pid(pid);
4224 retval = security_task_getscheduler(p);
4228 lp.sched_priority = p->rt_priority;
4229 read_unlock(&tasklist_lock);
4232 * This one might sleep; we cannot do it with a spinlock held ...
4234 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4240 read_unlock(&tasklist_lock);
4244 long sched_setaffinity(pid_t pid, cpumask_t new_mask)
4246 cpumask_t cpus_allowed;
4247 struct task_struct *p;
4250 mutex_lock(&sched_hotcpu_mutex);
4251 read_lock(&tasklist_lock);
4253 p = find_process_by_pid(pid);
4255 read_unlock(&tasklist_lock);
4256 mutex_unlock(&sched_hotcpu_mutex);
4261 * It is not safe to call set_cpus_allowed with the
4262 * tasklist_lock held. We will bump the task_struct's
4263 * usage count and then drop tasklist_lock.
4266 read_unlock(&tasklist_lock);
4269 if ((current->euid != p->euid) && (current->euid != p->uid) &&
4270 !capable(CAP_SYS_NICE))
4273 retval = security_task_setscheduler(p, 0, NULL);
4277 cpus_allowed = cpuset_cpus_allowed(p);
4278 cpus_and(new_mask, new_mask, cpus_allowed);
4279 retval = set_cpus_allowed(p, new_mask);
4283 mutex_unlock(&sched_hotcpu_mutex);
4287 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
4288 cpumask_t *new_mask)
4290 if (len < sizeof(cpumask_t)) {
4291 memset(new_mask, 0, sizeof(cpumask_t));
4292 } else if (len > sizeof(cpumask_t)) {
4293 len = sizeof(cpumask_t);
4295 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4299 * sys_sched_setaffinity - set the cpu affinity of a process
4300 * @pid: pid of the process
4301 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4302 * @user_mask_ptr: user-space pointer to the new cpu mask
4304 asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
4305 unsigned long __user *user_mask_ptr)
4310 retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
4314 return sched_setaffinity(pid, new_mask);
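/*
 * Illustrative sketch of the user-space side (not part of the original
 * file), built with _GNU_SOURCE against glibc's <sched.h>.  A @len
 * smaller than the kernel's cpumask_t is zero-padded by
 * get_user_cpu_mask() above; a larger one is silently truncated.
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);                          // run only on CPU 2
 *	sched_setaffinity(0, sizeof(set), &set);   // pid 0 == calling thread
 */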
4318 * Represents all CPUs present in the system.
4319 * In systems capable of hotplug, this map could dynamically grow
4320 * as new CPUs are detected in the system via any platform-specific
4321 * method, such as ACPI, for example.
4324 cpumask_t cpu_present_map __read_mostly;
4325 EXPORT_SYMBOL(cpu_present_map);
4328 cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
4329 EXPORT_SYMBOL(cpu_online_map);
4331 cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
4332 EXPORT_SYMBOL(cpu_possible_map);
4335 long sched_getaffinity(pid_t pid, cpumask_t *mask)
4337 struct task_struct *p;
4340 mutex_lock(&sched_hotcpu_mutex);
4341 read_lock(&tasklist_lock);
4344 p = find_process_by_pid(pid);
4348 retval = security_task_getscheduler(p);
4352 cpus_and(*mask, p->cpus_allowed, cpu_online_map);
4355 read_unlock(&tasklist_lock);
4356 mutex_unlock(&sched_hotcpu_mutex);
4364 * sys_sched_getaffinity - get the cpu affinity of a process
4365 * @pid: pid of the process
4366 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4367 * @user_mask_ptr: user-space pointer to hold the current cpu mask
4369 asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
4370 unsigned long __user *user_mask_ptr)
4375 if (len < sizeof(cpumask_t))
4378 ret = sched_getaffinity(pid, &mask);
4382 if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t)))
4385 return sizeof(cpumask_t);
4389 * sys_sched_yield - yield the current processor to other threads.
4391 * This function yields the current CPU to other tasks. If there are no
4392 * other threads running on this CPU then this function will return.
4394 asmlinkage long sys_sched_yield(void)
4396 struct rq *rq = this_rq_lock();
4398 schedstat_inc(rq, yld_cnt);
4399 if (unlikely(rq->nr_running == 1))
4400 schedstat_inc(rq, yld_act_empty);
4402 current->sched_class->yield_task(rq, current);
4405 * Since we are going to call schedule() anyway, there's
4406 * no need to preempt or enable interrupts:
4408 __release(rq->lock);
4409 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
4410 _raw_spin_unlock(&rq->lock);
4411 preempt_enable_no_resched();
4418 static void __cond_resched(void)
4420 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
4421 __might_sleep(__FILE__, __LINE__);
4424 * The BKS might be reacquired before we have dropped
4425 * PREEMPT_ACTIVE, which could trigger a second
4426 * cond_resched() call.
4429 add_preempt_count(PREEMPT_ACTIVE);
4431 sub_preempt_count(PREEMPT_ACTIVE);
4432 } while (need_resched());
4435 int __sched cond_resched(void)
4437 if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
4438 system_state == SYSTEM_RUNNING) {
4444 EXPORT_SYMBOL(cond_resched);
4447 * cond_resched_lock() - if a reschedule is pending, drop the given lock,
4448 * call schedule, and on return reacquire the lock.
4450 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
4451 * operations here to prevent schedule() from being called twice (once via
4452 * spin_unlock(), once by hand).
4454 int cond_resched_lock(spinlock_t *lock)
4458 if (need_lockbreak(lock)) {
4464 if (need_resched() && system_state == SYSTEM_RUNNING) {
4465 spin_release(&lock->dep_map, 1, _THIS_IP_);
4466 _raw_spin_unlock(lock);
4467 preempt_enable_no_resched();
4474 EXPORT_SYMBOL(cond_resched_lock);
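/*
 * Illustrative usage sketch (not in the original file): a long scan under
 * a spinlock can yield with cond_resched_lock().  The lock, list and
 * revalidation policy are made up for the example.
 */
static void example_scan(spinlock_t *lock, struct list_head *head)
{
	struct list_head *pos;

	spin_lock(lock);
	list_for_each(pos, head) {
		/* ... process one entry ... */
		if (cond_resched_lock(lock)) {
			/*
			 * The lock was dropped and retaken, so 'pos' may be
			 * stale; a real caller would restart the scan or
			 * otherwise revalidate its cursor here.
			 */
		}
	}
	spin_unlock(lock);
}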
4476 int __sched cond_resched_softirq(void)
4478 BUG_ON(!in_softirq());
4480 if (need_resched() && system_state == SYSTEM_RUNNING) {
4488 EXPORT_SYMBOL(cond_resched_softirq);
4491 * yield - yield the current processor to other threads.
4493 * This is a shortcut for kernel-space yielding - it marks the
4494 * thread runnable and calls sys_sched_yield().
4496 void __sched yield(void)
4498 set_current_state(TASK_RUNNING);
4501 EXPORT_SYMBOL(yield);
4504 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
4505 * that process accounting knows that this is a task in IO wait state.
4507 * But don't do that if it is a deliberate, throttling IO wait (this task
4508 * has set its backing_dev_info: the queue against which it should throttle)
4510 void __sched io_schedule(void)
4512 struct rq *rq = &__raw_get_cpu_var(runqueues);
4514 delayacct_blkio_start();
4515 atomic_inc(&rq->nr_iowait);
4517 atomic_dec(&rq->nr_iowait);
4518 delayacct_blkio_end();
4520 EXPORT_SYMBOL(io_schedule);
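/*
 * Illustrative usage sketch (not in the original file): a typical I/O
 * wait loop marks itself TASK_UNINTERRUPTIBLE and sleeps via
 * io_schedule() so the time is charged to iowait rather than idle.  The
 * flag and whoever eventually wakes this task are made-up assumptions.
 */
static void example_wait_for_io(int *io_done)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (*io_done)
			break;
		io_schedule();	/* the completion path is assumed to wake us */
	}
	__set_current_state(TASK_RUNNING);
}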
4522 long __sched io_schedule_timeout(long timeout)
4524 struct rq *rq = &__raw_get_cpu_var(runqueues);
4527 delayacct_blkio_start();
4528 atomic_inc(&rq->nr_iowait);
4529 ret = schedule_timeout(timeout);
4530 atomic_dec(&rq->nr_iowait);
4531 delayacct_blkio_end();
4536 * sys_sched_get_priority_max - return maximum RT priority.
4537 * @policy: scheduling class.
4539 * this syscall returns the maximum rt_priority that can be used
4540 * by a given scheduling class.
4542 asmlinkage long sys_sched_get_priority_max(int policy)
4549 ret = MAX_USER_RT_PRIO-1;
4561 * sys_sched_get_priority_min - return minimum RT priority.
4562 * @policy: scheduling class.
4564 * this syscall returns the minimum rt_priority that can be used
4565 * by a given scheduling class.
4567 asmlinkage long sys_sched_get_priority_min(int policy)
4585 * sys_sched_rr_get_interval - return the default timeslice of a process.
4586 * @pid: pid of the process.
4587 * @interval: userspace pointer to the timeslice value.
4589 * this syscall writes the default timeslice value of a given process
4590 * into the user-space timespec buffer. A value of '0' means infinity.
4593 long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
4595 struct task_struct *p;
4596 int retval = -EINVAL;
4603 read_lock(&tasklist_lock);
4604 p = find_process_by_pid(pid);
4608 retval = security_task_getscheduler(p);
4612 jiffies_to_timespec(p->policy == SCHED_FIFO ?
4613 0 : static_prio_timeslice(p->static_prio), &t);
4614 read_unlock(&tasklist_lock);
4615 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
4619 read_unlock(&tasklist_lock);
4623 static const char stat_nam[] = "RSDTtZX";
4625 static void show_task(struct task_struct *p)
4627 unsigned long free = 0;
4630 state = p->state ? __ffs(p->state) + 1 : 0;
4631 printk("%-13.13s %c", p->comm,
4632 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4633 #if (BITS_PER_LONG == 32)
4634 if (state == TASK_RUNNING)
4635 printk(" running ");
4637 printk(" %08lX ", thread_saved_pc(p));
4639 if (state == TASK_RUNNING)
4640 printk(" running task ");
4642 printk(" %016lx ", thread_saved_pc(p));
4644 #ifdef CONFIG_DEBUG_STACK_USAGE
4646 unsigned long *n = end_of_stack(p);
4649 free = (unsigned long)n - (unsigned long)end_of_stack(p);
4652 printk("%5lu %5d %6d", free, p->pid, p->parent->pid);
4654 printk(" (L-TLB)\n");
4656 printk(" (NOTLB)\n");
4658 if (state != TASK_RUNNING)
4659 show_stack(p, NULL);
4662 void show_state_filter(unsigned long state_filter)
4664 struct task_struct *g, *p;
4666 #if (BITS_PER_LONG == 32)
4669 printk(" task PC stack pid father child younger older\n");
4673 printk(" task PC stack pid father child younger older\n");
4675 read_lock(&tasklist_lock);
4676 do_each_thread(g, p) {
4678 * reset the NMI-timeout, listing all files on a slow
4679 * console might take a lot of time:
4681 touch_nmi_watchdog();
4682 if (!state_filter || (p->state & state_filter))
4684 } while_each_thread(g, p);
4686 touch_all_softlockup_watchdogs();
4688 #ifdef CONFIG_SCHED_DEBUG
4689 sysrq_sched_debug_show();
4691 read_unlock(&tasklist_lock);
4693 * Only show locks if all tasks are dumped:
4695 if (state_filter == -1)
4696 debug_show_all_locks();
4699 void __cpuinit init_idle_bootup_task(struct task_struct *idle)
4701 idle->sched_class = &idle_sched_class;
4705 * init_idle - set up an idle thread for a given CPU
4706 * @idle: task in question
4707 * @cpu: cpu the idle task belongs to
4709 * NOTE: this function does not set the idle thread's NEED_RESCHED
4710 * flag, to make booting more robust.
4712 void __cpuinit init_idle(struct task_struct *idle, int cpu)
4714 struct rq *rq = cpu_rq(cpu);
4715 unsigned long flags;
4718 idle->se.exec_start = sched_clock();
4720 idle->prio = idle->normal_prio = MAX_PRIO;
4721 idle->cpus_allowed = cpumask_of_cpu(cpu);
4722 __set_task_cpu(idle, cpu);
4724 spin_lock_irqsave(&rq->lock, flags);
4725 rq->curr = rq->idle = idle;
4726 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
4729 spin_unlock_irqrestore(&rq->lock, flags);
4731 /* Set the preempt count _outside_ the spinlocks! */
4732 #if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
4733 task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
4735 task_thread_info(idle)->preempt_count = 0;
4738 * The idle tasks have their own, simple scheduling class:
4740 idle->sched_class = &idle_sched_class;
4744 * In a system that switches off the HZ timer nohz_cpu_mask
4745 * indicates which cpus entered this state. This is used
4746 * in the rcu update to wait only for active cpus. For system
4747 * which do not switch off the HZ timer nohz_cpu_mask should
4748 * always be CPU_MASK_NONE.
4750 cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
4753 * Increase the granularity value when there are more CPUs,
4754 * because with more CPUs the 'effective latency' as visible
4755 * to users decreases. But the relationship is not linear,
4756 * so pick a second-best guess by going with the log2 of the
4759 * This idea comes from the SD scheduler of Con Kolivas:
4761 static inline void sched_init_granularity(void)
4763 unsigned int factor = 1 + ilog2(num_online_cpus());
4764 const unsigned long gran_limit = 10000000;
4766 sysctl_sched_granularity *= factor;
4767 if (sysctl_sched_granularity > gran_limit)
4768 sysctl_sched_granularity = gran_limit;
4770 sysctl_sched_runtime_limit = sysctl_sched_granularity * 4;
4771 sysctl_sched_wakeup_granularity = sysctl_sched_granularity / 2;
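/*
 * Worked example (illustrative): on an 8-CPU machine, factor is
 * 1 + ilog2(8) = 4, so the granularity is multiplied by 4 (but never
 * past the 10ms gran_limit); the runtime limit and wakeup granularity
 * then follow as 4x and 0.5x of that value.
 */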
4776 * This is how migration works:
4778 * 1) we queue a struct migration_req structure in the source CPU's
4779 * runqueue and wake up that CPU's migration thread.
4780 * 2) we down() the locked semaphore => thread blocks.
4781 * 3) migration thread wakes up (implicitly it forces the migrated
4782 * thread off the CPU)
4783 * 4) it gets the migration request and checks whether the migrated
4784 * task is still in the wrong runqueue.
4785 * 5) if it's in the wrong runqueue then the migration thread removes
4786 * it and puts it into the right queue.
4787 * 6) migration thread up()s the semaphore.
4788 * 7) we wake up and the migration is done.
4792 * Change a given task's CPU affinity. Migrate the thread to a
4793 * proper CPU and schedule it away if the CPU it's executing on
4794 * is removed from the allowed bitmask.
4796 * NOTE: the caller must have a valid reference to the task, the
4797 * task must not exit() & deallocate itself prematurely. The
4798 * call is not atomic; no spinlocks may be held.
4800 int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
4802 struct migration_req req;
4803 unsigned long flags;
4807 rq = task_rq_lock(p, &flags);
4808 if (!cpus_intersects(new_mask, cpu_online_map)) {
4813 p->cpus_allowed = new_mask;
4814 /* Can the task run on the task's current CPU? If so, we're done */
4815 if (cpu_isset(task_cpu(p), new_mask))
4818 if (migrate_task(p, any_online_cpu(new_mask), &req)) {
4819 /* Need help from migration thread: drop lock and wait. */
4820 task_rq_unlock(rq, &flags);
4821 wake_up_process(rq->migration_thread);
4822 wait_for_completion(&req.done);
4823 tlb_migrate_finish(p->mm);
4827 task_rq_unlock(rq, &flags);
4831 EXPORT_SYMBOL_GPL(set_cpus_allowed);
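/*
 * Illustrative usage sketch (not in the original file): a subsystem
 * pinning one of its kernel threads to a single CPU through the export
 * above.  The helper name is made up; the return value is 0 on success
 * or -EINVAL if the requested CPU is not online.
 */
static int example_pin_to_cpu(struct task_struct *p, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	return set_cpus_allowed(p, mask);
}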
4834 * Move a (non-current) task off this cpu, onto the dest cpu. We're doing
4835 * this either because it can't run here any more (set_cpus_allowed()
4836 * moved it away from this CPU, or the CPU is going down), or because we're
4837 * attempting to rebalance this task on exec (sched_exec).
4839 * So we race with normal scheduler movements, but that's OK, as long
4840 * as the task is no longer on this CPU.
4842 * Returns non-zero if task was successfully migrated.
4844 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
4846 struct rq *rq_dest, *rq_src;
4849 if (unlikely(cpu_is_offline(dest_cpu)))
4852 rq_src = cpu_rq(src_cpu);
4853 rq_dest = cpu_rq(dest_cpu);
4855 double_rq_lock(rq_src, rq_dest);
4856 /* Already moved. */
4857 if (task_cpu(p) != src_cpu)
4859 /* Affinity changed (again). */
4860 if (!cpu_isset(dest_cpu, p->cpus_allowed))
4863 on_rq = p->se.on_rq;
4865 deactivate_task(rq_src, p, 0);
4866 set_task_cpu(p, dest_cpu);
4868 activate_task(rq_dest, p, 0);
4869 check_preempt_curr(rq_dest, p);
4873 double_rq_unlock(rq_src, rq_dest);
4878 * migration_thread - this is a highprio system thread that performs
4879 * thread migration by bumping thread off CPU then 'pushing' onto another runqueue.
4882 static int migration_thread(void *data)
4884 int cpu = (long)data;
4888 BUG_ON(rq->migration_thread != current);
4890 set_current_state(TASK_INTERRUPTIBLE);
4891 while (!kthread_should_stop()) {
4892 struct migration_req *req;
4893 struct list_head *head;
4897 spin_lock_irq(&rq->lock);
4899 if (cpu_is_offline(cpu)) {
4900 spin_unlock_irq(&rq->lock);
4904 if (rq->active_balance) {
4905 active_load_balance(rq, cpu);
4906 rq->active_balance = 0;
4909 head = &rq->migration_queue;
4911 if (list_empty(head)) {
4912 spin_unlock_irq(&rq->lock);
4914 set_current_state(TASK_INTERRUPTIBLE);
4917 req = list_entry(head->next, struct migration_req, list);
4918 list_del_init(head->next);
4920 spin_unlock(&rq->lock);
4921 __migrate_task(req->task, cpu, req->dest_cpu);
4924 complete(&req->done);
4926 __set_current_state(TASK_RUNNING);
4930 /* Wait for kthread_stop */
4931 set_current_state(TASK_INTERRUPTIBLE);
4932 while (!kthread_should_stop()) {
4934 set_current_state(TASK_INTERRUPTIBLE);
4936 __set_current_state(TASK_RUNNING);
4940 #ifdef CONFIG_HOTPLUG_CPU
4942 * Figure out where a task on a dead CPU should go; use force if necessary.
4943 * NOTE: interrupts should be disabled by the caller
4945 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
4947 unsigned long flags;
4954 mask = node_to_cpumask(cpu_to_node(dead_cpu));
4955 cpus_and(mask, mask, p->cpus_allowed);
4956 dest_cpu = any_online_cpu(mask);
4958 /* On any allowed CPU? */
4959 if (dest_cpu == NR_CPUS)
4960 dest_cpu = any_online_cpu(p->cpus_allowed);
4962 /* No more Mr. Nice Guy. */
4963 if (dest_cpu == NR_CPUS) {
4964 rq = task_rq_lock(p, &flags);
4965 cpus_setall(p->cpus_allowed);
4966 dest_cpu = any_online_cpu(p->cpus_allowed);
4967 task_rq_unlock(rq, &flags);
4970 * Don't tell them about moving exiting tasks or
4971 * kernel threads (both mm NULL), since they never leave the kernel.
4974 if (p->mm && printk_ratelimit())
4975 printk(KERN_INFO "process %d (%s) no "
4976 "longer affine to cpu%d\n",
4977 p->pid, p->comm, dead_cpu);
4979 if (!__migrate_task(p, dead_cpu, dest_cpu))
4984 * While a dead CPU has no uninterruptible tasks queued at this point,
4985 * it might still have a nonzero ->nr_uninterruptible counter, because
4986 * for performance reasons the counter is not strictly tracking tasks to
4987 * their home CPUs. So we just add the counter to another CPU's counter,
4988 * to keep the global sum constant after CPU-down:
4990 static void migrate_nr_uninterruptible(struct rq *rq_src)
4992 struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
4993 unsigned long flags;
4995 local_irq_save(flags);
4996 double_rq_lock(rq_src, rq_dest);
4997 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
4998 rq_src->nr_uninterruptible = 0;
4999 double_rq_unlock(rq_src, rq_dest);
5000 local_irq_restore(flags);
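/*
 * Worked example (illustrative): if the dead CPU's runqueue shows
 * ->nr_uninterruptible == 5 (or even a transiently negative value), that
 * value is folded into an online runqueue's counter and the source is
 * zeroed, so the sum over all runqueues is exactly what it was before.
 */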
5003 /* Run through task list and migrate tasks from the dead cpu. */
5004 static void migrate_live_tasks(int src_cpu)
5006 struct task_struct *p, *t;
5008 write_lock_irq(&tasklist_lock);
5010 do_each_thread(t, p) {
5014 if (task_cpu(p) == src_cpu)
5015 move_task_off_dead_cpu(src_cpu, p);
5016 } while_each_thread(t, p);
5018 write_unlock_irq(&tasklist_lock);
5022 * Schedules the idle task to be the next runnable task on the current CPU.
5023 * It does so by boosting its priority to the highest possible and adding it to
5024 * the _front_ of the runqueue. Used by the CPU offline code.
5026 void sched_idle_next(void)
5028 int this_cpu = smp_processor_id();
5029 struct rq *rq = cpu_rq(this_cpu);
5030 struct task_struct *p = rq->idle;
5031 unsigned long flags;
5033 /* cpu has to be offline */
5034 BUG_ON(cpu_online(this_cpu));
5037 * Strictly not necessary, since the rest of the CPUs are stopped by now
5038 * and interrupts are disabled on the current cpu.
5040 spin_lock_irqsave(&rq->lock, flags);
5042 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
5044 /* Add idle task to the _front_ of its priority queue: */
5045 activate_idle_task(p, rq);
5047 spin_unlock_irqrestore(&rq->lock, flags);
5051 * Ensures that the idle task is using init_mm right before its cpu goes offline.
5054 void idle_task_exit(void)
5056 struct mm_struct *mm = current->active_mm;
5058 BUG_ON(cpu_online(smp_processor_id()));
5061 switch_mm(mm, &init_mm, current);
5065 /* called under rq->lock with disabled interrupts */
5066 static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
5068 struct rq *rq = cpu_rq(dead_cpu);
5070 /* Must be exiting, otherwise would be on tasklist. */
5071 BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
5073 /* Cannot have done final schedule yet: would have vanished. */
5074 BUG_ON(p->state == TASK_DEAD);
5079 * Drop lock around migration; if someone else moves it,
5080 * that's OK. No task can be added to this CPU, so iteration is safe.
5082 * NOTE: interrupts should be left disabled --dev@
5084 spin_unlock(&rq->lock);
5085 move_task_off_dead_cpu(dead_cpu, p);
5086 spin_lock(&rq->lock);
5091 /* release_task() removes task from tasklist, so we won't find dead tasks. */
5092 static void migrate_dead_tasks(unsigned int dead_cpu)
5094 struct rq *rq = cpu_rq(dead_cpu);
5095 struct task_struct *next;
5098 if (!rq->nr_running)
5100 next = pick_next_task(rq, rq->curr, rq_clock(rq));
5103 migrate_dead(dead_cpu, next);
5106 #endif /* CONFIG_HOTPLUG_CPU */
5109 * migration_call - callback that gets triggered when a CPU is added.
5110 * Here we can start up the necessary migration thread for the new CPU.
5112 static int __cpuinit
5113 migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5115 struct task_struct *p;
5116 int cpu = (long)hcpu;
5117 unsigned long flags;
5121 case CPU_LOCK_ACQUIRE:
5122 mutex_lock(&sched_hotcpu_mutex);
5125 case CPU_UP_PREPARE:
5126 case CPU_UP_PREPARE_FROZEN:
5127 p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
5130 p->flags |= PF_NOFREEZE;
5131 kthread_bind(p, cpu);
5132 /* Must be high prio: stop_machine expects to yield to it. */
5133 rq = task_rq_lock(p, &flags);
5134 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
5135 task_rq_unlock(rq, &flags);
5136 cpu_rq(cpu)->migration_thread = p;
5140 case CPU_ONLINE_FROZEN:
5141 /* Strictly unnecessary, as the first user will wake it. */
5142 wake_up_process(cpu_rq(cpu)->migration_thread);
5145 #ifdef CONFIG_HOTPLUG_CPU
5146 case CPU_UP_CANCELED:
5147 case CPU_UP_CANCELED_FROZEN:
5148 if (!cpu_rq(cpu)->migration_thread)
5150 /* Unbind it from offline cpu so it can run. Fall thru. */
5151 kthread_bind(cpu_rq(cpu)->migration_thread,
5152 any_online_cpu(cpu_online_map));
5153 kthread_stop(cpu_rq(cpu)->migration_thread);
5154 cpu_rq(cpu)->migration_thread = NULL;
5158 case CPU_DEAD_FROZEN:
5159 migrate_live_tasks(cpu);
5161 kthread_stop(rq->migration_thread);
5162 rq->migration_thread = NULL;
5163 /* Idle task back to normal (off runqueue, low prio) */
5164 rq = task_rq_lock(rq->idle, &flags);
5165 deactivate_task(rq, rq->idle, 0);
5166 rq->idle->static_prio = MAX_PRIO;
5167 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
5168 rq->idle->sched_class = &idle_sched_class;
5169 migrate_dead_tasks(cpu);
5170 task_rq_unlock(rq, &flags);
5171 migrate_nr_uninterruptible(rq);
5172 BUG_ON(rq->nr_running != 0);
5174 /* No need to migrate the tasks: it was best-effort if
5175 * they didn't take sched_hotcpu_mutex. Just wake up
5176 * the requestors. */
5177 spin_lock_irq(&rq->lock);
5178 while (!list_empty(&rq->migration_queue)) {
5179 struct migration_req *req;
5181 req = list_entry(rq->migration_queue.next,
5182 struct migration_req, list);
5183 list_del_init(&req->list);
5184 complete(&req->done);
5186 spin_unlock_irq(&rq->lock);
5189 case CPU_LOCK_RELEASE:
5190 mutex_unlock(&sched_hotcpu_mutex);
5196 /* Register at highest priority so that task migration (migrate_all_tasks)
5197 * happens before everything else.
5199 static struct notifier_block __cpuinitdata migration_notifier = {
5200 .notifier_call = migration_call,
5204 int __init migration_init(void)
5206 void *cpu = (void *)(long)smp_processor_id();
5209 /* Start one for the boot CPU: */
5210 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5211 BUG_ON(err == NOTIFY_BAD);
5212 migration_call(&migration_notifier, CPU_ONLINE, cpu);
5213 register_cpu_notifier(&migration_notifier);
5221 /* Number of possible processor ids */
5222 int nr_cpu_ids __read_mostly = NR_CPUS;
5223 EXPORT_SYMBOL(nr_cpu_ids);
5225 #undef SCHED_DOMAIN_DEBUG
5226 #ifdef SCHED_DOMAIN_DEBUG
5227 static void sched_domain_debug(struct sched_domain *sd, int cpu)
5232 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
5236 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
5241 struct sched_group *group = sd->groups;
5242 cpumask_t groupmask;
5244 cpumask_scnprintf(str, NR_CPUS, sd->span);
5245 cpus_clear(groupmask);
5248 for (i = 0; i < level + 1; i++)
5250 printk("domain %d: ", level);
5252 if (!(sd->flags & SD_LOAD_BALANCE)) {
5253 printk("does not load-balance\n");
5255 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5260 printk("span %s\n", str);
5262 if (!cpu_isset(cpu, sd->span))
5263 printk(KERN_ERR "ERROR: domain->span does not contain "
5265 if (!cpu_isset(cpu, group->cpumask))
5266 printk(KERN_ERR "ERROR: domain->groups does not contain"
5270 for (i = 0; i < level + 2; i++)
5276 printk(KERN_ERR "ERROR: group is NULL\n");
5280 if (!group->__cpu_power) {
5282 printk(KERN_ERR "ERROR: domain->cpu_power not "
5286 if (!cpus_weight(group->cpumask)) {
5288 printk(KERN_ERR "ERROR: empty group\n");
5291 if (cpus_intersects(groupmask, group->cpumask)) {
5293 printk(KERN_ERR "ERROR: repeated CPUs\n");
5296 cpus_or(groupmask, groupmask, group->cpumask);
5298 cpumask_scnprintf(str, NR_CPUS, group->cpumask);
5301 group = group->next;
5302 } while (group != sd->groups);
5305 if (!cpus_equal(sd->span, groupmask))
5306 printk(KERN_ERR "ERROR: groups don't span "
5314 if (!cpus_subset(groupmask, sd->span))
5315 printk(KERN_ERR "ERROR: parent span is not a superset "
5316 "of domain->span\n");
5321 # define sched_domain_debug(sd, cpu) do { } while (0)
5324 static int sd_degenerate(struct sched_domain *sd)
5326 if (cpus_weight(sd->span) == 1)
5329 /* Following flags need at least 2 groups */
5330 if (sd->flags & (SD_LOAD_BALANCE |
5331 SD_BALANCE_NEWIDLE |
5335 SD_SHARE_PKG_RESOURCES)) {
5336 if (sd->groups != sd->groups->next)
5340 /* Following flags don't use groups */
5341 if (sd->flags & (SD_WAKE_IDLE |
5350 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
5352 unsigned long cflags = sd->flags, pflags = parent->flags;
5354 if (sd_degenerate(parent))
5357 if (!cpus_equal(sd->span, parent->span))
5360 /* Does parent contain flags not in child? */
5361 /* WAKE_BALANCE is a subset of WAKE_AFFINE */
5362 if (cflags & SD_WAKE_AFFINE)
5363 pflags &= ~SD_WAKE_BALANCE;
5364 /* Flags needing groups don't count if only 1 group in parent */
5365 if (parent->groups == parent->groups->next) {
5366 pflags &= ~(SD_LOAD_BALANCE |
5367 SD_BALANCE_NEWIDLE |
5371 SD_SHARE_PKG_RESOURCES);
5373 if (~cflags & pflags)
5380 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
5381 * hold the hotplug lock.
5383 static void cpu_attach_domain(struct sched_domain *sd, int cpu)
5385 struct rq *rq = cpu_rq(cpu);
5386 struct sched_domain *tmp;
5388 /* Remove the sched domains which do not contribute to scheduling. */
5389 for (tmp = sd; tmp; tmp = tmp->parent) {
5390 struct sched_domain *parent = tmp->parent;
5393 if (sd_parent_degenerate(tmp, parent)) {
5394 tmp->parent = parent->parent;
5396 parent->parent->child = tmp;
5400 if (sd && sd_degenerate(sd)) {
5406 sched_domain_debug(sd, cpu);
5408 rcu_assign_pointer(rq->sd, sd);
5411 /* cpus with isolated domains */
5412 static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
5414 /* Setup the mask of cpus configured for isolated domains */
5415 static int __init isolated_cpu_setup(char *str)
5417 int ints[NR_CPUS], i;
5419 str = get_options(str, ARRAY_SIZE(ints), ints);
5420 cpus_clear(cpu_isolated_map);
5421 for (i = 1; i <= ints[0]; i++)
5422 if (ints[i] < NR_CPUS)
5423 cpu_set(ints[i], cpu_isolated_map);
5427 __setup ("isolcpus=", isolated_cpu_setup);
5430 * init_sched_build_groups takes the cpumask we wish to span, and a pointer
5431 * to a function which identifies what group (along with the sched group) a CPU
5432 * belongs to. The return value of group_fn must be >= 0 and < NR_CPUS
5433 * (due to the fact that we keep track of groups covered with a cpumask_t).
5435 * init_sched_build_groups will build a circular linked list of the groups
5436 * covered by the given span, and will set each group's ->cpumask correctly,
5437 * and ->cpu_power to 0.
5440 init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
5441 int (*group_fn)(int cpu, const cpumask_t *cpu_map,
5442 struct sched_group **sg))
5444 struct sched_group *first = NULL, *last = NULL;
5445 cpumask_t covered = CPU_MASK_NONE;
5448 for_each_cpu_mask(i, span) {
5449 struct sched_group *sg;
5450 int group = group_fn(i, cpu_map, &sg);
5453 if (cpu_isset(i, covered))
5456 sg->cpumask = CPU_MASK_NONE;
5457 sg->__cpu_power = 0;
5459 for_each_cpu_mask(j, span) {
5460 if (group_fn(j, cpu_map, NULL) != group)
5463 cpu_set(j, covered);
5464 cpu_set(j, sg->cpumask);
5475 #define SD_NODES_PER_DOMAIN 16
5480 * find_next_best_node - find the next node to include in a sched_domain
5481 * @node: node whose sched_domain we're building
5482 * @used_nodes: nodes already in the sched_domain
5484 * Find the next node to include in a given scheduling domain. Simply
5485 * finds the closest node not already in the @used_nodes map.
5487 * Should use nodemask_t.
5489 static int find_next_best_node(int node, unsigned long *used_nodes)
5491 int i, n, val, min_val, best_node = 0;
5495 for (i = 0; i < MAX_NUMNODES; i++) {
5496 /* Start at @node */
5497 n = (node + i) % MAX_NUMNODES;
5499 if (!nr_cpus_node(n))
5502 /* Skip already used nodes */
5503 if (test_bit(n, used_nodes))
5506 /* Simple min distance search */
5507 val = node_distance(node, n);
5509 if (val < min_val) {
5515 set_bit(best_node, used_nodes);
5520 * sched_domain_node_span - get a cpumask for a node's sched_domain
5521 * @node: node whose cpumask we're constructing
5522 * @size: number of nodes to include in this span
5524 * Given a node, construct a good cpumask for its sched_domain to span. It
5525 * should be one that prevents unnecessary balancing, but also spreads tasks out optimally.
5528 static cpumask_t sched_domain_node_span(int node)
5530 DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
5531 cpumask_t span, nodemask;
5535 bitmap_zero(used_nodes, MAX_NUMNODES);
5537 nodemask = node_to_cpumask(node);
5538 cpus_or(span, span, nodemask);
5539 set_bit(node, used_nodes);
5541 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
5542 int next_node = find_next_best_node(node, used_nodes);
5544 nodemask = node_to_cpumask(next_node);
5545 cpus_or(span, span, nodemask);
5552 int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
5555 * SMT sched-domains:
5557 #ifdef CONFIG_SCHED_SMT
5558 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
5559 static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
5561 static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map,
5562 struct sched_group **sg)
5565 *sg = &per_cpu(sched_group_cpus, cpu);
5571 * multi-core sched-domains:
5573 #ifdef CONFIG_SCHED_MC
5574 static DEFINE_PER_CPU(struct sched_domain, core_domains);
5575 static DEFINE_PER_CPU(struct sched_group, sched_group_core);
5578 #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
5579 static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
5580 struct sched_group **sg)
5583 cpumask_t mask = cpu_sibling_map[cpu];
5584 cpus_and(mask, mask, *cpu_map);
5585 group = first_cpu(mask);
5587 *sg = &per_cpu(sched_group_core, group);
5590 #elif defined(CONFIG_SCHED_MC)
5591 static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
5592 struct sched_group **sg)
5595 *sg = &per_cpu(sched_group_core, cpu);
5600 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
5601 static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
5603 static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
5604 struct sched_group **sg)
5607 #ifdef CONFIG_SCHED_MC
5608 cpumask_t mask = cpu_coregroup_map(cpu);
5609 cpus_and(mask, mask, *cpu_map);
5610 group = first_cpu(mask);
5611 #elif defined(CONFIG_SCHED_SMT)
5612 cpumask_t mask = cpu_sibling_map[cpu];
5613 cpus_and(mask, mask, *cpu_map);
5614 group = first_cpu(mask);
5619 *sg = &per_cpu(sched_group_phys, group);
5625 * The init_sched_build_groups can't handle what we want to do with node
5626 * groups, so roll our own. Now each node has its own list of groups which
5627 * gets dynamically allocated.
5629 static DEFINE_PER_CPU(struct sched_domain, node_domains);
5630 static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];
5632 static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
5633 static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes);
5635 static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
5636 struct sched_group **sg)
5638 cpumask_t nodemask = node_to_cpumask(cpu_to_node(cpu));
5641 cpus_and(nodemask, nodemask, *cpu_map);
5642 group = first_cpu(nodemask);
5645 *sg = &per_cpu(sched_group_allnodes, group);
5649 static void init_numa_sched_groups_power(struct sched_group *group_head)
5651 struct sched_group *sg = group_head;
5657 for_each_cpu_mask(j, sg->cpumask) {
5658 struct sched_domain *sd;
5660 sd = &per_cpu(phys_domains, j);
5661 if (j != first_cpu(sd->groups->cpumask)) {
5663 * Only add "power" once for each
5669 sg_inc_cpu_power(sg, sd->groups->__cpu_power);
5672 if (sg != group_head)
5678 /* Free memory allocated for various sched_group structures */
5679 static void free_sched_groups(const cpumask_t *cpu_map)
5683 for_each_cpu_mask(cpu, *cpu_map) {
5684 struct sched_group **sched_group_nodes
5685 = sched_group_nodes_bycpu[cpu];
5687 if (!sched_group_nodes)
5690 for (i = 0; i < MAX_NUMNODES; i++) {
5691 cpumask_t nodemask = node_to_cpumask(i);
5692 struct sched_group *oldsg, *sg = sched_group_nodes[i];
5694 cpus_and(nodemask, nodemask, *cpu_map);
5695 if (cpus_empty(nodemask))
5705 if (oldsg != sched_group_nodes[i])
5708 kfree(sched_group_nodes);
5709 sched_group_nodes_bycpu[cpu] = NULL;
5713 static void free_sched_groups(const cpumask_t *cpu_map)
5719 * Initialize sched groups cpu_power.
5721 * cpu_power indicates the capacity of sched group, which is used while
5722 * distributing the load between different sched groups in a sched domain.
5723 * Typically cpu_power for all the groups in a sched domain will be the same
5724 * unless there are asymmetries in the topology. If there are asymmetries, the
5725 * group having more cpu_power will pick up more load compared to the group having less cpu_power.
5728 * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
5729 * the maximum number of tasks a group can handle in the presence of other idle
5730 * or lightly loaded groups in the same sched domain.
5732 static void init_sched_groups_power(int cpu, struct sched_domain *sd)
5734 struct sched_domain *child;
5735 struct sched_group *group;
5737 WARN_ON(!sd || !sd->groups);
5739 if (cpu != first_cpu(sd->groups->cpumask))
5744 sd->groups->__cpu_power = 0;
5747 * For perf policy, if the groups in child domain share resources
5748 * (for example cores sharing some portions of the cache hierarchy
5749 * or SMT), then set this domain groups cpu_power such that each group
5750 * can handle only one task, when there are other idle groups in the
5751 * same sched domain.
5753 if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
5755 (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
5756 sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE);
5761 * add cpu_power of each child group to this groups cpu_power
5763 group = child->groups;
5765 sg_inc_cpu_power(sd->groups, group->__cpu_power);
5766 group = group->next;
5767 } while (group != child->groups);
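/*
 * Worked example (illustrative): for a domain whose child groups do not
 * share CPU power or package resources, the loop above sums the children,
 * so a physical package built from two core groups of SCHED_LOAD_SCALE each
 * ends up with 2 * SCHED_LOAD_SCALE.  When the child does share resources
 * (e.g. SMT siblings) and no power-savings policy is set, the group is
 * instead pinned to a single SCHED_LOAD_SCALE, i.e. capacity for one task.
 */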
5771 * Build sched domains for a given set of cpus and attach the sched domains
5772 * to the individual cpus
5774 static int build_sched_domains(const cpumask_t *cpu_map)
5778 struct sched_group **sched_group_nodes = NULL;
5779 int sd_allnodes = 0;
5782 * Allocate the per-node list of sched groups
5784 sched_group_nodes = kzalloc(sizeof(struct sched_group *)*MAX_NUMNODES,
5786 if (!sched_group_nodes) {
5787 printk(KERN_WARNING "Can not alloc sched group node list\n");
5790 sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
5794 * Set up domains for cpus specified by the cpu_map.
5796 for_each_cpu_mask(i, *cpu_map) {
5797 struct sched_domain *sd = NULL, *p;
5798 cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
5800 cpus_and(nodemask, nodemask, *cpu_map);
5803 if (cpus_weight(*cpu_map) >
5804 SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
5805 sd = &per_cpu(allnodes_domains, i);
5806 *sd = SD_ALLNODES_INIT;
5807 sd->span = *cpu_map;
5808 cpu_to_allnodes_group(i, cpu_map, &sd->groups);
5814 sd = &per_cpu(node_domains, i);
5816 sd->span = sched_domain_node_span(cpu_to_node(i));
5820 cpus_and(sd->span, sd->span, *cpu_map);
5824 sd = &per_cpu(phys_domains, i);
5826 sd->span = nodemask;
5830 cpu_to_phys_group(i, cpu_map, &sd->groups);
5832 #ifdef CONFIG_SCHED_MC
5834 sd = &per_cpu(core_domains, i);
5836 sd->span = cpu_coregroup_map(i);
5837 cpus_and(sd->span, sd->span, *cpu_map);
5840 cpu_to_core_group(i, cpu_map, &sd->groups);
5843 #ifdef CONFIG_SCHED_SMT
5845 sd = &per_cpu(cpu_domains, i);
5846 *sd = SD_SIBLING_INIT;
5847 sd->span = cpu_sibling_map[i];
5848 cpus_and(sd->span, sd->span, *cpu_map);
5851 cpu_to_cpu_group(i, cpu_map, &sd->groups);
5855 #ifdef CONFIG_SCHED_SMT
5856 /* Set up CPU (sibling) groups */
5857 for_each_cpu_mask(i, *cpu_map) {
5858 cpumask_t this_sibling_map = cpu_sibling_map[i];
5859 cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
5860 if (i != first_cpu(this_sibling_map))
5863 init_sched_build_groups(this_sibling_map, cpu_map,
5868 #ifdef CONFIG_SCHED_MC
5869 /* Set up multi-core groups */
5870 for_each_cpu_mask(i, *cpu_map) {
5871 cpumask_t this_core_map = cpu_coregroup_map(i);
5872 cpus_and(this_core_map, this_core_map, *cpu_map);
5873 if (i != first_cpu(this_core_map))
5875 init_sched_build_groups(this_core_map, cpu_map,
5876 &cpu_to_core_group);
5880 /* Set up physical groups */
5881 for (i = 0; i < MAX_NUMNODES; i++) {
5882 cpumask_t nodemask = node_to_cpumask(i);
5884 cpus_and(nodemask, nodemask, *cpu_map);
5885 if (cpus_empty(nodemask))
5888 init_sched_build_groups(nodemask, cpu_map, &cpu_to_phys_group);
5892 /* Set up node groups */
5894 init_sched_build_groups(*cpu_map, cpu_map,
5895 &cpu_to_allnodes_group);
5897 for (i = 0; i < MAX_NUMNODES; i++) {
5898 /* Set up node groups */
5899 struct sched_group *sg, *prev;
5900 cpumask_t nodemask = node_to_cpumask(i);
5901 cpumask_t domainspan;
5902 cpumask_t covered = CPU_MASK_NONE;
5905 cpus_and(nodemask, nodemask, *cpu_map);
5906 if (cpus_empty(nodemask)) {
5907 sched_group_nodes[i] = NULL;
5911 domainspan = sched_domain_node_span(i);
5912 cpus_and(domainspan, domainspan, *cpu_map);
5914 sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
5916 printk(KERN_WARNING "Can not alloc domain group for "
5920 sched_group_nodes[i] = sg;
5921 for_each_cpu_mask(j, nodemask) {
5922 struct sched_domain *sd;
5924 sd = &per_cpu(node_domains, j);
5927 sg->__cpu_power = 0;
5928 sg->cpumask = nodemask;
5930 cpus_or(covered, covered, nodemask);
5933 for (j = 0; j < MAX_NUMNODES; j++) {
5934 cpumask_t tmp, notcovered;
5935 int n = (i + j) % MAX_NUMNODES;
5937 cpus_complement(notcovered, covered);
5938 cpus_and(tmp, notcovered, *cpu_map);
5939 cpus_and(tmp, tmp, domainspan);
5940 if (cpus_empty(tmp))
5943 nodemask = node_to_cpumask(n);
5944 cpus_and(tmp, tmp, nodemask);
5945 if (cpus_empty(tmp))
5948 sg = kmalloc_node(sizeof(struct sched_group),
5952 "Can not alloc domain group for node %d\n", j);
5955 sg->__cpu_power = 0;
5957 sg->next = prev->next;
5958 cpus_or(covered, covered, tmp);
5965 /* Calculate CPU power for physical packages and nodes */
5966 #ifdef CONFIG_SCHED_SMT
5967 for_each_cpu_mask(i, *cpu_map) {
5968 struct sched_domain *sd = &per_cpu(cpu_domains, i);
5970 init_sched_groups_power(i, sd);
5973 #ifdef CONFIG_SCHED_MC
5974 for_each_cpu_mask(i, *cpu_map) {
5975 struct sched_domain *sd = &per_cpu(core_domains, i);
5977 init_sched_groups_power(i, sd);
5981 for_each_cpu_mask(i, *cpu_map) {
5982 struct sched_domain *sd = &per_cpu(phys_domains, i);
5984 init_sched_groups_power(i, sd);
5988 for (i = 0; i < MAX_NUMNODES; i++)
5989 init_numa_sched_groups_power(sched_group_nodes[i]);
5992 struct sched_group *sg;
5994 cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg);
5995 init_numa_sched_groups_power(sg);
5999 /* Attach the domains */
6000 for_each_cpu_mask(i, *cpu_map) {
6001 struct sched_domain *sd;
6002 #ifdef CONFIG_SCHED_SMT
6003 sd = &per_cpu(cpu_domains, i);
6004 #elif defined(CONFIG_SCHED_MC)
6005 sd = &per_cpu(core_domains, i);
6007 sd = &per_cpu(phys_domains, i);
6009 cpu_attach_domain(sd, i);
6016 free_sched_groups(cpu_map);
6021 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
6023 static int arch_init_sched_domains(const cpumask_t *cpu_map)
6025 cpumask_t cpu_default_map;
6029 * Setup mask for cpus without special case scheduling requirements.
6030 * For now this just excludes isolated cpus, but could be used to
6031 * exclude other special cases in the future.
6033 cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
6035 err = build_sched_domains(&cpu_default_map);
6040 static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
6042 free_sched_groups(cpu_map);
6046 * Detach sched domains from a group of cpus specified in cpu_map
6047 * These cpus will now be attached to the NULL domain
6049 static void detach_destroy_domains(const cpumask_t *cpu_map)
6053 for_each_cpu_mask(i, *cpu_map)
6054 cpu_attach_domain(NULL, i);
6055 synchronize_sched();
6056 arch_destroy_sched_domains(cpu_map);
6060 * Partition sched domains as specified by the cpumasks below.
6061 * This attaches all cpus from the cpumasks to the NULL domain,
6062 * waits for an RCU quiescent period, recalculates the sched
6063 * domain information and then attaches the cpus back to the
6064 * correct sched domains.
6065 * Call with hotplug lock held.
6067 int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
6069 cpumask_t change_map;
6072 cpus_and(*partition1, *partition1, cpu_online_map);
6073 cpus_and(*partition2, *partition2, cpu_online_map);
6074 cpus_or(change_map, *partition1, *partition2);
6076 /* Detach sched domains from all of the affected cpus */
6077 detach_destroy_domains(&change_map);
6078 if (!cpus_empty(*partition1))
6079 err = build_sched_domains(partition1);
6080 if (!err && !cpus_empty(*partition2))
6081 err = build_sched_domains(partition2);
6086 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
6087 int arch_reinit_sched_domains(void)
6091 mutex_lock(&sched_hotcpu_mutex);
6092 detach_destroy_domains(&cpu_online_map);
6093 err = arch_init_sched_domains(&cpu_online_map);
6094 mutex_unlock(&sched_hotcpu_mutex);
6099 static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
6103 if (buf[0] != '0' && buf[0] != '1')
6107 sched_smt_power_savings = (buf[0] == '1');
6109 sched_mc_power_savings = (buf[0] == '1');
6111 ret = arch_reinit_sched_domains();
6113 return ret ? ret : count;
6116 int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
6120 #ifdef CONFIG_SCHED_SMT
6122 err = sysfs_create_file(&cls->kset.kobj,
6123 &attr_sched_smt_power_savings.attr);
6125 #ifdef CONFIG_SCHED_MC
6126 if (!err && mc_capable())
6127 err = sysfs_create_file(&cls->kset.kobj,
6128 &attr_sched_mc_power_savings.attr);
6134 #ifdef CONFIG_SCHED_MC
6135 static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
6137 return sprintf(page, "%u\n", sched_mc_power_savings);
6139 static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
6140 const char *buf, size_t count)
6142 return sched_power_savings_store(buf, count, 0);
6144 SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
6145 sched_mc_power_savings_store);
6148 #ifdef CONFIG_SCHED_SMT
6149 static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page)
6151 return sprintf(page, "%u\n", sched_smt_power_savings);
6153 static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
6154 const char *buf, size_t count)
6156 return sched_power_savings_store(buf, count, 1);
6158 SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
6159 sched_smt_power_savings_store);
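/*
 * Usage note (hedged): when registered on the cpu sysdev class these
 * attributes typically appear as /sys/devices/system/cpu/sched_mc_power_savings
 * and /sys/devices/system/cpu/sched_smt_power_savings; writing '0' or '1'
 * rebuilds the sched domains via arch_reinit_sched_domains() above.
 */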
6163 * Force a reinitialization of the sched domains hierarchy. The domains
6164 * and groups cannot be updated in place without racing with the balancing
6165 * code, so we temporarily attach all running cpus to the NULL domain
6166 * which will prevent rebalancing while the sched domains are recalculated.
6168 static int update_sched_domains(struct notifier_block *nfb,
6169 unsigned long action, void *hcpu)
6172 case CPU_UP_PREPARE:
6173 case CPU_UP_PREPARE_FROZEN:
6174 case CPU_DOWN_PREPARE:
6175 case CPU_DOWN_PREPARE_FROZEN:
6176 detach_destroy_domains(&cpu_online_map);
6179 case CPU_UP_CANCELED:
6180 case CPU_UP_CANCELED_FROZEN:
6181 case CPU_DOWN_FAILED:
6182 case CPU_DOWN_FAILED_FROZEN:
6184 case CPU_ONLINE_FROZEN:
6186 case CPU_DEAD_FROZEN:
6188 * Fall through and re-initialise the domains.
6195 /* The hotplug lock is already held by cpu_up/cpu_down */
6196 arch_init_sched_domains(&cpu_online_map);
6201 void __init sched_init_smp(void)
6203 cpumask_t non_isolated_cpus;
6205 mutex_lock(&sched_hotcpu_mutex);
6206 arch_init_sched_domains(&cpu_online_map);
6207 cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
6208 if (cpus_empty(non_isolated_cpus))
6209 cpu_set(smp_processor_id(), non_isolated_cpus);
6210 mutex_unlock(&sched_hotcpu_mutex);
6211 /* XXX: Theoretical race here - CPU may be hotplugged now */
6212 hotcpu_notifier(update_sched_domains, 0);
6214 /* Move init over to a non-isolated CPU */
6215 if (set_cpus_allowed(current, non_isolated_cpus) < 0)
6217 sched_init_granularity();
6220 void __init sched_init_smp(void)
6222 sched_init_granularity();
6224 #endif /* CONFIG_SMP */
6226 int in_sched_functions(unsigned long addr)
6228 /* Linker adds these: start and end of __sched functions */
6229 extern char __sched_text_start[], __sched_text_end[];
6231 return in_lock_functions(addr) ||
6232 (addr >= (unsigned long)__sched_text_start
6233 && addr < (unsigned long)__sched_text_end);
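/*
 * Usage note (hedged): architectures typically call in_sched_functions()
 * from their get_wchan() implementations to skip scheduler internals when
 * reporting where a blocked task is sleeping.
 */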
6236 static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
6238 cfs_rq->tasks_timeline = RB_ROOT;
6239 cfs_rq->fair_clock = 1;
6240 #ifdef CONFIG_FAIR_GROUP_SCHED
6245 void __init sched_init(void)
6247 u64 now = sched_clock();
6248 int highest_cpu = 0;
6252 * Link up the scheduling class hierarchy:
6254 rt_sched_class.next = &fair_sched_class;
6255 fair_sched_class.next = &idle_sched_class;
6256 idle_sched_class.next = NULL;
6258 for_each_possible_cpu(i) {
6259 struct rt_prio_array *array;
6263 spin_lock_init(&rq->lock);
6264 lockdep_set_class(&rq->lock, &rq->rq_lock_key);
6267 init_cfs_rq(&rq->cfs, rq);
6268 #ifdef CONFIG_FAIR_GROUP_SCHED
6269 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
6270 list_add(&rq->cfs.leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
6272 rq->ls.load_update_last = now;
6273 rq->ls.load_update_start = now;
6275 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
6276 rq->cpu_load[j] = 0;
6279 rq->active_balance = 0;
6280 rq->next_balance = jiffies;
6283 rq->migration_thread = NULL;
6284 INIT_LIST_HEAD(&rq->migration_queue);
6286 atomic_set(&rq->nr_iowait, 0);
6288 array = &rq->rt.active;
6289 for (j = 0; j < MAX_RT_PRIO; j++) {
6290 INIT_LIST_HEAD(array->queue + j);
6291 __clear_bit(j, array->bitmap);
6294 /* delimiter for bitsearch: */
6295 __set_bit(MAX_RT_PRIO, array->bitmap);
6298 set_load_weight(&init_task);
6301 nr_cpu_ids = highest_cpu + 1;
6302 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
6305 #ifdef CONFIG_RT_MUTEXES
6306 plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
6310 * The boot idle thread does lazy MMU switching as well:
6312 atomic_inc(&init_mm.mm_count);
6313 enter_lazy_tlb(&init_mm, current);
6316 * Make us the idle thread. Technically, schedule() should not be
6317 * called from this thread, however somewhere below it might be,
6318 * but because we are the idle thread, we just pick up running again
6319 * when this runqueue becomes "idle".
6321 init_idle(current, smp_processor_id());
6323 * During early bootup we pretend to be a normal task:
6325 current->sched_class = &fair_sched_class;
6328 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
6329 void __might_sleep(char *file, int line)
6332 static unsigned long prev_jiffy; /* ratelimiting */
6334 if ((in_atomic() || irqs_disabled()) &&
6335 system_state == SYSTEM_RUNNING && !oops_in_progress) {
6336 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
6338 prev_jiffy = jiffies;
6339 printk(KERN_ERR "BUG: sleeping function called from invalid"
6340 " context at %s:%d\n", file, line);
6341 printk("in_atomic():%d, irqs_disabled():%d\n",
6342 in_atomic(), irqs_disabled());
6343 debug_show_held_locks(current);
6344 if (irqs_disabled())
6345 print_irqtrace_events(current);
6350 EXPORT_SYMBOL(__might_sleep);
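/*
 * Illustrative sketch (not part of this file): callers normally reach
 * __might_sleep() through the might_sleep() macro, placed at the top of
 * functions that may block.  The helper below is made up for illustration.
 */
static void example_blocking_helper(void)
{
	might_sleep();	/* complains if we are atomic or irqs are disabled */

	/* ... code that may take a mutex, call schedule(), etc. ... */
}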
6353 #ifdef CONFIG_MAGIC_SYSRQ
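/*
 * Hedged note: normalize_rt_tasks() is normally reached via the magic
 * SysRq 'n' ("nice") key, as an emergency way to demote all real-time
 * tasks back to SCHED_NORMAL.
 */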
6354 void normalize_rt_tasks(void)
6356 struct task_struct *g, *p;
6357 unsigned long flags;
6361 read_lock_irq(&tasklist_lock);
6362 do_each_thread(g, p) {
6364 p->se.wait_runtime = 0;
6365 p->se.wait_start_fair = 0;
6366 p->se.wait_start = 0;
6367 p->se.exec_start = 0;
6368 p->se.sleep_start = 0;
6369 p->se.sleep_start_fair = 0;
6370 p->se.block_start = 0;
6371 task_rq(p)->cfs.fair_clock = 0;
6372 task_rq(p)->clock = 0;
6376 * Renice negative nice level userspace tasks.
6379 if (TASK_NICE(p) < 0 && p->mm)
6380 set_user_nice(p, 0);
6384 spin_lock_irqsave(&p->pi_lock, flags);
6385 rq = __task_rq_lock(p);
6388 * Do not touch the migration thread:
6390 if (p == rq->migration_thread)
6394 on_rq = p->se.on_rq;
6396 deactivate_task(task_rq(p), p, 0);
6397 __setscheduler(rq, p, SCHED_NORMAL, 0);
6399 activate_task(task_rq(p), p, 0);
6400 resched_task(rq->curr);
6405 __task_rq_unlock(rq);
6406 spin_unlock_irqrestore(&p->pi_lock, flags);
6407 } while_each_thread(g, p);
6409 read_unlock_irq(&tasklist_lock);
6412 #endif /* CONFIG_MAGIC_SYSRQ */
6416 * These functions are only useful for the IA64 MCA handling.
6418 * They can only be called when the whole system has been
6419 * stopped - every CPU needs to be quiescent, and no scheduling
6420 * activity can take place. Using them for anything else would
6421 * be a serious bug, and as a result, they aren't even visible
6422 * under any other configuration.
6426 * curr_task - return the current task for a given cpu.
6427 * @cpu: the processor in question.
6429 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6431 struct task_struct *curr_task(int cpu)
6433 return cpu_curr(cpu);
6437 * set_curr_task - set the current task for a given cpu.
6438 * @cpu: the processor in question.
6439 * @p: the task pointer to set.
6441 * Description: This function must only be used when non-maskable interrupts
6442 * are serviced on a separate stack. It allows the architecture to switch the
6443 * notion of the current task on a cpu in a non-blocking manner. This function
6444 * must be called with all CPUs synchronized and interrupts disabled; the
6445 * caller must save the original value of the current task (see
6446 * curr_task() above) and restore that value before reenabling interrupts and
6447 * re-starting the system.
6449 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6451 void set_curr_task(int cpu, struct task_struct *p)