/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <trace/events/power.h>

#include <trace/events/sched.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
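/*
 * Example (sketch; my_notifier and init_my_percpu_state are illustrative
 * names, not part of this file): the registration protocol the comment
 * above describes, for code that must initialize already-online CPUs and
 * then register a callback without racing against hotplug:
 *
 *	cpu_notifier_register_begin();
 *	for_each_online_cpu(cpu)
 *		init_my_percpu_state(cpu);
 *	__register_cpu_notifier(&my_notifier);
 *	cpu_notifier_register_done();
 */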
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
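/*
 * Example: a minimal read-side sketch (example_walk_online_cpus() is an
 * illustrative name, not part of this file). Holding get_online_cpus()
 * keeps the online mask stable across the loop.
 */
static void __maybe_unused example_walk_online_cpus(void)
{
	int cpu;

	get_online_cpus();		/* blocks cpu_hotplug_begin() writers */
	for_each_online_cpu(cpu)	/* no CPU can go away under us */
		pr_info("cpu%d is online\n", cpu);
	put_online_cpus();		/* drops refcount, wakes a waiting writer */
}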
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
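/*
 * Example: a sketch of pairing cpu_hotplug_disable()/cpu_hotplug_enable()
 * around work that must not observe CPUs coming or going (illustrative
 * helper; not part of this file).
 */
static void __maybe_unused example_hotplug_free_section(void)
{
	cpu_hotplug_disable();	/* cpu_up()/cpu_down() now fail with -EBUSY */
	/* ... work that cannot tolerate concurrent hotplug ... */
	cpu_hotplug_enable();	/* drop the disable count taken above */
}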
#endif	/* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
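/*
 * Example: a minimal notifier client (sketch; the example_* names are
 * illustrative, not part of this file). Masking with ~CPU_TASKS_FROZEN
 * handles the _FROZEN variants of each event the same way, as
 * smpboot_thread_call() below does.
 */
static int __maybe_unused example_cpu_callback(struct notifier_block *nfb,
					       unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		pr_info("cpu%u is now online\n", cpu);
		break;
	case CPU_DOWN_PREPARE:
		/* returning notifier_from_errno(-EAGAIN) would veto the unplug */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __maybe_unused example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
	/* registered with register_cpu_notifier(&example_cpu_notifier) */
};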
#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with an unlocked task_rq(p)->lock.
		 * Order the reads so that we do not warn about a task
		 * that ran on this cpu in the past and has just been
		 * woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}
struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park((long)param->hcpu);
	return 0;
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		synchronize_rcu_mult(call_rcu, call_rcu_sched);
	else
		synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone. Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		irq_unlock_sparse();
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone. Too late to complain. */
	tick_cleanup_dead_cpu(cpu);
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	trace_sched_cpu_hotplug(cpu, err, 0);
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
int cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();
	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	err = _cpu_down(cpu, 0);
out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
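/*
 * Example: a sketch of a full offline/online round trip, roughly what
 * writing 0 and then 1 to /sys/devices/system/cpu/cpuN/online does
 * (example_cycle_cpu() is an illustrative name, not part of this file).
 */
static int __maybe_unused example_cycle_cpu(unsigned int cpu)
{
	int err;

	err = cpu_down(cpu);	/* -EBUSY if 'cpu' is the last one online */
	if (err)
		return err;
	return cpu_up(cpu);	/* bring it back through _cpu_up() */
}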
#endif /*CONFIG_HOTPLUG_CPU*/
/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();
	trace_sched_cpu_hotplug(cpu, ret, 1);

	return ret;
}
int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();
	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	err = _cpu_up(cpu, 0);
out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;
	struct device *cpu_device;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			cpu_device = get_cpu_device(cpu);
			if (!cpu_device)
				pr_err("%s: failed to get cpu%d device\n",
				       __func__, cpu);
			else
				kobject_uevent(&cpu_device->kobj, KOBJ_ONLINE);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
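/*
 * Example: how the suspend core pairs the two functions above, roughly
 * (see kernel/power/suspend.c; error handling elided):
 *
 *	error = disable_nonboot_cpus();
 *	if (!error)
 *		...enter the sleep state...
 *	enable_nonboot_cpus();
 */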
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
	 * disabling cpu hotplug to avoid a cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}
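/*
 * Example: a sketch of the call site in arch secondary-start code
 * (function names vary per architecture; the ordering is what matters):
 *
 *	notify_cpu_starting(cpu);	// CPU_STARTING callbacks, irqs still off
 *	set_cpu_online(cpu, true);	// now visible to the rest of the system
 *	local_irq_enable();		// only then accept interrupts
 */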
#endif /* CONFIG_SMP */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each bit number nr, the NR_CPUS-bit value 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address for a CPU
 * mask value that has only a single bit set.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
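/*
 * Example: how the table is consumed. get_cpu_mask() in <linux/cpumask.h>
 * points into the table and backs up by whole words, roughly:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * so bit 'cpu' of the resulting mask lands exactly on the single stored
 * bit; the empty row [0] is what makes backing up safe.
 */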
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);
void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);

void idle_notifier_call_chain(unsigned long val)
{
	atomic_notifier_call_chain(&idle_notifier, val, NULL);
}
EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
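/*
 * Example: a minimal idle-notifier client (sketch; example_idle_callback
 * is an illustrative name, not part of this file). The meaning of 'val'
 * is defined by the arch code that invokes idle_notifier_call_chain();
 * this file only provides the chain.
 */
static int __maybe_unused example_idle_callback(struct notifier_block *nb,
						unsigned long val, void *unused)
{
	return NOTIFY_OK;
}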