/*
 * linux/kernel/time/tick-common.c
 *
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"
/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time
 */
ktime_t tick_next_period;
ktime_t tick_period;
/*
 * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
 * which is responsible for calling do_timer(), i.e. the timekeeping stuff. This
 * variable has two functions:
 *
 * 1) Prevent a thundering herd issue of a gazillion of CPUs trying to grab the
 *    timekeeping lock all at once. Only the CPU which is assigned to do the
 *    update is handling it.
 *
 * 2) Hand off the duty in the NOHZ idle case by setting the value to
 *    TICK_DO_TIMER_NONE, i.e. a non existing CPU. So the next cpu which looks
 *    at it will take over and keep the time keeping alive. The handover
 *    procedure also covers cpu hotplug.
 */
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
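/*
 * Note: TICK_DO_TIMER_BOOT and TICK_DO_TIMER_NONE are negative pseudo
 * CPU ids (defined in tick-internal.h), so they can never collide with
 * a real CPU number returned by smp_processor_id().
 */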
/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
	return &per_cpu(tick_cpu_device, cpu);
}
/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return 0;
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 1;
	return tick_broadcast_oneshot_available();
}
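/*
 * Note on the C3STOP check above: a device tagged with
 * CLOCK_EVT_FEAT_C3STOP stops when its CPU enters a deep idle state,
 * so oneshot mode is only usable if the broadcast machinery can take
 * over the wakeups. Without that flag the device keeps ticking in
 * idle and no broadcast support is required.
 */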
/*
 * Periodic tick
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		write_seqlock(&jiffies_lock);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_sequnlock(&jiffies_lock);
		update_wall_time();
	}

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}
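/*
 * tick_periodic() is the write side of the jiffies seqlock: do_timer()
 * advances jiffies_64 and tick_next_period is moved forward under
 * write_seqlock(), so that readers (e.g. the read_seqbegin()/
 * read_seqretry() loop in tick_setup_periodic() below) always observe
 * a consistent value.
 */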
/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next = dev->next_event;

	tick_periodic(cpu);

#if defined(CONFIG_HIGH_RES_TIMERS) || defined(CONFIG_NO_HZ_COMMON)
	/*
	 * The cpu might have transitioned to HIGHRES or NOHZ mode via
	 * update_process_times() -> run_local_timers() ->
	 * hrtimer_run_queues().
	 */
	if (dev->event_handler != tick_handle_periodic)
		return;
#endif
	if (!clockevent_state_oneshot(dev))
		return;
	for (;;) {
		/*
		 * Setup the next period for devices, which do not have
		 * periodic mode:
		 */
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		/*
		 * Have to be careful here. If we're in oneshot mode,
		 * before we call tick_periodic() in a loop, we need
		 * to be sure we're using a real hardware clocksource.
		 * Otherwise we could get trapped in an infinite
		 * loop, as the tick_periodic() increments jiffies,
		 * which then will increment time, possibly causing
		 * the loop to trigger again and again.
		 */
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
	}
}
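/*
 * Note: clockevents_program_event() returns 0 on success and -ETIME
 * when the requested expiry is already in the past. That is why the
 * loop above keeps adding tick_period and, when a real hardware
 * clocksource backs the timekeeping, accounts each missed period via
 * tick_periodic() before retrying.
 */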
/*
 * Setup the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		do {
			seq = read_seqbegin(&jiffies_lock);
			next = tick_next_period;
		} while (read_seqretry(&jiffies_lock, seq));

		clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);

		for (;;) {
			if (!clockevents_program_event(dev, next, false))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}
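/*
 * The for (;;) loop in tick_setup_periodic() above walks the expiry
 * forward in tick_period steps until the event can be programmed at a
 * future point in time, which aligns the first oneshot event of the
 * device with the global tick.
 */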
/*
 * Setup the tick device
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      const struct cpumask *cpumask)
{
	ktime_t next_event;
	void (*handler)(struct clock_event_device *) = NULL;

	/*
	 * First device setup ?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
			if (!tick_nohz_full_cpu(cpu))
				tick_do_timer_cpu = cpu;
			else
				tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			tick_next_period = ktime_get();
			tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
		td->evtdev->event_handler = clockevents_handle_noop;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpumask_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way. This function also returns !=0 when we keep the
	 * current active broadcast state for this CPU.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}
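/*
 * Note: when an existing device is replaced (td->evtdev was set), its
 * handler and next expiry are handed over to the new device (via
 * tick_setup_oneshot() in the oneshot case), while the old device is
 * parked on clockevents_handle_noop() so that a late interrupt from it
 * cannot invoke a stale handler.
 */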
void tick_install_replacement(struct clock_event_device *newdev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	int cpu = smp_processor_id();

	clockevents_exchange_device(td->evtdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
}
static bool tick_check_percpu(struct clock_event_device *curdev,
			      struct clock_event_device *newdev, int cpu)
{
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		return false;
	if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
		return true;
	/* Check if irq affinity can be set */
	if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
		return false;
	/* Prefer an existing cpu local device */
	if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
		return false;
	return true;
}
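/*
 * Summary of the checks above: reject devices which cannot fire on
 * this CPU at all, accept strictly per-CPU devices right away, and
 * only fall back to a global device whose interrupt can be pinned to
 * this CPU when no CPU local device is already installed.
 */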
static bool tick_check_preferred(struct clock_event_device *curdev,
				 struct clock_event_device *newdev)
{
	/* Prefer oneshot capable device */
	if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
		if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
			return false;
		if (tick_oneshot_mode_active())
			return false;
	}

	/*
	 * Use the higher rated one, but prefer a CPU local device with a lower
	 * rating than a non-CPU local device
	 */
	return !curdev ||
		newdev->rating > curdev->rating ||
		!cpumask_equal(curdev->cpumask, newdev->cpumask);
}
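/*
 * Illustration (assumed, typical x86 ratings): a per-CPU local APIC
 * timer with rating 100 replaces a global HPET with rating 50. Because
 * the cpumask comparison above also accepts a device whose mask differs
 * from the current one, a CPU local device would win here even if its
 * rating were lower than that of the global device.
 */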
/*
 * Check whether the new device is a better fit than curdev. curdev
 * can be NULL !
 */
bool tick_check_replacement(struct clock_event_device *curdev,
			    struct clock_event_device *newdev)
{
	if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
		return false;

	return tick_check_preferred(curdev, newdev);
}
/*
 * Check whether the newly registered device should be used. Called with
 * clockevents_lock held and interrupts disabled.
 */
void tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu;

	cpu = smp_processor_id();
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		goto out_bc;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
	if (!tick_check_percpu(curdev, newdev, cpu))
		goto out_bc;

	/* Preference decision */
	if (!tick_check_preferred(curdev, newdev))
		goto out_bc;

	if (!try_module_get(newdev->owner))
		return;

	/*
	 * Replace the possibly existing device by the new device. If the
	 * current device is the broadcast device, do not give it back to
	 * the clockevents layer!
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
	return;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	tick_install_broadcast_device(newdev);
}
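/*
 * Note: try_module_get() above pins the driver module of the new
 * device for as long as it is installed as a tick device. Devices
 * which fail the per-CPU or preference checks are not discarded but
 * offered to the broadcast machinery via the out_bc path.
 */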
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled. No locking required. If
 * tick_do_timer_cpu is owned by this cpu, nothing can change it.
 */
void tick_handover_do_timer(void)
{
	if (tick_do_timer_cpu == smp_processor_id()) {
		int cpu = cpumask_first(cpu_online_mask);

		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
			TICK_DO_TIMER_NONE;
	}
}
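/*
 * cpumask_first() returns nr_cpu_ids when the mask is empty, so the
 * ternary above falls back to TICK_DO_TIMER_NONE in the (theoretical)
 * case that no online CPU is left; the next CPU which looks at the
 * variable then takes the duty over.
 */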
/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a live CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
void tick_shutdown(unsigned int cpu)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
	struct clock_event_device *dev = td->evtdev;

	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent the clock events layer from trying to call
		 * the set mode function!
		 */
		clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
		dev->mode = CLOCK_EVT_MODE_UNUSED;
		clockevents_exchange_device(dev, NULL);
		dev->event_handler = clockevents_handle_noop;
		td->evtdev = NULL;
	}
}
#endif
/**
 * tick_suspend_local - Suspend the local tick device
 *
 * Called from the local cpu for freeze with interrupts disabled.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_suspend_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

	clockevents_shutdown(td->evtdev);
}
/**
 * tick_resume_local - Resume the local tick device
 *
 * Called from the local CPU for unfreeze or XEN resume magic.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_resume_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool broadcast = tick_resume_check_broadcast();

	clockevents_tick_resume(td->evtdev);
	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}
}
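/*
 * Note: if the broadcast device carried the tick duty over suspend
 * (tick_resume_check_broadcast() returned true), restoring periodic or
 * oneshot operation is left to the broadcast code and only the device
 * resume callback is invoked here.
 */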
/**
 * tick_suspend - Suspend the tick and the broadcast device
 *
 * Called from syscore_suspend() via timekeeping_suspend with only one
 * CPU online and interrupts disabled or from tick_unfreeze() under
 * tick_freeze_lock.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_suspend(void)
{
	tick_suspend_local();
	tick_suspend_broadcast();
}
/**
 * tick_resume - Resume the tick and the broadcast device
 *
 * Called from syscore_resume() via timekeeping_resume with only one
 * CPU online and interrupts disabled.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_resume(void)
{
	tick_resume_broadcast();
	tick_resume_local();
}
#ifdef CONFIG_SUSPEND
static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
static unsigned int tick_freeze_depth;
/**
 * tick_freeze - Suspend the local tick and (possibly) timekeeping.
 *
 * Check if this is the last online CPU executing the function and if so,
 * suspend timekeeping. Otherwise suspend the local tick.
 *
 * Call with interrupts disabled. Must be balanced with %tick_unfreeze().
 * Interrupts must not be enabled before the subsequent %tick_unfreeze().
 */
void tick_freeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	tick_freeze_depth++;
	if (tick_freeze_depth == num_online_cpus())
		timekeeping_suspend();
	else
		tick_suspend_local();

	raw_spin_unlock(&tick_freeze_lock);
}
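/*
 * tick_freeze_depth counts the CPUs which have passed through
 * tick_freeze(): only the last CPU to freeze suspends timekeeping, and
 * since tick_unfreeze() below checks the counter before decrementing
 * it, exactly the first CPU to unfreeze resumes timekeeping again.
 */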
/**
 * tick_unfreeze - Resume the local tick and (possibly) timekeeping.
 *
 * Check if this is the first CPU executing the function and if so, resume
 * timekeeping. Otherwise resume the local tick.
 *
 * Call with interrupts disabled. Must be balanced with %tick_freeze().
 * Interrupts must not be enabled after the preceding %tick_freeze().
 */
void tick_unfreeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	if (tick_freeze_depth == num_online_cpus())
		timekeeping_resume();
	else
		tick_resume_local();

	tick_freeze_depth--;

	raw_spin_unlock(&tick_freeze_lock);
}

#endif /* CONFIG_SUSPEND */
/**
 * tick_init - initialize the tick control
 */
void __init tick_init(void)
{
	tick_broadcast_init();
}