/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 */

#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>

static struct timekeeper timekeeper;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

/* Flag for if there is a persistent clock on this platform */
bool __read_mostly persistent_clock_exist = false;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
		tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
		tk->xtime_sec++;
	}
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
	tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
{
	struct timespec tmp;

	/*
	 * Verify consistency of: offset_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec_to_ktime(tmp);
}

static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
{
	/* Verify consistency before modifying */
	WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);

	tk->total_sleep_time	= t;
	tk->offs_boot		= timespec_to_ktime(t);
}

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @clock:		Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	old_clock = tk->clock;
	tk->clock = clock;
	clock->cycle_last = clock->read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = (u64) interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0)
			tk->xtime_nsec >>= -shift_change;
		else
			tk->xtime_nsec <<= shift_change;
	}
	tk->shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->mult = clock->mult;
}
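
/*
 * Worked example (illustrative only, assuming a hypothetical clocksource):
 * take a 1GHz counter with mult = 1 << 22 and shift = 22, so one cycle is
 * exactly 1ns, and HZ = 1000, so NTP_INTERVAL_LENGTH = 1000000ns. Then
 * tk_setup_internals() computes:
 *	tmp		= (1000000 << 22) + (1 << 21)	(rounded, shifted ns)
 *	cycle_interval	= tmp / mult			= 1000000 cycles/tick
 *	xtime_interval	= 1000000 * (1 << 22)		= 1000000ns, shifted
 *	xtime_remainder	= ntpinterval - xtime_interval	= 0
 *	raw_interval	= xtime_interval >> 22		= 1000000ns
 */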

/* Timekeeper helper functions. */
static inline s64 timekeeping_get_ns(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	/* read clocksource: */
	clock = tk->clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	nsec = cycle_delta * tk->mult + tk->xtime_nsec;
	nsec >>= tk->shift;

	/* If arch requires, add in gettimeoffset() */
	return nsec + arch_gettimeoffset();
}
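
/*
 * Illustration (hypothetical numbers): with the same mult = 1 << 22 and
 * shift = 22 as in the example above, a cycle_delta of 500 cycles since the
 * last update_wall_time() yields
 *	nsec = (500 * (1 << 22) + xtime_nsec) >> 22 = 500 + (xtime_nsec >> 22)
 * i.e. 500ns on top of the stored (shifted) xtime_nsec remainder.
 */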

static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	/* read clocksource: */
	clock = tk->clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert delta to nanoseconds. */
	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);

	/* If arch requires, add in gettimeoffset() */
	return nsec + arch_gettimeoffset();
}

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk)
{
	raw_notifier_call_chain(&pvclock_gtod_chain, 0, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 *
 * Must hold write on timekeeper.lock
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	int ret;

	write_seqlock_irqsave(&tk->lock, flags);
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
	/* update timekeeping data */
	update_pvclock_gtod(tk);
	write_sequnlock_irqrestore(&tk->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 *
 * Must hold write on timekeeper.lock
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	int ret;

	write_seqlock_irqsave(&tk->lock, flags);
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
	write_sequnlock_irqrestore(&tk->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/* must hold write on timekeeper.lock */
static void timekeeping_update(struct timekeeper *tk, bool clearntp)
{
	if (clearntp) {
		tk->ntp_error = 0;
		ntp_clear();
	}
	update_vsyscall(tk);
	update_pvclock_gtod(tk);
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	clock = tk->clock;
	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	tk->xtime_nsec += cycle_delta * tk->mult;

	/* If arch requires, add in gettimeoffset() */
	tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift;

	tk_normalize_xtime(tk);

	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
	timespec_add_ns(&tk->raw_time, nsec);
}

/**
 * __getnstimeofday - Returns the time of day in a timespec.
 * @ts:		pointer to the timespec to be set
 *
 * Updates the time of day in the timespec.
 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
 */
int __getnstimeofday(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	s64 nsecs = 0;

	do {
		seq = read_seqbegin(&tk->lock);

		ts->tv_sec = tk->xtime_sec;
		nsecs = timekeeping_get_ns(tk);

	} while (read_seqretry(&tk->lock, seq));

	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsecs);

	/*
	 * Do not bail out early, in case there were callers still using
	 * the value, even in the face of the WARN_ON.
	 */
	if (unlikely(timekeeping_suspended))
		return -EAGAIN;
	return 0;
}
EXPORT_SYMBOL(__getnstimeofday);

/**
 * getnstimeofday - Returns the time of day in a timespec.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec (WARN if suspended).
 */
void getnstimeofday(struct timespec *ts)
{
	WARN_ON(__getnstimeofday(ts));
}
EXPORT_SYMBOL(getnstimeofday);

ktime_t ktime_get(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&tk->lock);
		secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
		nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;

	} while (read_seqretry(&tk->lock, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec tomono;
	s64 nsec;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&tk->lock);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(tk);
		tomono = tk->wall_to_monotonic;

	} while (read_seqretry(&tk->lock, seq));

	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:	pointer to the timespec to be set to raw monotonic time
 * @ts_real:	pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	s64 nsecs_raw, nsecs_real;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		seq = read_seqbegin(&tk->lock);

		*ts_raw = tk->raw_time;
		ts_real->tv_sec = tk->xtime_sec;
		ts_real->tv_nsec = 0;

		nsecs_raw = timekeeping_get_ns_raw(tk);
		nsecs_real = timekeeping_get_ns(tk);

	} while (read_seqretry(&tk->lock, seq));

	timespec_add_ns(ts_raw, nsecs_raw);
	timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(const struct timespec *tv)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec ts_delta, xt;
	unsigned long flags;

	if (!timespec_valid_strict(tv))
		return -EINVAL;

	write_seqlock_irqsave(&tk->lock, flags);

	timekeeping_forward_now(tk);

	xt = tk_xtime(tk);
	ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;

	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));

	tk_set_xtime(tk, tv);

	timekeeping_update(tk, true);

	write_sequnlock_irqrestore(&tk->lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec tmp;
	int ret = 0;

	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&tk->lock, flags);

	timekeeping_forward_now(tk);

	/* Make sure the proposed value is valid */
	tmp = timespec_add(tk_xtime(tk), *ts);
	if (!timespec_valid_strict(&tmp)) {
		ret = -EINVAL;
		goto error;
	}

	tk_xtime_add(tk, ts);
	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));

error: /* even if we error out, we forwarded the time, so call update */
	timekeeping_update(tk, true);

	write_sequnlock_irqrestore(&tk->lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct timekeeper *tk = &timekeeper;
	struct clocksource *new, *old;
	unsigned long flags;

	new = (struct clocksource *) data;

	write_seqlock_irqsave(&tk->lock, flags);

	timekeeping_forward_now(tk);
	if (!new->enable || new->enable(new) == 0) {
		old = tk->clock;
		tk_setup_internals(tk, new);
		if (old->disable)
			old->disable(old);
	}
	timekeeping_update(tk, true);

	write_sequnlock_irqrestore(&tk->lock, flags);

	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
	struct timekeeper *tk = &timekeeper;

	if (tk->clock == clock)
		return;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&tk->lock);
		nsecs = timekeeping_get_ns_raw(tk);
		*ts = tk->raw_time;

	} while (read_seqretry(&tk->lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&tk->lock);
		ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
	} while (read_seqretry(&tk->lock, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqbegin(&tk->lock);
		ret = tk->clock->max_idle_ns;
	} while (read_seqretry(&tk->lock, seq));

	return ret;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/**
 * read_boot_clock -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct timekeeper *tk = &timekeeper;
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now, boot, tmp;

	read_persistent_clock(&now);

	if (!timespec_valid_strict(&now)) {
		pr_warn("WARNING: Persistent clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		now.tv_sec = 0;
		now.tv_nsec = 0;
	} else if (now.tv_sec || now.tv_nsec)
		persistent_clock_exist = true;

	read_boot_clock(&boot);
	if (!timespec_valid_strict(&boot)) {
		pr_warn("WARNING: Boot clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		boot.tv_sec = 0;
		boot.tv_nsec = 0;
	}

	seqlock_init(&tk->lock);

	ntp_init();

	write_seqlock_irqsave(&tk->lock, flags);
	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	tk_setup_internals(tk, clock);

	tk_set_xtime(tk, &now);
	tk->raw_time.tv_sec = 0;
	tk->raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
		boot = tk_xtime(tk);

	set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
	tk_set_wall_to_mono(tk, tmp);

	tmp.tv_sec = 0;
	tmp.tv_nsec = 0;
	tk_set_sleep_time(tk, tmp);

	write_sequnlock_irqrestore(&tk->lock, flags);
}

/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
							struct timespec *delta)
{
	if (!timespec_valid_strict(delta)) {
		printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
					"sleep delta value!\n");
		return;
	}
	tk_xtime_add(tk, delta);
	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
	tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
}

/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;

	/*
	 * Make sure we don't set the clock twice, as timekeeping_resume()
	 * already did it
	 */
	if (has_persistent_clock())
		return;

	write_seqlock_irqsave(&tk->lock, flags);

	timekeeping_forward_now(tk);

	__timekeeping_inject_sleeptime(tk, delta);

	timekeeping_update(tk, true);

	write_sequnlock_irqrestore(&tk->lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec ts;

	read_persistent_clock(&ts);

	clockevents_resume();
	clocksource_resume();

	write_seqlock_irqsave(&tk->lock, flags);

	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
		ts = timespec_sub(ts, timekeeping_suspend_time);
		__timekeeping_inject_sleeptime(tk, &ts);
	}
	/* re-base the last cycle value */
	tk->clock->cycle_last = tk->clock->read(tk->clock);
	tk->ntp_error = 0;
	timekeeping_suspended = 0;
	timekeeping_update(tk, false);
	write_sequnlock_irqrestore(&tk->lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hrtimers_resume();
}

static int timekeeping_suspend(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec		delta, delta_delta;
	static struct timespec	old_delta;

	read_persistent_clock(&timekeeping_suspend_time);

	write_seqlock_irqsave(&tk->lock, flags);
	timekeeping_forward_now(tk);
	timekeeping_suspended = 1;

	/*
	 * To avoid drift caused by repeated suspend/resumes,
	 * which each can add ~1 second drift error,
	 * try to compensate so the difference in system time
	 * and persistent_clock time stays close to constant.
	 */
	delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
	delta_delta = timespec_sub(delta, old_delta);
	if (abs(delta_delta.tv_sec) >= 2) {
		/*
		 * if delta_delta is too large, assume time correction
		 * has occurred and set old_delta to the current delta.
		 */
		old_delta = delta;
	} else {
		/* Otherwise, adjust the suspend time to compensate */
		timekeeping_suspend_time =
			timespec_add(timekeeping_suspend_time, delta_delta);
	}
	write_sequnlock_irqrestore(&tk->lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();
	clockevents_suspend();

	return 0;
}
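
/*
 * Illustration of the compensation above (hypothetical numbers): if the
 * persistent clock only has 1s resolution and is read ~0.5s "early" at each
 * suspend, delta stays roughly constant across suspend cycles, delta_delta
 * stays well under 2s, and timekeeping_suspend_time is nudged by that
 * sub-second difference instead of letting the truncation error accumulate
 * as drift. A jump of 2s or more in delta_delta (e.g. after settimeofday())
 * simply re-bases old_delta.
 */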

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}

device_initcall(timekeeping_init_ops);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
						 s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
	tick_error -= tk->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
	s64 error, interval = tk->cycle_interval;
	int adj;

	/*
	 * The point of this is to check if the error is greater than half
	 * an interval.
	 *
	 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
	 *
	 * Note we subtract one in the shift, so that error is really error*2.
	 * This "saves" dividing(shifting) interval twice, but keeps the
	 * (error > interval) comparison as still measuring if error is
	 * larger than half an interval.
	 *
	 * Note: It does not "save" on aggravation when reading the code.
	 */
	error = tk->ntp_error >> (tk->ntp_error_shift - 1);
	if (error > interval) {
		/*
		 * We now divide error by 4(via shift), which checks if
		 * the error is greater than twice the interval.
		 * If it is greater, we need a bigadjust, if it's smaller,
		 * we can adjust by 1.
		 */
		error >>= 2;
		/*
		 * XXX - In update_wall_time, we round up to the next
		 * nanosecond, and store the amount rounded up into
		 * the error. This causes the likely below to be unlikely.
		 *
		 * The proper fix is to avoid rounding up by using
		 * the high precision tk->xtime_nsec instead of
		 * xtime.tv_nsec everywhere. Fixing this will take some
		 * time.
		 */
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(tk, error, &interval, &offset);
	} else {
		if (error < -interval) {
			/* See comment above, this is just switched for the negative */
			error >>= 2;
			if (likely(error >= -interval)) {
				adj = -1;
				interval = -interval;
				offset = -offset;
			} else {
				adj = timekeeping_bigadjust(tk, error, &interval, &offset);
			}
		} else {
			goto out_adjust;
		}
	}

	if (unlikely(tk->clock->maxadj &&
		(tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
		printk_once(KERN_WARNING
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			tk->clock->name, (long)tk->mult + adj,
			(long)tk->clock->mult + tk->clock->maxadj);
	}
	/*
	 * So the following can be confusing.
	 *
	 * To keep things simple, lets assume adj == 1 for now.
	 *
	 * When adj != 1, remember that the interval and offset values
	 * have been appropriately scaled so the math is the same.
	 *
	 * The basic idea here is that we're increasing the multiplier
	 * by one, this causes the xtime_interval to be incremented by
	 * one cycle_interval. This is because:
	 *	xtime_interval = cycle_interval * mult
	 * So if mult is being incremented by one:
	 *	xtime_interval = cycle_interval * (mult + 1)
	 * Its the same as:
	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
	 * Which can be shortened to:
	 *	xtime_interval += cycle_interval
	 *
	 * So offset stores the non-accumulated cycles. Thus the current
	 * time (in shifted nanoseconds) is:
	 *	now = (offset * adj) + xtime_nsec
	 * Now, even though we're adjusting the clock frequency, we have
	 * to keep time consistent. In other words, we can't jump back
	 * in time, and we also want to avoid jumping forward in time.
	 *
	 * So given the same offset value, we need the time to be the same
	 * both before and after the freq adjustment.
	 *	now = (offset * adj_1) + xtime_nsec_1
	 *	now = (offset * adj_2) + xtime_nsec_2
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_2) + xtime_nsec_2
	 * And we know:
	 *	adj_2 = adj_1 + 1
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * (adj_1+1)) + xtime_nsec_2
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_1) + offset + xtime_nsec_2
	 * Canceling the sides:
	 *	xtime_nsec_1 = offset + xtime_nsec_2
	 * Which gives us:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * Which simplifies to:
	 *	xtime_nsec -= offset
	 *
	 * XXX - TODO: Doc ntp_error calculation.
	 */
	tk->mult += adj;
	tk->xtime_interval += interval;
	tk->xtime_nsec -= offset;
	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
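
	/*
	 * Numeric example (illustrative): if 300000 cycles are still
	 * unaccumulated (offset == 300000) and mult is bumped by adj == 1,
	 * then xtime_interval grows by one cycle_interval and xtime_nsec
	 * must shrink by 300000 shifted ns, so that
	 *	offset * mult + xtime_nsec
	 * evaluates to the same instant before and after the adjustment.
	 */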

out_adjust:
	/*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small.  Further, if we're slightly speeding the clocksource
	 * in the code above, it's possible the required corrective factor to
	 * xtime_nsec could cause it to underflow.
	 *
	 * Now, since we already accumulated the second, cannot simply roll
	 * the accumulated second back, since the NTP subsystem has been
	 * notified via second_overflow. So instead we push xtime_nsec forward
	 * by the amount we underflowed, and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)tk->xtime_nsec < 0)) {
		s64 neg = -(s64)tk->xtime_nsec;
		tk->xtime_nsec = 0;
		tk->ntp_error += neg << tk->ntp_error_shift;
	}
}

/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field into the xtime_sec field.
 * It also calls into the NTP code to handle leapsecond processing.
 */
static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
{
	u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;

	while (tk->xtime_nsec >= nsecps) {
		int leap;

		tk->xtime_nsec -= nsecps;
		tk->xtime_sec++;

		/* Figure out if its a leap sec and apply if needed */
		leap = second_overflow(tk->xtime_sec);
		if (unlikely(leap)) {
			struct timespec ts;

			tk->xtime_sec += leap;

			ts.tv_sec = leap;
			ts.tv_nsec = 0;
			tk_set_wall_to_mono(tk,
				timespec_sub(tk->wall_to_monotonic, ts));

			clock_was_set_delayed();
		}
	}
}

/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds, allowing for an O(log)
 * accumulation loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
						u32 shift)
{
	u64 raw_nsecs;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < tk->cycle_interval<<shift)
		return offset;

	/* Accumulate one shifted interval */
	offset -= tk->cycle_interval << shift;
	tk->clock->cycle_last += tk->cycle_interval << shift;

	tk->xtime_nsec += tk->xtime_interval << shift;
	accumulate_nsecs_to_secs(tk);

	/* Accumulate raw time */
	raw_nsecs = (u64)tk->raw_interval << shift;
	raw_nsecs += tk->raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		tk->raw_time.tv_sec += raw_secs;
	}
	tk->raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	tk->ntp_error += ntp_tick_length() << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);

	return offset;
}
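
/*
 * Example of the shifted accumulation (illustrative): a call with shift = 3
 * consumes 8 (1 << 3) cycle_intervals worth of cycles in a single pass, so a
 * backlog of ~1000 pending tick intervals drains in a handful of calls with
 * decreasing shift values rather than in ~1000 single-tick steps.
 */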

#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
	s64 remainder;

	/*
	* Store only full nanoseconds into xtime_nsec after rounding
	* it up and add the remainder to the error difference.
	* XXX - This is necessary to avoid small 1ns inconsistencies caused
	* by truncating the remainder in vsyscalls. However, it causes
	* additional work to be done in timekeeping_adjust(). Once
	* the vsyscall implementations are converted to use xtime_nsec
	* (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
	* users are removed, this can be killed.
	*/
	remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
	tk->xtime_nsec -= remainder;
	tk->xtime_nsec += 1ULL << tk->shift;
	tk->ntp_error += remainder << tk->ntp_error_shift;
}
#else
#define old_vsyscall_fixup(tk)
#endif

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 */
static void update_wall_time(void)
{
	struct clocksource *clock;
	struct timekeeper *tk = &timekeeper;
	cycle_t offset;
	int shift = 0, maxshift;
	unsigned long flags;

	write_seqlock_irqsave(&tk->lock, flags);

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		goto out;

	clock = tk->clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = tk->cycle_interval;
#else
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif

	/* Check if there's really nothing to do */
	if (offset < tk->cycle_interval)
		goto out;

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
	shift = min(shift, maxshift);
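
	/*
	 * For example (illustrative): with roughly 100 cycle_intervals
	 * pending, the ilog2() difference gives shift = 6, so the loop
	 * below accumulates chunks of 64, then 32, then 4 intervals
	 * instead of taking 100 single-tick iterations.
	 */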
	while (offset >= tk->cycle_interval) {
		offset = logarithmic_accumulation(tk, offset, shift);
		if (offset < tk->cycle_interval<<shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(tk, offset);

	/*
	 * XXX This can be killed once everyone converts
	 * to the new update_vsyscall.
	 */
	old_vsyscall_fixup(tk);

	/*
	 * Finally, make sure that after the rounding
	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
	accumulate_nsecs_to_secs(tk);

	timekeeping_update(tk, false);

out:
	write_sequnlock_irqrestore(&tk->lock, flags);
}

/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec boottime = {
		.tv_sec = tk->wall_to_monotonic.tv_sec +
				tk->total_sleep_time.tv_sec,
		.tv_nsec = tk->wall_to_monotonic.tv_nsec +
				tk->total_sleep_time.tv_nsec
	};

	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);

/**
 * get_monotonic_boottime - Returns monotonic time since boot
 * @ts:		pointer to the timespec to be set
 *
 * Returns the monotonic time since boot in a timespec.
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
 * includes the time spent in suspend.
 */
void get_monotonic_boottime(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec tomono, sleep;
	s64 nsec;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&tk->lock);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(tk);
		tomono = tk->wall_to_monotonic;
		sleep = tk->total_sleep_time;

	} while (read_seqretry(&tk->lock, seq));

	ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
}
EXPORT_SYMBOL_GPL(get_monotonic_boottime);

/**
 * ktime_get_boottime - Returns monotonic time since boot in a ktime
 *
 * Returns the monotonic time since boot in a ktime
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
 * includes the time spent in suspend.
 */
ktime_t ktime_get_boottime(void)
{
	struct timespec ts;

	get_monotonic_boottime(&ts);
	return timespec_to_ktime(ts);
}
EXPORT_SYMBOL_GPL(ktime_get_boottime);

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;

	*ts = timespec_add(*ts, tk->total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);

unsigned long get_seconds(void)
{
	struct timekeeper *tk = &timekeeper;

	return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
	struct timekeeper *tk = &timekeeper;

	return tk_xtime(tk);
}

struct timespec current_kernel_time(void)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&tk->lock);

		now = tk_xtime(tk);
	} while (read_seqretry(&tk->lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec get_monotonic_coarse(void)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec now, mono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&tk->lock);

		now = tk_xtime(tk);
		mono = tk->wall_to_monotonic;
	} while (read_seqretry(&tk->lock, seq));

	set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
				now.tv_nsec + mono.tv_nsec);
	return now;
}

/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_wall_time();
	calc_global_load(ticks);
}

/**
 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
 *    and sleep offsets.
 * @xtim:	pointer to timespec to be set with xtime
 * @wtom:	pointer to timespec to be set with wall_to_monotonic
 * @sleep:	pointer to timespec to be set with time in suspend
 */
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
				struct timespec *wtom, struct timespec *sleep)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;

	do {
		seq = read_seqbegin(&tk->lock);
		*xtim = tk_xtime(tk);
		*wtom = tk->wall_to_monotonic;
		*sleep = tk->total_sleep_time;
	} while (read_seqretry(&tk->lock, seq));
}

#ifdef CONFIG_HIGH_RES_TIMERS
/**
 * ktime_get_update_offsets - hrtimer helper
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 *
 * Returns current monotonic time and updates the offsets
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
{
	struct timekeeper *tk = &timekeeper;
	ktime_t now;
	unsigned int seq;
	u64 secs, nsecs;

	do {
		seq = read_seqbegin(&tk->lock);

		secs = tk->xtime_sec;
		nsecs = timekeeping_get_ns(tk);

		*offs_real = tk->offs_real;
		*offs_boot = tk->offs_boot;
	} while (read_seqretry(&tk->lock, seq));

	now = ktime_add_ns(ktime_set(secs, 0), nsecs);
	now = ktime_sub(now, *offs_real);
	return now;
}
#endif

/**
 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
 */
ktime_t ktime_get_monotonic_offset(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	struct timespec wtom;

	do {
		seq = read_seqbegin(&tk->lock);
		wtom = tk->wall_to_monotonic;
	} while (read_seqretry(&tk->lock, seq));

	return timespec_to_ktime(wtom);
}
EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks, that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&jiffies_lock);
	do_timer(ticks);
	write_sequnlock(&jiffies_lock);
}