/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sysdev.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
/* Structure holding internal timekeeping values. */
struct timekeeper {
	/* Current clocksource used for timekeeping. */
	struct clocksource *clock;
	/* The shift value of the current clocksource. */
	int	shift;

	/* Number of clock cycles in one NTP interval. */
	cycle_t	cycle_interval;
	/* Number of clock-shifted nanoseconds in one NTP interval. */
	u64	xtime_interval;
	/* Raw nanoseconds accumulated per NTP interval. */
	u32	raw_interval;

	/* Clock-shifted nanosecond remainder not stored in xtime.tv_nsec. */
	u64	xtime_nsec;
	/* Difference between accumulated time and NTP time in ntp
	 * shifted nanoseconds. */
	s64	ntp_error;
	/* Shift conversion between clock-shifted nanoseconds and
	 * ntp-shifted nanoseconds. */
	int	ntp_error_shift;
	/* NTP adjusted clock multiplier */
	u32	mult;
};

static struct timekeeper timekeeper;
/**
 * timekeeper_setup_internals - Set up internals to use clocksource clock.
 *
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void timekeeper_setup_internals(struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp;

	timekeeper.clock = clock;
	clock->cycle_last = clock->read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	timekeeper.cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	timekeeper.xtime_interval = (u64) interval * clock->mult;
	timekeeper.raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	timekeeper.xtime_nsec = 0;
	timekeeper.shift = clock->shift;

	timekeeper.ntp_error = 0;
	timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	timekeeper.mult = clock->mult;
}
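/*
 * Note: clocksource_cyc2ns(cycles, mult, shift) is (cycles * mult) >> shift,
 * so cycle_interval, xtime_interval and raw_interval above describe the same
 * NTP interval as clock cycles, clock-shifted nanoseconds and plain
 * nanoseconds respectively.
 */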
/* Timekeeper helper functions. */
static inline s64 timekeeping_get_ns(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* return delta converted to nanoseconds using the ntp adjusted mult. */
	return clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);
}
static inline s64 timekeeping_get_ns_raw(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* return delta converted to nanoseconds using the raw clocksource mult. */
	return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
}
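/*
 * The two helpers above differ only in the multiplier passed to
 * clocksource_cyc2ns(): timekeeping_get_ns() scales the cycle delta with the
 * NTP-adjusted timekeeper.mult, while timekeeping_get_ns_raw() uses the
 * clocksource's unadjusted mult/shift, so the raw clock is never steered by
 * NTP.
 */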
/*
 * This seqlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
/*
 * The current time.
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffy times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative;
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend so that the
 * monotonic time does not jump. We need to add total_sleep_time to
 * wall_to_monotonic to get the real boot based time offset.
 *
 * - wall_to_monotonic is no longer the boot time; getboottime must be
 * used instead.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static struct timespec total_sleep_time;
/*
 * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
 */
static struct timespec raw_time;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
static struct timespec xtime_cache __attribute__ ((aligned (16)));
void update_xtime_cache(u64 nsec)
{
	/*
	 * Use a temporary variable so get_seconds() cannot catch
	 * an intermediate xtime_cache.tv_sec value.
	 * The ACCESS_ONCE() keeps the compiler from optimizing
	 * out the intermediate value.
	 */
	struct timespec ts = xtime;
	timespec_add_ns(&ts, nsec);
	ACCESS_ONCE(xtime_cache) = ts;
}
/* must hold xtime_lock */
void timekeeping_leap_insert(int leapsecond)
{
	xtime.tv_sec += leapsecond;
	wall_to_monotonic.tv_sec -= leapsecond;
	update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
}
#ifdef CONFIG_GENERIC_TIME

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	clock = timekeeper.clock;
	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);

	/* If arch requires, add in gettimeoffset() */
	nsec += arch_gettimeoffset();

	timespec_add_ns(&xtime, nsec);

	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
	timespec_add_ns(&raw_time, nsec);
}
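/*
 * Because cycle_last is advanced to cycle_now above, the interval just
 * consumed will not be accumulated again by update_wall_time(); the elapsed
 * nanoseconds have already been folded into xtime and raw_time.
 */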
/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;
		nsecs = timekeeping_get_ns();

		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);
ktime_t ktime_get(void)
{
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
		nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
		nsecs += timekeeping_get_ns();

	} while (read_seqretry(&xtime_lock, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned int seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		*ts = xtime;
		tomono = wall_to_monotonic;
		nsecs = timekeeping_get_ns();

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers.
 */
int do_settimeofday(struct timespec *tv)
{
	struct timespec ts_delta;
	unsigned long flags;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	timekeeping_forward_now();

	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);
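	/*
	 * wall_to_monotonic absorbs the opposite of the wall clock change, so
	 * xtime + wall_to_monotonic (CLOCK_MONOTONIC) stays continuous across
	 * the jump in wall time below.
	 */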
	xtime = *tv;
	update_xtime_cache(0);

	timekeeper.ntp_error = 0;
	ntp_clear();

	update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates the current time interval and initializes the new clocksource.
 */
static int change_clocksource(void *data)
{
	struct clocksource *new, *old;

	new = (struct clocksource *) data;
	timekeeping_forward_now();
	if (!new->enable || new->enable(new) == 0) {
		old = timekeeper.clock;
		timekeeper_setup_internals(new);
		if (old->disable)
			old->disable(old);
	}
	return 0;
}
/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
	if (timekeeper.clock == clock)
		return;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
}

#else /* GENERIC_TIME */
static inline void timekeeping_forward_now(void) { }

/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);
	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get);
/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(ts);
		tomono = wall_to_monotonic;

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

#endif /* !GENERIC_TIME */
/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);
	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);
/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&xtime_lock);
		nsecs = timekeeping_get_ns_raw();
		*ts = raw_time;
	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);
/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);
		ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}
/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 *
 * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
 * ensure that the clocksource does not change!
 */
u64 timekeeping_max_deferment(void)
{
	return timekeeper.clock->max_idle_ns;
}
/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}
/**
 * read_boot_clock -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}
/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now, boot;

	read_persistent_clock(&now);
	read_boot_clock(&boot);

	write_seqlock_irqsave(&xtime_lock, flags);

	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	timekeeper_setup_internals(clock);

	xtime.tv_sec = now.tv_sec;
	xtime.tv_nsec = now.tv_nsec;
	raw_time.tv_sec = 0;
	raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0) {
		boot.tv_sec = xtime.tv_sec;
		boot.tv_nsec = xtime.tv_nsec;
	}
	set_normalized_timespec(&wall_to_monotonic,
				-boot.tv_sec, -boot.tv_nsec);
	update_xtime_cache(0);
	total_sleep_time.tv_sec = 0;
	total_sleep_time.tv_nsec = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
}
/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;
/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev:	unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;
	struct timespec ts;

	read_persistent_clock(&ts);

	clocksource_resume();

	write_seqlock_irqsave(&xtime_lock, flags);

	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
		ts = timespec_sub(ts, timekeeping_suspend_time);
		xtime = timespec_add_safe(xtime, ts);
		wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
		total_sleep_time = timespec_add_safe(total_sleep_time, ts);
	}
	update_xtime_cache(0);
	/* re-base the last cycle value */
	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
	timekeeper.ntp_error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hres_timers_resume();

	return 0;
}
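/*
 * Re-reading the clocksource into cycle_last above ensures that cycles which
 * elapsed while suspended are not accumulated as wall time; the sleep
 * interval is instead accounted via the persistent clock delta added to
 * xtime and total_sleep_time.
 */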
static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	read_persistent_clock(&timekeeping_suspend_time);

	write_seqlock_irqsave(&xtime_lock, flags);
	timekeeping_forward_now();
	timekeeping_suspended = 1;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);

	return 0;
}
/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.name		= "timekeeping",
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static struct sys_device device_timer = {
	.id		= 0,
	.cls		= &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
	int error = sysdev_class_register(&timekeeping_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(timekeeping_init_device);
/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = tick_length >> (timekeeper.ntp_error_shift + 1);
	tick_error -= timekeeper.xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}
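/*
 * Example of the shift search above: if error is roughly four times the
 * interval, the loop halves it twice before it drops to the interval, giving
 * adj = 2, and interval, offset and the returned multiplier step are all
 * scaled by 1 << 2.
 */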
/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(s64 offset)
{
	s64 error, interval = timekeeper.cycle_interval;
	int adj;

	error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else
		return;

	timekeeper.mult += adj;
	timekeeper.xtime_interval += interval;
	timekeeper.xtime_nsec -= offset;
	timekeeper.ntp_error -= (interval - offset) <<
				timekeeper.ntp_error_shift;
}
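/*
 * In the common adj = +1 or -1 case this changes mult by one step, changes
 * the shifted nanoseconds accumulated per interval by exactly
 * cycle_interval, and books the corresponding amount into ntp_error so the
 * correction is not applied twice.
 */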
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
void update_wall_time(void)
{
	struct clocksource *clock;
	cycle_t offset;
	u64 nsecs;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

	clock = timekeeper.clock;
#ifdef CONFIG_GENERIC_TIME
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#else
	offset = timekeeper.cycle_interval;
#endif
	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;

	/*
	 * Normally this loop will run just once, however in the
	 * case of lost or late ticks, it will accumulate correctly.
	 */
	while (offset >= timekeeper.cycle_interval) {
		u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;

		/* accumulate one interval */
		offset -= timekeeper.cycle_interval;
		clock->cycle_last += timekeeper.cycle_interval;

		timekeeper.xtime_nsec += timekeeper.xtime_interval;
		if (timekeeper.xtime_nsec >= nsecps) {
			timekeeper.xtime_nsec -= nsecps;
			xtime.tv_sec++;
			second_overflow();
		}

		raw_time.tv_nsec += timekeeper.raw_interval;
		if (raw_time.tv_nsec >= NSEC_PER_SEC) {
			raw_time.tv_nsec -= NSEC_PER_SEC;
			raw_time.tv_sec++;
		}

		/* accumulate error between NTP and clock interval */
		timekeeper.ntp_error += tick_length;
		timekeeper.ntp_error -= timekeeper.xtime_interval <<
					timekeeper.ntp_error_shift;
	}
	/* correct the clock when NTP error is too big */
	timekeeping_adjust(offset);

	/*
	 * Since in the loop above, we accumulate any amount of time
	 * in xtime_nsec over a second into xtime.tv_sec, it's possible for
	 * xtime_nsec to be fairly small after the loop. Further, if we're
	 * slightly speeding the clocksource up in timekeeping_adjust(),
	 * it's possible the required corrective factor to xtime_nsec could
	 * cause it to underflow.
	 *
	 * Now, we cannot simply roll the accumulated second back, since
	 * the NTP subsystem has been notified via second_overflow. So
	 * instead we push xtime_nsec forward by the amount we underflowed,
	 * and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
		s64 neg = -(s64)timekeeper.xtime_nsec;
		timekeeper.xtime_nsec = 0;
		timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
	}

	/*
	 * Store full nanoseconds into xtime after rounding it up and
	 * add the remainder to the error difference.
	 */
	xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
	timekeeper.xtime_nsec -= (s64) xtime.tv_nsec << timekeeper.shift;
	timekeeper.ntp_error += timekeeper.xtime_nsec <<
				timekeeper.ntp_error_shift;

	nsecs = clocksource_cyc2ns(offset, timekeeper.mult, timekeeper.shift);
	update_xtime_cache(nsecs);

	/* update the vsyscall/vDSO time data with the new xtime and mult */
	update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
}
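/*
 * The xtime_cache updated at the end of update_wall_time() is xtime plus the
 * still-unaccumulated remainder of the current interval, so lock-free
 * readers such as get_seconds() observe a value consistent with the last
 * full update.
 */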
/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	struct timespec boottime = {
		.tv_sec = wall_to_monotonic.tv_sec + total_sleep_time.tv_sec,
		.tv_nsec = wall_to_monotonic.tv_nsec + total_sleep_time.tv_nsec
	};

	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);
/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	*ts = timespec_add_safe(*ts, total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
unsigned long get_seconds(void)
{
	return xtime_cache.tv_sec;
}
EXPORT_SYMBOL(get_seconds);
struct timespec __current_kernel_time(void)
{
	return xtime_cache;
}

struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		now = xtime_cache;
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);
struct timespec get_monotonic_coarse(void)
{
	struct timespec now, mono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		now = xtime_cache;
		mono = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
				now.tv_nsec + mono.tv_nsec);
	return now;
}