 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * Copyright (c) 2011, 2015, 2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Julien Ridoux at the University
 * of Melbourne under sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_ffclock.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/timeffc.h>
#include <sys/timepps.h>
#include <sys/timetc.h>
#include <sys/timex.h>
/*
 * A large step happens on boot. This constant detects such steps.
 * It is relatively small so that ntp_update_second gets called enough
 * in the typical 'missed a couple of seconds' case, but doesn't loop
 * forever when the time step is large.
 */
#define LARGE_STEP	200
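/*
 * Editorial example (not in the original source): if the TOD hardware
 * read at boot steps time forward by an hour, tc_windup() sees
 * i = 3600 > LARGE_STEP and clamps i to 2, so ntp_update_second() runs
 * just twice (enough to account for a missed leap second) instead of
 * 3600 times.
 */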
/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air. This allows the console and other early stuff to use
 * time services.
 */

static u_int
dummy_get_timecount(struct timecounter *tc)
{
	static u_int now;

	return (++now);
}

static struct timecounter dummy_timecounter = {
	dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
};
struct timehands {
	/* These fields must be initialized by the driver. */
	struct timecounter	*th_counter;
	int64_t			th_adjustment;
	uint64_t		th_scale;
	u_int			th_offset_count;
	struct bintime		th_offset;
	struct bintime		th_bintime;
	struct timeval		th_microtime;
	struct timespec		th_nanotime;
	struct bintime		th_boottime;
	/* Fields not to be copied in tc_windup start with th_generation. */
	volatile u_int		th_generation;
	struct timehands	*th_next;
};
static struct timehands th0;
static struct timehands th1 = {
	.th_next = &th0
};
static struct timehands th0 = {
	.th_counter = &dummy_timecounter,
	.th_scale = (uint64_t)-1 / 1000000,
	.th_offset = { .sec = 1 },
	.th_generation = 1,
	.th_next = &th1
};

static struct timehands *volatile timehands = &th0;
struct timecounter *timecounter = &dummy_timecounter;
static struct timecounter *timecounters = &dummy_timecounter;
int tc_min_ticktock_freq = 1;

volatile time_t time_second = 1;
volatile time_t time_uptime = 1;
static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
    NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");

SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, "");

static int timestepwarnings;
SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
    &timestepwarnings, 0, "Log time steps");
struct bintime bt_timethreshold;
struct bintime bt_tickthreshold;
sbintime_t sbt_timethreshold;
sbintime_t sbt_tickthreshold;
struct bintime tc_tick_bt;
sbintime_t tc_tick_sbt;

int tc_timepercentage = TC_DEFAULTPERC;
static int sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_timecounter, OID_AUTO, alloweddeviation,
    CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 0, 0,
    sysctl_kern_timecounter_adjprecision, "I",
    "Allowed time interval deviation in percents");

static int tc_chosen;	/* Non-zero if a specific tc was chosen via sysctl. */
static void tc_windup(struct bintime *new_boottimebin);
static void cpu_tick_calibrate(int);

void dtrace_getnanotime(struct timespec *tsp);
static int
sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
{
	struct timeval boottime;

	getboottime(&boottime);

	if (req->flags & SCTL_MASK32) {
		int tv[2];

		tv[0] = boottime.tv_sec;
		tv[1] = boottime.tv_usec;
		return (SYSCTL_OUT(req, tv, sizeof(tv)));
	}

	return (SYSCTL_OUT(req, &boottime, sizeof(boottime)));
}
static int
sysctl_kern_timecounter_get(SYSCTL_HANDLER_ARGS)
{
	u_int ncount;
	struct timecounter *tc = arg1;

	ncount = tc->tc_get_timecount(tc);
	return (sysctl_handle_int(oidp, &ncount, 0, req));
}

static int
sysctl_kern_timecounter_freq(SYSCTL_HANDLER_ARGS)
{
	uint64_t freq;
	struct timecounter *tc = arg1;

	freq = tc->tc_frequency;
	return (sysctl_handle_64(oidp, &freq, 0, req));
}
/*
 * Return the difference between the timehands' counter value now and what
 * was when we copied it to the timehands' offset_count.
 */
static __inline u_int
tc_delta(struct timehands *th)
{
	struct timecounter *tc;

	tc = th->th_counter;
	return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
	    tc->tc_counter_mask);
}
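/*
 * Editorial example: with a 16-bit counter (tc_counter_mask == 0xffff),
 * a read of 0x0005 against an offset count of 0xfffe yields
 * (0x0005 - 0xfffe) & 0xffff == 0x0007, so the unsigned subtraction plus
 * mask handles counter roll-over for free.
 */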
/*
 * Functions for reading the time. We have to loop until we are sure that
 * the timehands that we operated on was not updated under our feet. See
 * the comment in <sys/time.h> for a description of these 12 functions.
 */
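/*
 * The common reader pattern, spelled out once (editorial sketch; every
 * function below inlines a variant of it):
 *
 *	struct timehands *th;
 *	u_int gen;
 *
 *	do {
 *		th = timehands;
 *		gen = atomic_load_acq_int(&th->th_generation);
 *		(copy the fields of interest out of *th)
 *		atomic_thread_fence_acq();
 *	} while (gen == 0 || gen != th->th_generation);
 *
 * A zero generation means tc_windup() is mid-update; a changed generation
 * means the copy raced an update, so the reader retries.
 */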
#ifdef FFCLOCK
void
fbclock_binuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	fbclock_binuptime(&bt);
	bintime2timespec(&bt, tsp);
}

void
fbclock_microuptime(struct timeval *tvp)
{
	struct bintime bt;

	fbclock_binuptime(&bt);
	bintime2timeval(&bt, tvp);
}

void
fbclock_bintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*bt = th->th_bintime;
		bintime_addx(bt, th->th_scale * tc_delta(th));
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_nanotime(struct timespec *tsp)
{
	struct bintime bt;

	fbclock_bintime(&bt);
	bintime2timespec(&bt, tsp);
}

void
fbclock_microtime(struct timeval *tvp)
{
	struct bintime bt;

	fbclock_bintime(&bt);
	bintime2timeval(&bt, tvp);
}
void
fbclock_getbinuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*bt = th->th_offset;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		bintime2timespec(&th->th_offset, tsp);
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		bintime2timeval(&th->th_offset, tvp);
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_getbintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*bt = th->th_bintime;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*tsp = th->th_nanotime;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*tvp = th->th_microtime;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}
#else /* !FFCLOCK */

void
binuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	binuptime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microuptime(struct timeval *tvp)
{
	struct bintime bt;

	binuptime(&bt);
	bintime2timeval(&bt, tvp);
}

void
bintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*bt = th->th_bintime;
		bintime_addx(bt, th->th_scale * tc_delta(th));
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
nanotime(struct timespec *tsp)
{
	struct bintime bt;

	bintime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microtime(struct timeval *tvp)
{
	struct bintime bt;

	bintime(&bt);
	bintime2timeval(&bt, tvp);
}

void
getbinuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*bt = th->th_offset;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		bintime2timespec(&th->th_offset, tsp);
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		bintime2timeval(&th->th_offset, tvp);
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
getbintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*bt = th->th_bintime;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*tsp = th->th_nanotime;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*tvp = th->th_microtime;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}
#endif /* FFCLOCK */
void
getboottime(struct timeval *boottime)
{
	struct bintime boottimebin;

	getboottimebin(&boottimebin);
	bintime2timeval(&boottimebin, boottime);
}

void
getboottimebin(struct bintime *boottimebin)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*boottimebin = th->th_boottime;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}
#ifdef FFCLOCK
/*
 * Support for feed-forward synchronization algorithms. This is heavily inspired
 * by the timehands mechanism but kept independent from it. *_windup() functions
 * have some connection to avoid accessing the timecounter hardware more than
 * necessary.
 */

/* Feed-forward clock estimates kept updated by the synchronization daemon. */
struct ffclock_estimate ffclock_estimate;
struct bintime ffclock_boottime;	/* Feed-forward boot time estimate. */
uint32_t ffclock_status;		/* Feed-forward clock status. */
int8_t ffclock_updated;			/* New estimates are available. */
struct mtx ffclock_mtx;			/* Mutex on ffclock_estimate. */
struct fftimehands {
	struct ffclock_estimate	cest;
	struct bintime		tick_time;
	struct bintime		tick_time_lerp;
	ffcounter		tick_ffcount;
	uint64_t		period_lerp;
	volatile uint8_t	gen;
	struct fftimehands	*next;
};

#define	NUM_ELEMENTS(x) (sizeof(x) / sizeof(*x))

static struct fftimehands ffth[10];
static struct fftimehands *volatile fftimehands = ffth;
static void
ffclock_init(void)
{
	struct fftimehands *cur;
	struct fftimehands *last;

	memset(ffth, 0, sizeof(ffth));

	last = ffth + NUM_ELEMENTS(ffth) - 1;
	for (cur = ffth; cur < last; cur++)
		cur->next = cur + 1;
	last->next = ffth;

	ffclock_updated = 0;
	ffclock_status = FFCLOCK_STA_UNSYNC;
	mtx_init(&ffclock_mtx, "ffclock lock", NULL, MTX_DEF);
}
/*
 * Reset the feed-forward clock estimates. Called from inittodr() to get things
 * kick started and uses the timecounter nominal frequency as a first period
 * estimate. Note: this function may be called several times just after boot.
 * Note: this is the only function that sets the value of boot time for the
 * monotonic (i.e. uptime) version of the feed-forward clock.
 */
void
ffclock_reset_clock(struct timespec *ts)
{
	struct timecounter *tc;
	struct ffclock_estimate cest;

	tc = timehands->th_counter;
	memset(&cest, 0, sizeof(struct ffclock_estimate));

	timespec2bintime(ts, &ffclock_boottime);
	timespec2bintime(ts, &(cest.update_time));
	ffclock_read_counter(&cest.update_ffcount);
	cest.leapsec_next = 0;
	cest.period = ((1ULL << 63) / tc->tc_frequency) << 1;
	cest.errb_abs = 0;
	cest.errb_rate = 0;
	cest.status = FFCLOCK_STA_UNSYNC;
	cest.leapsec_total = 0;
	cest.leapsec = 0;

	mtx_lock(&ffclock_mtx);
	bcopy(&cest, &ffclock_estimate, sizeof(struct ffclock_estimate));
	ffclock_updated = INT8_MAX;
	mtx_unlock(&ffclock_mtx);

	printf("ffclock reset: %s (%llu Hz), time = %ld.%09lu\n", tc->tc_name,
	    (unsigned long long)tc->tc_frequency, (long)ts->tv_sec,
	    (unsigned long)ts->tv_nsec);
}
/*
 * Sub-routine to convert a time interval measured in RAW counter units to time
 * in seconds stored in bintime format.
 * NOTE: bintime_mul requires u_int, but the value of the ffcounter may be
 * larger than the max value of u_int (on 32 bit architecture). Loop to consume
 * the ffcounter by chunks of u_int size.
 */
static void
ffclock_convert_delta(ffcounter ffdelta, uint64_t period, struct bintime *bt)
{
	struct bintime bt2;
	ffcounter delta, delta_max;

	delta_max = (1ULL << (8 * sizeof(unsigned int))) - 1;
	bintime_clear(bt);
	do {
		if (ffdelta > delta_max)
			delta = delta_max;
		else
			delta = ffdelta;
		bt2.sec = 0;
		bt2.frac = period;
		bintime_mul(&bt2, (unsigned int)delta);
		bintime_add(bt, &bt2);
		ffdelta -= delta;
	} while (ffdelta > 0);
}
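/*
 * Editorial example: with a 32-bit u_int, converting ffdelta = 2^33
 * counter units takes three passes of the loop above (0xffffffff +
 * 0xffffffff + 2), each pass accumulating period * chunk into *bt, so no
 * bits of the 64-bit ffcounter are dropped.
 */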
/*
 * Update the fftimehands.
 * Push the tick ffcount and time(s) forward based on current clock estimate.
 * The conversion from ffcounter to bintime relies on the difference clock
 * principle, whose accuracy relies on computing small time intervals. If a new
 * clock estimate has been passed by the synchronisation daemon, make it
 * current, and compute the linear interpolation for monotonic time if needed.
 */
static void
ffclock_windup(unsigned int delta)
{
	struct ffclock_estimate *cest;
	struct fftimehands *ffth;
	struct bintime bt, gap_lerp;
	ffcounter ffdelta;
	uint64_t frac;
	unsigned int polling;
	uint8_t forward_jump, ogen;
	/*
	 * Pick the next timehand, copy current ffclock estimates and move tick
	 * times and counter forward.
	 */
	ffth = fftimehands->next;
	ogen = ffth->gen;
	ffth->gen = 0;
	cest = &ffth->cest;
	bcopy(&fftimehands->cest, cest, sizeof(struct ffclock_estimate));
	ffdelta = (ffcounter)delta;
	ffth->period_lerp = fftimehands->period_lerp;

	ffth->tick_time = fftimehands->tick_time;
	ffclock_convert_delta(ffdelta, cest->period, &bt);
	bintime_add(&ffth->tick_time, &bt);

	ffth->tick_time_lerp = fftimehands->tick_time_lerp;
	ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt);
	bintime_add(&ffth->tick_time_lerp, &bt);

	ffth->tick_ffcount = fftimehands->tick_ffcount + ffdelta;
	/*
	 * Assess the status of the clock; if the last update is too old, it is
	 * likely the synchronisation daemon is dead and the clock is free
	 * running.
	 */
	if (ffclock_updated == 0) {
		ffdelta = ffth->tick_ffcount - cest->update_ffcount;
		ffclock_convert_delta(ffdelta, cest->period, &bt);
		if (bt.sec > 2 * FFCLOCK_SKM_SCALE)
			ffclock_status |= FFCLOCK_STA_UNSYNC;
	}
	/*
	 * If available, grab updated clock estimates and make them current.
	 * Recompute time at this tick using the updated estimates. The clock
	 * estimates passed in by the feed-forward synchronisation daemon may
	 * result in a time conversion that is not monotonically increasing
	 * (just after the update). time_lerp is a particular linear
	 * interpolation over the synchronisation algo polling period that
	 * ensures monotonicity for the clock ids requesting it.
	 */
	if (ffclock_updated > 0) {
		bcopy(&ffclock_estimate, cest, sizeof(struct ffclock_estimate));
		ffdelta = ffth->tick_ffcount - cest->update_ffcount;
		ffth->tick_time = cest->update_time;
		ffclock_convert_delta(ffdelta, cest->period, &bt);
		bintime_add(&ffth->tick_time, &bt);

		/* ffclock_reset sets ffclock_updated to INT8_MAX */
		if (ffclock_updated == INT8_MAX)
			ffth->tick_time_lerp = ffth->tick_time;

		if (bintime_cmp(&ffth->tick_time, &ffth->tick_time_lerp, >))
			forward_jump = 1;
		else
			forward_jump = 0;

		bintime_clear(&gap_lerp);
		if (forward_jump) {
			gap_lerp = ffth->tick_time;
			bintime_sub(&gap_lerp, &ffth->tick_time_lerp);
		} else {
			gap_lerp = ffth->tick_time_lerp;
			bintime_sub(&gap_lerp, &ffth->tick_time);
		}
		/*
		 * The reset from the RTC clock may be far from accurate, and
		 * reducing the gap between real time and interpolated time
		 * could take a very long time if the interpolated clock insists
		 * on strict monotonicity. The clock is reset under very strict
		 * conditions (kernel time is known to be wrong and the
		 * synchronization daemon has been restarted recently).
		 * ffclock_boottime absorbs the jump to ensure boot time is
		 * correct and uptime functions stay consistent.
		 */
		if (((ffclock_status & FFCLOCK_STA_UNSYNC) == FFCLOCK_STA_UNSYNC) &&
		    ((cest->status & FFCLOCK_STA_UNSYNC) == 0) &&
		    ((cest->status & FFCLOCK_STA_WARMUP) == FFCLOCK_STA_WARMUP)) {
			if (forward_jump)
				bintime_add(&ffclock_boottime, &gap_lerp);
			else
				bintime_sub(&ffclock_boottime, &gap_lerp);
			ffth->tick_time_lerp = ffth->tick_time;
			bintime_clear(&gap_lerp);
		}

		ffclock_status = cest->status;
		ffth->period_lerp = cest->period;
		/*
		 * Compute corrected period used for the linear interpolation
		 * of time. The rate of linear interpolation is capped to
		 * 5000PPM (5ms/s).
		 */
		if (bintime_isset(&gap_lerp)) {
			ffdelta = cest->update_ffcount;
			ffdelta -= fftimehands->cest.update_ffcount;
			ffclock_convert_delta(ffdelta, cest->period, &bt);
			polling = bt.sec;
			bt.sec = 0;
			bt.frac = 5000000 * (uint64_t)18446744073LL;
			bintime_mul(&bt, polling);
			if (bintime_cmp(&gap_lerp, &bt, >))
				gap_lerp = bt;

			/* Approximate 1 sec by 1-(1/2^64) to ease arithmetic */
			frac = 0;
			if (gap_lerp.sec > 0) {
				frac -= 1;
				frac /= ffdelta / gap_lerp.sec;
			}
			frac += gap_lerp.frac / ffdelta;

			if (forward_jump)
				ffth->period_lerp += frac;
			else
				ffth->period_lerp -= frac;
		}

		ffclock_updated = 0;
	}
	if (++ogen == 0)
		ogen = 1;
	ffth->gen = ogen;
	fftimehands = ffth;
}
/*
 * Adjust the fftimehands when the timecounter is changed. Stating the obvious,
 * the old and new hardware counter cannot be read simultaneously. tc_windup()
 * does read the two counters 'back to back', but a few cycles are effectively
 * lost, and not accumulated in tick_ffcount. This is a fairly radical
 * operation for a feed-forward synchronization daemon, and it is the daemon's
 * job not to push irrelevant data to the kernel. Because there is no locking
 * here, simply force the pending or next update to be ignored, to give the
 * daemon a chance to realize the counter has changed.
 */
static void
ffclock_change_tc(struct timehands *th)
{
	struct fftimehands *ffth;
	struct ffclock_estimate *cest;
	struct timecounter *tc;
	uint8_t ogen;

	tc = th->th_counter;
	ffth = fftimehands->next;
	ogen = ffth->gen;
	ffth->gen = 0;

	cest = &ffth->cest;
	bcopy(&(fftimehands->cest), cest, sizeof(struct ffclock_estimate));
	cest->period = ((1ULL << 63) / tc->tc_frequency) << 1;
	cest->errb_abs = 0;
	cest->errb_rate = 0;
	cest->status |= FFCLOCK_STA_UNSYNC;

	ffth->tick_ffcount = fftimehands->tick_ffcount;
	ffth->tick_time_lerp = fftimehands->tick_time_lerp;
	ffth->tick_time = fftimehands->tick_time;
	ffth->period_lerp = cest->period;

	/* Do not lock but ignore next update from synchronization daemon. */
	ffclock_updated--;

	if (++ogen == 0)
		ogen = 1;
	ffth->gen = ogen;
	fftimehands = ffth;
}
/*
 * Retrieve feed-forward counter and time of last kernel tick.
 */
void
ffclock_last_tick(ffcounter *ffcount, struct bintime *bt, uint32_t flags)
{
	struct fftimehands *ffth;
	uint8_t gen;

	/*
	 * No locking but check generation has not changed. Also need to make
	 * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
	 */
	do {
		ffth = fftimehands;
		gen = ffth->gen;
		if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP)
			*bt = ffth->tick_time_lerp;
		else
			*bt = ffth->tick_time;
		*ffcount = ffth->tick_ffcount;
	} while (gen == 0 || gen != ffth->gen);
}
/*
 * Absolute clock conversion. Low level function to convert ffcounter to
 * bintime. The ffcounter is converted using the current ffclock period estimate
 * or the "interpolated period" to ensure monotonicity.
 * NOTE: this conversion may have been deferred, and the clock updated since the
 * hardware counter has been read.
 */
void
ffclock_convert_abs(ffcounter ffcount, struct bintime *bt, uint32_t flags)
{
	struct fftimehands *ffth;
	struct bintime bt2;
	ffcounter ffdelta;
	uint8_t gen;

	/*
	 * No locking but check generation has not changed. Also need to make
	 * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
	 */
	do {
		ffth = fftimehands;
		gen = ffth->gen;
		if (ffcount > ffth->tick_ffcount)
			ffdelta = ffcount - ffth->tick_ffcount;
		else
			ffdelta = ffth->tick_ffcount - ffcount;

		if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP) {
			*bt = ffth->tick_time_lerp;
			ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt2);
		} else {
			*bt = ffth->tick_time;
			ffclock_convert_delta(ffdelta, ffth->cest.period, &bt2);
		}

		if (ffcount > ffth->tick_ffcount)
			bintime_add(bt, &bt2);
		else
			bintime_sub(bt, &bt2);
	} while (gen == 0 || gen != ffth->gen);
}
/*
 * Difference clock conversion.
 * Low level function to convert a time interval measured in RAW counter units
 * into bintime. The difference clock allows measuring small intervals much more
 * reliably than the absolute clock.
 */
void
ffclock_convert_diff(ffcounter ffdelta, struct bintime *bt)
{
	struct fftimehands *ffth;
	uint8_t gen;

	/* No locking but check generation has not changed. */
	do {
		ffth = fftimehands;
		gen = ffth->gen;
		ffclock_convert_delta(ffdelta, ffth->cest.period, bt);
	} while (gen == 0 || gen != ffth->gen);
}
/*
 * Access to current ffcounter value.
 */
void
ffclock_read_counter(ffcounter *ffcount)
{
	struct timehands *th;
	struct fftimehands *ffth;
	unsigned int gen, delta;

	/*
	 * ffclock_windup() is called from tc_windup(), so it is safe to rely
	 * on th->th_generation only, for a correct delta and ffcounter.
	 */
	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		ffth = fftimehands;
		delta = tc_delta(th);
		*ffcount = ffth->tick_ffcount;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);

	*ffcount += delta;
}
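/*
 * Illustrative use of the feed-forward primitives above (editorial
 * sketch, assuming a kernel built with FFCLOCK): timestamp an event now,
 * render it as a monotonic bintime later.
 *
 *	ffcounter ffc;
 *	struct bintime bt;
 *
 *	ffclock_read_counter(&ffc);
 *	ffclock_convert_abs(ffc, &bt, FFCLOCK_LERP);
 */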
void
binuptime(struct bintime *bt)
{

	binuptime_fromclock(bt, sysclock_active);
}

void
nanouptime(struct timespec *tsp)
{

	nanouptime_fromclock(tsp, sysclock_active);
}

void
microuptime(struct timeval *tvp)
{

	microuptime_fromclock(tvp, sysclock_active);
}

void
bintime(struct bintime *bt)
{

	bintime_fromclock(bt, sysclock_active);
}

void
nanotime(struct timespec *tsp)
{

	nanotime_fromclock(tsp, sysclock_active);
}

void
microtime(struct timeval *tvp)
{

	microtime_fromclock(tvp, sysclock_active);
}

void
getbinuptime(struct bintime *bt)
{

	getbinuptime_fromclock(bt, sysclock_active);
}

void
getnanouptime(struct timespec *tsp)
{

	getnanouptime_fromclock(tsp, sysclock_active);
}

void
getmicrouptime(struct timeval *tvp)
{

	getmicrouptime_fromclock(tvp, sysclock_active);
}

void
getbintime(struct bintime *bt)
{

	getbintime_fromclock(bt, sysclock_active);
}

void
getnanotime(struct timespec *tsp)
{

	getnanotime_fromclock(tsp, sysclock_active);
}
void
getmicrotime(struct timeval *tvp)
{

	getmicrotime_fromclock(tvp, sysclock_active);
}
#endif /* FFCLOCK */
/*
 * This is a clone of getnanotime and used for walltimestamps.
 * The dtrace_ prefix prevents fbt from creating probes for
 * it so walltimestamp can be safely used in all fbt probes.
 */
void
dtrace_getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*tsp = th->th_nanotime;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}
/*
 * System clock currently providing time to the system. Modifiable via sysctl
 * when the FFCLOCK option is defined.
 */
int sysclock_active = SYSCLOCK_FBCK;

/* Internal NTP status and error estimates. */
extern int time_status;
extern long time_esterror;
/*
 * Take a snapshot of sysclock data which can be used to compare system clocks
 * and generate timestamps after the fact.
 */
void
sysclock_getsnapshot(struct sysclock_snap *clock_snap, int fast)
{
	struct fbclock_info *fbi;
	struct timehands *th;
	struct bintime bt;
	unsigned int delta, gen;
#ifdef FFCLOCK
	ffcounter ffcount;
	struct fftimehands *ffth;
	struct ffclock_info *ffi;
	struct ffclock_estimate cest;

	ffi = &clock_snap->ff_info;
#endif

	fbi = &clock_snap->fb_info;
	delta = 0;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		fbi->th_scale = th->th_scale;
		fbi->tick_time = th->th_offset;
#ifdef FFCLOCK
		ffth = fftimehands;
		ffi->tick_time = ffth->tick_time;
		ffi->tick_time_lerp = ffth->tick_time_lerp;
		ffi->period = ffth->cest.period;
		ffi->period_lerp = ffth->period_lerp;
		clock_snap->ffcount = ffth->tick_ffcount;
		cest = ffth->cest;
#endif
		if (!fast)
			delta = tc_delta(th);
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);

	clock_snap->delta = delta;
	clock_snap->sysclock_active = sysclock_active;

	/* Record feedback clock status and error. */
	clock_snap->fb_info.status = time_status;
	/* XXX: Very crude estimate of feedback clock error. */
	bt.sec = time_esterror / 1000000;
	bt.frac = ((time_esterror - bt.sec) * 1000000) *
	    (uint64_t)18446744073709ULL;
	clock_snap->fb_info.error = bt;

#ifdef FFCLOCK
	if (!fast)
		clock_snap->ffcount += delta;

	/* Record feed-forward clock leap second adjustment. */
	ffi->leapsec_adjustment = cest.leapsec_total;
	if (clock_snap->ffcount > cest.leapsec_next)
		ffi->leapsec_adjustment -= cest.leapsec;

	/* Record feed-forward clock status and error. */
	clock_snap->ff_info.status = cest.status;
	ffcount = clock_snap->ffcount - cest.update_ffcount;
	ffclock_convert_delta(ffcount, cest.period, &bt);
	/* 18446744073709 = int(2^64/1e12), err_bound_rate in [ps/s]. */
	bintime_mul(&bt, cest.errb_rate * (uint64_t)18446744073709ULL);
	/* 18446744073 = int(2^64 / 1e9), since err_abs in [ns]. */
	bintime_addx(&bt, cest.errb_abs * (uint64_t)18446744073ULL);
	clock_snap->ff_info.error = bt;
#endif
}
/*
 * Convert a sysclock snapshot into a struct bintime based on the specified
 * clock source and flags.
 */
int
sysclock_snap2bintime(struct sysclock_snap *cs, struct bintime *bt,
    int whichclock, uint32_t flags)
{
	struct bintime boottimebin;
#ifdef FFCLOCK
	struct bintime bt2;
	uint64_t period;
#endif

	switch (whichclock) {
	case SYSCLOCK_FBCK:
		*bt = cs->fb_info.tick_time;

		/* If snapshot was created with !fast, delta will be >0. */
		if (cs->delta > 0)
			bintime_addx(bt, cs->fb_info.th_scale * cs->delta);

		if ((flags & FBCLOCK_UPTIME) == 0) {
			getboottimebin(&boottimebin);
			bintime_add(bt, &boottimebin);
		}
		break;
#ifdef FFCLOCK
	case SYSCLOCK_FFWD:
		if (flags & FFCLOCK_LERP) {
			*bt = cs->ff_info.tick_time_lerp;
			period = cs->ff_info.period_lerp;
		} else {
			*bt = cs->ff_info.tick_time;
			period = cs->ff_info.period;
		}

		/* If snapshot was created with !fast, delta will be >0. */
		if (cs->delta > 0) {
			ffclock_convert_delta(cs->delta, period, &bt2);
			bintime_add(bt, &bt2);
		}

		/* Leap second adjustment. */
		if (flags & FFCLOCK_LEAPSEC)
			bt->sec -= cs->ff_info.leapsec_adjustment;

		/* Boot time adjustment, for uptime/monotonic clocks. */
		if (flags & FFCLOCK_UPTIME)
			bintime_sub(bt, &ffclock_boottime);
		break;
#endif
	default:
		return (EINVAL);
	}

	return (0);
}
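/*
 * Illustrative use of the snapshot API above (editorial sketch): take one
 * snapshot and derive timestamps from both system clocks after the fact.
 *
 *	struct sysclock_snap snap;
 *	struct bintime fb_bt, ff_bt;
 *
 *	sysclock_getsnapshot(&snap, 0);
 *	sysclock_snap2bintime(&snap, &fb_bt, SYSCLOCK_FBCK, 0);
 *	sysclock_snap2bintime(&snap, &ff_bt, SYSCLOCK_FFWD, FFCLOCK_LERP);
 */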
/*
 * Initialize a new timecounter and possibly use it.
 */
void
tc_init(struct timecounter *tc)
{
	u_int u;
	struct sysctl_oid *tc_root;

	u = tc->tc_frequency / tc->tc_counter_mask;
	/* XXX: We need some margin here, 10% is a guess */
	u *= 11;
	u /= 10;
	if (u > hz && tc->tc_quality >= 0) {
		tc->tc_quality = -2000;
		if (bootverbose) {
			printf("Timecounter \"%s\" frequency %ju Hz",
			    tc->tc_name, (uintmax_t)tc->tc_frequency);
			printf(" -- Insufficient hz, needs at least %u\n", u);
		}
	} else if (tc->tc_quality >= 0 || bootverbose) {
		printf("Timecounter \"%s\" frequency %ju Hz quality %d\n",
		    tc->tc_name, (uintmax_t)tc->tc_frequency,
		    tc->tc_quality);
	}

	tc->tc_next = timecounters;
	timecounters = tc;
	/*
	 * Set up sysctl tree for this counter.
	 */
	tc_root = SYSCTL_ADD_NODE_WITH_LABEL(NULL,
	    SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name,
	    CTLFLAG_RW, 0, "timecounter description", "timecounter");
	SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
	    "mask for implemented bits");
	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc),
	    sysctl_kern_timecounter_get, "IU", "current timecounter value");
	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "frequency", CTLTYPE_U64 | CTLFLAG_RD, tc, sizeof(*tc),
	    sysctl_kern_timecounter_freq, "QU", "timecounter frequency");
	SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "quality", CTLFLAG_RD, &(tc->tc_quality), 0,
	    "goodness of time counter");
	/*
	 * Do not automatically switch if the current tc was specifically
	 * chosen. Never automatically use a timecounter with negative quality.
	 * Even though we run on the dummy counter, switching here may be
	 * worse since this timecounter may not be monotonic.
	 */
	if (tc_chosen)
		return;
	if (tc->tc_quality < 0)
		return;
	if (tc->tc_quality < timecounter->tc_quality)
		return;
	if (tc->tc_quality == timecounter->tc_quality &&
	    tc->tc_frequency < timecounter->tc_frequency)
		return;
	(void)tc->tc_get_timecount(tc);
	(void)tc->tc_get_timecount(tc);
	timecounter = tc;
}
/* Report the frequency of the current timecounter. */
uint64_t
tc_getfrequency(void)
{

	return (timehands->th_counter->tc_frequency);
}

static struct mtx tc_setclock_mtx;
MTX_SYSINIT(tc_setclock_init, &tc_setclock_mtx, "tcsetc", MTX_SPIN);
/*
 * Step our concept of UTC. This is done by modifying our estimate of
 * the boot time.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec tbef, taft;
	struct bintime bt, bt2;

	timespec2bintime(ts, &bt);
	nanotime(&tbef);
	mtx_lock_spin(&tc_setclock_mtx);
	cpu_tick_calibrate(1);
	binuptime(&bt2);
	bintime_sub(&bt, &bt2);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup(&bt);
	mtx_unlock_spin(&tc_setclock_mtx);
	if (timestepwarnings) {
		nanotime(&taft);
		log(LOG_INFO,
		    "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
		    (intmax_t)tbef.tv_sec, tbef.tv_nsec,
		    (intmax_t)taft.tv_sec, taft.tv_nsec,
		    (intmax_t)ts->tv_sec, ts->tv_nsec);
	}
}
/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands. Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP. Slightly magic.
 */
static void
tc_windup(struct bintime *new_boottimebin)
{
	struct bintime bt;
	struct timehands *th, *tho;
	uint64_t scale;
	u_int delta, ncount, ogen;
	int i;
	time_t t;

	/*
	 * Make the next timehands a copy of the current one, but do
	 * not overwrite the generation or next pointer. While we
	 * update the contents, the generation must be zero. We need
	 * to ensure that the zero generation is visible before the
	 * data updates become visible, which requires release fence.
	 * For similar reasons, re-reading of the generation after the
	 * data is read should use acquire fence.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	atomic_thread_fence_rel();
	bcopy(tho, th, offsetof(struct timehands, th_generation));
	if (new_boottimebin != NULL)
		th->th_boottime = *new_boottimebin;

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != timecounter)
		ncount = timecounter->tc_get_timecount(timecounter);
	else
		ncount = 0;
#ifdef FFCLOCK
	ffclock_windup(delta);
#endif
	th->th_offset_count += delta;
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	while (delta > th->th_counter->tc_frequency) {
		/* Eat complete unadjusted seconds. */
		delta -= th->th_counter->tc_frequency;
		th->th_offset.sec++;
	}
	if ((delta > th->th_counter->tc_frequency / 2) &&
	    (th->th_scale * delta < ((uint64_t)1 << 63))) {
		/* The product th_scale * delta just barely overflows. */
		th->th_offset.sec++;
	}
	bintime_addx(&th->th_offset, th->th_scale * delta);

	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them. There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which might
	 * have a different rate than the current NTP second. It doesn't
	 * matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);

	/*
	 * Deal with NTP second processing. The for loop normally
	 * iterates at most once, but in extreme situations it might
	 * keep NTP sane if timeouts are not run for several seconds.
	 * At boot, the time step can be large when the TOD hardware
	 * has been read, so on really large steps, we call
	 * ntp_update_second only twice. We need to call it twice in
	 * case we missed a leap second.
	 */
	bt = th->th_offset;
	bintime_add(&bt, &th->th_boottime);
	i = bt.sec - tho->th_microtime.tv_sec;
	if (i > LARGE_STEP)
		i = 2;
	for (; i > 0; i--) {
		t = bt.sec;
		ntp_update_second(&th->th_adjustment, &bt.sec);
		if (bt.sec != t)
			th->th_boottime.sec += bt.sec - t;
	}
	th->th_bintime = th->th_offset;
	bintime_add(&th->th_bintime, &th->th_boottime);
	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here. Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != timecounter) {
#ifndef __arm__
		if ((timecounter->tc_flags & TC_FLAGS_C2STOP) != 0)
			cpu_disable_c2_sleep++;
		if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0)
			cpu_disable_c2_sleep--;
#endif
		th->th_counter = timecounter;
		th->th_offset_count = ncount;
		tc_min_ticktock_freq = max(1, timecounter->tc_frequency /
		    (((uint64_t)timecounter->tc_counter_mask + 1) / 3));
#ifdef FFCLOCK
		ffclock_change_tc(th);
#endif
	}

	/*
	 * Recalculate the scaling factor. We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, that
	 * leaves no suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment. On a
	 * 5000PPM adjustment this is a 0.05PPM error. This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
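	 *
	 * (Editorial check of the figures above: 2199 / 512 = 4.294921875,
	 * while 2^32 / 10^9 = 4.294967296; the ratio falls short by about
	 * 1.06e-5, i.e. the ~10PPM undercompensation quoted above.)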
	 */
	scale = (uint64_t)1 << 63;
	scale += (th->th_adjustment / 1024) * 2199;
	scale /= th->th_counter->tc_frequency;
	th->th_scale = scale * 2;

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	atomic_store_rel_int(&th->th_generation, ogen);

	/* Go live with the new struct timehands. */
#ifdef FFCLOCK
	switch (sysclock_active) {
	case SYSCLOCK_FBCK:
#endif
		time_second = th->th_microtime.tv_sec;
		time_uptime = th->th_offset.sec;
#ifdef FFCLOCK
		break;
	case SYSCLOCK_FFWD:
		time_second = fftimehands->tick_time_lerp.sec;
		time_uptime = fftimehands->tick_time_lerp.sec -
		    ffclock_boottime.sec;
		break;
	}
#endif

	timehands = th;
	timekeep_push_vdso();
}
/* Report or change the active timecounter hardware. */
static int
sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
{
	char newname[32];
	struct timecounter *newtc, *tc;
	int error;

	tc = timecounter;
	strlcpy(newname, tc->tc_name, sizeof(newname));

	error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/* Record that the tc in use now was specifically chosen. */
	tc_chosen = 1;
	if (strcmp(newname, tc->tc_name) == 0)
		return (0);
	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;

		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);

		timecounter = newtc;

		/*
		 * The vdso timehands update is deferred until the next
		 * 'tc_windup()'.
		 *
		 * This is prudent given that 'timekeep_push_vdso()' does not
		 * use any locking and that it can be called in hard interrupt
		 * context via 'tc_windup()'.
		 */
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_kern_timecounter_hardware, "A",
    "Timecounter hardware selected");
/* Report the available timecounter hardware. */
static int
sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sb;
	struct timecounter *tc;
	int error;

	sbuf_new_for_sysctl(&sb, NULL, 0, req);
	for (tc = timecounters; tc != NULL; tc = tc->tc_next) {
		if (tc != timecounters)
			sbuf_putc(&sb, ' ');
		sbuf_printf(&sb, "%s(%d)", tc->tc_name, tc->tc_quality);
	}
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, sysctl_kern_timecounter_choice, "A", "Timecounter hardware detected");
/*
 * RFC 2783 PPS-API implementation.
 */

/*
 * Return true if the driver is aware of the abi version extensions in the
 * pps_state structure, and it supports at least the given abi version number.
 */
static int
abi_aware(struct pps_state *pps, int vers)
{

	return ((pps->kcmode & KCMODE_ABIFLAG) && pps->driver_abi >= vers);
}
static int
pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
{
	int err, timo;
	pps_seq_t aseq, cseq;
	struct timeval tv;

	if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
		return (EINVAL);

	/*
	 * If no timeout is requested, immediately return whatever values were
	 * most recently captured. If timeout seconds is -1, that's a request
	 * to block without a timeout. WITNESS won't let us sleep forever
	 * without a lock (we really don't need a lock), so just repeatedly
	 * sleep a long time.
	 */
	if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec) {
		if (fapi->timeout.tv_sec == -1)
			timo = 0x7fffffff;
		else {
			tv.tv_sec = fapi->timeout.tv_sec;
			tv.tv_usec = fapi->timeout.tv_nsec / 1000;
			timo = tvtohz(&tv);
		}
		aseq = pps->ppsinfo.assert_sequence;
		cseq = pps->ppsinfo.clear_sequence;
		while (aseq == pps->ppsinfo.assert_sequence &&
		    cseq == pps->ppsinfo.clear_sequence) {
			if (abi_aware(pps, 1) && pps->driver_mtx != NULL) {
				if (pps->flags & PPSFLAG_MTX_SPIN) {
					err = msleep_spin(pps, pps->driver_mtx,
					    "ppsfch", timo);
				} else {
					err = msleep(pps, pps->driver_mtx, PCATCH,
					    "ppsfch", timo);
				}
			} else {
				err = tsleep(pps, PCATCH, "ppsfch", timo);
			}
			if (err == EWOULDBLOCK) {
				if (fapi->timeout.tv_sec == -1) {
					continue;
				} else {
					return (ETIMEDOUT);
				}
			} else if (err != 0) {
				return (err);
			}
		}
	}

	pps->ppsinfo.current_mode = pps->ppsparam.mode;
	fapi->pps_info_buf = pps->ppsinfo;

	return (0);
}
int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef FFCLOCK
	struct pps_fetch_ffc_args *fapi_ffc;
#endif
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	KASSERT(pps != NULL, ("NULL pps pointer in pps_ioctl"));
	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
#ifdef FFCLOCK
		/* Ensure only a single clock is selected for ffc timestamp. */
		if ((app->mode & PPS_TSCLK_MASK) == PPS_TSCLK_MASK)
			return (EINVAL);
#endif
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		return (pps_fetch(fapi, pps));
#ifdef FFCLOCK
	case PPS_IOC_FETCH_FFCOUNTER:
		fapi_ffc = (struct pps_fetch_ffc_args *)data;
		if (fapi_ffc->tsformat && fapi_ffc->tsformat !=
		    PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi_ffc->timeout.tv_sec || fapi_ffc->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo_ffc.current_mode = pps->ppsparam.mode;
		fapi_ffc->pps_info_buf_ffc = pps->ppsinfo_ffc;
		/* Overwrite timestamps if feedback clock selected. */
		switch (pps->ppsparam.mode & PPS_TSCLK_MASK) {
		case PPS_TSCLK_FBCK:
			fapi_ffc->pps_info_buf_ffc.assert_timestamp =
			    pps->ppsinfo.assert_timestamp;
			fapi_ffc->pps_info_buf_ffc.clear_timestamp =
			    pps->ppsinfo.clear_timestamp;
			break;
		case PPS_TSCLK_FFWD:
			break;
		default:
			break;
		}
		return (0);
#endif /* FFCLOCK */
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = (kapi->edge & KCMODE_EDGEMASK) |
		    (pps->kcmode & KCMODE_ABIFLAG);
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}
void
pps_init(struct pps_state *pps)
{

	pps->ppscap |= PPS_TSFMT_TSPEC | PPS_CANWAIT;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
#ifdef FFCLOCK
	pps->ppscap |= PPS_TSCLK_MASK;
#endif
	pps->kcmode &= ~KCMODE_ABIFLAG;
}
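/*
 * Typical driver usage of the capture path (editorial sketch; "sc" is a
 * hypothetical softc embedding a struct pps_state): latch the counter in
 * the interrupt handler, then do the heavier conversion work afterwards.
 *
 *	pps_capture(&sc->pps);
 *	pps_event(&sc->pps, PPS_CAPTUREASSERT);
 */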
void
pps_init_abi(struct pps_state *pps)
{

	pps_init(pps);
	if (pps->driver_abi > 0) {
		pps->kcmode |= KCMODE_ABIFLAG;
		pps->kernel_abi = PPS_ABI_VERSION;
	}
}
void
pps_capture(struct pps_state *pps)
{
	struct timehands *th;

	KASSERT(pps != NULL, ("NULL pps pointer in pps_capture"));
	th = timehands;
	pps->capgen = atomic_load_acq_int(&th->th_generation);
	pps->capth = th;
#ifdef FFCLOCK
	pps->capffth = fftimehands;
#endif
	pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
	atomic_thread_fence_acq();
	if (pps->capgen != th->th_generation)
		pps->capgen = 0;
}
void
pps_event(struct pps_state *pps, int event)
{
	struct bintime bt;
	struct timespec ts, *tsp, *osp;
	u_int tcount, *pcount;
	int foff;
	pps_seq_t *pseq;
#ifdef FFCLOCK
	struct timespec *tsp_ffc;
	pps_seq_t *pseq_ffc;
	ffcounter *ffcount;
#endif
#ifdef PPS_SYNC
	int fhard;
#endif

	KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
	/* Nothing to do if not currently set to capture this event type. */
	if ((event & pps->ppsparam.mode) == 0)
		return;
	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen == 0 || pps->capgen !=
	    atomic_load_acq_int(&pps->capth->th_generation))
		return;

	/* Things would be easier with arrays. */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
#endif
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
#ifdef FFCLOCK
		ffcount = &pps->ppsinfo_ffc.assert_ffcount;
		tsp_ffc = &pps->ppsinfo_ffc.assert_timestamp;
		pseq_ffc = &pps->ppsinfo_ffc.assert_sequence;
#endif
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
#endif
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
#ifdef FFCLOCK
		ffcount = &pps->ppsinfo_ffc.clear_ffcount;
		tsp_ffc = &pps->ppsinfo_ffc.clear_timestamp;
		pseq_ffc = &pps->ppsinfo_ffc.clear_sequence;
#endif
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS-stuff until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		*pcount = pps->capcount;
		pps->ppscount[2] = pps->capcount;
		return;
	}

	/* Convert the count to a timespec. */
	tcount = pps->capcount - pps->capth->th_offset_count;
	tcount &= pps->capth->th_counter->tc_counter_mask;
	bt = pps->capth->th_bintime;
	bintime_addx(&bt, pps->capth->th_scale * tcount);
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	atomic_thread_fence_acq();
	if (pps->capgen != pps->capth->th_generation)
		return;

	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}

#ifdef FFCLOCK
	*ffcount = pps->capffth->tick_ffcount + tcount;
	bt = pps->capffth->tick_time;
	ffclock_convert_delta(tcount, pps->capffth->cest.period, &bt);
	bintime_add(&bt, &pps->capffth->tick_time);
	bintime2timespec(&bt, &ts);
	(*pseq_ffc)++;
	*tsp_ffc = ts;
#endif

#ifdef PPS_SYNC
	if (fhard) {
		uint64_t scale;

		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many (hardware) nanoseconds
		 * elapsed since the previous event.
		 */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;
		scale = (uint64_t)1 << 63;
		scale /= pps->capth->th_counter->tc_frequency;
		scale *= 2;
		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, scale * tcount);
		bintime2timespec(&bt, &ts);
		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
	}
#endif
	/* Wakeup anyone sleeping in pps_fetch(). */
	wakeup(pps);
}
/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing. Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */
static int tc_tick;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0,
    "Approximate number of hardclock ticks in a millisecond");
void
tc_ticktock(int cnt)
{
	static int count;

	if (mtx_trylock_spin(&tc_setclock_mtx)) {
		count += cnt;
		if (count >= tc_tick) {
			count = 0;
			tc_windup(NULL);
		}
		mtx_unlock_spin(&tc_setclock_mtx);
	}
}
static void __inline
tc_adjprecision(void)
{
	int t;

	if (tc_timepercentage > 0) {
		t = (99 + tc_timepercentage) / tc_timepercentage;
		tc_precexp = fls(t + (t >> 1)) - 1;
		FREQ2BT(hz / tc_tick, &bt_timethreshold);
		FREQ2BT(hz, &bt_tickthreshold);
		bintime_shift(&bt_timethreshold, tc_precexp);
		bintime_shift(&bt_tickthreshold, tc_precexp);
	} else {
		tc_precexp = 31;
		bt_timethreshold.sec = INT_MAX;
		bt_timethreshold.frac = ~(uint64_t)0;
		bt_tickthreshold = bt_timethreshold;
	}
	sbt_timethreshold = bttosbt(bt_timethreshold);
	sbt_tickthreshold = bttosbt(bt_tickthreshold);
}
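/*
 * Editorial worked example: with tc_timepercentage = 5, t = (99 + 5) / 5
 * = 20, t + (t >> 1) = 30 and fls(30) = 5, so tc_precexp = 4 and both
 * thresholds are shifted left by 4 bits, i.e. scaled up 16x.
 */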
static int
sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = tc_timepercentage;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	tc_timepercentage = val;
	tc_adjprecision();
	return (0);
}
static void
inittimecounter(void *dummy)
{
	u_int p;
	int tick_rate;

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its initial value, since that value is the
	 * smallest reasonable one. If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
	tc_adjprecision();
	FREQ2BT(hz, &tick_bt);
	tick_sbt = bttosbt(tick_bt);
	tick_rate = hz / tc_tick;
	FREQ2BT(tick_rate, &tc_tick_bt);
	tc_tick_sbt = bttosbt(tc_tick_bt);
	p = (tc_tick * 1000000) / hz;
	printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);

#ifdef FFCLOCK
	ffclock_init();
#endif
	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
	mtx_lock_spin(&tc_setclock_mtx);
	tc_windup(NULL);
	mtx_unlock_spin(&tc_setclock_mtx);
}

SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
/* Cpu tick handling -------------------------------------------------*/

static int cpu_tick_variable;
static uint64_t cpu_tick_frequency;

static DPCPU_DEFINE(uint64_t, tc_cpu_ticks_base);
static DPCPU_DEFINE(unsigned, tc_cpu_ticks_last);

static uint64_t
tc_cpu_ticks(void)
{
	struct timecounter *tc;
	uint64_t res, *base;
	unsigned u, *last;

	critical_enter();
	base = DPCPU_PTR(tc_cpu_ticks_base);
	last = DPCPU_PTR(tc_cpu_ticks_last);
	tc = timehands->th_counter;
	u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
	if (u < *last)
		*base += (uint64_t)tc->tc_counter_mask + 1;
	*last = u;
	res = *base + u;
	critical_exit();
	return (res);
}
void
cpu_tick_calibration(void)
{
	static time_t last_calib;

	if (time_uptime != last_calib && !(time_uptime & 0xf)) {
		cpu_tick_calibrate(0);
		last_calib = time_uptime;
	}
}
/*
 * This function gets called every 16 seconds on only one designated
 * CPU in the system from hardclock() via cpu_tick_calibration().
 *
 * Whenever the real time clock is stepped we get called with reset=1
 * to make sure we handle suspend/resume and similar events correctly.
 */
static void
cpu_tick_calibrate(int reset)
{
	static uint64_t c_last;
	uint64_t c_this, c_delta;
	static struct bintime t_last;
	struct bintime t_this, t_delta;
	uint32_t divi;

	if (reset) {
		/* The clock was stepped, abort & reset. */
		t_last.sec = 0;
		return;
	}

	/* we don't calibrate fixed rate cputicks */
	if (!cpu_tick_variable)
		return;

	getbinuptime(&t_this);
	c_this = cpu_ticks();
	if (t_last.sec != 0) {
		c_delta = c_this - c_last;
		t_delta = t_this;
		bintime_sub(&t_delta, &t_last);
		/*
		 * Headroom:
		 *	2^(64-20) / 16[s] =
		 *	2^(44) / 16[s] =
		 *	17,592,186,044,416 / 16 =
		 *	1,099,511,627,776 [Hz]
		 */
		divi = t_delta.sec << 20;
		divi |= t_delta.frac >> (64 - 20);
		c_delta <<= 20;
		c_delta /= divi;
		if (c_delta > cpu_tick_frequency) {
			if (0 && bootverbose)
				printf("cpu_tick increased to %ju Hz\n",
				    c_delta);
			cpu_tick_frequency = c_delta;
		}
	}
	c_last = c_this;
	t_last = t_this;
}
void
set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var)
{

	if (func == NULL) {
		cpu_ticks = tc_cpu_ticks;
	} else {
		cpu_tick_frequency = freq;
		cpu_tick_variable = var;
		cpu_ticks = func;
	}
}

uint64_t
cpu_tickrate(void)
{

	if (cpu_ticks == tc_cpu_ticks)
		return (tc_getfrequency());
	return (cpu_tick_frequency);
}
/*
 * We need to be slightly careful converting cputicks to microseconds.
 * There is plenty of margin in 64 bits of microseconds (half a million
 * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
 * before divide conversion (to retain precision) we find that the
 * margin shrinks to 1.5 hours (one millionth of 146y).
 * With a three prong approach we never lose significant bits, no
 * matter what the cputick rate and length of timeinterval is.
 */
uint64_t
cputick2usec(uint64_t tick)
{

	if (tick > 18446744073709551LL)		/* floor(2^64 / 1000) */
		return (tick / (cpu_tickrate() / 1000000LL));
	else if (tick > 18446744073709LL)	/* floor(2^64 / 1000000) */
		return ((tick * 1000LL) / (cpu_tickrate() / 1000LL));
	else
		return ((tick * 1000000LL) / cpu_tickrate());
}
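/*
 * Editorial worked example of the three prongs at a 4 GHz tick rate:
 * tick = 2^60 (~9 years) exceeds floor(2^64 / 1000) and takes the first
 * branch; tick = 2^50 (~3 days) can be multiplied by 1000 first without
 * overflow; tick = 2^40 (~4.6 minutes) survives the full multiply by
 * 1000000 before the divide, losing no significant bits.
 */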
cpu_tick_f *cpu_ticks = tc_cpu_ticks;

static int vdso_th_enable = 1;
static int
sysctl_fast_gettime(SYSCTL_HANDLER_ARGS)
{
	int old_vdso_th_enable, error;

	old_vdso_th_enable = vdso_th_enable;
	error = sysctl_handle_int(oidp, &old_vdso_th_enable, 0, req);
	if (error != 0)
		return (error);
	vdso_th_enable = old_vdso_th_enable;
	return (0);
}
SYSCTL_PROC(_kern_timecounter, OID_AUTO, fast_gettime,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_fast_gettime, "I", "Enable fast time of day");
uint32_t
tc_fill_vdso_timehands(struct vdso_timehands *vdso_th)
{
	struct timehands *th;
	uint32_t enabled;

	th = timehands;
	vdso_th->th_scale = th->th_scale;
	vdso_th->th_offset_count = th->th_offset_count;
	vdso_th->th_counter_mask = th->th_counter->tc_counter_mask;
	vdso_th->th_offset = th->th_offset;
	vdso_th->th_boottime = th->th_boottime;
	if (th->th_counter->tc_fill_vdso_timehands != NULL) {
		enabled = th->th_counter->tc_fill_vdso_timehands(vdso_th,
		    th->th_counter);
	} else
		enabled = 0;
	if (!vdso_th_enable)
		enabled = 0;
	return (enabled);
}
#ifdef COMPAT_FREEBSD32
uint32_t
tc_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
{
	struct timehands *th;
	uint32_t enabled;

	th = timehands;
	*(uint64_t *)&vdso_th32->th_scale[0] = th->th_scale;
	vdso_th32->th_offset_count = th->th_offset_count;
	vdso_th32->th_counter_mask = th->th_counter->tc_counter_mask;
	vdso_th32->th_offset.sec = th->th_offset.sec;
	*(uint64_t *)&vdso_th32->th_offset.frac[0] = th->th_offset.frac;
	vdso_th32->th_boottime.sec = th->th_boottime.sec;
	*(uint64_t *)&vdso_th32->th_boottime.frac[0] = th->th_boottime.frac;
	if (th->th_counter->tc_fill_vdso_timehands32 != NULL) {
		enabled = th->th_counter->tc_fill_vdso_timehands32(vdso_th32,
		    th->th_counter);
	} else
		enabled = 0;
	if (!vdso_th_enable)
		enabled = 0;
	return (enabled);
}
#endif