/*-
 * SPDX-License-Identifier: Beerware
 *
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * Copyright (c) 2011, 2015, 2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Julien Ridoux at the University
 * of Melbourne under sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ffclock.h"
#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/timeffc.h>
#include <sys/timepps.h>
#include <sys/timetc.h>
#include <sys/timex.h>
#include <sys/vdso.h>
/*
 * A large step happens on boot.  This constant detects such steps.
 * It is relatively small so that ntp_update_second gets called enough
 * in the typical 'missed a couple of seconds' case, but doesn't loop
 * forever when the time step is large.
 */
#define LARGE_STEP	200
/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * time services.
 */

static u_int
dummy_get_timecount(struct timecounter *tc)
{
	static u_int now;

	return (++now);
}

static struct timecounter dummy_timecounter = {
	dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
};
struct timehands {
	/* These fields must be initialized by the driver. */
	struct timecounter	*th_counter;
	int64_t			th_adjustment;
	uint64_t		th_scale;
	u_int			th_offset_count;
	struct bintime		th_offset;
	struct bintime		th_bintime;
	struct timeval		th_microtime;
	struct timespec		th_nanotime;
	struct bintime		th_boottime;
	/* Fields not to be copied in tc_windup start with th_generation. */
	volatile u_int		th_generation;
	struct timehands	*th_next;
};

static struct timehands ths[16] = {
    [0] = {
	.th_counter = &dummy_timecounter,
	.th_scale = (uint64_t)-1 / 1000000,
	.th_offset = { .sec = 1 },
	.th_generation = 1,
    },
};
static struct timehands *volatile timehands = &ths[0];
struct timecounter *timecounter = &dummy_timecounter;
static struct timecounter *timecounters = &dummy_timecounter;

int tc_min_ticktock_freq = 1;

volatile time_t time_second = 1;
volatile time_t time_uptime = 1;
static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
    NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");

SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, "");

static int timestepwarnings;
SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
    &timestepwarnings, 0, "Log time steps");

static int timehands_count = 2;
SYSCTL_INT(_kern_timecounter, OID_AUTO, timehands_count,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &timehands_count, 0, "Count of timehands in rotation");
struct bintime bt_timethreshold;
struct bintime bt_tickthreshold;
sbintime_t sbt_timethreshold;
sbintime_t sbt_tickthreshold;
struct bintime tc_tick_bt;
sbintime_t tc_tick_sbt;
int tc_precexp;

int tc_timepercentage = TC_DEFAULTPERC;
static int sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_timecounter, OID_AUTO, alloweddeviation,
    CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 0, 0,
    sysctl_kern_timecounter_adjprecision, "I",
    "Allowed time interval deviation in percent");

volatile int rtc_generation = 1;

static int tc_chosen;	/* Non-zero if a specific tc was chosen via sysctl. */

static void tc_windup(struct bintime *new_boottimebin);
static void cpu_tick_calibrate(int);

void dtrace_getnanotime(struct timespec *tsp);
static int
sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
{
	struct timeval boottime;

	getboottime(&boottime);

/* i386 is the only arch which uses a 32-bit time_t */
#ifdef __amd64__
#ifdef SCTL_MASK32
	int tv[2];

	if (req->flags & SCTL_MASK32) {
		tv[0] = boottime.tv_sec;
		tv[1] = boottime.tv_usec;
		return (SYSCTL_OUT(req, tv, sizeof(tv)));
	}
#endif
#endif

	return (SYSCTL_OUT(req, &boottime, sizeof(boottime)));
}
static int
sysctl_kern_timecounter_get(SYSCTL_HANDLER_ARGS)
{
	u_int ncount;
	struct timecounter *tc = arg1;

	ncount = tc->tc_get_timecount(tc);
	return (sysctl_handle_int(oidp, &ncount, 0, req));
}

static int
sysctl_kern_timecounter_freq(SYSCTL_HANDLER_ARGS)
{
	uint64_t freq;
	struct timecounter *tc = arg1;

	freq = tc->tc_frequency;
	return (sysctl_handle_64(oidp, &freq, 0, req));
}
/*
 * Return the difference between the timehands' counter value now and what
 * it was when we copied it to the timehands' offset_count.
 */
static __inline u_int
tc_delta(struct timehands *th)
{
	struct timecounter *tc;

	tc = th->th_counter;
	return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
	    tc->tc_counter_mask);
}
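/*
 * Editorial example (not in the original source): the unsigned subtraction
 * plus mask makes the delta immune to counter wraparound.  With a 24-bit
 * counter (tc_counter_mask == 0xffffff), th_offset_count == 0xfffffe and a
 * current reading of 0x000003:
 *
 *	(0x000003 - 0xfffffe) & 0xffffff == 5
 *
 * i.e. five ticks elapsed across the wrap.
 */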
/*
 * Functions for reading the time.  We have to loop until we are sure that
 * the timehands that we operated on was not updated under our feet.  See
 * the comment in <sys/time.h> for a description of these 12 functions.
 */
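/*
 * Editorial note (not in the original source): every reader below follows
 * the same lockless pattern against tc_windup():
 *
 *	do {
 *		th = timehands;
 *		gen = atomic_load_acq_int(&th->th_generation);
 *		... copy the fields of interest ...
 *		atomic_thread_fence_acq();
 *	} while (gen == 0 || gen != th->th_generation);
 *
 * A generation of zero means tc_windup() is mid-update; a generation that
 * changed under us means the copy raced an update.  Either way, retry.
 */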
#ifdef FFCLOCK
void
fbclock_binuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	fbclock_binuptime(&bt);
	bintime2timespec(&bt, tsp);
}

void
fbclock_microuptime(struct timeval *tvp)
{
	struct bintime bt;

	fbclock_binuptime(&bt);
	bintime2timeval(&bt, tvp);
}

void
fbclock_bintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*bt = th->th_bintime;
		bintime_addx(bt, th->th_scale * tc_delta(th));
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_nanotime(struct timespec *tsp)
{
	struct bintime bt;

	fbclock_bintime(&bt);
	bintime2timespec(&bt, tsp);
}

void
fbclock_microtime(struct timeval *tvp)
{
	struct bintime bt;

	fbclock_bintime(&bt);
	bintime2timeval(&bt, tvp);
}

void
fbclock_getbinuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*bt = th->th_offset;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		bintime2timespec(&th->th_offset, tsp);
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		bintime2timeval(&th->th_offset, tvp);
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_getbintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*bt = th->th_bintime;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*tsp = th->th_nanotime;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*tvp = th->th_microtime;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}
#else /* !FFCLOCK */
void
binuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	binuptime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microuptime(struct timeval *tvp)
{
	struct bintime bt;

	binuptime(&bt);
	bintime2timeval(&bt, tvp);
}

void
bintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*bt = th->th_bintime;
		bintime_addx(bt, th->th_scale * tc_delta(th));
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
nanotime(struct timespec *tsp)
{
	struct bintime bt;

	bintime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microtime(struct timeval *tvp)
{
	struct bintime bt;

	bintime(&bt);
	bintime2timeval(&bt, tvp);
}

void
getbinuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*bt = th->th_offset;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		bintime2timespec(&th->th_offset, tsp);
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		bintime2timeval(&th->th_offset, tvp);
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
getbintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*bt = th->th_bintime;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*tsp = th->th_nanotime;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*tvp = th->th_microtime;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}
#endif /* FFCLOCK */
void
getboottime(struct timeval *boottime)
{
	struct bintime boottimebin;

	getboottimebin(&boottimebin);
	bintime2timeval(&boottimebin, boottime);
}

void
getboottimebin(struct bintime *boottimebin)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*boottimebin = th->th_boottime;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}
#ifdef FFCLOCK
/*
 * Support for feed-forward synchronization algorithms.  This is heavily
 * inspired by the timehands mechanism but kept independent from it.
 * *_windup() functions have some connection to avoid accessing the
 * timecounter hardware more than necessary.
 */

/* Feed-forward clock estimates kept updated by the synchronization daemon. */
struct ffclock_estimate ffclock_estimate;
struct bintime ffclock_boottime;	/* Feed-forward boot time estimate. */
uint32_t ffclock_status;		/* Feed-forward clock status. */
int8_t ffclock_updated;			/* New estimates are available. */
struct mtx ffclock_mtx;			/* Mutex on ffclock_estimate. */

struct fftimehands {
	struct ffclock_estimate	cest;
	struct bintime		tick_time;
	struct bintime		tick_time_lerp;
	ffcounter		tick_ffcount;
	uint64_t		period_lerp;
	volatile uint8_t	gen;
	struct fftimehands	*next;
};

#define	NUM_ELEMENTS(x) (sizeof(x) / sizeof(*x))
static struct fftimehands ffth[10];
static struct fftimehands *volatile fftimehands = ffth;

static void
ffclock_init(void)
{
	struct fftimehands *cur;
	struct fftimehands *last;

	memset(ffth, 0, sizeof(ffth));

	last = ffth + NUM_ELEMENTS(ffth) - 1;
	for (cur = ffth; cur < last; cur++)
		cur->next = cur + 1;
	last->next = ffth;

	ffclock_updated = 0;
	ffclock_status = FFCLOCK_STA_UNSYNC;
	mtx_init(&ffclock_mtx, "ffclock lock", NULL, MTX_DEF);
}
/*
 * Reset the feed-forward clock estimates.  Called from inittodr() to get
 * things kick started, using the timecounter nominal frequency as a first
 * period estimate.  Note: this function may be called several times just
 * after boot.  Note: this is the only function that sets the value of boot
 * time for the monotonic (i.e. uptime) version of the feed-forward clock.
 */
static void
ffclock_reset_clock(struct timespec *ts)
{
	struct timecounter *tc;
	struct ffclock_estimate cest;

	tc = timehands->th_counter;
	memset(&cest, 0, sizeof(struct ffclock_estimate));

	timespec2bintime(ts, &ffclock_boottime);
	timespec2bintime(ts, &(cest.update_time));
	ffclock_read_counter(&cest.update_ffcount);
	cest.leapsec_next = 0;
	cest.period = ((1ULL << 63) / tc->tc_frequency) << 1;
	cest.errb_abs = 0;
	cest.errb_rate = 0;
	cest.status = FFCLOCK_STA_UNSYNC;
	cest.leapsec_total = 0;
	cest.leapsec = 0;

	mtx_lock(&ffclock_mtx);
	bcopy(&cest, &ffclock_estimate, sizeof(struct ffclock_estimate));
	ffclock_updated = INT8_MAX;
	mtx_unlock(&ffclock_mtx);

	printf("ffclock reset: %s (%llu Hz), time = %ld.%09lu\n", tc->tc_name,
	    (unsigned long long)tc->tc_frequency, (long)ts->tv_sec,
	    (unsigned long)ts->tv_nsec);
}
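/*
 * Editorial note (not in the original source): cest.period is the counter
 * period as a 64-bit binary fraction of a second.  Writing it as
 * ((1ULL << 63) / freq) << 1 sidesteps the impossible 1ULL << 64; e.g. for
 * a 1 MHz counter, period == 2 * (2^63 / 10^6) == 18446744073708, i.e. one
 * microsecond in 2^-64 second units, with only the lowest bit lost to the
 * halved division.
 */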
/*
 * Sub-routine to convert a time interval measured in RAW counter units to
 * time in seconds stored in bintime format.
 * NOTE: bintime_mul requires u_int, but the value of the ffcounter may be
 * larger than the max value of u_int (on 32 bit architectures).  Loop to
 * consume the ffcounter in chunks of at most "delta_max".
 */
static void
ffclock_convert_delta(ffcounter ffdelta, uint64_t period, struct bintime *bt)
{
	struct bintime bt2;
	ffcounter delta, delta_max;

	delta_max = (1ULL << (8 * sizeof(unsigned int))) - 1;
	bintime_clear(bt);
	do {
		if (ffdelta > delta_max)
			delta = delta_max;
		else
			delta = ffdelta;
		bt2.sec = 0;
		bt2.frac = period;
		bintime_mul(&bt2, (unsigned int)delta);
		bintime_add(bt, &bt2);
		ffdelta -= delta;
	} while (ffdelta > 0);
}
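/*
 * Editorial example (not in the original source): on a machine where
 * unsigned int is 32 bits, converting ffdelta == 2^33 takes three passes:
 * 2^32 - 1, then 2^32 - 1 again, then the 2 leftover counter units, each
 * pass accumulating period * chunk into *bt.
 */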
/*
 * Update the fftimehands.
 * Push the tick ffcount and time(s) forward based on the current clock
 * estimate.  The conversion from ffcounter to bintime relies on the
 * difference clock principle, whose accuracy relies on computing small time
 * intervals.  If a new clock estimate has been passed in by the
 * synchronisation daemon, make it current, and compute the linear
 * interpolation for monotonic time if needed.
 */
static void
ffclock_windup(unsigned int delta)
{
	struct ffclock_estimate *cest;
	struct fftimehands *ffth;
	struct bintime bt, gap_lerp;
	ffcounter ffdelta;
	uint64_t frac;
	unsigned int polling;
	uint8_t forward_jump, ogen;

	/*
	 * Pick the next timehand, copy current ffclock estimates and move
	 * tick times and counter forward.
	 */
	forward_jump = 0;
	ffth = fftimehands->next;
	ogen = ffth->gen;
	ffth->gen = 0;
	cest = &ffth->cest;
	bcopy(&fftimehands->cest, cest, sizeof(struct ffclock_estimate));
	ffdelta = (ffcounter)delta;
	ffth->period_lerp = fftimehands->period_lerp;

	ffth->tick_time = fftimehands->tick_time;
	ffclock_convert_delta(ffdelta, cest->period, &bt);
	bintime_add(&ffth->tick_time, &bt);

	ffth->tick_time_lerp = fftimehands->tick_time_lerp;
	ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt);
	bintime_add(&ffth->tick_time_lerp, &bt);

	ffth->tick_ffcount = fftimehands->tick_ffcount + ffdelta;
	/*
	 * Assess the status of the clock; if the last update is too old, it
	 * is likely the synchronisation daemon is dead and the clock is free
	 * running.
	 */
	if (ffclock_updated == 0) {
		ffdelta = ffth->tick_ffcount - cest->update_ffcount;
		ffclock_convert_delta(ffdelta, cest->period, &bt);
		if (bt.sec > 2 * FFCLOCK_SKM_SCALE)
			ffclock_status |= FFCLOCK_STA_UNSYNC;
	}

	/*
	 * If available, grab updated clock estimates and make them current.
	 * Recompute time at this tick using the updated estimates.  The clock
	 * estimates passed in by the feed-forward synchronisation daemon may
	 * result in time conversion that is not monotonically increasing
	 * (just after the update).  time_lerp is a particular linear
	 * interpolation over the synchronisation algo's polling period that
	 * ensures monotonicity for the clock ids requesting it.
	 */
	if (ffclock_updated > 0) {
		bcopy(&ffclock_estimate, cest, sizeof(struct ffclock_estimate));
		ffdelta = ffth->tick_ffcount - cest->update_ffcount;
		ffth->tick_time = cest->update_time;
		ffclock_convert_delta(ffdelta, cest->period, &bt);
		bintime_add(&ffth->tick_time, &bt);

		/* ffclock_reset sets ffclock_updated to INT8_MAX */
		if (ffclock_updated == INT8_MAX)
			ffth->tick_time_lerp = ffth->tick_time;

		if (bintime_cmp(&ffth->tick_time, &ffth->tick_time_lerp, >))
			forward_jump = 1;
		else
			forward_jump = 0;

		bintime_clear(&gap_lerp);
		if (forward_jump) {
			gap_lerp = ffth->tick_time;
			bintime_sub(&gap_lerp, &ffth->tick_time_lerp);
		} else {
			gap_lerp = ffth->tick_time_lerp;
			bintime_sub(&gap_lerp, &ffth->tick_time);
		}
		/*
		 * The reset from the RTC clock may be far from accurate, and
		 * reducing the gap between real time and interpolated time
		 * could take a very long time if the interpolated clock
		 * insists on strict monotonicity.  The clock is reset under
		 * very strict conditions (kernel time is known to be wrong
		 * and the synchronization daemon has been restarted
		 * recently).  ffclock_boottime absorbs the jump to ensure
		 * boot time is correct and uptime functions stay consistent.
		 */
		if (((ffclock_status & FFCLOCK_STA_UNSYNC) == FFCLOCK_STA_UNSYNC) &&
		    ((cest->status & FFCLOCK_STA_UNSYNC) == 0) &&
		    ((cest->status & FFCLOCK_STA_WARMUP) == FFCLOCK_STA_WARMUP)) {
			if (forward_jump)
				bintime_add(&ffclock_boottime, &gap_lerp);
			else
				bintime_sub(&ffclock_boottime, &gap_lerp);
			ffth->tick_time_lerp = ffth->tick_time;
			bintime_clear(&gap_lerp);
		}

		ffclock_status = cest->status;
		ffth->period_lerp = cest->period;
		/*
		 * Compute the corrected period used for the linear
		 * interpolation of time.  The rate of linear interpolation is
		 * capped to 5000PPM (0.5%).
		 */
		if (bintime_isset(&gap_lerp)) {
			ffdelta = cest->update_ffcount;
			ffdelta -= fftimehands->cest.update_ffcount;
			ffclock_convert_delta(ffdelta, cest->period, &bt);
			polling = bt.sec;
			bt.sec = 0;
			bt.frac = 5000000 * (uint64_t)18446744073LL;
			bintime_mul(&bt, polling);
			if (bintime_cmp(&gap_lerp, &bt, >))
				gap_lerp = bt;

			/* Approximate 1 sec by 1-(1/2^64) to ease arithmetic */
			frac = 0;
			if (gap_lerp.sec > 0) {
				frac -= 1;
				frac /= ffdelta / gap_lerp.sec;
			}
			frac += gap_lerp.frac / ffdelta;

			if (forward_jump)
				ffth->period_lerp += frac;
			else
				ffth->period_lerp -= frac;
		}

		ffclock_updated = 0;
	}

	if (++ogen == 0)
		ogen = 1;
	ffth->gen = ogen;
	fftimehands = ffth;
}
/*
 * Adjust the fftimehands when the timecounter is changed.  Stating the
 * obvious, the old and new hardware counter cannot be read simultaneously.
 * tc_windup() does read the two counters 'back to back', but a few cycles
 * are effectively lost, and not accumulated in tick_ffcount.  This is a
 * fairly radical operation for a feed-forward synchronization daemon, and
 * it is the daemon's job not to push irrelevant data to the kernel.
 * Because there is no locking here, simply force the pending or next update
 * to be ignored, to give the daemon a chance to realize the counter has
 * changed.
 */
static void
ffclock_change_tc(struct timehands *th)
{
	struct fftimehands *ffth;
	struct ffclock_estimate *cest;
	struct timecounter *tc;
	uint8_t ogen;

	tc = th->th_counter;
	ffth = fftimehands->next;
	ogen = ffth->gen;
	ffth->gen = 0;

	cest = &ffth->cest;
	bcopy(&(fftimehands->cest), cest, sizeof(struct ffclock_estimate));
	cest->period = ((1ULL << 63) / tc->tc_frequency) << 1;
	cest->errb_abs = 0;
	cest->errb_rate = 0;
	cest->status |= FFCLOCK_STA_UNSYNC;

	ffth->tick_ffcount = fftimehands->tick_ffcount;
	ffth->tick_time_lerp = fftimehands->tick_time_lerp;
	ffth->tick_time = fftimehands->tick_time;
	ffth->period_lerp = cest->period;

	/* Do not lock but ignore next update from synchronization daemon. */
	ffclock_updated--;

	if (++ogen == 0)
		ogen = 1;
	ffth->gen = ogen;
	fftimehands = ffth;
}
/*
 * Retrieve feed-forward counter and time of last kernel tick.
 */
void
ffclock_last_tick(ffcounter *ffcount, struct bintime *bt, uint32_t flags)
{
	struct fftimehands *ffth;
	uint8_t gen;

	/*
	 * No locking but check generation has not changed. Also need to make
	 * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
	 */
	do {
		ffth = fftimehands;
		gen = ffth->gen;
		if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP)
			*bt = ffth->tick_time_lerp;
		else
			*bt = ffth->tick_time;
		*ffcount = ffth->tick_ffcount;
	} while (gen == 0 || gen != ffth->gen);
}
/*
 * Absolute clock conversion.  Low level function to convert an ffcounter
 * value to bintime.  The ffcounter is converted using the current ffclock
 * period estimate or the "interpolated period" to ensure monotonicity.
 * NOTE: this conversion may have been deferred, and the clock updated since
 * the hardware counter has been read.
 */
void
ffclock_convert_abs(ffcounter ffcount, struct bintime *bt, uint32_t flags)
{
	struct fftimehands *ffth;
	struct bintime bt2;
	ffcounter ffdelta;
	uint8_t gen;

	/*
	 * No locking but check generation has not changed. Also need to make
	 * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
	 */
	do {
		ffth = fftimehands;
		gen = ffth->gen;
		if (ffcount > ffth->tick_ffcount)
			ffdelta = ffcount - ffth->tick_ffcount;
		else
			ffdelta = ffth->tick_ffcount - ffcount;

		if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP) {
			*bt = ffth->tick_time_lerp;
			ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt2);
		} else {
			*bt = ffth->tick_time;
			ffclock_convert_delta(ffdelta, ffth->cest.period, &bt2);
		}

		if (ffcount > ffth->tick_ffcount)
			bintime_add(bt, &bt2);
		else
			bintime_sub(bt, &bt2);
	} while (gen == 0 || gen != ffth->gen);
}
/*
 * Difference clock conversion.
 * Low level function to convert a time interval measured in RAW counter
 * units into bintime.  The difference clock allows measuring small intervals
 * much more reliably than the absolute clock.
 */
void
ffclock_convert_diff(ffcounter ffdelta, struct bintime *bt)
{
	struct fftimehands *ffth;
	uint8_t gen;

	/* No locking but check generation has not changed. */
	do {
		ffth = fftimehands;
		gen = ffth->gen;
		ffclock_convert_delta(ffdelta, ffth->cest.period, bt);
	} while (gen == 0 || gen != ffth->gen);
}
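/*
 * Editorial usage sketch (not in the original source): timing a short
 * code sequence with the difference clock:
 *
 *	ffcounter c1, c2;
 *	struct bintime bt;
 *
 *	ffclock_read_counter(&c1);
 *	(code under measurement)
 *	ffclock_read_counter(&c2);
 *	ffclock_convert_diff(c2 - c1, &bt);
 */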
/*
 * Access to current ffcounter value.
 */
void
ffclock_read_counter(ffcounter *ffcount)
{
	struct timehands *th;
	struct fftimehands *ffth;
	unsigned int gen, delta;

	/*
	 * ffclock_windup() is called from tc_windup(), so it is safe to rely
	 * on th->th_generation only for a consistent delta and ffcounter.
	 */
	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		ffth = fftimehands;
		delta = tc_delta(th);
		*ffcount = ffth->tick_ffcount;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);

	*ffcount += delta;
}
void
binuptime(struct bintime *bt)
{

	binuptime_fromclock(bt, sysclock_active);
}

void
nanouptime(struct timespec *tsp)
{

	nanouptime_fromclock(tsp, sysclock_active);
}

void
microuptime(struct timeval *tvp)
{

	microuptime_fromclock(tvp, sysclock_active);
}

void
bintime(struct bintime *bt)
{

	bintime_fromclock(bt, sysclock_active);
}

void
nanotime(struct timespec *tsp)
{

	nanotime_fromclock(tsp, sysclock_active);
}

void
microtime(struct timeval *tvp)
{

	microtime_fromclock(tvp, sysclock_active);
}

void
getbinuptime(struct bintime *bt)
{

	getbinuptime_fromclock(bt, sysclock_active);
}

void
getnanouptime(struct timespec *tsp)
{

	getnanouptime_fromclock(tsp, sysclock_active);
}

void
getmicrouptime(struct timeval *tvp)
{

	getmicrouptime_fromclock(tvp, sysclock_active);
}

void
getbintime(struct bintime *bt)
{

	getbintime_fromclock(bt, sysclock_active);
}

void
getnanotime(struct timespec *tsp)
{

	getnanotime_fromclock(tsp, sysclock_active);
}

void
getmicrotime(struct timeval *tvp)
{

	getmicrotime_fromclock(tvp, sysclock_active);
}
#endif /* FFCLOCK */
/*
 * This is a clone of getnanotime and is used for walltimestamps.
 * The dtrace_ prefix prevents fbt from creating probes for
 * it so walltimestamp can be safely used in all fbt probes.
 */
void
dtrace_getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		*tsp = th->th_nanotime;
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);
}
/*
 * System clock currently providing time to the system. Modifiable via sysctl
 * when the FFCLOCK option is defined.
 */
int sysclock_active = SYSCLOCK_FBCK;

/* Internal NTP status and error estimates. */
extern int time_status;
extern long time_esterror;
/*
 * Take a snapshot of sysclock data which can be used to compare system clocks
 * and generate timestamps after the fact.
 */
void
sysclock_getsnapshot(struct sysclock_snap *clock_snap, int fast)
{
	struct fbclock_info *fbi;
	struct timehands *th;
	struct bintime bt;
	unsigned int delta, gen;
#ifdef FFCLOCK
	ffcounter ffcount;
	struct fftimehands *ffth;
	struct ffclock_info *ffi;
	struct ffclock_estimate cest;

	ffi = &clock_snap->ff_info;
#endif

	fbi = &clock_snap->fb_info;
	delta = 0;

	do {
		th = timehands;
		gen = atomic_load_acq_int(&th->th_generation);
		fbi->th_scale = th->th_scale;
		fbi->tick_time = th->th_offset;
#ifdef FFCLOCK
		ffth = fftimehands;
		ffi->tick_time = ffth->tick_time;
		ffi->tick_time_lerp = ffth->tick_time_lerp;
		ffi->period = ffth->cest.period;
		ffi->period_lerp = ffth->period_lerp;
		clock_snap->ffcount = ffth->tick_ffcount;
		cest = ffth->cest;
#endif
		if (!fast)
			delta = tc_delta(th);
		atomic_thread_fence_acq();
	} while (gen == 0 || gen != th->th_generation);

	clock_snap->delta = delta;
	clock_snap->sysclock_active = sysclock_active;
	/* Record feedback clock status and error. */
	clock_snap->fb_info.status = time_status;
	/* XXX: Very crude estimate of feedback clock error. */
	bt.sec = time_esterror / 1000000;
	bt.frac = (time_esterror - bt.sec * 1000000) *
	    (uint64_t)18446744073709ULL;
	clock_snap->fb_info.error = bt;
#ifdef FFCLOCK
	if (!fast)
		clock_snap->ffcount += delta;

	/* Record feed-forward clock leap second adjustment. */
	ffi->leapsec_adjustment = cest.leapsec_total;
	if (clock_snap->ffcount > cest.leapsec_next)
		ffi->leapsec_adjustment -= cest.leapsec;

	/* Record feed-forward clock status and error. */
	clock_snap->ff_info.status = cest.status;
	ffcount = clock_snap->ffcount - cest.update_ffcount;
	ffclock_convert_delta(ffcount, cest.period, &bt);
	/* 18446744 = int(2^64 / 1e12), since err_bound_rate is in [ps/s]. */
	bintime_mul(&bt, cest.errb_rate * (uint64_t)18446744LL);
	/* 18446744073 = int(2^64 / 1e9), since err_abs is in [ns]. */
	bintime_addx(&bt, cest.errb_abs * (uint64_t)18446744073ULL);
	clock_snap->ff_info.error = bt;
#endif
}
/*
 * Convert a sysclock snapshot into a struct bintime based on the specified
 * clock source and flags.
 */
int
sysclock_snap2bintime(struct sysclock_snap *cs, struct bintime *bt,
    int whichclock, uint32_t flags)
{
	struct bintime boottimebin;
#ifdef FFCLOCK
	struct bintime bt2;
	uint64_t period;
#endif

	switch (whichclock) {
	case SYSCLOCK_FBCK:
		*bt = cs->fb_info.tick_time;

		/* If snapshot was created with !fast, delta will be >0. */
		if (cs->delta > 0)
			bintime_addx(bt, cs->fb_info.th_scale * cs->delta);

		if ((flags & FBCLOCK_UPTIME) == 0) {
			getboottimebin(&boottimebin);
			bintime_add(bt, &boottimebin);
		}
		break;
#ifdef FFCLOCK
	case SYSCLOCK_FFWD:
		if (flags & FFCLOCK_LERP) {
			*bt = cs->ff_info.tick_time_lerp;
			period = cs->ff_info.period_lerp;
		} else {
			*bt = cs->ff_info.tick_time;
			period = cs->ff_info.period;
		}

		/* If snapshot was created with !fast, delta will be >0. */
		if (cs->delta > 0) {
			ffclock_convert_delta(cs->delta, period, &bt2);
			bintime_add(bt, &bt2);
		}

		/* Leap second adjustment. */
		if (flags & FFCLOCK_LEAPSEC)
			bt->sec -= cs->ff_info.leapsec_adjustment;

		/* Boot time adjustment, for uptime/monotonic clocks. */
		if (flags & FFCLOCK_UPTIME)
			bintime_sub(bt, &ffclock_boottime);
		break;
#endif
	default:
		return (EINVAL);
	}

	return (0);
}
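/*
 * Editorial usage sketch (not in the original source): a single snapshot
 * can be converted into several timestamps after the fact, e.g. to compare
 * the feedback and feed-forward clocks at the same instant:
 *
 *	struct sysclock_snap snap;
 *	struct bintime bt_fb, bt_ff;
 *
 *	sysclock_getsnapshot(&snap, 0);
 *	sysclock_snap2bintime(&snap, &bt_fb, SYSCLOCK_FBCK, 0);
 *	sysclock_snap2bintime(&snap, &bt_ff, SYSCLOCK_FFWD, FFCLOCK_LERP);
 */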
/*
 * Initialize a new timecounter and possibly use it.
 */
void
tc_init(struct timecounter *tc)
{
	u_int u;
	struct sysctl_oid *tc_root;

	u = tc->tc_frequency / tc->tc_counter_mask;
	/* XXX: We need some margin here, 10% is a guess */
	u *= 11;
	u /= 10;
	if (u > hz && tc->tc_quality >= 0) {
		tc->tc_quality = -2000;
		if (bootverbose) {
			printf("Timecounter \"%s\" frequency %ju Hz",
			    tc->tc_name, (uintmax_t)tc->tc_frequency);
			printf(" -- Insufficient hz, needs at least %u\n", u);
		}
	} else if (tc->tc_quality >= 0 || bootverbose) {
		printf("Timecounter \"%s\" frequency %ju Hz quality %d\n",
		    tc->tc_name, (uintmax_t)tc->tc_frequency,
		    tc->tc_quality);
	}

	tc->tc_next = timecounters;
	timecounters = tc;
	/*
	 * Set up sysctl tree for this counter.
	 */
	tc_root = SYSCTL_ADD_NODE_WITH_LABEL(NULL,
	    SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name,
	    CTLFLAG_RW, 0, "timecounter description", "timecounter");
	SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
	    "mask for implemented bits");
	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc),
	    sysctl_kern_timecounter_get, "IU", "current timecounter value");
	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "frequency", CTLTYPE_U64 | CTLFLAG_RD, tc, sizeof(*tc),
	    sysctl_kern_timecounter_freq, "QU", "timecounter frequency");
	SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "quality", CTLFLAG_RD, &(tc->tc_quality), 0,
	    "goodness of time counter");
	/*
	 * Do not automatically switch if the current tc was specifically
	 * chosen.  Never automatically use a timecounter with negative quality.
	 * Even though we run on the dummy counter, switching here may be
	 * worse since this timecounter may not be monotonic.
	 */
	if (tc_chosen)
		return;
	if (tc->tc_quality < 0)
		return;
	if (tc->tc_quality < timecounter->tc_quality)
		return;
	if (tc->tc_quality == timecounter->tc_quality &&
	    tc->tc_frequency < timecounter->tc_frequency)
		return;
	(void)tc->tc_get_timecount(tc);
	(void)tc->tc_get_timecount(tc);
	timecounter = tc;
}
/* Report the frequency of the current timecounter. */
uint64_t
tc_getfrequency(void)
{

	return (timehands->th_counter->tc_frequency);
}
static bool
sleeping_on_old_rtc(struct thread *td)
{

	/*
	 * td_rtcgen is modified by curthread when it is running,
	 * and by other threads in this function.  By finding the thread
	 * on a sleepqueue and holding the lock on the sleepqueue
	 * chain, we guarantee that the thread is not running and that
	 * modifying td_rtcgen is safe.  Setting td_rtcgen to zero informs
	 * the thread that it was woken due to a real-time clock adjustment.
	 * (The declaration of td_rtcgen refers to this comment.)
	 */
	if (td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation) {
		td->td_rtcgen = 0;
		return (true);
	}

	return (false);
}
static struct mtx tc_setclock_mtx;
MTX_SYSINIT(tc_setclock_init, &tc_setclock_mtx, "tcsetc", MTX_SPIN);

/*
 * Step our concept of UTC.  This is done by modifying our estimate of
 * when we booted.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec tbef, taft;
	struct bintime bt, bt2;

	timespec2bintime(ts, &bt);
	nanotime(&tbef);
	mtx_lock_spin(&tc_setclock_mtx);
	cpu_tick_calibrate(1);
	binuptime(&bt2);
	bintime_sub(&bt, &bt2);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup(&bt);
	mtx_unlock_spin(&tc_setclock_mtx);

	/* Avoid rtc_generation == 0, since td_rtcgen == 0 is special. */
	atomic_add_rel_int(&rtc_generation, 2);
	sleepq_chains_remove_matching(sleeping_on_old_rtc);
	if (timestepwarnings) {
		nanotime(&taft);
		log(LOG_INFO,
		    "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
		    (intmax_t)tbef.tv_sec, tbef.tv_nsec,
		    (intmax_t)taft.tv_sec, taft.tv_nsec,
		    (intmax_t)ts->tv_sec, ts->tv_nsec);
	}
}
/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
static void
tc_windup(struct bintime *new_boottimebin)
{
	struct bintime bt;
	struct timehands *th, *tho;
	uint64_t scale;
	u_int delta, ncount, ogen;
	int i;
	time_t t;

	/*
	 * Make the next timehands a copy of the current one, but do
	 * not overwrite the generation or next pointer.  While we
	 * update the contents, the generation must be zero.  We need
	 * to ensure that the zero generation is visible before the
	 * data updates become visible, which requires release fence.
	 * For similar reasons, re-reading of the generation after the
	 * data is read should use acquire fence.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	atomic_thread_fence_rel();
	memcpy(th, tho, offsetof(struct timehands, th_generation));
	if (new_boottimebin != NULL)
		th->th_boottime = *new_boottimebin;

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != timecounter)
		ncount = timecounter->tc_get_timecount(timecounter);
	else
		ncount = 0;
#ifdef FFCLOCK
	ffclock_windup(delta);
#endif
	th->th_offset_count += delta;
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	while (delta > th->th_counter->tc_frequency) {
		/* Eat complete unadjusted seconds. */
		delta -= th->th_counter->tc_frequency;
		th->th_offset.sec++;
	}
	if ((delta > th->th_counter->tc_frequency / 2) &&
	    (th->th_scale * delta < ((uint64_t)1 << 63))) {
		/* The product th_scale * delta just barely overflows. */
		th->th_offset.sec++;
	}
	bintime_addx(&th->th_offset, th->th_scale * delta);
	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them.  There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which might
	 * have a different rate than the current NTP second.  It doesn't
	 * matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);

	/*
	 * Deal with NTP second processing.  The for loop normally
	 * iterates at most once, but in extreme situations it might
	 * keep NTP sane if timeouts are not run for several seconds.
	 * At boot, the time step can be large when the TOD hardware
	 * has been read, so on really large steps, we call
	 * ntp_update_second only twice.  We need to call it twice in
	 * case we missed a leap second.
	 */
	bt = th->th_offset;
	bintime_add(&bt, &th->th_boottime);
	i = bt.sec - tho->th_microtime.tv_sec;
	if (i > LARGE_STEP)
		i = 2;
	for (; i > 0; i--) {
		t = bt.sec;
		ntp_update_second(&th->th_adjustment, &bt.sec);
		if (bt.sec != t)
			th->th_boottime.sec += bt.sec - t;
	}
	/* Update the UTC timestamps used by the get*() functions. */
	th->th_bintime = bt;
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != timecounter) {
#ifndef __arm__
		if ((timecounter->tc_flags & TC_FLAGS_C2STOP) != 0)
			cpu_disable_c2_sleep++;
		if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0)
			cpu_disable_c2_sleep--;
#endif
		th->th_counter = timecounter;
		th->th_offset_count = ncount;
		tc_min_ticktock_freq = max(1, timecounter->tc_frequency /
		    (((uint64_t)timecounter->tc_counter_mask + 1) / 3));
#ifdef FFCLOCK
		ffclock_change_tc(th);
#endif
	}

	/*
	 * Recalculate the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, which
	 * leaves no suitably precise fractions for multiply before divide.
	 *
	 * Dividing before multiplying with a fraction of 2199/512 results
	 * in a systematic undercompensation of 10PPM of th_adjustment.  On
	 * a 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 */
	scale = (uint64_t)1 << 63;
	scale += (th->th_adjustment / 1024) * 2199;
	scale /= th->th_counter->tc_frequency;
	th->th_scale = scale * 2;
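	/*
	 * Editorial check of the comment above (not in the original source):
	 * the adjustment is scaled by 2199/1024 here and the whole result
	 * doubled on the line above, an effective factor of
	 * 2199/512 = 4.294921875 versus the exact 2^32/10^9 = 4.294967296.
	 * The shortfall is about 10.6PPM of th_adjustment, matching the
	 * ~10PPM undercompensation cited.
	 */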
	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	atomic_store_rel_int(&th->th_generation, ogen);

	/* Go live with the new struct timehands. */
#ifdef FFCLOCK
	switch (sysclock_active) {
	case SYSCLOCK_FBCK:
#endif
		time_second = th->th_microtime.tv_sec;
		time_uptime = th->th_offset.sec;
#ifdef FFCLOCK
		break;
	case SYSCLOCK_FFWD:
		time_second = fftimehands->tick_time_lerp.sec;
		time_uptime = fftimehands->tick_time_lerp.sec -
		    ffclock_boottime.sec;
		break;
	}
#endif

	timehands = th;
	timekeep_push_vdso();
}
/* Report or change the active timecounter hardware. */
static int
sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
{
	char newname[32];
	struct timecounter *newtc, *tc;
	int error;

	tc = timecounter;
	strlcpy(newname, tc->tc_name, sizeof(newname));

	error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/* Record that the tc in use now was specifically chosen. */
	tc_chosen = 1;
	if (strcmp(newname, tc->tc_name) == 0)
		return (0);
	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;

		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);

		timecounter = newtc;

		/*
		 * The vdso timehands update is deferred until the next
		 * 'tc_windup()'.
		 *
		 * This is prudent given that 'timekeep_push_vdso()' does not
		 * use any locking and that it can be called in hard interrupt
		 * context via 'tc_windup()'.
		 */
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_kern_timecounter_hardware, "A",
    "Timecounter hardware selected");
/* Report the available timecounter hardware. */
static int
sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sb;
	struct timecounter *tc;
	int error;

	sbuf_new_for_sysctl(&sb, NULL, 0, req);
	for (tc = timecounters; tc != NULL; tc = tc->tc_next) {
		if (tc != timecounters)
			sbuf_putc(&sb, ' ');
		sbuf_printf(&sb, "%s(%d)", tc->tc_name, tc->tc_quality);
	}
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, sysctl_kern_timecounter_choice, "A", "Timecounter hardware detected");
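/*
 * Editorial usage note (not in the original source): from userland these
 * two sysctls are typically exercised as
 *
 *	sysctl kern.timecounter.choice
 *	sysctl kern.timecounter.hardware=HPET
 *
 * where "HPET" stands for whatever counter name "choice" reported.
 */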
/*
 * RFC 2783 PPS-API implementation.
 */

/*
 * Return true if the driver is aware of the abi version extensions in the
 * pps_state structure, and it supports at least the given abi version number.
 */
static int
abi_aware(struct pps_state *pps, int vers)
{

	return ((pps->kcmode & KCMODE_ABIFLAG) && pps->driver_abi >= vers);
}

static int
pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
{
	int err, timo;
	pps_seq_t aseq, cseq;
	struct timeval tv;

	if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
		return (EINVAL);

	/*
	 * If no timeout is requested, immediately return whatever values were
	 * most recently captured.  If timeout seconds is -1, that's a request
	 * to block without a timeout.  WITNESS won't let us sleep forever
	 * without a lock (we really don't need a lock), so just repeatedly
	 * sleep a long time.
	 */
	if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec) {
		if (fapi->timeout.tv_sec == -1)
			timo = 0x7fffffff;
		else {
			tv.tv_sec = fapi->timeout.tv_sec;
			tv.tv_usec = fapi->timeout.tv_nsec / 1000;
			timo = tvtohz(&tv);
		}
		aseq = atomic_load_int(&pps->ppsinfo.assert_sequence);
		cseq = atomic_load_int(&pps->ppsinfo.clear_sequence);
		while (aseq == atomic_load_int(&pps->ppsinfo.assert_sequence) &&
		    cseq == atomic_load_int(&pps->ppsinfo.clear_sequence)) {
			if (abi_aware(pps, 1) && pps->driver_mtx != NULL) {
				if (pps->flags & PPSFLAG_MTX_SPIN) {
					err = msleep_spin(pps, pps->driver_mtx,
					    "ppsfch", timo);
				} else {
					err = msleep(pps, pps->driver_mtx, PCATCH,
					    "ppsfch", timo);
				}
			} else {
				err = tsleep(pps, PCATCH, "ppsfch", timo);
			}
			if (err == EWOULDBLOCK) {
				if (fapi->timeout.tv_sec == -1) {
					continue;
				} else {
					return (ETIMEDOUT);
				}
			} else if (err != 0) {
				return (err);
			}
		}
	}

	pps->ppsinfo.current_mode = pps->ppsparam.mode;
	fapi->pps_info_buf = pps->ppsinfo;

	return (0);
}
int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef FFCLOCK
	struct pps_fetch_ffc_args *fapi_ffc;
#endif
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	KASSERT(pps != NULL, ("NULL pps pointer in pps_ioctl"));
	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
#ifdef FFCLOCK
		/* Ensure only a single clock is selected for ffc timestamp. */
		if ((app->mode & PPS_TSCLK_MASK) == PPS_TSCLK_MASK)
			return (EINVAL);
#endif
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		return (pps_fetch(fapi, pps));
#ifdef FFCLOCK
	case PPS_IOC_FETCH_FFCOUNTER:
		fapi_ffc = (struct pps_fetch_ffc_args *)data;
		if (fapi_ffc->tsformat && fapi_ffc->tsformat !=
		    PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi_ffc->timeout.tv_sec || fapi_ffc->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo_ffc.current_mode = pps->ppsparam.mode;
		fapi_ffc->pps_info_buf_ffc = pps->ppsinfo_ffc;
		/* Overwrite timestamps if feedback clock selected. */
		switch (pps->ppsparam.mode & PPS_TSCLK_MASK) {
		case PPS_TSCLK_FBCK:
			fapi_ffc->pps_info_buf_ffc.assert_timestamp =
			    pps->ppsinfo.assert_timestamp;
			fapi_ffc->pps_info_buf_ffc.clear_timestamp =
			    pps->ppsinfo.clear_timestamp;
			break;
		case PPS_TSCLK_FFWD:
			break;
		default:
			break;
		}
		return (0);
#endif /* FFCLOCK */
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = (kapi->edge & KCMODE_EDGEMASK) |
		    (pps->kcmode & KCMODE_ABIFLAG);
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}
void
pps_init(struct pps_state *pps)
{

	pps->ppscap |= PPS_TSFMT_TSPEC | PPS_CANWAIT;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
#ifdef FFCLOCK
	pps->ppscap |= PPS_TSCLK_MASK;
#endif
	pps->kcmode &= ~KCMODE_ABIFLAG;
}

void
pps_init_abi(struct pps_state *pps)
{

	pps_init(pps);
	if (pps->driver_abi > 0) {
		pps->kcmode |= KCMODE_ABIFLAG;
		pps->kernel_abi = PPS_ABI_VERSION;
	}
}
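/*
 * Editorial usage sketch (not in the original source): a driver that is
 * aware of the extended ABI typically initializes its pps_state along the
 * lines of (sc being a hypothetical softc, sc_mtx its mutex):
 *
 *	sc->pps.ppscap = PPS_CAPTUREASSERT;
 *	sc->pps.driver_abi = PPS_ABI_VERSION;
 *	sc->pps.driver_mtx = &sc->sc_mtx;
 *	pps_init_abi(&sc->pps);
 */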
void
pps_capture(struct pps_state *pps)
{
	struct timehands *th;

	KASSERT(pps != NULL, ("NULL pps pointer in pps_capture"));
	th = timehands;
	pps->capgen = atomic_load_acq_int(&th->th_generation);
	pps->capth = th;
#ifdef FFCLOCK
	pps->capffth = fftimehands;
#endif
	pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
	atomic_thread_fence_acq();
	if (pps->capgen != th->th_generation)
		pps->capgen = 0;
}
void
pps_event(struct pps_state *pps, int event)
{
	struct bintime bt;
	struct timespec ts, *tsp, *osp;
	u_int tcount, *pcount;
	int foff, fhard;
	pps_seq_t *pseq;
#ifdef FFCLOCK
	struct timespec *tsp_ffc;
	pps_seq_t *pseq_ffc;
	ffcounter *ffcount;
#endif

	KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
	/* Nothing to do if not currently set to capture this event type. */
	if ((event & pps->ppsparam.mode) == 0)
		return;
	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen == 0 || pps->capgen !=
	    atomic_load_acq_int(&pps->capth->th_generation))
		return;

	/* Things would be easier with arrays. */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
#ifdef FFCLOCK
		ffcount = &pps->ppsinfo_ffc.assert_ffcount;
		tsp_ffc = &pps->ppsinfo_ffc.assert_timestamp;
		pseq_ffc = &pps->ppsinfo_ffc.assert_sequence;
#endif
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
#ifdef FFCLOCK
		ffcount = &pps->ppsinfo_ffc.clear_ffcount;
		tsp_ffc = &pps->ppsinfo_ffc.clear_timestamp;
		pseq_ffc = &pps->ppsinfo_ffc.clear_sequence;
#endif
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS-stuff until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		*pcount = pps->capcount;
		pps->ppscount[2] = pps->capcount;
		return;
	}

	/* Convert the count to a timespec. */
	tcount = pps->capcount - pps->capth->th_offset_count;
	tcount &= pps->capth->th_counter->tc_counter_mask;
	bt = pps->capth->th_bintime;
	bintime_addx(&bt, pps->capth->th_scale * tcount);
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	atomic_thread_fence_acq();
	if (pps->capgen != pps->capth->th_generation)
		return;

	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp, tsp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}

#ifdef FFCLOCK
	*ffcount = pps->capffth->tick_ffcount + tcount;
	bt = pps->capffth->tick_time;
	ffclock_convert_delta(tcount, pps->capffth->cest.period, &bt);
	bintime_add(&bt, &pps->capffth->tick_time);
	bintime2timespec(&bt, &ts);
	(*pseq_ffc)++;
	*tsp_ffc = ts;
#endif

#ifdef PPS_SYNC
	if (fhard) {
		uint64_t scale;

		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many (hardware) nanoseconds
		 * elapsed since the previous event.
		 */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;
		scale = (uint64_t)1 << 63;
		scale /= pps->capth->th_counter->tc_frequency;
		scale *= 2;
		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, scale * tcount);
		bintime2timespec(&bt, &ts);
		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
	}
#endif

	/* Wakeup anyone sleeping in pps_fetch(). */
	wakeup(pps);
}
/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing.  Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */
static int tc_tick;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0,
    "Approximate number of hardclock ticks in a millisecond");
void
tc_ticktock(int cnt)
{
	static int count;

	if (mtx_trylock_spin(&tc_setclock_mtx)) {
		count += cnt;
		if (count >= tc_tick) {
			count = 0;
			tc_windup(NULL);
		}
		mtx_unlock_spin(&tc_setclock_mtx);
	}
}
static void __inline
tc_adjprecision(void)
{
	int t;

	if (tc_timepercentage > 0) {
		t = (99 + tc_timepercentage) / tc_timepercentage;
		tc_precexp = fls(t + (t >> 1)) - 1;
		FREQ2BT(hz / tc_tick, &bt_timethreshold);
		FREQ2BT(hz, &bt_tickthreshold);
		bintime_shift(&bt_timethreshold, tc_precexp);
		bintime_shift(&bt_tickthreshold, tc_precexp);
	} else {
		tc_precexp = 31;
		bt_timethreshold.sec = INT_MAX;
		bt_timethreshold.frac = ~(uint64_t)0;
		bt_tickthreshold = bt_timethreshold;
	}
	sbt_timethreshold = bttosbt(bt_timethreshold);
	sbt_tickthreshold = bttosbt(bt_tickthreshold);
}
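/*
 * Editorial example (not in the original source), assuming the default
 * TC_DEFAULTPERC of 5: t = (99 + 5) / 5 = 20 and
 * tc_precexp = fls(20 + 10) - 1 = 4, so both thresholds are shifted left
 * by four bits, i.e. roughly 16 tick periods of allowed deviation before
 * the cheaper get*() time path is considered precise enough.
 */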
static int
sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = tc_timepercentage;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	tc_timepercentage = val;
	tc_adjprecision();

	return (0);
}
/* Set up the requested number of timehands. */
static void
inittimehands(void *dummy)
{
	struct timehands *thp;
	int i;

	TUNABLE_INT_FETCH("kern.timecounter.timehands_count",
	    &timehands_count);
	if (timehands_count < 1)
		timehands_count = 1;
	if (timehands_count > nitems(ths))
		timehands_count = nitems(ths);
	for (i = 1, thp = &ths[0]; i < timehands_count; thp = &ths[i++])
		thp->th_next = &ths[i];
	thp->th_next = &ths[0];
}
SYSINIT(timehands, SI_SUB_TUNABLES, SI_ORDER_ANY, inittimehands, NULL);
static void
inittimecounter(void *dummy)
{
	u_int p;
	int tick_rate;

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its initial value, since that value is the
	 * smallest reasonable one.  If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
	tc_adjprecision();
	FREQ2BT(hz, &tick_bt);
	tick_sbt = bttosbt(tick_bt);
	tick_rate = hz / tc_tick;
	FREQ2BT(tick_rate, &tc_tick_bt);
	tc_tick_sbt = bttosbt(tc_tick_bt);
	p = (tc_tick * 1000000) / hz;
	printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);

#ifdef FFCLOCK
	ffclock_init();
#endif

	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
	mtx_lock_spin(&tc_setclock_mtx);
	tc_windup(NULL);
	mtx_unlock_spin(&tc_setclock_mtx);
}

SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
/* Cpu tick handling -------------------------------------------------*/

static int cpu_tick_variable;
static uint64_t	cpu_tick_frequency;

DPCPU_DEFINE_STATIC(uint64_t, tc_cpu_ticks_base);
DPCPU_DEFINE_STATIC(unsigned, tc_cpu_ticks_last);

static uint64_t
tc_cpu_ticks(void)
{
	struct timecounter *tc;
	uint64_t res, *base;
	unsigned u, *last;

	critical_enter();
	base = DPCPU_PTR(tc_cpu_ticks_base);
	last = DPCPU_PTR(tc_cpu_ticks_last);
	tc = timehands->th_counter;
	u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
	if (u < *last)
		*base += (uint64_t)tc->tc_counter_mask + 1;
	*last = u;
	res = *base + u;
	critical_exit();
	return (res);
}
void
cpu_tick_calibration(void)
{
	static time_t last_calib;

	if (time_uptime != last_calib && !(time_uptime & 0xf)) {
		cpu_tick_calibrate(0);
		last_calib = time_uptime;
	}
}

/*
 * This function gets called every 16 seconds on only one designated
 * CPU in the system from hardclock() via cpu_tick_calibration().
 *
 * Whenever the real time clock is stepped we get called with reset=1
 * to make sure we handle suspend/resume and similar events correctly.
 */
static void
cpu_tick_calibrate(int reset)
{
	static uint64_t c_last;
	uint64_t c_this, c_delta;
	static struct bintime t_last;
	struct bintime t_this, t_delta;
	uint32_t divi;

	if (reset) {
		/* The clock was stepped, abort & reset */
		t_last.sec = 0;
		return;
	}

	/* we don't calibrate fixed rate cputicks */
	if (!cpu_tick_variable)
		return;

	getbinuptime(&t_this);
	c_this = cpu_ticks();
	if (t_last.sec != 0) {
		c_delta = c_this - c_last;
		t_delta = t_this;
		bintime_sub(&t_delta, &t_last);
		/*
		 * Headroom:
		 *	2^(64-20) / 16[s] =
		 *	2^(44) / 16[s] =
		 *	17.592.186.044.416 / 16 =
		 *	1.099.511.627.776 [Hz]
		 */
		divi = t_delta.sec << 20;
		divi |= t_delta.frac >> (64 - 20);
		c_delta <<= 20;
		c_delta /= divi;
		if (c_delta > cpu_tick_frequency) {
			if (0 && bootverbose)
				printf("cpu_tick increased to %ju Hz\n",
				    (uintmax_t)c_delta);
			cpu_tick_frequency = c_delta;
		}
	}
	c_last = c_this;
	t_last = t_this;
}
void
set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var)
{

	if (func == NULL) {
		cpu_ticks = tc_cpu_ticks;
	} else {
		cpu_tick_frequency = freq;
		cpu_tick_variable = var;
		cpu_ticks = func;
	}
}

uint64_t
cpu_tickrate(void)
{

	if (cpu_ticks == tc_cpu_ticks)
		return (tc_getfrequency());
	return (cpu_tick_frequency);
}
/*
 * We need to be slightly careful converting cputicks to microseconds.
 * There is plenty of margin in 64 bits of microseconds (half a million
 * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
 * before divide conversion (to retain precision) we find that the
 * margin shrinks to 1.5 hours (one millionth of 146y).
 * With a three prong approach we never lose significant bits, no
 * matter what the cputick rate and length of timeinterval is.
 */
uint64_t
cputick2usec(uint64_t tick)
{

	if (tick > 18446744073709551LL)		/* floor(2^64 / 1000) */
		return (tick / (cpu_tickrate() / 1000000LL));
	else if (tick > 18446744073709LL)	/* floor(2^64 / 1000000) */
		return ((tick * 1000LL) / (cpu_tickrate() / 1000LL));
	else
		return ((tick * 1000000LL) / cpu_tickrate());
}
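/*
 * Editorial example (not in the original source): the thresholds pick the
 * largest scaling that cannot overflow 64 bits.  At a 3 GHz cputick rate
 * the first, least precise branch is only taken for intervals longer than
 * about 71 days (18446744073709551 / 3e9 seconds); shorter intervals keep
 * three or six more decimal digits of precision via the second and third
 * branches.
 */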
cpu_tick_f	*cpu_ticks = tc_cpu_ticks;
static int vdso_th_enable = 1;
static int
sysctl_fast_gettime(SYSCTL_HANDLER_ARGS)
{
	int old_vdso_th_enable, error;

	old_vdso_th_enable = vdso_th_enable;
	error = sysctl_handle_int(oidp, &old_vdso_th_enable, 0, req);
	if (error != 0)
		return (error);
	vdso_th_enable = old_vdso_th_enable;
	return (0);
}
SYSCTL_PROC(_kern_timecounter, OID_AUTO, fast_gettime,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_fast_gettime, "I", "Enable fast time of day");

uint32_t
tc_fill_vdso_timehands(struct vdso_timehands *vdso_th)
{
	struct timehands *th;
	uint32_t enabled;

	th = timehands;
	vdso_th->th_scale = th->th_scale;
	vdso_th->th_offset_count = th->th_offset_count;
	vdso_th->th_counter_mask = th->th_counter->tc_counter_mask;
	vdso_th->th_offset = th->th_offset;
	vdso_th->th_boottime = th->th_boottime;
	if (th->th_counter->tc_fill_vdso_timehands != NULL) {
		enabled = th->th_counter->tc_fill_vdso_timehands(vdso_th,
		    th->th_counter);
	} else
		enabled = 0;
	if (!vdso_th_enable)
		enabled = 0;
	return (enabled);
}

#ifdef COMPAT_FREEBSD32
uint32_t
tc_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
{
	struct timehands *th;
	uint32_t enabled;

	th = timehands;
	*(uint64_t *)&vdso_th32->th_scale[0] = th->th_scale;
	vdso_th32->th_offset_count = th->th_offset_count;
	vdso_th32->th_counter_mask = th->th_counter->tc_counter_mask;
	vdso_th32->th_offset.sec = th->th_offset.sec;
	*(uint64_t *)&vdso_th32->th_offset.frac[0] = th->th_offset.frac;
	vdso_th32->th_boottime.sec = th->th_boottime.sec;
	*(uint64_t *)&vdso_th32->th_boottime.frac[0] = th->th_boottime.frac;
	if (th->th_counter->tc_fill_vdso_timehands32 != NULL) {
		enabled = th->th_counter->tc_fill_vdso_timehands32(vdso_th32,
		    th->th_counter);
	} else
		enabled = 0;
	if (!vdso_th_enable)
		enabled = 0;
	return (enabled);
}
#endif