2 * ----------------------------------------------------------------------------
3 * "THE BEER-WARE LICENSE" (Revision 42):
4 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
5 * can do whatever you want with this stuff. If we meet some day, and you think
6 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
7 * ----------------------------------------------------------------------------
9 * Copyright (c) 2011 The FreeBSD Foundation
10 * All rights reserved.
12 * Portions of this software were developed by Julien Ridoux at the University
13 * of Melbourne under sponsorship from the FreeBSD Foundation.
16 #include <sys/cdefs.h>
17 __FBSDID("$FreeBSD$");
19 #include "opt_compat.h"
21 #include "opt_ffclock.h"
23 #include <sys/param.h>
24 #include <sys/kernel.h>
25 #include <sys/limits.h>
27 #include <sys/mutex.h>
28 #include <sys/sysctl.h>
29 #include <sys/syslog.h>
30 #include <sys/systm.h>
31 #include <sys/timeffc.h>
32 #include <sys/timepps.h>
33 #include <sys/timetc.h>
34 #include <sys/timex.h>
38 * A large step happens on boot. This constant detects such steps.
39 * It is relatively small so that ntp_update_second gets called enough
40 * in the typical 'missed a couple of seconds' case, but doesn't loop
41 * forever when the time step is large.
43 #define LARGE_STEP 200
46 * Implement a dummy timecounter which we can use until we get a real one
47 * in the air. This allows the console and other early stuff to use
52 dummy_get_timecount(struct timecounter *tc)
59 static struct timecounter dummy_timecounter = {
60 dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
64 /* These fields must be initialized by the driver. */
65 struct timecounter *th_counter;
66 int64_t th_adjustment;
68 u_int th_offset_count;
69 struct bintime th_offset;
70 struct timeval th_microtime;
71 struct timespec th_nanotime;
72 /* Fields not to be copied in tc_windup start with th_generation. */
73 volatile u_int th_generation;
74 struct timehands *th_next;
77 static struct timehands th0;
78 static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0};
79 static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9};
80 static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8};
81 static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7};
82 static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6};
83 static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5};
84 static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4};
85 static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3};
86 static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2};
87 static struct timehands th0 = {
90 (uint64_t)-1 / 1000000,
99 static struct timehands *volatile timehands = &th0;
100 struct timecounter *timecounter = &dummy_timecounter;
101 static struct timecounter *timecounters = &dummy_timecounter;
103 int tc_min_ticktock_freq = 1;
105 volatile time_t time_second = 1;
106 volatile time_t time_uptime = 1;
108 struct bintime boottimebin;
109 struct timeval boottime;
110 static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
111 SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
112 NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");
114 SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
115 static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, "");
117 static int timestepwarnings;
118 SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
119 &timestepwarnings, 0, "Log time steps");
121 struct bintime bt_timethreshold;
122 struct bintime bt_tickthreshold;
123 sbintime_t sbt_timethreshold;
124 sbintime_t sbt_tickthreshold;
125 struct bintime tc_tick_bt;
126 sbintime_t tc_tick_sbt;
128 int tc_timepercentage = TC_DEFAULTPERC;
129 TUNABLE_INT("kern.timecounter.alloweddeviation", &tc_timepercentage);
130 static int sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS);
131 SYSCTL_PROC(_kern_timecounter, OID_AUTO, alloweddeviation,
132 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
133 sysctl_kern_timecounter_adjprecision, "I",
134 "Allowed time interval deviation in percents");
136 static int tc_chosen; /* Non-zero if a specific tc was chosen via sysctl. */
138 static void tc_windup(void);
139 static void cpu_tick_calibrate(int);
141 void dtrace_getnanotime(struct timespec *tsp);
144 sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
150 if (req->flags & SCTL_MASK32) {
151 tv[0] = boottime.tv_sec;
152 tv[1] = boottime.tv_usec;
153 return SYSCTL_OUT(req, tv, sizeof(tv));
157 return SYSCTL_OUT(req, &boottime, sizeof(boottime));
161 sysctl_kern_timecounter_get(SYSCTL_HANDLER_ARGS)
164 struct timecounter *tc = arg1;
166 ncount = tc->tc_get_timecount(tc);
167 return sysctl_handle_int(oidp, &ncount, 0, req);
171 sysctl_kern_timecounter_freq(SYSCTL_HANDLER_ARGS)
174 struct timecounter *tc = arg1;
176 freq = tc->tc_frequency;
177 return sysctl_handle_64(oidp, &freq, 0, req);
181 * Return the difference between the timehands' counter value now and what
182 * was when we copied it to the timehands' offset_count.
184 static __inline u_int
185 tc_delta(struct timehands *th)
187 struct timecounter *tc;
190 return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
191 tc->tc_counter_mask);
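/*
 * Worked example (editorial addition): with a 24-bit counter
 * (tc_counter_mask == 0xffffff), a read of 0x000010 taken after an
 * offset_count of 0xfffff0 gives (0x000010 - 0xfffff0) & 0xffffff == 0x20,
 * i.e. the unsigned subtraction plus the mask absorbs the roll-over, as long
 * as tc_windup() runs again before the counter wraps a full period.
 */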
195 * Functions for reading the time. We have to loop until we are sure that
196 * the timehands that we operated on was not updated under our feet. See
197 * the comment in <sys/time.h> for a description of these 12 functions.
202 fbclock_binuptime(struct bintime *bt)
204 struct timehands *th;
209 gen = th->th_generation;
211 bintime_addx(bt, th->th_scale * tc_delta(th));
212 } while (gen == 0 || gen != th->th_generation);
216 fbclock_nanouptime(struct timespec *tsp)
220 fbclock_binuptime(&bt);
221 bintime2timespec(&bt, tsp);
225 fbclock_microuptime(struct timeval *tvp)
229 fbclock_binuptime(&bt);
230 bintime2timeval(&bt, tvp);
234 fbclock_bintime(struct bintime *bt)
237 fbclock_binuptime(bt);
238 bintime_add(bt, &boottimebin);
242 fbclock_nanotime(struct timespec *tsp)
246 fbclock_bintime(&bt);
247 bintime2timespec(&bt, tsp);
251 fbclock_microtime(struct timeval *tvp)
255 fbclock_bintime(&bt);
256 bintime2timeval(&bt, tvp);
260 fbclock_getbinuptime(struct bintime *bt)
262 struct timehands *th;
267 gen = th->th_generation;
269 } while (gen == 0 || gen != th->th_generation);
273 fbclock_getnanouptime(struct timespec *tsp)
275 struct timehands *th;
280 gen = th->th_generation;
281 bintime2timespec(&th->th_offset, tsp);
282 } while (gen == 0 || gen != th->th_generation);
286 fbclock_getmicrouptime(struct timeval *tvp)
288 struct timehands *th;
293 gen = th->th_generation;
294 bintime2timeval(&th->th_offset, tvp);
295 } while (gen == 0 || gen != th->th_generation);
299 fbclock_getbintime(struct bintime *bt)
301 struct timehands *th;
306 gen = th->th_generation;
308 } while (gen == 0 || gen != th->th_generation);
309 bintime_add(bt, &boottimebin);
313 fbclock_getnanotime(struct timespec *tsp)
315 struct timehands *th;
320 gen = th->th_generation;
321 *tsp = th->th_nanotime;
322 } while (gen == 0 || gen != th->th_generation);
326 fbclock_getmicrotime(struct timeval *tvp)
328 struct timehands *th;
333 gen = th->th_generation;
334 *tvp = th->th_microtime;
335 } while (gen == 0 || gen != th->th_generation);
339 binuptime(struct bintime *bt)
341 struct timehands *th;
346 gen = th->th_generation;
348 bintime_addx(bt, th->th_scale * tc_delta(th));
349 } while (gen == 0 || gen != th->th_generation);
353 nanouptime(struct timespec *tsp)
358 bintime2timespec(&bt, tsp);
362 microuptime(struct timeval *tvp)
367 bintime2timeval(&bt, tvp);
371 bintime(struct bintime *bt)
375 bintime_add(bt, &boottimebin);
379 nanotime(struct timespec *tsp)
384 bintime2timespec(&bt, tsp);
388 microtime(struct timeval *tvp)
393 bintime2timeval(&bt, tvp);
397 getbinuptime(struct bintime *bt)
399 struct timehands *th;
404 gen = th->th_generation;
406 } while (gen == 0 || gen != th->th_generation);
410 getnanouptime(struct timespec *tsp)
412 struct timehands *th;
417 gen = th->th_generation;
418 bintime2timespec(&th->th_offset, tsp);
419 } while (gen == 0 || gen != th->th_generation);
423 getmicrouptime(struct timeval *tvp)
425 struct timehands *th;
430 gen = th->th_generation;
431 bintime2timeval(&th->th_offset, tvp);
432 } while (gen == 0 || gen != th->th_generation);
436 getbintime(struct bintime *bt)
438 struct timehands *th;
443 gen = th->th_generation;
445 } while (gen == 0 || gen != th->th_generation);
446 bintime_add(bt, &boottimebin);
450 getnanotime(struct timespec *tsp)
452 struct timehands *th;
457 gen = th->th_generation;
458 *tsp = th->th_nanotime;
459 } while (gen == 0 || gen != th->th_generation);
463 getmicrotime(struct timeval *tvp)
465 struct timehands *th;
470 gen = th->th_generation;
471 *tvp = th->th_microtime;
472 } while (gen == 0 || gen != th->th_generation);
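/*
 * Usage sketch (illustrative only, not part of this file; the helper name is
 * hypothetical): callers that can live with tick-level resolution should
 * prefer the cached get*() variants, which never touch the timecounter
 * hardware, while the plain variants read the counter on every call.
 */
static void
example_timestamps(void)
{
	struct timespec precise, cached;

	nanouptime(&precise);	/* reads the hardware counter now */
	getnanouptime(&cached);	/* value cached at the last tc_windup() */
}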
478 * Support for feed-forward synchronization algorithms. This is heavily inspired
479 * by the timehands mechanism but kept independent from it. *_windup() functions
480 * have some connection to avoid accessing the timecounter hardware more than
484 /* Feed-forward clock estimates kept updated by the synchronization daemon. */
485 struct ffclock_estimate ffclock_estimate;
486 struct bintime ffclock_boottime; /* Feed-forward boot time estimate. */
487 uint32_t ffclock_status; /* Feed-forward clock status. */
488 int8_t ffclock_updated; /* New estimates are available. */
489 struct mtx ffclock_mtx; /* Mutex on ffclock_estimate. */
492 struct ffclock_estimate cest;
493 struct bintime tick_time;
494 struct bintime tick_time_lerp;
495 ffcounter tick_ffcount;
496 uint64_t period_lerp;
497 volatile uint8_t gen;
498 struct fftimehands *next;
501 #define NUM_ELEMENTS(x) (sizeof(x) / sizeof(*x))
503 static struct fftimehands ffth[10];
504 static struct fftimehands *volatile fftimehands = ffth;
509 struct fftimehands *cur;
510 struct fftimehands *last;
512 memset(ffth, 0, sizeof(ffth));
514 last = ffth + NUM_ELEMENTS(ffth) - 1;
515 for (cur = ffth; cur < last; cur++)
520 ffclock_status = FFCLOCK_STA_UNSYNC;
521 mtx_init(&ffclock_mtx, "ffclock lock", NULL, MTX_DEF);
525 * Reset the feed-forward clock estimates. Called from inittodr() to get things
526 * kick started and uses the timecounter nominal frequency as a first period
527 * estimate. Note: this function may be called several times just after boot.
528 * Note: this is the only function that sets the value of boot time for the
529 * monotonic (i.e. uptime) version of the feed-forward clock.
532 ffclock_reset_clock(struct timespec *ts)
534 struct timecounter *tc;
535 struct ffclock_estimate cest;
537 tc = timehands->th_counter;
538 memset(&cest, 0, sizeof(struct ffclock_estimate));
540 timespec2bintime(ts, &ffclock_boottime);
541 timespec2bintime(ts, &(cest.update_time));
542 ffclock_read_counter(&cest.update_ffcount);
543 cest.leapsec_next = 0;
544 cest.period = ((1ULL << 63) / tc->tc_frequency) << 1;
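/*
 * Editorial note: the period is the counter period in units of 2^-64 seconds.
 * It is computed as 2 * (2^63 / freq) rather than 2^64 / freq because 2^64
 * does not fit in a uint64_t; the cost is at most one unit in the last bit.
 */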
547 cest.status = FFCLOCK_STA_UNSYNC;
548 cest.leapsec_total = 0;
551 mtx_lock(&ffclock_mtx);
552 bcopy(&cest, &ffclock_estimate, sizeof(struct ffclock_estimate));
553 ffclock_updated = INT8_MAX;
554 mtx_unlock(&ffclock_mtx);
556 printf("ffclock reset: %s (%llu Hz), time = %ld.%09lu\n", tc->tc_name,
557 (unsigned long long)tc->tc_frequency, (long)ts->tv_sec,
558 (unsigned long)ts->tv_nsec);
562 * Sub-routine to convert a time interval measured in RAW counter units to time
563 * in seconds stored in bintime format.
564 * NOTE: bintime_mul requires u_int, but the value of the ffcounter may be
565 * larger than the max value of u_int (on a 32-bit architecture). Loop to consume
569 ffclock_convert_delta(ffcounter ffdelta, uint64_t period, struct bintime *bt)
572 ffcounter delta, delta_max;
574 delta_max = (1ULL << (8 * sizeof(unsigned int))) - 1;
577 if (ffdelta > delta_max)
583 bintime_mul(&bt2, (unsigned int)delta);
584 bintime_add(bt, &bt2);
586 } while (ffdelta > 0);
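/*
 * Worked example (editorial addition): with 32-bit unsigned ints, delta_max is
 * 0xffffffff. Converting ffdelta == 0x180000000 (about 6.4e9 counter units)
 * therefore takes two passes through the loop, one of 0xffffffff units and one
 * of 0x80000001, each multiplied by the period and accumulated into *bt, so no
 * bits of the 64-bit ffcounter are discarded.
 */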
590 * Update the fftimehands.
591 * Push the tick ffcount and time(s) forward based on current clock estimate.
592 * The conversion from ffcounter to bintime relies on the difference clock
593 * principle, whose accuracy depends on computing small time intervals. If a new
594 * clock estimate has been passed by the synchronisation daemon, make it
595 * current, and compute the linear interpolation for monotonic time if needed.
598 ffclock_windup(unsigned int delta)
600 struct ffclock_estimate *cest;
601 struct fftimehands *ffth;
602 struct bintime bt, gap_lerp;
605 unsigned int polling;
606 uint8_t forward_jump, ogen;
609 * Pick the next timehand, copy current ffclock estimates and move tick
610 * times and counter forward.
613 ffth = fftimehands->next;
617 bcopy(&fftimehands->cest, cest, sizeof(struct ffclock_estimate));
618 ffdelta = (ffcounter)delta;
619 ffth->period_lerp = fftimehands->period_lerp;
621 ffth->tick_time = fftimehands->tick_time;
622 ffclock_convert_delta(ffdelta, cest->period, &bt);
623 bintime_add(&ffth->tick_time, &bt);
625 ffth->tick_time_lerp = fftimehands->tick_time_lerp;
626 ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt);
627 bintime_add(&ffth->tick_time_lerp, &bt);
629 ffth->tick_ffcount = fftimehands->tick_ffcount + ffdelta;
632 * Assess the status of the clock; if the last update is too old, it is
633 * likely the synchronisation daemon is dead and the clock is free
636 if (ffclock_updated == 0) {
637 ffdelta = ffth->tick_ffcount - cest->update_ffcount;
638 ffclock_convert_delta(ffdelta, cest->period, &bt);
639 if (bt.sec > 2 * FFCLOCK_SKM_SCALE)
640 ffclock_status |= FFCLOCK_STA_UNSYNC;
644 * If available, grab updated clock estimates and make them current.
645 * Recompute time at this tick using the updated estimates. The clock
646 * estimates passed in by the feed-forward synchronisation daemon may result
647 * in time conversion that is not monotonically increasing (just after
648 * the update). time_lerp is a particular linear interpolation over the
649 * synchronisation algo polling period that ensures monotonicity for the
650 * clock ids requesting it.
652 if (ffclock_updated > 0) {
653 bcopy(&ffclock_estimate, cest, sizeof(struct ffclock_estimate));
654 ffdelta = ffth->tick_ffcount - cest->update_ffcount;
655 ffth->tick_time = cest->update_time;
656 ffclock_convert_delta(ffdelta, cest->period, &bt);
657 bintime_add(&ffth->tick_time, &bt);
659 /* ffclock_reset sets ffclock_updated to INT8_MAX */
660 if (ffclock_updated == INT8_MAX)
661 ffth->tick_time_lerp = ffth->tick_time;
663 if (bintime_cmp(&ffth->tick_time, &ffth->tick_time_lerp, >))
668 bintime_clear(&gap_lerp);
670 gap_lerp = ffth->tick_time;
671 bintime_sub(&gap_lerp, &ffth->tick_time_lerp);
673 gap_lerp = ffth->tick_time_lerp;
674 bintime_sub(&gap_lerp, &ffth->tick_time);
678 * The reset from the RTC clock may be far from accurate, and
679 * reducing the gap between real time and interpolated time
680 * could take a very long time if the interpolated clock insists
681 * on strict monotonicity. The clock is reset under very strict
682 * conditions (kernel time is known to be wrong and
683 * synchronization daemon has been restarted recently).
684 * ffclock_boottime absorbs the jump to ensure boot time is
685 * correct and uptime functions stay consistent.
687 if (((ffclock_status & FFCLOCK_STA_UNSYNC) == FFCLOCK_STA_UNSYNC) &&
688 ((cest->status & FFCLOCK_STA_UNSYNC) == 0) &&
689 ((cest->status & FFCLOCK_STA_WARMUP) == FFCLOCK_STA_WARMUP)) {
691 bintime_add(&ffclock_boottime, &gap_lerp);
693 bintime_sub(&ffclock_boottime, &gap_lerp);
694 ffth->tick_time_lerp = ffth->tick_time;
695 bintime_clear(&gap_lerp);
698 ffclock_status = cest->status;
699 ffth->period_lerp = cest->period;
702 * Compute corrected period used for the linear interpolation of
703 * time. The rate of linear interpolation is capped to 5000PPM
706 if (bintime_isset(&gap_lerp)) {
707 ffdelta = cest->update_ffcount;
708 ffdelta -= fftimehands->cest.update_ffcount;
709 ffclock_convert_delta(ffdelta, cest->period, &bt);
712 bt.frac = 5000000 * (uint64_t)18446744073LL;
713 bintime_mul(&bt, polling);
714 if (bintime_cmp(&gap_lerp, &bt, >))
717 /* Approximate 1 sec by 1-(1/2^64) to ease arithmetic */
719 if (gap_lerp.sec > 0) {
721 frac /= ffdelta / gap_lerp.sec;
723 frac += gap_lerp.frac / ffdelta;
726 ffth->period_lerp += frac;
728 ffth->period_lerp -= frac;
740 * Adjust the fftimehands when the timecounter is changed. Stating the obvious,
741 * the old and new hardware counter cannot be read simultaneously. tc_windup()
742 * does read the two counters 'back to back', but a few cycles are effectively
743 * lost, and not accumulated in tick_ffcount. This is a fairly radical
744 * operation for a feed-forward synchronization daemon, and it is its job not
745 * to push irrelevant data to the kernel. Because there is no locking here,
746 * simply force any pending or subsequent update to be ignored, giving the
747 * daemon a chance to realize the counter has changed.
750 ffclock_change_tc(struct timehands *th)
752 struct fftimehands *ffth;
753 struct ffclock_estimate *cest;
754 struct timecounter *tc;
758 ffth = fftimehands->next;
763 bcopy(&(fftimehands->cest), cest, sizeof(struct ffclock_estimate));
764 cest->period = ((1ULL << 63) / tc->tc_frequency ) << 1;
767 cest->status |= FFCLOCK_STA_UNSYNC;
769 ffth->tick_ffcount = fftimehands->tick_ffcount;
770 ffth->tick_time_lerp = fftimehands->tick_time_lerp;
771 ffth->tick_time = fftimehands->tick_time;
772 ffth->period_lerp = cest->period;
774 /* Do not lock but ignore next update from synchronization daemon. */
784 * Retrieve feed-forward counter and time of last kernel tick.
787 ffclock_last_tick(ffcounter *ffcount, struct bintime *bt, uint32_t flags)
789 struct fftimehands *ffth;
793 * No locking but check generation has not changed. Also need to make
794 * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
799 if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP)
800 *bt = ffth->tick_time_lerp;
802 *bt = ffth->tick_time;
803 *ffcount = ffth->tick_ffcount;
804 } while (gen == 0 || gen != ffth->gen);
808 * Absolute clock conversion. Low level function to convert ffcounter to
809 * bintime. The ffcounter is converted using the current ffclock period estimate
810 * or the "interpolated period" to ensure monotonicity.
811 * NOTE: this conversion may have been deferred, and the clock updated since the
812 * hardware counter has been read.
815 ffclock_convert_abs(ffcounter ffcount, struct bintime *bt, uint32_t flags)
817 struct fftimehands *ffth;
823 * No locking but check generation has not changed. Also need to make
824 * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
829 if (ffcount > ffth->tick_ffcount)
830 ffdelta = ffcount - ffth->tick_ffcount;
832 ffdelta = ffth->tick_ffcount - ffcount;
834 if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP) {
835 *bt = ffth->tick_time_lerp;
836 ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt2);
838 *bt = ffth->tick_time;
839 ffclock_convert_delta(ffdelta, ffth->cest.period, &bt2);
842 if (ffcount > ffth->tick_ffcount)
843 bintime_add(bt, &bt2);
845 bintime_sub(bt, &bt2);
846 } while (gen == 0 || gen != ffth->gen);
850 * Difference clock conversion.
851 * Low level function to convert a time interval measured in RAW counter units
852 * into bintime. The difference clock allows measuring small intervals much more
853 * reliably than the absolute clock.
856 ffclock_convert_diff(ffcounter ffdelta, struct bintime *bt)
858 struct fftimehands *ffth;
861 /* No locking but check generation has not changed. */
865 ffclock_convert_delta(ffdelta, ffth->cest.period, bt);
866 } while (gen == 0 || gen != ffth->gen);
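/*
 * Usage sketch (illustrative only, hypothetical helper): a short interval is
 * measured with the difference clock by reading the feed-forward counter
 * before and after the event of interest and converting only the delta.
 */
static void
example_measure_interval(struct bintime *elapsed)
{
	ffcounter start, stop;

	ffclock_read_counter(&start);
	/* ... event being timed ... */
	ffclock_read_counter(&stop);
	ffclock_convert_diff(stop - start, elapsed);
}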
870 * Access to current ffcounter value.
873 ffclock_read_counter(ffcounter *ffcount)
875 struct timehands *th;
876 struct fftimehands *ffth;
877 unsigned int gen, delta;
880 * ffclock_windup() called from tc_windup(), safe to rely on
881 * th->th_generation only, for correct delta and ffcounter.
885 gen = th->th_generation;
887 delta = tc_delta(th);
888 *ffcount = ffth->tick_ffcount;
889 } while (gen == 0 || gen != th->th_generation);
895 binuptime(struct bintime *bt)
898 binuptime_fromclock(bt, sysclock_active);
902 nanouptime(struct timespec *tsp)
905 nanouptime_fromclock(tsp, sysclock_active);
909 microuptime(struct timeval *tvp)
912 microuptime_fromclock(tvp, sysclock_active);
916 bintime(struct bintime *bt)
919 bintime_fromclock(bt, sysclock_active);
923 nanotime(struct timespec *tsp)
926 nanotime_fromclock(tsp, sysclock_active);
930 microtime(struct timeval *tvp)
933 microtime_fromclock(tvp, sysclock_active);
937 getbinuptime(struct bintime *bt)
940 getbinuptime_fromclock(bt, sysclock_active);
944 getnanouptime(struct timespec *tsp)
947 getnanouptime_fromclock(tsp, sysclock_active);
951 getmicrouptime(struct timeval *tvp)
954 getmicrouptime_fromclock(tvp, sysclock_active);
958 getbintime(struct bintime *bt)
961 getbintime_fromclock(bt, sysclock_active);
965 getnanotime(struct timespec *tsp)
968 getnanotime_fromclock(tsp, sysclock_active);
972 getmicrotime(struct timeval *tvp)
975 getmicrotime_fromclock(tvp, sysclock_active);
981 * This is a clone of getnanotime and is used for walltimestamps.
982 * The dtrace_ prefix prevents fbt from creating probes for
983 * it so walltimestamp can be safely used in all fbt probes.
986 dtrace_getnanotime(struct timespec *tsp)
988 struct timehands *th;
993 gen = th->th_generation;
994 *tsp = th->th_nanotime;
995 } while (gen == 0 || gen != th->th_generation);
999 * System clock currently providing time to the system. Modifiable via sysctl
1000 * when the FFCLOCK option is defined.
1002 int sysclock_active = SYSCLOCK_FBCK;
1004 /* Internal NTP status and error estimates. */
1005 extern int time_status;
1006 extern long time_esterror;
1009 * Take a snapshot of sysclock data which can be used to compare system clocks
1010 * and generate timestamps after the fact.
1013 sysclock_getsnapshot(struct sysclock_snap *clock_snap, int fast)
1015 struct fbclock_info *fbi;
1016 struct timehands *th;
1018 unsigned int delta, gen;
1021 struct fftimehands *ffth;
1022 struct ffclock_info *ffi;
1023 struct ffclock_estimate cest;
1025 ffi = &clock_snap->ff_info;
1028 fbi = &clock_snap->fb_info;
1033 gen = th->th_generation;
1034 fbi->th_scale = th->th_scale;
1035 fbi->tick_time = th->th_offset;
1038 ffi->tick_time = ffth->tick_time_lerp;
1039 ffi->tick_time_lerp = ffth->tick_time_lerp;
1040 ffi->period = ffth->cest.period;
1041 ffi->period_lerp = ffth->period_lerp;
1042 clock_snap->ffcount = ffth->tick_ffcount;
1046 delta = tc_delta(th);
1047 } while (gen == 0 || gen != th->th_generation);
1049 clock_snap->delta = delta;
1050 clock_snap->sysclock_active = sysclock_active;
1052 /* Record feedback clock status and error. */
1053 clock_snap->fb_info.status = time_status;
1054 /* XXX: Very crude estimate of feedback clock error. */
1055 bt.sec = time_esterror / 1000000;
1056 bt.frac = (time_esterror - bt.sec * 1000000) *
1057 (uint64_t)18446744073709ULL;
1058 clock_snap->fb_info.error = bt;
1062 clock_snap->ffcount += delta;
1064 /* Record feed-forward clock leap second adjustment. */
1065 ffi->leapsec_adjustment = cest.leapsec_total;
1066 if (clock_snap->ffcount > cest.leapsec_next)
1067 ffi->leapsec_adjustment -= cest.leapsec;
1069 /* Record feed-forward clock status and error. */
1070 clock_snap->ff_info.status = cest.status;
1071 ffcount = clock_snap->ffcount - cest.update_ffcount;
1072 ffclock_convert_delta(ffcount, cest.period, &bt);
1073 /* 18446744073709 = int(2^64/1e12), err_bound_rate in [ps/s]. */
1074 bintime_mul(&bt, cest.errb_rate * (uint64_t)18446744073709ULL);
1075 /* 18446744073 = int(2^64 / 1e9), since err_abs in [ns]. */
1076 bintime_addx(&bt, cest.errb_abs * (uint64_t)18446744073ULL);
1077 clock_snap->ff_info.error = bt;
1082 * Convert a sysclock snapshot into a struct bintime based on the specified
1083 * clock source and flags.
1086 sysclock_snap2bintime(struct sysclock_snap *cs, struct bintime *bt,
1087 int whichclock, uint32_t flags)
1094 switch (whichclock) {
1096 *bt = cs->fb_info.tick_time;
1098 /* If snapshot was created with !fast, delta will be >0. */
1100 bintime_addx(bt, cs->fb_info.th_scale * cs->delta);
1102 if ((flags & FBCLOCK_UPTIME) == 0)
1103 bintime_add(bt, &boottimebin);
1107 if (flags & FFCLOCK_LERP) {
1108 *bt = cs->ff_info.tick_time_lerp;
1109 period = cs->ff_info.period_lerp;
1111 *bt = cs->ff_info.tick_time;
1112 period = cs->ff_info.period;
1115 /* If snapshot was created with !fast, delta will be >0. */
1116 if (cs->delta > 0) {
1117 ffclock_convert_delta(cs->delta, period, &bt2);
1118 bintime_add(bt, &bt2);
1121 /* Leap second adjustment. */
1122 if (flags & FFCLOCK_LEAPSEC)
1123 bt->sec -= cs->ff_info.leapsec_adjustment;
1125 /* Boot time adjustment, for uptime/monotonic clocks. */
1126 if (flags & FFCLOCK_UPTIME)
1127 bintime_sub(bt, &ffclock_boottime);
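/*
 * Usage sketch (illustrative only, hypothetical helper): a subsystem that
 * wants to timestamp an event once but render it later against the active
 * system clock can take a snapshot in the fast path and defer the conversion.
 */
static void
example_snapshot_timestamp(struct bintime *bt)
{
	struct sysclock_snap snap;

	sysclock_getsnapshot(&snap, 0);	/* !fast: also capture a counter delta */
	sysclock_snap2bintime(&snap, bt, snap.sysclock_active, 0);
}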
1139 * Initialize a new timecounter and possibly use it.
1142 tc_init(struct timecounter *tc)
1145 struct sysctl_oid *tc_root;
1147 u = tc->tc_frequency / tc->tc_counter_mask;
1148 /* XXX: We need some margin here, 10% is a guess */
1151 if (u > hz && tc->tc_quality >= 0) {
1152 tc->tc_quality = -2000;
1154 printf("Timecounter \"%s\" frequency %ju Hz",
1155 tc->tc_name, (uintmax_t)tc->tc_frequency);
1156 printf(" -- Insufficient hz, needs at least %u\n", u);
1158 } else if (tc->tc_quality >= 0 || bootverbose) {
1159 printf("Timecounter \"%s\" frequency %ju Hz quality %d\n",
1160 tc->tc_name, (uintmax_t)tc->tc_frequency,
1164 tc->tc_next = timecounters;
1167 * Set up sysctl tree for this counter.
1169 tc_root = SYSCTL_ADD_NODE(NULL,
1170 SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name,
1171 CTLFLAG_RW, 0, "timecounter description");
1172 SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1173 "mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
1174 "mask for implemented bits");
1175 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1176 "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc),
1177 sysctl_kern_timecounter_get, "IU", "current timecounter value");
1178 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1179 "frequency", CTLTYPE_U64 | CTLFLAG_RD, tc, sizeof(*tc),
1180 sysctl_kern_timecounter_freq, "QU", "timecounter frequency");
1181 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1182 "quality", CTLFLAG_RD, &(tc->tc_quality), 0,
1183 "goodness of time counter");
1185 * Do not automatically switch if the current tc was specifically
1186 * chosen. Never automatically use a timecounter with negative quality.
1187 * Even though we run on the dummy counter, switching here may be
1188 * worse since this timecounter may not be monotonic.
1192 if (tc->tc_quality < 0)
1194 if (tc->tc_quality < timecounter->tc_quality)
1196 if (tc->tc_quality == timecounter->tc_quality &&
1197 tc->tc_frequency < timecounter->tc_frequency)
1199 (void)tc->tc_get_timecount(tc);
1200 (void)tc->tc_get_timecount(tc);
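/*
 * Registration sketch (illustrative only): a hardware driver fills in a
 * struct timecounter and hands it to tc_init(), typically from its attach
 * routine. The "acme" names and the 24 MHz figure are hypothetical.
 */
static u_int
acme_get_timecount(struct timecounter *tc)
{

	return (0);		/* a real driver reads its counter register here */
}

static struct timecounter acme_timecounter = {
	acme_get_timecount,	/* tc_get_timecount */
	0,			/* no tc_poll_pps method */
	0xffffffffu,		/* tc_counter_mask */
	24000000,		/* tc_frequency */
	"acme",			/* tc_name */
	800,			/* tc_quality */
};
/* ... and later, typically in attach: tc_init(&acme_timecounter); */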
1204 /* Report the frequency of the current timecounter. */
1206 tc_getfrequency(void)
1209 return (timehands->th_counter->tc_frequency);
1213 * Step our concept of UTC. This is done by modifying our estimate of
1218 tc_setclock(struct timespec *ts)
1220 struct timespec tbef, taft;
1221 struct bintime bt, bt2;
1223 cpu_tick_calibrate(1);
1225 timespec2bintime(ts, &bt);
1227 bintime_sub(&bt, &bt2);
1228 bintime_add(&bt2, &boottimebin);
1230 bintime2timeval(&bt, &boottime);
1232 /* XXX fiddle all the little crinkly bits around the fiords... */
1235 if (timestepwarnings) {
1237 "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
1238 (intmax_t)tbef.tv_sec, tbef.tv_nsec,
1239 (intmax_t)taft.tv_sec, taft.tv_nsec,
1240 (intmax_t)ts->tv_sec, ts->tv_nsec);
1242 cpu_tick_calibrate(1);
1246 * Initialize the next struct timehands in the ring and make
1247 * it the active timehands. Along the way we might switch to a different
1248 * timecounter and/or do seconds processing in NTP. Slightly magic.
1254 struct timehands *th, *tho;
1256 u_int delta, ncount, ogen;
1261 * Make the next timehands a copy of the current one, but do not
1262 * overwrite the generation or next pointer. While we update
1263 * the contents, the generation must be zero.
1267 ogen = th->th_generation;
1268 th->th_generation = 0;
1269 bcopy(tho, th, offsetof(struct timehands, th_generation));
1272 * Capture a timecounter delta on the current timecounter and if
1273 * changing timecounters, a counter value from the new timecounter.
1274 * Update the offset fields accordingly.
1276 delta = tc_delta(th);
1277 if (th->th_counter != timecounter)
1278 ncount = timecounter->tc_get_timecount(timecounter);
1282 ffclock_windup(delta);
1284 th->th_offset_count += delta;
1285 th->th_offset_count &= th->th_counter->tc_counter_mask;
1286 while (delta > th->th_counter->tc_frequency) {
1287 /* Eat complete unadjusted seconds. */
1288 delta -= th->th_counter->tc_frequency;
1289 th->th_offset.sec++;
1291 if ((delta > th->th_counter->tc_frequency / 2) &&
1292 (th->th_scale * delta < ((uint64_t)1 << 63))) {
1293 /* The product th_scale * delta just barely overflows. */
1294 th->th_offset.sec++;
1296 bintime_addx(&th->th_offset, th->th_scale * delta);
1299 * Hardware latching timecounters may not generate interrupts on
1300 * PPS events, so instead we poll them. There is a finite risk that
1301 * the hardware might capture a count which is later than the one we
1302 * got above, and therefore possibly in the next NTP second which might
1303 * have a different rate than the current NTP second. It doesn't
1304 * matter in practice.
1306 if (tho->th_counter->tc_poll_pps)
1307 tho->th_counter->tc_poll_pps(tho->th_counter);
1310 * Deal with NTP second processing. The for loop normally
1311 * iterates at most once, but in extreme situations it might
1312 * keep NTP sane if timeouts are not run for several seconds.
1313 * At boot, the time step can be large when the TOD hardware
1314 * has been read, so on really large steps, we call
1315 * ntp_update_second only twice. We need to call it twice in
1316 * case we missed a leap second.
1319 bintime_add(&bt, &boottimebin);
1320 i = bt.sec - tho->th_microtime.tv_sec;
1323 for (; i > 0; i--) {
1325 ntp_update_second(&th->th_adjustment, &bt.sec);
1327 boottimebin.sec += bt.sec - t;
1329 /* Update the UTC timestamps used by the get*() functions. */
1330 /* XXX shouldn't do this here. Should force non-`get' versions. */
1331 bintime2timeval(&bt, &th->th_microtime);
1332 bintime2timespec(&bt, &th->th_nanotime);
1334 /* Now is a good time to change timecounters. */
1335 if (th->th_counter != timecounter) {
1337 if ((timecounter->tc_flags & TC_FLAGS_C2STOP) != 0)
1338 cpu_disable_c2_sleep++;
1339 if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0)
1340 cpu_disable_c2_sleep--;
1342 th->th_counter = timecounter;
1343 th->th_offset_count = ncount;
1344 tc_min_ticktock_freq = max(1, timecounter->tc_frequency /
1345 (((uint64_t)timecounter->tc_counter_mask + 1) / 3));
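/*
 * Worked example (editorial): for a 24-bit counter at 14.318 MHz this
 * evaluates to 14318180 / (0x1000000 / 3) == 2, i.e. tc_windup() must run at
 * least twice per second so tc_delta() never spans more than roughly a third
 * of the counter's wrap period.
 */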
1347 ffclock_change_tc(th);
1352 * Recalculate the scaling factor. We want the number of 1/2^64
1353 * fractions of a second per period of the hardware counter, taking
1354 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
1355 * processing provides us with.
1357 * The th_adjustment is nanoseconds per second with 32 bit binary
1358 * fraction and we want 64 bit binary fraction of second:
1360 * x = a * 2^32 / 10^9 = a * 4.294967296
1362 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
1363 * we can only multiply by about 850 without overflowing, that
1364 * leaves no suitably precise fractions for multiply before divide.
1366 * Divide before multiply with a fraction of 2199/512 results in a
1367 * systematic undercompensation of 10PPM of th_adjustment. On a
1368 * 5000PPM adjustment this is a 0.05PPM error. This is acceptable.
1370 * We happily sacrifice the lowest of the 64 bits of our result
1371 * to the goddess of code clarity.
1374 scale = (uint64_t)1 << 63;
1375 scale += (th->th_adjustment / 1024) * 2199;
1376 scale /= th->th_counter->tc_frequency;
1377 th->th_scale = scale * 2;
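/*
 * Worked numbers (editorial): 2199 / 512 = 4.294921875 versus the exact factor
 * 2^32 / 10^9 = 4.294967296, a relative shortfall of about 1.06e-5, i.e. the
 * ~10PPM undercompensation mentioned above; at the +/- 5000PPM adjustment
 * limit that amounts to roughly 0.05PPM of absolute rate error.
 */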
1380 * Now that the struct timehands is again consistent, set the new
1381 * generation number, making sure to not make it zero.
1385 th->th_generation = ogen;
1387 /* Go live with the new struct timehands. */
1389 switch (sysclock_active) {
1392 time_second = th->th_microtime.tv_sec;
1393 time_uptime = th->th_offset.sec;
1397 time_second = fftimehands->tick_time_lerp.sec;
1398 time_uptime = fftimehands->tick_time_lerp.sec - ffclock_boottime.sec;
1404 timekeep_push_vdso();
1407 /* Report or change the active timecounter hardware. */
1409 sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
1412 struct timecounter *newtc, *tc;
1416 strlcpy(newname, tc->tc_name, sizeof(newname));
1418 error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
1419 if (error != 0 || req->newptr == NULL)
1421 /* Record that the tc in use now was specifically chosen. */
1423 if (strcmp(newname, tc->tc_name) == 0)
1425 for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
1426 if (strcmp(newname, newtc->tc_name) != 0)
1429 /* Warm up new timecounter. */
1430 (void)newtc->tc_get_timecount(newtc);
1431 (void)newtc->tc_get_timecount(newtc);
1433 timecounter = newtc;
1434 timekeep_push_vdso();
1440 SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
1441 0, 0, sysctl_kern_timecounter_hardware, "A",
1442 "Timecounter hardware selected");
1445 /* Report the available timecounter hardware. */
1447 sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
1450 struct timecounter *tc;
1455 for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) {
1456 sprintf(buf, "%s%s(%d)",
1457 spc, tc->tc_name, tc->tc_quality);
1458 error = SYSCTL_OUT(req, buf, strlen(buf));
1464 SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
1465 0, 0, sysctl_kern_timecounter_choice, "A", "Timecounter hardware detected");
1468 * RFC 2783 PPS-API implementation.
1472 * Return true if the driver is aware of the abi version extensions in the
1473 * pps_state structure, and it supports at least the given abi version number.
1476 abi_aware(struct pps_state *pps, int vers)
1479 return ((pps->kcmode & KCMODE_ABIFLAG) && pps->driver_abi >= vers);
1483 pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
1486 pps_seq_t aseq, cseq;
1489 if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
1493 * If no timeout is requested, immediately return whatever values were
1494 * most recently captured. If timeout seconds is -1, that's a request
1495 * to block without a timeout. WITNESS won't let us sleep forever
1496 * without a lock (we really don't need a lock), so just repeatedly
1497 * sleep a long time.
1499 if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec) {
1500 if (fapi->timeout.tv_sec == -1)
1503 tv.tv_sec = fapi->timeout.tv_sec;
1504 tv.tv_usec = fapi->timeout.tv_nsec / 1000;
1507 aseq = pps->ppsinfo.assert_sequence;
1508 cseq = pps->ppsinfo.clear_sequence;
1509 while (aseq == pps->ppsinfo.assert_sequence &&
1510 cseq == pps->ppsinfo.clear_sequence) {
1511 if (abi_aware(pps, 1) && pps->driver_mtx != NULL) {
1512 if (pps->flags & PPSFLAG_MTX_SPIN) {
1513 err = msleep_spin(pps, pps->driver_mtx,
1516 err = msleep(pps, pps->driver_mtx, PCATCH,
1520 err = tsleep(pps, PCATCH, "ppsfch", timo);
1522 if (err == EWOULDBLOCK) {
1523 if (fapi->timeout.tv_sec == -1) {
1528 } else if (err != 0) {
1534 pps->ppsinfo.current_mode = pps->ppsparam.mode;
1535 fapi->pps_info_buf = pps->ppsinfo;
1541 pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
1544 struct pps_fetch_args *fapi;
1546 struct pps_fetch_ffc_args *fapi_ffc;
1549 struct pps_kcbind_args *kapi;
1552 KASSERT(pps != NULL, ("NULL pps pointer in pps_ioctl"));
1554 case PPS_IOC_CREATE:
1556 case PPS_IOC_DESTROY:
1558 case PPS_IOC_SETPARAMS:
1559 app = (pps_params_t *)data;
1560 if (app->mode & ~pps->ppscap)
1563 /* Ensure only a single clock is selected for ffc timestamp. */
1564 if ((app->mode & PPS_TSCLK_MASK) == PPS_TSCLK_MASK)
1567 pps->ppsparam = *app;
1569 case PPS_IOC_GETPARAMS:
1570 app = (pps_params_t *)data;
1571 *app = pps->ppsparam;
1572 app->api_version = PPS_API_VERS_1;
1574 case PPS_IOC_GETCAP:
1575 *(int*)data = pps->ppscap;
1578 fapi = (struct pps_fetch_args *)data;
1579 return (pps_fetch(fapi, pps));
1581 case PPS_IOC_FETCH_FFCOUNTER:
1582 fapi_ffc = (struct pps_fetch_ffc_args *)data;
1583 if (fapi_ffc->tsformat && fapi_ffc->tsformat !=
1586 if (fapi_ffc->timeout.tv_sec || fapi_ffc->timeout.tv_nsec)
1587 return (EOPNOTSUPP);
1588 pps->ppsinfo_ffc.current_mode = pps->ppsparam.mode;
1589 fapi_ffc->pps_info_buf_ffc = pps->ppsinfo_ffc;
1590 /* Overwrite timestamps if feedback clock selected. */
1591 switch (pps->ppsparam.mode & PPS_TSCLK_MASK) {
1592 case PPS_TSCLK_FBCK:
1593 fapi_ffc->pps_info_buf_ffc.assert_timestamp =
1594 pps->ppsinfo.assert_timestamp;
1595 fapi_ffc->pps_info_buf_ffc.clear_timestamp =
1596 pps->ppsinfo.clear_timestamp;
1598 case PPS_TSCLK_FFWD:
1604 #endif /* FFCLOCK */
1605 case PPS_IOC_KCBIND:
1607 kapi = (struct pps_kcbind_args *)data;
1608 /* XXX Only root should be able to do this */
1609 if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
1611 if (kapi->kernel_consumer != PPS_KC_HARDPPS)
1613 if (kapi->edge & ~pps->ppscap)
1615 pps->kcmode = (kapi->edge & KCMODE_EDGEMASK) |
1616 (pps->kcmode & KCMODE_ABIFLAG);
1619 return (EOPNOTSUPP);
1627 pps_init(struct pps_state *pps)
1629 pps->ppscap |= PPS_TSFMT_TSPEC | PPS_CANWAIT;
1630 if (pps->ppscap & PPS_CAPTUREASSERT)
1631 pps->ppscap |= PPS_OFFSETASSERT;
1632 if (pps->ppscap & PPS_CAPTURECLEAR)
1633 pps->ppscap |= PPS_OFFSETCLEAR;
1635 pps->ppscap |= PPS_TSCLK_MASK;
1637 pps->kcmode &= ~KCMODE_ABIFLAG;
1641 pps_init_abi(struct pps_state *pps)
1645 if (pps->driver_abi > 0) {
1646 pps->kcmode |= KCMODE_ABIFLAG;
1647 pps->kernel_abi = PPS_ABI_VERSION;
1652 pps_capture(struct pps_state *pps)
1654 struct timehands *th;
1656 KASSERT(pps != NULL, ("NULL pps pointer in pps_capture"));
1658 pps->capgen = th->th_generation;
1661 pps->capffth = fftimehands;
1663 pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
1664 if (pps->capgen != th->th_generation)
1669 pps_event(struct pps_state *pps, int event)
1672 struct timespec ts, *tsp, *osp;
1673 u_int tcount, *pcount;
1677 struct timespec *tsp_ffc;
1678 pps_seq_t *pseq_ffc;
1682 KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
1683 /* Nothing to do if not currently set to capture this event type. */
1684 if ((event & pps->ppsparam.mode) == 0)
1686 /* If the timecounter was wound up underneath us, bail out. */
1687 if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation)
1690 /* Things would be easier with arrays. */
1691 if (event == PPS_CAPTUREASSERT) {
1692 tsp = &pps->ppsinfo.assert_timestamp;
1693 osp = &pps->ppsparam.assert_offset;
1694 foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
1695 fhard = pps->kcmode & PPS_CAPTUREASSERT;
1696 pcount = &pps->ppscount[0];
1697 pseq = &pps->ppsinfo.assert_sequence;
1699 ffcount = &pps->ppsinfo_ffc.assert_ffcount;
1700 tsp_ffc = &pps->ppsinfo_ffc.assert_timestamp;
1701 pseq_ffc = &pps->ppsinfo_ffc.assert_sequence;
1704 tsp = &pps->ppsinfo.clear_timestamp;
1705 osp = &pps->ppsparam.clear_offset;
1706 foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
1707 fhard = pps->kcmode & PPS_CAPTURECLEAR;
1708 pcount = &pps->ppscount[1];
1709 pseq = &pps->ppsinfo.clear_sequence;
1711 ffcount = &pps->ppsinfo_ffc.clear_ffcount;
1712 tsp_ffc = &pps->ppsinfo_ffc.clear_timestamp;
1713 pseq_ffc = &pps->ppsinfo_ffc.clear_sequence;
1718 * If the timecounter changed, we cannot compare the count values, so
1719 * we have to drop the rest of the PPS-stuff until the next event.
1721 if (pps->ppstc != pps->capth->th_counter) {
1722 pps->ppstc = pps->capth->th_counter;
1723 *pcount = pps->capcount;
1724 pps->ppscount[2] = pps->capcount;
1728 /* Convert the count to a timespec. */
1729 tcount = pps->capcount - pps->capth->th_offset_count;
1730 tcount &= pps->capth->th_counter->tc_counter_mask;
1731 bt = pps->capth->th_offset;
1732 bintime_addx(&bt, pps->capth->th_scale * tcount);
1733 bintime_add(&bt, &boottimebin);
1734 bintime2timespec(&bt, &ts);
1736 /* If the timecounter was wound up underneath us, bail out. */
1737 if (pps->capgen != pps->capth->th_generation)
1740 *pcount = pps->capcount;
1745 timespecadd(tsp, osp);
1746 if (tsp->tv_nsec < 0) {
1747 tsp->tv_nsec += 1000000000;
1753 *ffcount = pps->capffth->tick_ffcount + tcount;
1754 bt = pps->capffth->tick_time;
1755 ffclock_convert_delta(tcount, pps->capffth->cest.period, &bt);
1756 bintime_add(&bt, &pps->capffth->tick_time);
1757 bintime2timespec(&bt, &ts);
1767 * Feed the NTP PLL/FLL.
1768 * The FLL wants to know how many (hardware) nanoseconds
1769 * elapsed since the previous event.
1771 tcount = pps->capcount - pps->ppscount[2];
1772 pps->ppscount[2] = pps->capcount;
1773 tcount &= pps->capth->th_counter->tc_counter_mask;
1774 scale = (uint64_t)1 << 63;
1775 scale /= pps->capth->th_counter->tc_frequency;
1779 bintime_addx(&bt, scale * tcount);
1780 bintime2timespec(&bt, &ts);
1781 hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
1785 /* Wakeup anyone sleeping in pps_fetch(). */
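/*
 * Driver usage sketch (illustrative only, hypothetical handler): the usual
 * pattern is to call pps_capture() as early as possible in the interrupt
 * handler and to defer the conversion work to pps_event().
 */
static void
example_pps_intr(struct pps_state *pps)
{

	pps_capture(pps);
	/* ... acknowledge the interrupt, latch hardware status, ... */
	pps_event(pps, PPS_CAPTUREASSERT);
}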
1790 * Timecounters need to be updated every so often to prevent the hardware
1791 * counter from overflowing. Updating also recalculates the cached values
1792 * used by the get*() family of functions, so their precision depends on
1793 * the update frequency.
1797 SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0,
1798 "Approximate number of hardclock ticks in a millisecond");
1801 tc_ticktock(int cnt)
1806 if (count < tc_tick)
1812 static void __inline
1813 tc_adjprecision(void)
1817 if (tc_timepercentage > 0) {
1818 t = (99 + tc_timepercentage) / tc_timepercentage;
1819 tc_precexp = fls(t + (t >> 1)) - 1;
1820 FREQ2BT(hz / tc_tick, &bt_timethreshold);
1821 FREQ2BT(hz, &bt_tickthreshold);
1822 bintime_shift(&bt_timethreshold, tc_precexp);
1823 bintime_shift(&bt_tickthreshold, tc_precexp);
1826 bt_timethreshold.sec = INT_MAX;
1827 bt_timethreshold.frac = ~(uint64_t)0;
1828 bt_tickthreshold = bt_timethreshold;
1830 sbt_timethreshold = bttosbt(bt_timethreshold);
1831 sbt_tickthreshold = bttosbt(bt_tickthreshold);
1835 sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS)
1839 val = tc_timepercentage;
1840 error = sysctl_handle_int(oidp, &val, 0, req);
1841 if (error != 0 || req->newptr == NULL)
1843 tc_timepercentage = val;
1849 inittimecounter(void *dummy)
1855 * Set the initial timeout to
1856 * max(1, <approx. number of hardclock ticks in a millisecond>).
1857 * People should probably not use the sysctl to set the timeout
1858 * to smaller than its initial value, since that value is the
1859 * smallest reasonable one. If they want better timestamps they
1860 * should use the non-"get"* functions.
1863 tc_tick = (hz + 500) / 1000;
1867 FREQ2BT(hz, &tick_bt);
1868 tick_sbt = bttosbt(tick_bt);
1869 tick_rate = hz / tc_tick;
1870 FREQ2BT(tick_rate, &tc_tick_bt);
1871 tc_tick_sbt = bttosbt(tc_tick_bt);
1872 p = (tc_tick * 1000000) / hz;
1873 printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);
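/*
 * Worked example (editorial): with hz = 1000 the computation above gives
 * tc_tick = 1, so tc_windup() runs on every hardclock tick and the message
 * reads "tick every 1.000 msec"; with hz = 10000 it gives tc_tick = 10, so the
 * get*() timestamps are still refreshed about once per millisecond.
 */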
1878 /* warm up new timecounter (again) and get rolling. */
1879 (void)timecounter->tc_get_timecount(timecounter);
1880 (void)timecounter->tc_get_timecount(timecounter);
1884 SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
1886 /* Cpu tick handling -------------------------------------------------*/
1888 static int cpu_tick_variable;
1889 static uint64_t cpu_tick_frequency;
1891 static DPCPU_DEFINE(uint64_t, tc_cpu_ticks_base);
1892 static DPCPU_DEFINE(unsigned, tc_cpu_ticks_last);
1897 struct timecounter *tc;
1898 uint64_t res, *base;
1902 base = DPCPU_PTR(tc_cpu_ticks_base);
1903 last = DPCPU_PTR(tc_cpu_ticks_last);
1904 tc = timehands->th_counter;
1905 u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
1907 *base += (uint64_t)tc->tc_counter_mask + 1;
1915 cpu_tick_calibration(void)
1917 static time_t last_calib;
1919 if (time_uptime != last_calib && !(time_uptime & 0xf)) {
1920 cpu_tick_calibrate(0);
1921 last_calib = time_uptime;
1926 * This function gets called every 16 seconds on only one designated
1927 * CPU in the system from hardclock() via cpu_tick_calibration().
1929 * Whenever the real time clock is stepped we get called with reset=1
1930 * to make sure we handle suspend/resume and similar events correctly.
1934 cpu_tick_calibrate(int reset)
1936 static uint64_t c_last;
1937 uint64_t c_this, c_delta;
1938 static struct bintime t_last;
1939 struct bintime t_this, t_delta;
1943 /* The clock was stepped, abort & reset */
1948 /* we don't calibrate fixed rate cputicks */
1949 if (!cpu_tick_variable)
1952 getbinuptime(&t_this);
1953 c_this = cpu_ticks();
1954 if (t_last.sec != 0) {
1955 c_delta = c_this - c_last;
1957 bintime_sub(&t_delta, &t_last);
1960 * 2^(64-20) / 16[s] =
1962 * 17.592.186.044.416 / 16 =
1963 * 1.099.511.627.776 [Hz]
1965 divi = t_delta.sec << 20;
1966 divi |= t_delta.frac >> (64 - 20);
1969 if (c_delta > cpu_tick_frequency) {
1970 if (0 && bootverbose)
1971 printf("cpu_tick increased to %ju Hz\n",
1973 cpu_tick_frequency = c_delta;
1981 set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var)
1985 cpu_ticks = tc_cpu_ticks;
1987 cpu_tick_frequency = freq;
1988 cpu_tick_variable = var;
1997 if (cpu_ticks == tc_cpu_ticks)
1998 return (tc_getfrequency());
1999 return (cpu_tick_frequency);
2003 * We need to be slightly careful converting cputicks to microseconds.
2004 * There is plenty of margin in 64 bits of microseconds (half a million
2005 * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
2006 * before divide conversion (to retain precision) we find that the
2007 * margin shrinks to 1.5 hours (one millionth of 146y).
2008 * With a three prong approach we never lose significant bits, no
2009 * matter what the cputick rate and length of the time interval are.
2013 cputick2usec(uint64_t tick)
2016 if (tick > 18446744073709551LL) /* floor(2^64 / 1000) */
2017 return (tick / (cpu_tickrate() / 1000000LL));
2018 else if (tick > 18446744073709LL) /* floor(2^64 / 1000000) */
2019 return ((tick * 1000LL) / (cpu_tickrate() / 1000LL));
2021 return ((tick * 1000000LL) / cpu_tickrate());
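/*
 * Worked example (editorial): at a 4 GHz tick rate the most precise
 * multiply-by-1000000-then-divide branch covers intervals up to about
 * 4600 seconds (floor(2^64 / 1000000) ticks), the intermediate branch carries
 * that to roughly 53 days (floor(2^64 / 1000) ticks), and only beyond that is
 * the coarser divide-first branch needed.
 */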
2024 cpu_tick_f *cpu_ticks = tc_cpu_ticks;
2026 static int vdso_th_enable = 1;
2028 sysctl_fast_gettime(SYSCTL_HANDLER_ARGS)
2030 int old_vdso_th_enable, error;
2032 old_vdso_th_enable = vdso_th_enable;
2033 error = sysctl_handle_int(oidp, &old_vdso_th_enable, 0, req);
2036 vdso_th_enable = old_vdso_th_enable;
2037 timekeep_push_vdso();
2040 SYSCTL_PROC(_kern_timecounter, OID_AUTO, fast_gettime,
2041 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2042 NULL, 0, sysctl_fast_gettime, "I", "Enable fast time of day");
2045 tc_fill_vdso_timehands(struct vdso_timehands *vdso_th)
2047 struct timehands *th;
2051 vdso_th->th_algo = VDSO_TH_ALGO_1;
2052 vdso_th->th_scale = th->th_scale;
2053 vdso_th->th_offset_count = th->th_offset_count;
2054 vdso_th->th_counter_mask = th->th_counter->tc_counter_mask;
2055 vdso_th->th_offset = th->th_offset;
2056 vdso_th->th_boottime = boottimebin;
2057 enabled = cpu_fill_vdso_timehands(vdso_th);
2058 if (!vdso_th_enable)
2063 #ifdef COMPAT_FREEBSD32
2065 tc_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
2067 struct timehands *th;
2071 vdso_th32->th_algo = VDSO_TH_ALGO_1;
2072 *(uint64_t *)&vdso_th32->th_scale[0] = th->th_scale;
2073 vdso_th32->th_offset_count = th->th_offset_count;
2074 vdso_th32->th_counter_mask = th->th_counter->tc_counter_mask;
2075 vdso_th32->th_offset.sec = th->th_offset.sec;
2076 *(uint64_t *)&vdso_th32->th_offset.frac[0] = th->th_offset.frac;
2077 vdso_th32->th_boottime.sec = boottimebin.sec;
2078 *(uint64_t *)&vdso_th32->th_boottime.frac[0] = boottimebin.frac;
2079 enabled = cpu_fill_vdso_timehands32(vdso_th32);
2080 if (!vdso_th_enable)