/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * Copyright (c) 2011 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Julien Ridoux at the University
 * of Melbourne under sponsorship from the FreeBSD Foundation.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ffclock.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/timeffc.h>
#include <sys/timepps.h>
#include <sys/timetc.h>
#include <sys/timex.h>
/*
 * A large step happens on boot. This constant detects such steps.
 * It is relatively small so that ntp_update_second gets called enough
 * in the typical 'missed a couple of seconds' case, but doesn't loop
 * forever when the time step is large.
 */
#define LARGE_STEP	200
/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air. This allows the console and other early stuff to use
 * time services.
 */

static u_int
dummy_get_timecount(struct timecounter *tc)
{
	static u_int now;

	return (++now);
}

static struct timecounter dummy_timecounter = {
	dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
};
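/*
 * Editorial sketch (not part of the original file): a real clock driver
 * publishes its hardware counter by filling in a struct timecounter, in the
 * same field order as dummy_timecounter above, and passing it to tc_init().
 * All names here (foo_get_timecount, foo_timecounter, foo_read_counter_reg)
 * are hypothetical.
 *
 *	static u_int
 *	foo_get_timecount(struct timecounter *tc)
 *	{
 *		// Read the free-running hardware counter register.
 *		return (foo_read_counter_reg());
 *	}
 *
 *	static struct timecounter foo_timecounter = {
 *		foo_get_timecount,	// tc_get_timecount
 *		0,			// tc_poll_pps: none
 *		0xffffffffu,		// tc_counter_mask: 32 implemented bits
 *		24000000,		// tc_frequency: 24 MHz
 *		"foo",			// tc_name
 *		800,			// tc_quality: eligible for auto-use
 *	};
 *
 *	tc_init(&foo_timecounter);
 */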
struct timehands {
	/* These fields must be initialized by the driver. */
	struct timecounter	*th_counter;
	int64_t			th_adjustment;
	uint64_t		th_scale;
	u_int			th_offset_count;
	struct bintime		th_offset;
	struct timeval		th_microtime;
	struct timespec		th_nanotime;
	/* Fields not to be copied in tc_windup start with th_generation. */
	volatile u_int		th_generation;
	struct timehands	*th_next;
};
static struct timehands th0;
static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0};
static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9};
static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8};
static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7};
static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6};
static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5};
static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4};
static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3};
static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2};
static struct timehands th0 = {
	&dummy_timecounter,
	0,
	(uint64_t)-1 / 1000000,
	0,
	{1, 0},
	{0, 0},
	{0, 0},
	1,
	&th1
};

static struct timehands *volatile timehands = &th0;
struct timecounter *timecounter = &dummy_timecounter;
static struct timecounter *timecounters = &dummy_timecounter;

int tc_min_ticktock_freq = 1;

time_t time_second = 1;
time_t time_uptime = 1;

struct bintime boottimebin;
struct timeval boottime;
static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
    NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");

SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, "");

static int timestepwarnings;
SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
    &timestepwarnings, 0, "Log time steps");

static void tc_windup(void);
static void cpu_tick_calibrate(int);
static int
sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
{
#ifdef SCTL_MASK32
	int tv[2];

	if (req->flags & SCTL_MASK32) {
		tv[0] = boottime.tv_sec;
		tv[1] = boottime.tv_usec;
		return SYSCTL_OUT(req, tv, sizeof(tv));
	}
#endif
	return SYSCTL_OUT(req, &boottime, sizeof(boottime));
}
static int
sysctl_kern_timecounter_get(SYSCTL_HANDLER_ARGS)
{
	u_int ncount;
	struct timecounter *tc = arg1;

	ncount = tc->tc_get_timecount(tc);
	return sysctl_handle_int(oidp, &ncount, 0, req);
}

static int
sysctl_kern_timecounter_freq(SYSCTL_HANDLER_ARGS)
{
	uint64_t freq;
	struct timecounter *tc = arg1;

	freq = tc->tc_frequency;
	return sysctl_handle_64(oidp, &freq, 0, req);
}
/*
 * Return the difference between the timehands' counter value now and what
 * was when we copied it to the timehands' offset_count.
 */
static __inline u_int
tc_delta(struct timehands *th)
{
	struct timecounter *tc;

	tc = th->th_counter;
	return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
	    tc->tc_counter_mask);
}
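/*
 * Editorial note: the subtraction above is done in unsigned arithmetic and
 * then masked, so a counter that wrapped since the last tc_windup() still
 * yields the correct delta. Example: with a 24-bit counter
 * (tc_counter_mask == 0xffffff), th_offset_count == 0xfffff0 and a current
 * reading of 0x000010 give (0x000010 - 0xfffff0) & 0xffffff == 0x20,
 * i.e. 32 ticks elapsed across the wrap.
 */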
/*
 * Functions for reading the time. We have to loop until we are sure that
 * the timehands that we operated on was not updated under our feet. See
 * the comment in <sys/time.h> for a description of these 12 functions.
 */

#ifdef FFCLOCK
void
fbclock_binuptime(struct bintime *bt)
{
	struct timehands *th;
	unsigned int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
	} while (gen == 0 || gen != th->th_generation);
}
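/*
 * Editorial note: th_generation is the lock-free synchronization point.
 * tc_windup() zeroes a timehands' generation while rewriting it and stores a
 * new non-zero value when done, so readers retry both when they raced an
 * update (gen != th->th_generation) and when they caught an update in
 * progress (gen == 0).
 */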
void
fbclock_nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	fbclock_binuptime(&bt);
	bintime2timespec(&bt, tsp);
}

void
fbclock_microuptime(struct timeval *tvp)
{
	struct bintime bt;

	fbclock_binuptime(&bt);
	bintime2timeval(&bt, tvp);
}

void
fbclock_bintime(struct bintime *bt)
{

	fbclock_binuptime(bt);
	bintime_add(bt, &boottimebin);
}

void
fbclock_nanotime(struct timespec *tsp)
{
	struct bintime bt;

	fbclock_bintime(&bt);
	bintime2timespec(&bt, tsp);
}

void
fbclock_microtime(struct timeval *tvp)
{
	struct bintime bt;

	fbclock_bintime(&bt);
	bintime2timeval(&bt, tvp);
}

void
fbclock_getbinuptime(struct bintime *bt)
{
	struct timehands *th;
	unsigned int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	unsigned int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timespec(&th->th_offset, tsp);
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	unsigned int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timeval(&th->th_offset, tvp);
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_getbintime(struct bintime *bt)
{
	struct timehands *th;
	unsigned int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
	bintime_add(bt, &boottimebin);
}

void
fbclock_getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	unsigned int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*tsp = th->th_nanotime;
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	unsigned int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*tvp = th->th_microtime;
	} while (gen == 0 || gen != th->th_generation);
}
#else /* !FFCLOCK */

void
binuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
	} while (gen == 0 || gen != th->th_generation);
}

void
nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	binuptime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microuptime(struct timeval *tvp)
{
	struct bintime bt;

	binuptime(&bt);
	bintime2timeval(&bt, tvp);
}

void
bintime(struct bintime *bt)
{

	binuptime(bt);
	bintime_add(bt, &boottimebin);
}

void
nanotime(struct timespec *tsp)
{
	struct bintime bt;

	bintime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microtime(struct timeval *tvp)
{
	struct bintime bt;

	bintime(&bt);
	bintime2timeval(&bt, tvp);
}

void
getbinuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
}

void
getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timespec(&th->th_offset, tsp);
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timeval(&th->th_offset, tvp);
	} while (gen == 0 || gen != th->th_generation);
}

void
getbintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
	bintime_add(bt, &boottimebin);
}

void
getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*tsp = th->th_nanotime;
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*tvp = th->th_microtime;
	} while (gen == 0 || gen != th->th_generation);
}
#endif /* FFCLOCK */
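/*
 * Editorial usage sketch (hypothetical caller): code that is happy with
 * tick-resolution timestamps can use the cheap cached get*() variants, while
 * code that needs full counter resolution pays for a hardware read. The
 * names example_measure() and do_work() are made up for illustration.
 *
 *	static void
 *	example_measure(void)
 *	{
 *		struct bintime t0, t1;
 *
 *		binuptime(&t0);		// precise: reads the hardware counter
 *		do_work();
 *		binuptime(&t1);
 *		bintime_sub(&t1, &t0);	// t1 now holds the elapsed interval
 *
 *		getbinuptime(&t0);	// cheap: value cached at last tc_windup()
 *	}
 */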
#ifdef FFCLOCK
/*
 * Support for feed-forward synchronization algorithms. This is heavily
 * inspired by the timehands mechanism but kept independent from it.
 * *_windup() functions have some connection to avoid accessing the
 * timecounter hardware more than necessary.
 */

int sysclock_active = SYSCLOCK_FBCK;

/* Feed-forward clock estimates kept updated by the synchronization daemon. */
struct ffclock_estimate ffclock_estimate;
struct bintime ffclock_boottime;	/* Feed-forward boot time estimate. */
uint32_t ffclock_status;		/* Feed-forward clock status. */
int8_t ffclock_updated;			/* New estimates are available. */
struct mtx ffclock_mtx;			/* Mutex on ffclock_estimate. */
struct sysclock_ops {
	int	active;
	void	(*binuptime) (struct bintime *bt);
	void	(*nanouptime) (struct timespec *tsp);
	void	(*microuptime) (struct timeval *tvp);
	void	(*bintime) (struct bintime *bt);
	void	(*nanotime) (struct timespec *tsp);
	void	(*microtime) (struct timeval *tvp);
	void	(*getbinuptime) (struct bintime *bt);
	void	(*getnanouptime) (struct timespec *tsp);
	void	(*getmicrouptime) (struct timeval *tvp);
	void	(*getbintime) (struct bintime *bt);
	void	(*getnanotime) (struct timespec *tsp);
	void	(*getmicrotime) (struct timeval *tvp);
};

static struct sysclock_ops sysclock = {
	.active = SYSCLOCK_FBCK,
	.binuptime = fbclock_binuptime,
	.nanouptime = fbclock_nanouptime,
	.microuptime = fbclock_microuptime,
	.bintime = fbclock_bintime,
	.nanotime = fbclock_nanotime,
	.microtime = fbclock_microtime,
	.getbinuptime = fbclock_getbinuptime,
	.getnanouptime = fbclock_getnanouptime,
	.getmicrouptime = fbclock_getmicrouptime,
	.getbintime = fbclock_getbintime,
	.getnanotime = fbclock_getnanotime,
	.getmicrotime = fbclock_getmicrotime
};
struct fftimehands {
	struct ffclock_estimate	cest;
	struct bintime		tick_time;
	struct bintime		tick_time_lerp;
	ffcounter		tick_ffcount;
	uint64_t		period_lerp;
	volatile uint8_t	gen;
	struct fftimehands	*next;
};

#define	NUM_ELEMENTS(x) (sizeof(x) / sizeof(*x))

static struct fftimehands ffth[10];
static struct fftimehands *volatile fftimehands = ffth;
static void
ffclock_init(void)
{
	struct fftimehands *cur;
	struct fftimehands *last;

	memset(ffth, 0, sizeof(ffth));

	last = ffth + NUM_ELEMENTS(ffth) - 1;
	for (cur = ffth; cur < last; cur++)
		cur->next = cur + 1;
	last->next = ffth;

	ffclock_updated = 0;
	ffclock_status = FFCLOCK_STA_UNSYNC;
	mtx_init(&ffclock_mtx, "ffclock lock", NULL, MTX_DEF);
}
/*
 * Reset the feed-forward clock estimates. Called from inittodr() to get things
 * kick-started and uses the timecounter nominal frequency as a first period
 * estimate. Note: this function may be called several times just after boot.
 * Note: this is the only function that sets the value of boot time for the
 * monotonic (i.e. uptime) version of the feed-forward clock.
 */
void
ffclock_reset_clock(struct timespec *ts)
{
	struct timecounter *tc;
	struct ffclock_estimate cest;

	tc = timehands->th_counter;
	memset(&cest, 0, sizeof(struct ffclock_estimate));

	timespec2bintime(ts, &ffclock_boottime);
	timespec2bintime(ts, &(cest.update_time));
	ffclock_read_counter(&cest.update_ffcount);
	cest.leapsec_next = 0;
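	/*
	 * Editorial note: the ideal counter period in 2^-64 second units is
	 * 2^64 / tc_frequency; computing (2^63 / tc_frequency) << 1 reaches
	 * the same value without overflowing a 64-bit intermediate.
	 */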
	cest.period = ((1ULL << 63) / tc->tc_frequency) << 1;
	cest.status = FFCLOCK_STA_UNSYNC;
	cest.leapsec_total = 0;

	mtx_lock(&ffclock_mtx);
	bcopy(&cest, &ffclock_estimate, sizeof(struct ffclock_estimate));
	ffclock_updated = INT8_MAX;
	mtx_unlock(&ffclock_mtx);

	printf("ffclock reset: %s (%llu Hz), time = %ld.%09lu\n", tc->tc_name,
	    (unsigned long long)tc->tc_frequency, (long)ts->tv_sec,
	    (unsigned long)ts->tv_nsec);
}
/*
 * Sub-routine to convert a time interval measured in RAW counter units to time
 * in seconds stored in bintime format.
 * NOTE: bintime_mul requires u_int, but the value of the ffcounter may be
 * larger than the max value of u_int (on 32 bit architecture). Loop to consume
 * the ffcounter by chunks.
 */
static void
ffclock_convert_delta(ffcounter ffdelta, uint64_t period, struct bintime *bt)
{
	struct bintime bt2;
	ffcounter delta, delta_max;

	delta_max = (1ULL << (8 * sizeof(unsigned int))) - 1;
	bintime_clear(bt);
	do {
		if (ffdelta > delta_max)
			delta = delta_max;
		else
			delta = ffdelta;
		bt2.sec = 0;
		bt2.frac = period;
		bintime_mul(&bt2, (unsigned int)delta);
		bintime_add(bt, &bt2);
		ffdelta -= delta;
	} while (ffdelta > 0);
}
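/*
 * Editorial example: on a machine with 32-bit u_int, converting
 * ffdelta = 10^10 counter units proceeds in chunks of at most 2^32 - 1,
 * i.e. two full chunks plus a remainder, each scaled by 'period' and
 * accumulated into *bt.
 */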
/*
 * Update the fftimehands.
 * Push the tick ffcount and time(s) forward based on the current clock
 * estimate. The conversion from ffcounter to bintime relies on the difference
 * clock principle, whose accuracy relies on computing small time intervals.
 * If a new clock estimate has been passed by the synchronisation daemon, make
 * it current, and compute the linear interpolation for monotonic time if
 * needed.
 */
static void
ffclock_windup(unsigned int delta)
{
	struct ffclock_estimate *cest;
	struct fftimehands *ffth;
	struct bintime bt, gap_lerp;
	ffcounter ffdelta;
	uint64_t frac;
	unsigned int polling;
	uint8_t forward_jump, ogen;

	/*
	 * Pick the next timehand, copy current ffclock estimates and move tick
	 * times and counter forward.
	 */
	ffth = fftimehands->next;
	ogen = ffth->gen;
	ffth->gen = 0;
	cest = &ffth->cest;
	bcopy(&fftimehands->cest, cest, sizeof(struct ffclock_estimate));
	ffdelta = (ffcounter)delta;
	ffth->period_lerp = fftimehands->period_lerp;

	ffth->tick_time = fftimehands->tick_time;
	ffclock_convert_delta(ffdelta, cest->period, &bt);
	bintime_add(&ffth->tick_time, &bt);

	ffth->tick_time_lerp = fftimehands->tick_time_lerp;
	ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt);
	bintime_add(&ffth->tick_time_lerp, &bt);

	ffth->tick_ffcount = fftimehands->tick_ffcount + ffdelta;

	/*
	 * Assess the status of the clock: if the last update is too old, it is
	 * likely the synchronisation daemon is dead and the clock is free
	 * running.
	 */
	if (ffclock_updated == 0) {
		ffdelta = ffth->tick_ffcount - cest->update_ffcount;
		ffclock_convert_delta(ffdelta, cest->period, &bt);
		if (bt.sec > 2 * FFCLOCK_SKM_SCALE)
			ffclock_status |= FFCLOCK_STA_UNSYNC;
	}

	/*
	 * If available, grab updated clock estimates and make them current.
	 * Recompute time at this tick using the updated estimates. The clock
	 * estimates passed in by the feed-forward synchronisation daemon may
	 * result in time conversion that is not monotonically increasing (just
	 * after the update). time_lerp is a particular linear interpolation
	 * over the synchronisation algo polling period that ensures
	 * monotonicity for the clock ids requesting it.
	 */
	if (ffclock_updated > 0) {
		bcopy(&ffclock_estimate, cest, sizeof(struct ffclock_estimate));
		ffdelta = ffth->tick_ffcount - cest->update_ffcount;
		ffth->tick_time = cest->update_time;
		ffclock_convert_delta(ffdelta, cest->period, &bt);
		bintime_add(&ffth->tick_time, &bt);

		/* ffclock_reset sets ffclock_updated to INT8_MAX */
		if (ffclock_updated == INT8_MAX)
			ffth->tick_time_lerp = ffth->tick_time;

		if (bintime_cmp(&ffth->tick_time, &ffth->tick_time_lerp, >))
			forward_jump = 1;
		else
			forward_jump = 0;

		bintime_clear(&gap_lerp);
		if (forward_jump) {
			gap_lerp = ffth->tick_time;
			bintime_sub(&gap_lerp, &ffth->tick_time_lerp);
		} else {
			gap_lerp = ffth->tick_time_lerp;
			bintime_sub(&gap_lerp, &ffth->tick_time);
		}

		/*
		 * The reset from the RTC clock may be far from accurate, and
		 * reducing the gap between real time and interpolated time
		 * could take a very long time if the interpolated clock insists
		 * on strict monotonicity. The clock is reset under very strict
		 * conditions (kernel time is known to be wrong and the
		 * synchronization daemon has been restarted recently).
		 * ffclock_boottime absorbs the jump to ensure boot time is
		 * correct and uptime functions stay consistent.
		 */
		if (((ffclock_status & FFCLOCK_STA_UNSYNC) == FFCLOCK_STA_UNSYNC) &&
		    ((cest->status & FFCLOCK_STA_UNSYNC) == 0) &&
		    ((cest->status & FFCLOCK_STA_WARMUP) == FFCLOCK_STA_WARMUP)) {
			if (forward_jump)
				bintime_add(&ffclock_boottime, &gap_lerp);
			else
				bintime_sub(&ffclock_boottime, &gap_lerp);
			ffth->tick_time_lerp = ffth->tick_time;
			bintime_clear(&gap_lerp);
		}

		ffclock_status = cest->status;
		ffth->period_lerp = cest->period;

		/*
		 * Compute corrected period used for the linear interpolation
		 * of time. The rate of linear interpolation is capped to
		 * 5000 PPM (5 ms/s).
		 */
		if (bintime_isset(&gap_lerp)) {
			ffdelta = cest->update_ffcount;
			ffdelta -= fftimehands->cest.update_ffcount;
			ffclock_convert_delta(ffdelta, cest->period, &bt);
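			/*
			 * Editorial note: 18446744073 is floor(2^64 / 10^9),
			 * i.e. one nanosecond in 2^-64 second units, so
			 * bt.frac below represents 5,000,000 ns = 5 ms per
			 * second of polling interval: the 5000 PPM cap.
			 */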
			polling = bt.sec;
			bt.sec = 0;
			bt.frac = 5000000 * (uint64_t)18446744073LL;
			bintime_mul(&bt, polling);
			if (bintime_cmp(&gap_lerp, &bt, >))
				gap_lerp = bt;

			/* Approximate 1 sec by 1-(1/2^64) to ease arithmetic */
			frac = 0;
			if (gap_lerp.sec > 0) {
				frac -= 1;
				frac /= ffdelta / gap_lerp.sec;
			}
			frac += gap_lerp.frac / ffdelta;

			if (forward_jump)
				ffth->period_lerp += frac;
			else
				ffth->period_lerp -= frac;
		}

		ffclock_updated = 0;
	}
	if (++ogen == 0)
		ogen = 1;
	ffth->gen = ogen;
	fftimehands = ffth;
}
/*
 * Adjust the fftimehands when the timecounter is changed. Stating the obvious,
 * the old and new hardware counter cannot be read simultaneously. tc_windup()
 * does read the two counters 'back to back', but a few cycles are effectively
 * lost, and not accumulated in tick_ffcount. This is a fairly radical
 * operation for a feed-forward synchronization daemon, and it is the daemon's
 * job not to push irrelevant data to the kernel. Because there is no locking
 * here, we simply force the pending or next update to be ignored, giving the
 * daemon a chance to realize the counter has changed.
 */
static void
ffclock_change_tc(struct timehands *th)
{
	struct fftimehands *ffth;
	struct ffclock_estimate *cest;
	struct timecounter *tc;
	uint8_t ogen;

	tc = th->th_counter;
	ffth = fftimehands->next;
	ogen = ffth->gen;
	ffth->gen = 0;

	cest = &ffth->cest;
	bcopy(&(fftimehands->cest), cest, sizeof(struct ffclock_estimate));
	cest->period = ((1ULL << 63) / tc->tc_frequency) << 1;
	cest->errb_abs = 0;
	cest->errb_rate = 0;
	cest->status |= FFCLOCK_STA_UNSYNC;

	ffth->tick_ffcount = fftimehands->tick_ffcount;
	ffth->tick_time_lerp = fftimehands->tick_time_lerp;
	ffth->tick_time = fftimehands->tick_time;
	ffth->period_lerp = cest->period;

	/* Do not lock but ignore next update from synchronization daemon. */
	ffclock_updated--;

	if (++ogen == 0)
		ogen = 1;
	ffth->gen = ogen;
	fftimehands = ffth;
}
static void
change_sysclock(int new_sysclock)
{

	sysclock.active = new_sysclock;

	switch (sysclock.active) {
	case SYSCLOCK_FBCK:
		sysclock.binuptime = fbclock_binuptime;
		sysclock.nanouptime = fbclock_nanouptime;
		sysclock.microuptime = fbclock_microuptime;
		sysclock.bintime = fbclock_bintime;
		sysclock.nanotime = fbclock_nanotime;
		sysclock.microtime = fbclock_microtime;
		sysclock.getbinuptime = fbclock_getbinuptime;
		sysclock.getnanouptime = fbclock_getnanouptime;
		sysclock.getmicrouptime = fbclock_getmicrouptime;
		sysclock.getbintime = fbclock_getbintime;
		sysclock.getnanotime = fbclock_getnanotime;
		sysclock.getmicrotime = fbclock_getmicrotime;
		break;
	case SYSCLOCK_FFWD:
		sysclock.binuptime = ffclock_binuptime;
		sysclock.nanouptime = ffclock_nanouptime;
		sysclock.microuptime = ffclock_microuptime;
		sysclock.bintime = ffclock_bintime;
		sysclock.nanotime = ffclock_nanotime;
		sysclock.microtime = ffclock_microtime;
		sysclock.getbinuptime = ffclock_getbinuptime;
		sysclock.getnanouptime = ffclock_getnanouptime;
		sysclock.getmicrouptime = ffclock_getmicrouptime;
		sysclock.getbintime = ffclock_getbintime;
		sysclock.getnanotime = ffclock_getnanotime;
		sysclock.getmicrotime = ffclock_getmicrotime;
		break;
	}
}
/*
 * Retrieve feed-forward counter and time of last kernel tick.
 */
void
ffclock_last_tick(ffcounter *ffcount, struct bintime *bt, uint32_t flags)
{
	struct fftimehands *ffth;
	uint8_t gen;

	/*
	 * No locking but check generation has not changed. Also need to make
	 * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
	 */
	do {
		ffth = fftimehands;
		gen = ffth->gen;
		if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP)
			*bt = ffth->tick_time_lerp;
		else
			*bt = ffth->tick_time;
		*ffcount = ffth->tick_ffcount;
	} while (gen == 0 || gen != ffth->gen);
}
/*
 * Absolute clock conversion. Low level function to convert ffcounter to
 * bintime. The ffcounter is converted using the current ffclock period
 * estimate or the "interpolated period" to ensure monotonicity.
 * NOTE: this conversion may have been deferred, and the clock updated since
 * the hardware counter has been read.
 */
void
ffclock_convert_abs(ffcounter ffcount, struct bintime *bt, uint32_t flags)
{
	struct fftimehands *ffth;
	struct bintime bt2;
	ffcounter ffdelta;
	uint8_t gen;

	/*
	 * No locking but check generation has not changed. Also need to make
	 * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
	 */
	do {
		ffth = fftimehands;
		gen = ffth->gen;
		if (ffcount > ffth->tick_ffcount)
			ffdelta = ffcount - ffth->tick_ffcount;
		else
			ffdelta = ffth->tick_ffcount - ffcount;

		if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP) {
			*bt = ffth->tick_time_lerp;
			ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt2);
		} else {
			*bt = ffth->tick_time;
			ffclock_convert_delta(ffdelta, ffth->cest.period, &bt2);
		}

		if (ffcount > ffth->tick_ffcount)
			bintime_add(bt, &bt2);
		else
			bintime_sub(bt, &bt2);
	} while (gen == 0 || gen != ffth->gen);
}
/*
 * Difference clock conversion.
 * Low level function to convert a time interval measured in RAW counter units
 * into bintime. The difference clock allows measuring small intervals much
 * more reliably than the absolute clock.
 */
void
ffclock_convert_diff(ffcounter ffdelta, struct bintime *bt)
{
	struct fftimehands *ffth;
	uint8_t gen;

	/* No locking but check generation has not changed. */
	do {
		ffth = fftimehands;
		gen = ffth->gen;
		ffclock_convert_delta(ffdelta, ffth->cest.period, bt);
	} while (gen == 0 || gen != ffth->gen);
}
/*
 * Access to current ffcounter value.
 */
void
ffclock_read_counter(ffcounter *ffcount)
{
	struct timehands *th;
	struct fftimehands *ffth;
	unsigned int gen, delta;

	/*
	 * ffclock_windup() is called from tc_windup(), so it is safe to rely
	 * on th->th_generation only, for correct delta and ffcounter.
	 */
	do {
		th = timehands;
		gen = th->th_generation;
		ffth = fftimehands;
		delta = tc_delta(th);
		*ffcount = ffth->tick_ffcount;
	} while (gen == 0 || gen != th->th_generation);

	*ffcount += delta;
}
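/*
 * Editorial usage sketch (hypothetical caller): the feed-forward API
 * encourages reading the raw counter once and converting afterwards; small
 * intervals are best converted with the difference clock. The names
 * example_ff_interval() and do_work() are made up for illustration.
 *
 *	static void
 *	example_ff_interval(void)
 *	{
 *		ffcounter c0, c1;
 *		struct bintime bt;
 *
 *		ffclock_read_counter(&c0);
 *		do_work();
 *		ffclock_read_counter(&c1);
 *		ffclock_convert_diff(c1 - c0, &bt);	// elapsed interval
 *
 *		ffclock_convert_abs(c1, &bt, FFCLOCK_LERP); // absolute, monotonic
 *	}
 */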
void
binuptime(struct bintime *bt)
{

	sysclock.binuptime(bt);
}

void
nanouptime(struct timespec *tsp)
{

	sysclock.nanouptime(tsp);
}

void
microuptime(struct timeval *tvp)
{

	sysclock.microuptime(tvp);
}

void
bintime(struct bintime *bt)
{

	sysclock.bintime(bt);
}

void
nanotime(struct timespec *tsp)
{

	sysclock.nanotime(tsp);
}

void
microtime(struct timeval *tvp)
{

	sysclock.microtime(tvp);
}

void
getbinuptime(struct bintime *bt)
{

	sysclock.getbinuptime(bt);
}

void
getnanouptime(struct timespec *tsp)
{

	sysclock.getnanouptime(tsp);
}

void
getmicrouptime(struct timeval *tvp)
{

	sysclock.getmicrouptime(tvp);
}

void
getbintime(struct bintime *bt)
{

	sysclock.getbintime(bt);
}

void
getnanotime(struct timespec *tsp)
{

	sysclock.getnanotime(tsp);
}

void
getmicrotime(struct timeval *tvp)
{

	sysclock.getmicrotime(tvp);
}
#endif /* FFCLOCK */
/*
 * Initialize a new timecounter and possibly use it.
 */
void
tc_init(struct timecounter *tc)
{
	u_int u;
	struct sysctl_oid *tc_root;

	u = tc->tc_frequency / tc->tc_counter_mask;
	/* XXX: We need some margin here, 10% is a guess */
	u *= 11;
	u /= 10;
	if (u > hz && tc->tc_quality >= 0) {
		tc->tc_quality = -2000;
		if (bootverbose) {
			printf("Timecounter \"%s\" frequency %ju Hz",
			    tc->tc_name, (uintmax_t)tc->tc_frequency);
			printf(" -- Insufficient hz, needs at least %u\n", u);
		}
	} else if (tc->tc_quality >= 0 || bootverbose) {
		printf("Timecounter \"%s\" frequency %ju Hz quality %d\n",
		    tc->tc_name, (uintmax_t)tc->tc_frequency,
		    tc->tc_quality);
	}

	tc->tc_next = timecounters;
	timecounters = tc;
	/*
	 * Set up sysctl tree for this counter.
	 */
	tc_root = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name,
	    CTLFLAG_RW, 0, "timecounter description");
	SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
	    "mask for implemented bits");
	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc),
	    sysctl_kern_timecounter_get, "IU", "current timecounter value");
	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "frequency", CTLTYPE_U64 | CTLFLAG_RD, tc, sizeof(*tc),
	    sysctl_kern_timecounter_freq, "QU", "timecounter frequency");
	SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "quality", CTLFLAG_RD, &(tc->tc_quality), 0,
	    "goodness of time counter");
	/*
	 * Never automatically use a timecounter with negative quality.
	 * Even though we run on the dummy counter, switching here may be
	 * worse since this timecounter may not be monotonic.
	 */
	if (tc->tc_quality < 0)
		return;
	if (tc->tc_quality < timecounter->tc_quality)
		return;
	if (tc->tc_quality == timecounter->tc_quality &&
	    tc->tc_frequency < timecounter->tc_frequency)
		return;
	(void)tc->tc_get_timecount(tc);
	(void)tc->tc_get_timecount(tc);
	timecounter = tc;
}
/* Report the frequency of the current timecounter. */
uint64_t
tc_getfrequency(void)
{

	return (timehands->th_counter->tc_frequency);
}
/*
 * Step our concept of UTC. This is done by modifying our estimate of
 * when we booted.
 * XXX: not locked.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec tbef, taft;
	struct bintime bt, bt2;

	cpu_tick_calibrate(1);
	nanotime(&tbef);
	timespec2bintime(ts, &bt);
	binuptime(&bt2);
	bintime_sub(&bt, &bt2);
	bintime_add(&bt2, &boottimebin);
	boottimebin = bt;
	bintime2timeval(&bt, &boottime);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();
	nanotime(&taft);
	if (timestepwarnings) {
		log(LOG_INFO,
		    "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
		    (intmax_t)tbef.tv_sec, tbef.tv_nsec,
		    (intmax_t)taft.tv_sec, taft.tv_nsec,
		    (intmax_t)ts->tv_sec, ts->tv_nsec);
	}
	cpu_tick_calibrate(1);
}
/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands. Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP. Slightly magic.
 */
static void
tc_windup(void)
{
	struct bintime bt;
	struct timehands *th, *tho;
	uint64_t scale;
	u_int delta, ncount, ogen;
	int i;
	time_t t;

	/*
	 * Make the next timehands a copy of the current one, but do not
	 * overwrite the generation or next pointer. While we update
	 * the contents, the generation must be zero.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	bcopy(tho, th, offsetof(struct timehands, th_generation));

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != timecounter)
		ncount = timecounter->tc_get_timecount(timecounter);
	else
		ncount = 0;
#ifdef FFCLOCK
	ffclock_windup(delta);
#endif
	th->th_offset_count += delta;
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	while (delta > th->th_counter->tc_frequency) {
		/* Eat complete unadjusted seconds. */
		delta -= th->th_counter->tc_frequency;
		th->th_offset.sec++;
	}
	if ((delta > th->th_counter->tc_frequency / 2) &&
	    (th->th_scale * delta < ((uint64_t)1 << 63))) {
		/* The product th_scale * delta just barely overflows. */
		th->th_offset.sec++;
	}
	bintime_addx(&th->th_offset, th->th_scale * delta);

	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them. There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which might
	 * have a different rate than the current NTP second. It doesn't
	 * matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);

	/*
	 * Deal with NTP second processing. The for loop normally
	 * iterates at most once, but in extreme situations it might
	 * keep NTP sane if timeouts are not run for several seconds.
	 * At boot, the time step can be large when the TOD hardware
	 * has been read, so on really large steps, we call
	 * ntp_update_second only twice. We need to call it twice in
	 * case we missed a leap second.
	 */
	bt = th->th_offset;
	bintime_add(&bt, &boottimebin);
	i = bt.sec - tho->th_microtime.tv_sec;
	if (i > LARGE_STEP)
		i = 2;
	for (; i > 0; i--) {
		t = bt.sec;
		ntp_update_second(&th->th_adjustment, &bt.sec);
		if (bt.sec != t)
			boottimebin.sec += bt.sec - t;
	}
	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here. Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != timecounter) {
		if ((timecounter->tc_flags & TC_FLAGS_C3STOP) != 0)
			cpu_disable_deep_sleep++;
		if ((th->th_counter->tc_flags & TC_FLAGS_C3STOP) != 0)
			cpu_disable_deep_sleep--;
		th->th_counter = timecounter;
		th->th_offset_count = ncount;
		tc_min_ticktock_freq = max(1, timecounter->tc_frequency /
		    (((uint64_t)timecounter->tc_counter_mask + 1) / 3));
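		/*
		 * Editorial note: this floor guarantees tc_windup() runs at
		 * least three times per full wrap of the new counter, so
		 * tc_delta() can never silently lose a wrap. E.g. a 32-bit
		 * counter at 3 GHz wraps every ~1.43 s, giving a minimum
		 * ticktock frequency of about 2 Hz.
		 */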
#ifdef FFCLOCK
		ffclock_change_tc(th);
#endif
	}

	/*
	 * Recalculate the scaling factor. We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, that
	 * leaves no suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment. On a
	 * 5000PPM adjustment this is a 0.05PPM error. This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 */
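	/*
	 * Editorial check of the 2199/512 approximation: the exact factor is
	 * 2^32 / 10^9 = 4.294967296, while 2199/512 = 4.294921875. The ratio
	 * 4.294921875 / 4.294967296 is about 1 - 1.06e-5, i.e. roughly 10 PPM
	 * low, matching the undercompensation quoted above.
	 */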
	scale = (uint64_t)1 << 63;
	scale += (th->th_adjustment / 1024) * 2199;
	scale /= th->th_counter->tc_frequency;
	th->th_scale = scale * 2;

#ifdef FFCLOCK
	if (sysclock_active != sysclock.active)
		change_sysclock(sysclock_active);
#endif

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
#ifdef FFCLOCK
	switch (sysclock_active) {
	case SYSCLOCK_FBCK:
#endif
		time_second = th->th_microtime.tv_sec;
		time_uptime = th->th_offset.sec;
#ifdef FFCLOCK
		break;
	case SYSCLOCK_FFWD:
		time_second = fftimehands->tick_time_lerp.sec;
		time_uptime = fftimehands->tick_time_lerp.sec -
		    ffclock_boottime.sec;
		break;
	}
#endif

	timehands = th;
}
/* Report or change the active timecounter hardware. */
static int
sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
{
	char newname[32];
	struct timecounter *newtc, *tc;
	int error;

	tc = timecounter;
	strlcpy(newname, tc->tc_name, sizeof(newname));

	error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
	if (error != 0 || req->newptr == NULL ||
	    strcmp(newname, tc->tc_name) == 0)
		return (error);
	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;

		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);

		timecounter = newtc;
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_kern_timecounter_hardware, "A",
    "Timecounter hardware selected");
/* Report the available timecounter hardware. */
static int
sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
{
	char buf[32], *spc;
	struct timecounter *tc;
	int error;

	spc = "";
	error = 0;
	for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) {
		sprintf(buf, "%s%s(%d)",
		    spc, tc->tc_name, tc->tc_quality);
		error = SYSCTL_OUT(req, buf, strlen(buf));
		spc = " ";
	}
	return (error);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, sysctl_kern_timecounter_choice, "A", "Timecounter hardware detected");
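/*
 * Editorial usage note: from userland these counters can be inspected and
 * switched with sysctl(8), e.g.:
 *
 *	$ sysctl kern.timecounter.choice
 *	$ sysctl kern.timecounter.hardware="HPET"
 *
 * The counter name ("HPET" here) must match one reported by the choice list
 * on the machine at hand.
 */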
/*
 * RFC 2783 PPS-API implementation.
 */

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef FFCLOCK
	struct pps_fetch_ffc_args *fapi_ffc;
#endif
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	KASSERT(pps != NULL, ("NULL pps pointer in pps_ioctl"));
	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
#ifdef FFCLOCK
		/* Ensure only a single clock is selected for ffc timestamp. */
		if ((app->mode & PPS_TSCLK_MASK) == PPS_TSCLK_MASK)
			return (EINVAL);
#endif
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
#ifdef FFCLOCK
	case PPS_IOC_FETCH_FFCOUNTER:
		fapi_ffc = (struct pps_fetch_ffc_args *)data;
		if (fapi_ffc->tsformat && fapi_ffc->tsformat !=
		    PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi_ffc->timeout.tv_sec || fapi_ffc->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo_ffc.current_mode = pps->ppsparam.mode;
		fapi_ffc->pps_info_buf_ffc = pps->ppsinfo_ffc;
		/* Overwrite timestamps if feedback clock selected. */
		switch (pps->ppsparam.mode & PPS_TSCLK_MASK) {
		case PPS_TSCLK_FBCK:
			fapi_ffc->pps_info_buf_ffc.assert_timestamp =
			    pps->ppsinfo.assert_timestamp;
			fapi_ffc->pps_info_buf_ffc.clear_timestamp =
			    pps->ppsinfo.clear_timestamp;
			break;
		case PPS_TSCLK_FFWD:
			break;
		default:
			break;
		}
		return (0);
#endif /* FFCLOCK */
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOIOCTL);
	}
}
void
pps_init(struct pps_state *pps)
{

	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
#ifdef FFCLOCK
	pps->ppscap |= PPS_TSCLK_MASK;
#endif
}
void
pps_capture(struct pps_state *pps)
{
	struct timehands *th;

	KASSERT(pps != NULL, ("NULL pps pointer in pps_capture"));
	th = timehands;
	pps->capgen = th->th_generation;
	pps->capth = th;
#ifdef FFCLOCK
	pps->capffth = fftimehands;
#endif
	pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
	if (pps->capgen != th->th_generation)
		pps->capgen = 0;
}
void
pps_event(struct pps_state *pps, int event)
{
	struct bintime bt;
	struct timespec ts, *tsp, *osp;
	u_int tcount, *pcount;
	int foff, fhard;
	pps_seq_t *pseq;
#ifdef FFCLOCK
	struct timespec *tsp_ffc;
	pps_seq_t *pseq_ffc;
	ffcounter *ffcount;
#endif

	KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation)
		return;

	/* Things would be easier with arrays. */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
#ifdef FFCLOCK
		ffcount = &pps->ppsinfo_ffc.assert_ffcount;
		tsp_ffc = &pps->ppsinfo_ffc.assert_timestamp;
		pseq_ffc = &pps->ppsinfo_ffc.assert_sequence;
#endif
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
#ifdef FFCLOCK
		ffcount = &pps->ppsinfo_ffc.clear_ffcount;
		tsp_ffc = &pps->ppsinfo_ffc.clear_timestamp;
		pseq_ffc = &pps->ppsinfo_ffc.clear_sequence;
#endif
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS-stuff until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		*pcount = pps->capcount;
		pps->ppscount[2] = pps->capcount;
		return;
	}

	/* Convert the count to a timespec. */
	tcount = pps->capcount - pps->capth->th_offset_count;
	tcount &= pps->capth->th_counter->tc_counter_mask;
	bt = pps->capth->th_offset;
	bintime_addx(&bt, pps->capth->th_scale * tcount);
	bintime_add(&bt, &boottimebin);
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen != pps->capth->th_generation)
		return;

	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef FFCLOCK
	*ffcount = pps->capffth->tick_ffcount + tcount;
	bt = pps->capffth->tick_time;
	ffclock_convert_delta(tcount, pps->capffth->cest.period, &bt);
	bintime_add(&bt, &pps->capffth->tick_time);
	bintime2timespec(&bt, &ts);
	(*pseq_ffc)++;
	*tsp_ffc = ts;
#endif

#ifdef PPS_SYNC
	if (fhard) {
		uint64_t scale;

		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many (hardware) nanoseconds
		 * elapsed since the previous event.
		 */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;
		scale = (uint64_t)1 << 63;
		scale /= pps->capth->th_counter->tc_frequency;
		scale *= 2;
		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, scale * tcount);
		bintime2timespec(&bt, &ts);
		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
	}
#endif
}
/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing. Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */

static int tc_tick;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0,
    "Approximate number of hardclock ticks in a millisecond");
void
tc_ticktock(int cnt)
{
	static int count;

	count += cnt;
	if (count < tc_tick)
		return;
	count = 0;
	tc_windup();
}
static void
inittimecounter(void *dummy)
{
	u_int p;

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its initial value, since that value is the
	 * smallest reasonable one. If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
	p = (tc_tick * 1000000) / hz;
	printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);

#ifdef FFCLOCK
	ffclock_init();
	change_sysclock(sysclock_active);
#endif

	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
	tc_windup();
}
SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
/* Cpu tick handling -------------------------------------------------*/

static int cpu_tick_variable;
static uint64_t cpu_tick_frequency;

static uint64_t
tc_cpu_ticks(void)
{
	static uint64_t base;
	static unsigned last;
	unsigned u;
	struct timecounter *tc;

	tc = timehands->th_counter;
	u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
	if (u < last)
		base += (uint64_t)tc->tc_counter_mask + 1;
	last = u;
	return (u + base);
}
void
cpu_tick_calibration(void)
{
	static time_t last_calib;

	if (time_uptime != last_calib && !(time_uptime & 0xf)) {
		cpu_tick_calibrate(0);
		last_calib = time_uptime;
	}
}
/*
 * This function gets called every 16 seconds on only one designated
 * CPU in the system from hardclock() via cpu_tick_calibration().
 *
 * Whenever the real time clock is stepped we get called with reset=1
 * to make sure we handle suspend/resume and similar events correctly.
 */
static void
cpu_tick_calibrate(int reset)
{
	static uint64_t c_last;
	uint64_t c_this, c_delta;
	static struct bintime t_last;
	struct bintime t_this, t_delta;
	uint32_t divi;

	if (reset) {
		/* The clock was stepped, abort & reset. */
		t_last.sec = 0;
		return;
	}

	/* We don't calibrate fixed rate cputicks. */
	if (!cpu_tick_variable)
		return;

	getbinuptime(&t_this);
	c_this = cpu_ticks();
	if (t_last.sec != 0) {
		c_delta = c_this - c_last;
		t_delta = t_this;
		bintime_sub(&t_delta, &t_last);
		/*
		 * Headroom:
		 *	2^(64-20) / 16[s] =
		 *	17.592.186.044.416 / 16 =
		 *	1.099.511.627.776 [Hz]
		 */
		divi = t_delta.sec << 20;
		divi |= t_delta.frac >> (64 - 20);
		c_delta <<= 20;
		c_delta /= divi;
		if (c_delta > cpu_tick_frequency) {
			if (0 && bootverbose)
				printf("cpu_tick increased to %ju Hz\n",
				    c_delta);
			cpu_tick_frequency = c_delta;
		}
	}
	c_last = c_this;
	t_last = t_this;
}
void
set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var)
{

	if (func == NULL) {
		cpu_ticks = tc_cpu_ticks;
	} else {
		cpu_tick_frequency = freq;
		cpu_tick_variable = var;
		cpu_ticks = func;
	}
}

uint64_t
cpu_tickrate(void)
{

	if (cpu_ticks == tc_cpu_ticks)
		return (tc_getfrequency());
	return (cpu_tick_frequency);
}
/*
 * We need to be slightly careful converting cputicks to microseconds.
 * There is plenty of margin in 64 bits of microseconds (half a million
 * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
 * before divide conversion (to retain precision) we find that the
 * margin shrinks to 1.5 hours (one millionth of 146y).
 * With a three prong approach we never lose significant bits, no
 * matter what the cputick rate and length of timeinterval is.
 */
uint64_t
cputick2usec(uint64_t tick)
{

	if (tick > 18446744073709551LL)		/* floor(2^64 / 1000) */
		return (tick / (cpu_tickrate() / 1000000LL));
	else if (tick > 18446744073709LL)	/* floor(2^64 / 1000000) */
		return ((tick * 1000LL) / (cpu_tickrate() / 1000LL));
	else
		return ((tick * 1000000LL) / cpu_tickrate());
}
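/*
 * Editorial worked example for the three prongs above: at a 4 GHz tick rate,
 * tick * 1000000 overflows 64 bits once tick exceeds about 1.8e13, i.e. after
 * roughly 4600 seconds (~1.5 hours), hence the middle and first branches,
 * which trade divisor precision for headroom on long intervals.
 */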
cpu_tick_f *cpu_ticks = tc_cpu_ticks;