2 ***********************************************************************
4 * Copyright (c) David L. Mills 1993-2001 *
6 * Permission to use, copy, modify, and distribute this software and *
7 * its documentation for any purpose and without fee is hereby *
8 * granted, provided that the above copyright notice appears in all *
9 * copies and that both the copyright notice and this permission *
10 * notice appear in supporting documentation, and that the name *
11 * University of Delaware not be used in advertising or publicity *
12 * pertaining to distribution of the software without specific, *
13 * written prior permission. The University of Delaware makes no *
14 * representations about the suitability this software for any *
15 * purpose. It is provided "as is" without express or implied *
18 **********************************************************************/
21 * Adapted from the original sources for FreeBSD and timecounters by:
22 * Poul-Henning Kamp <phk@FreeBSD.org>.
24 * The 32bit version of the "LP" macros seems a bit past its "sell by"
25 * date so I have retained only the 64bit version and included it directly
28 * Only minor changes done to interface with the timecounters over in
29 * sys/kern/kern_clock.c. Some of the comments below may be (even more)
30 * confusing and/or plain wrong in that context.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/sysproto.h>
41 #include <sys/eventhandler.h>
42 #include <sys/kernel.h>
46 #include <sys/mutex.h>
48 #include <sys/timex.h>
49 #include <sys/timetc.h>
50 #include <sys/timepps.h>
51 #include <sys/syscallsubr.h>
52 #include <sys/sysctl.h>
55 FEATURE(pps_sync, "Support usage of external PPS signal by kernel PLL");
59 * Single-precision macros for 64-bit machines
/*
 * l_fp is a signed 64-bit fixed-point value with 32 integer bits and
 * 32 fraction bits (see the format diagrams below).  These macros do
 * the arithmetic with ordinary 64-bit integer operations.
 */
62 #define L_ADD(v, u)	((v) += (u))
63 #define L_SUB(v, u)	((v) -= (u))
/* Add 'a' to the integer (high 32-bit) half of 'v'. */
64 #define L_ADDHI(v, a)	((v) += (int64_t)(a) << 32)
65 #define L_NEG(v)	((v) = -(v))
/*
 * Arithmetic right shift; the visible arm negates before and after the
 * shift so that negative values are handled explicitly.  NOTE(review):
 * the positive-value arm of this macro is on lines elided from this
 * extraction.
 */
66 #define L_RSHIFT(v, n) \
69 		(v) = -(-(v) >> (n)); \
73 #define L_MPY(v, a)	((v) *= (a))
74 #define L_CLR(v)	((v) = 0)
75 #define L_ISNEG(v)	((v) < 0)
/* Load the integer value 'a' into the high half of 'v' (a << 32). */
76 #define L_LINT(v, a)	((v) = (int64_t)(a) << 32)
/* Extract the integer half of 'v', rounding toward zero for negatives. */
77 #define L_GINT(v)	((v) < 0 ? -(-(v) >> 32) : (v) >> 32)
80 * Generic NTP kernel interface
82 * These routines constitute the Network Time Protocol (NTP) interfaces
83 * for user and daemon application programs. The ntp_gettime() routine
84 * provides the time, maximum error (synch distance) and estimated error
85 * (dispersion) to client user application programs. The ntp_adjtime()
86 * routine is used by the NTP daemon to adjust the system clock to an
87 * externally derived time. The time offset and related variables set by
88 * this routine are used by other routines in this module to adjust the
89 * phase and frequency of the clock discipline loop which controls the
92 * When the kernel time is reckoned directly in nanoseconds (NTP_NANO
93 * defined), the time at each tick interrupt is derived directly from
94 * the kernel time variable. When the kernel time is reckoned in
95 * microseconds, (NTP_NANO undefined), the time is derived from the
96 * kernel time variable together with a variable representing the
97 * leftover nanoseconds at the last tick interrupt. In either case, the
98 * current nanosecond time is reckoned from these values plus an
99 * interpolated value derived by the clock routines in another
100 * architecture-specific module. The interpolation can use either a
101 * dedicated counter or a processor cycle counter (PCC) implemented in
102 * some architectures.
104 * Note that all routines must run at priority splclock or higher.
107 * Phase/frequency-lock loop (PLL/FLL) definitions
109 * The nanosecond clock discipline uses two variable types, time
110 * variables and frequency variables. Both types are represented as 64-
111 * bit fixed-point quantities with the decimal point between two 32-bit
112 * halves. On a 32-bit machine, each half is represented as a single
113 * word and mathematical operations are done using multiple-precision
114 * arithmetic. On a 64-bit machine, ordinary computer arithmetic is
117 * A time variable is a signed 64-bit fixed-point number in ns and
118 * fraction. It represents the remaining time offset to be amortized
119 * over succeeding tick interrupts. The maximum time offset is about
120 * 0.5 s and the resolution is about 2.3e-10 ns.
122 * 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
123 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
124 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
126 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
128 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
130 * A frequency variable is a signed 64-bit fixed-point number in ns/s
131 * and fraction. It represents the ns and fraction to be added to the
132 * kernel time variable at each second. The maximum frequency offset is
133 * about +-500000 ns/s and the resolution is about 2.3e-10 ns/s.
135 * 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
136 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
137 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
138 * |s s s s s s s s s s s s s| ns/s |
139 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
141 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
144 * The following variables establish the state of the PLL/FLL and the
145 * residual time and frequency offset of the local clock.
/*
 * PLL/FLL loop gains: larger shift means smaller gain and a slower,
 * more stable loop.
 */
147 #define SHIFT_PLL	4		/* PLL loop gain (shift) */
148 #define SHIFT_FLL	2		/* FLL loop gain (shift) */
150 static int time_state = TIME_OK;	/* clock state */
/* time_status is non-static: exported to the rest of the kernel. */
151 int time_status = STA_UNSYNC;	/* clock status bits */
152 static long time_tai;			/* TAI offset (s) */
153 static long time_monitor;		/* last time offset scaled (ns) */
154 static long time_constant;		/* poll interval (shift) (s) */
155 static long time_precision = 1;	/* clock precision (ns) */
156 static long time_maxerror = MAXPHASE / 1000; /* maximum error (us) */
157 long time_esterror = MAXPHASE / 1000; /* estimated error (us) */
158 static long time_reftime;		/* uptime at last adjustment (s) */
159 static l_fp time_offset;		/* time offset (ns) */
160 static l_fp time_freq;		/* frequency offset (ns/s) */
161 static l_fp time_adj;			/* tick adjust (ns/s) */
163 static int64_t time_adjtime;		/* correction from adjtime(2) (usec) */
/*
 * Spin mutex protecting the NTP state above; a spin lock because the
 * discipline code runs from clock interrupt context.
 */
165 static struct mtx ntp_lock;
166 MTX_SYSINIT(ntp, &ntp_lock, "ntp", MTX_SPIN);
168 #define	NTP_LOCK()		mtx_lock_spin(&ntp_lock)
169 #define	NTP_UNLOCK()		mtx_unlock_spin(&ntp_lock)
170 #define	NTP_ASSERT_LOCKED()	mtx_assert(&ntp_lock, MA_OWNED)
174 * The following variables are used when a pulse-per-second (PPS) signal
175 * is available and connected via a modem control lead. They establish
176 * the engineering parameters of the clock discipline loop when
177 * controlled by the PPS signal.
/*
 * PPS discipline tuning constants.  The *FAVG* values are powers of two
 * expressed as shifts; the calibration interval grows/shrinks between
 * PPS_FAVG and PPS_FAVGMAX according to observed wander.
 */
179 #define PPS_FAVG	2		/* min freq avg interval (s) (shift) */
180 #define PPS_FAVGDEF	8		/* default freq avg int (s) (shift) */
181 #define PPS_FAVGMAX	15		/* max freq avg interval (s) (shift) */
182 #define PPS_PAVG	4		/* phase avg interval (s) (shift) */
183 #define PPS_VALID	120		/* PPS signal watchdog max (s) */
184 #define PPS_MAXWANDER	100000	/* max PPS wander (ns/s) */
185 #define PPS_POPCORN	2		/* popcorn spike threshold (shift) */
/* Three-stage median filter over recent PPS timestamps (see hardpps()). */
187 static struct timespec pps_tf[3];	/* phase median filter */
188 static l_fp pps_freq;			/* scaled frequency offset (ns/s) */
189 static long pps_fcount;		/* frequency accumulator */
190 static long pps_jitter;		/* nominal jitter (ns) */
191 static long pps_stabil;		/* nominal stability (scaled ns/s) */
192 static long pps_lastsec;		/* time at last calibration (s) */
193 static int pps_valid;			/* signal watchdog counter */
194 static int pps_shift = PPS_FAVG;	/* interval duration (s) (shift) */
195 static int pps_shiftmax = PPS_FAVGDEF; /* max interval duration (s) (shift) */
196 static int pps_intcnt;		/* wander counter */
199 * PPS signal quality monitors
/* Statistics counters, reported back to userland via ntp_adjtime(2). */
201 static long pps_calcnt;		/* calibration intervals */
202 static long pps_jitcnt;		/* jitter limit exceeded */
203 static long pps_stbcnt;		/* stability limit exceeded */
204 static long pps_errcnt;		/* calibration errors */
205 #endif /* PPS_SYNC */
207 * End of phase/frequency-lock loop (PLL/FLL) definitions
/* Forward declarations for the file-local helpers defined below. */
210 static void ntp_init(void);
211 static void hardupdate(long offset);
212 static void ntp_gettime1(struct ntptimeval *ntvp);
213 static bool ntp_is_time_error(int tsl);
/*
 * ntp_is_time_error() - decode the status word 'tsl' and report whether
 * it represents an error condition.  Callers use this to substitute
 * TIME_ERROR for the normal clock state when the clock cannot be
 * trusted.  NOTE(review): the return statements are on lines elided
 * from this extraction; the predicate below is the complete condition.
 */
216 ntp_is_time_error(int tsl)
220 	 * Status word error decode. If any of these conditions occur,
221 	 * an error is returned, instead of the status word. Most
222 	 * applications will care only about the fact the system clock
223 	 * may not be trusted, not about the details.
225 	 * Hardware or software error
227 	if ((tsl & (STA_UNSYNC | STA_CLOCKERR)) ||
230 	 * PPS signal lost when either time or frequency synchronization
233 	    (tsl & (STA_PPSFREQ | STA_PPSTIME) &&
234 	    !(tsl & STA_PPSSIGNAL)) ||
237 	 * PPS jitter exceeded when time synchronization requested
239 	    (tsl & STA_PPSTIME && tsl & STA_PPSJITTER) ||
242 	 * PPS wander exceeded or calibration error when frequency
243 	 * synchronization requested
245 	    (tsl & STA_PPSFREQ &&
246 	    tsl & (STA_PPSWANDER | STA_PPSERROR)))
/*
 * ntp_gettime1() - fill *ntvp with the current time and NTP status.
 * Copies the current timespec plus the maximum/estimated error, TAI
 * offset and clock state, demoting time_state to TIME_ERROR when the
 * status word decodes as an error.  NOTE(review): the elided lines
 * presumably capture the current time into 'atv' and assert the NTP
 * lock — confirm against the full source.
 */
253 ntp_gettime1(struct ntptimeval *ntvp)
255 	struct timespec atv;	/* nanosecond time */
260 	ntvp->time.tv_sec = atv.tv_sec;
261 	ntvp->time.tv_nsec = atv.tv_nsec;
262 	ntvp->maxerror = time_maxerror;
263 	ntvp->esterror = time_esterror;
264 	ntvp->tai = time_tai;
265 	ntvp->time_state = time_state;
/* Report TIME_ERROR rather than the raw state when the clock is bad. */
267 	if (ntp_is_time_error(time_status))
268 		ntvp->time_state = TIME_ERROR;
272 * ntp_gettime() - NTP user application interface
274 * See the timex.h header file for synopsis and API description. Note that
275 * the TAI offset is returned in the ntptimeval.tai structure member.
/* Syscall argument structure, used only when sysproto.h doesn't supply it. */
277 #ifndef _SYS_SYSPROTO_H_
278 struct ntp_gettime_args {
279 	struct ntptimeval *ntvp;
/*
 * sys_ntp_gettime() - the ntp_gettime(2) system call.  Zeroes the
 * ntptimeval to avoid leaking kernel stack to userland, fills it via
 * ntp_gettime1() (locking is on lines elided here), returns the clock
 * state in td_retval[0] and copies the structure out to the user.
 */
284 sys_ntp_gettime(struct thread *td, struct ntp_gettime_args *uap)
286 	struct ntptimeval ntv;
288 	memset(&ntv, 0, sizeof(ntv));
294 	td->td_retval[0] = ntv.time_state;
295 	return (copyout(&ntv, uap->ntvp, sizeof(ntv)));
/*
 * ntp_sysctl() - sysctl handler exporting the same ntptimeval data as
 * ntp_gettime(2) through kern.ntp_pll.gettime, as an opaque structure.
 */
299 ntp_sysctl(SYSCTL_HANDLER_ARGS)
301 	struct ntptimeval ntv;	/* temporary structure */
307 	return (sysctl_handle_opaque(oidp, &ntv, sizeof(ntv), req));
/*
 * kern.ntp_pll sysctl tree: read-only views of the discipline state
 * plus read-write knobs for the PPS calibration interval.
 */
310 SYSCTL_NODE(_kern, OID_AUTO, ntp_pll, CTLFLAG_RW, 0, "");
311 SYSCTL_PROC(_kern_ntp_pll, OID_AUTO, gettime, CTLTYPE_OPAQUE | CTLFLAG_RD |
312     CTLFLAG_MPSAFE, 0, sizeof(struct ntptimeval) , ntp_sysctl, "S,ntptimeval",
316 SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shiftmax, CTLFLAG_RW,
317     &pps_shiftmax, 0, "Max interval duration (sec) (shift)");
318 SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shift, CTLFLAG_RW,
319     &pps_shift, 0, "Interval duration (sec) (shift)");
320 SYSCTL_LONG(_kern_ntp_pll, OID_AUTO, time_monitor, CTLFLAG_RD,
321     &time_monitor, 0, "Last time offset scaled (ns)");
323 SYSCTL_S64(_kern_ntp_pll, OID_AUTO, pps_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,
325     "Scaled frequency offset (ns/sec)");
326 SYSCTL_S64(_kern_ntp_pll, OID_AUTO, time_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,
328     "Frequency offset (ns/sec)");
332 * ntp_adjtime() - NTP daemon application interface
334 * See the timex.h header file for synopsis and API description. Note that
335 * the timex.constant structure member has a dual purpose to set the time
336 * constant and to set the TAI offset.
/* Syscall argument structure, used only when sysproto.h doesn't supply it. */
338 #ifndef _SYS_SYSPROTO_H_
339 struct ntp_adjtime_args {
/*
 * sys_ntp_adjtime() - the ntp_adjtime(2) system call.  Copies a struct
 * timex in from userland, applies the modes the caller selected (after
 * a privilege check), then reads back the full discipline state into
 * the same structure and copies it out.  td_retval[0] carries the
 * clock state (or TIME_ERROR).  NOTE(review): the NTP_LOCK()/UNLOCK()
 * pairs and several else-branches are on lines elided from this
 * extraction.
 */
345 sys_ntp_adjtime(struct thread *td, struct ntp_adjtime_args *uap)
347 	struct timex ntv;	/* temporary structure */
348 	long freq;		/* frequency (ns/s) */
349 	int modes;		/* mode bits from structure */
352 	error = copyin((caddr_t)uap->tp, (caddr_t)&ntv, sizeof(ntv));
357 	 * Update selected clock variables - only the superuser can
358 	 * change anything. Note that there is no error checking here on
359 	 * the assumption the superuser should know what it is doing.
360 	 * Note that either the time constant or TAI offset are loaded
361 	 * from the ntv.constant member, depending on the mode bits. If
362 	 * the STA_PLL bit in the status word is cleared, the state and
363 	 * status words are reset to the initial values at boot.
/* Any mode bit set means the caller wants to modify state: check priv. */
367 		error = priv_check(td, PRIV_NTP_ADJTIME);
371 	if (modes & MOD_MAXERROR)
372 		time_maxerror = ntv.maxerror;
373 	if (modes & MOD_ESTERROR)
374 		time_esterror = ntv.esterror;
375 	if (modes & MOD_STATUS) {
/* Dropping STA_PLL resets the discipline to its boot state. */
376 		if (time_status & STA_PLL && !(ntv.status & STA_PLL)) {
377 			time_state = TIME_OK;
378 			time_status = STA_UNSYNC;
380 			pps_shift = PPS_FAVG;
381 #endif /* PPS_SYNC */
/* Read-only bits are preserved; the rest come from the caller. */
383 		time_status &= STA_RONLY;
384 		time_status |= ntv.status & ~STA_RONLY;
/* Clamp the time constant to [0, MAXTC]. */
386 	if (modes & MOD_TIMECONST) {
387 		if (ntv.constant < 0)
389 		else if (ntv.constant > MAXTC)
390 			time_constant = MAXTC;
392 			time_constant = ntv.constant;
394 	if (modes & MOD_TAI) {
395 		if (ntv.constant > 0) /* XXX zero & negative numbers ? */
396 			time_tai = ntv.constant;
/* Clamp the PPS max averaging interval to [PPS_FAVG, PPS_FAVGMAX]. */
399 	if (modes & MOD_PPSMAX) {
400 		if (ntv.shift < PPS_FAVG)
401 			pps_shiftmax = PPS_FAVG;
402 		else if (ntv.shift > PPS_FAVGMAX)
403 			pps_shiftmax = PPS_FAVGMAX;
405 			pps_shiftmax = ntv.shift;
407 #endif /* PPS_SYNC */
408 	if (modes & MOD_NANO)
409 		time_status |= STA_NANO;
410 	if (modes & MOD_MICRO)
411 		time_status &= ~STA_NANO;
412 	if (modes & MOD_CLKB)
413 		time_status |= STA_CLK;
414 	if (modes & MOD_CLKA)
415 		time_status &= ~STA_CLK;
416 	if (modes & MOD_FREQUENCY) {
/* Convert scaled PPM to ns/s for the clamp check, then clamp. */
417 		freq = (ntv.freq * 1000LL) >> 16;
419 			L_LINT(time_freq, MAXFREQ);
420 		else if (freq < -MAXFREQ)
421 			L_LINT(time_freq, -MAXFREQ);
424 			 * ntv.freq is [PPM * 2^16] = [us/s * 2^16]
425 			 * time_freq is [ns/s * 2^32]
427 			time_freq = ntv.freq * 1000LL * 65536LL;
/* Keep the PPS frequency estimate in step with the new value. */
430 		pps_freq = time_freq;
431 #endif /* PPS_SYNC */
/* Phase offset: ns when STA_NANO is set, otherwise us (scale by 1000). */
433 	if (modes & MOD_OFFSET) {
434 		if (time_status & STA_NANO)
435 			hardupdate(ntv.offset);
437 			hardupdate(ntv.offset * 1000);
441 	 * Retrieve all clock variables. Note that the TAI offset is
442 	 * returned only by ntp_gettime();
444 	if (time_status & STA_NANO)
445 		ntv.offset = L_GINT(time_offset);
447 		ntv.offset = L_GINT(time_offset) / 1000; /* XXX rounding ? */
/* Convert back from ns/s * 2^32 to scaled PPM for userland. */
448 	ntv.freq = L_GINT((time_freq / 1000LL) << 16);
449 	ntv.maxerror = time_maxerror;
450 	ntv.esterror = time_esterror;
451 	ntv.status = time_status;
452 	ntv.constant = time_constant;
453 	if (time_status & STA_NANO)
454 		ntv.precision = time_precision;
456 		ntv.precision = time_precision / 1000;
457 	ntv.tolerance = MAXFREQ * SCALE_PPM;
/* PPS discipline statistics (only compiled in with PPS_SYNC). */
459 	ntv.shift = pps_shift;
460 	ntv.ppsfreq = L_GINT((pps_freq / 1000LL) << 16);
461 	if (time_status & STA_NANO)
462 		ntv.jitter = pps_jitter;
464 		ntv.jitter = pps_jitter / 1000;
465 	ntv.stabil = pps_stabil;
466 	ntv.calcnt = pps_calcnt;
467 	ntv.errcnt = pps_errcnt;
468 	ntv.jitcnt = pps_jitcnt;
469 	ntv.stbcnt = pps_stbcnt;
470 #endif /* PPS_SYNC */
471 	retval = ntp_is_time_error(time_status) ? TIME_ERROR : time_state;
474 	error = copyout((caddr_t)&ntv, (caddr_t)uap->tp, sizeof(ntv));
476 	td->td_retval[0] = retval;
481 * second_overflow() - called after ntp_tick_adjust()
483 * This routine is ordinarily called immediately following the above
484 * routine ntp_tick_adjust(). While these two routines are normally
485 * combined, they are separated here only for the purposes of
/*
 * ntp_update_second() - per-second discipline update, called from the
 * timecounter code on each rollover of the second.  Cranks the leap
 * second state machine against *newsec, computes the adjustment (l_fp
 * ns/s) to apply during the next second into *adjustment, and folds in
 * any pending adjtime(2) correction.  NOTE(review): several case
 * labels, else-branches and the leap-second +/-1 adjustments to
 * *newsec are on lines elided from this extraction.
 */
489 ntp_update_second(int64_t *adjustment, time_t *newsec)
492 	l_fp ftemp;		/* 32/64-bit temporary */
497 	 * On rollover of the second both the nanosecond and microsecond
498 	 * clocks are updated and the state machine cranked as
499 	 * necessary. The phase adjustment to be used for the next
500 	 * second is calculated and the maximum error is increased by
/* Error grows by the tolerance each second while unsynchronized. */
503 	time_maxerror += MAXFREQ / 1000;
506 	 * Leap second processing. If in leap-insert state at
507 	 * the end of the day, the system clock is set back one
508 	 * second; if in leap-delete state, the system clock is
509 	 * set ahead one second. The nano_time() routine or
510 	 * external clock driver will insure that reported time
511 	 * is always monotonic.
513 	switch (time_state) {
/* TIME_OK: arm insert/delete when the daemon has requested one. */
519 		if (time_status & STA_INS)
520 			time_state = TIME_INS;
521 		else if (time_status & STA_DEL)
522 			time_state = TIME_DEL;
526 		 * Insert second 23:59:60 following second
530 		if (!(time_status & STA_INS))
531 			time_state = TIME_OK;
532 		else if ((*newsec) % 86400 == 0) {
534 			time_state = TIME_OOP;
540 		 * Delete second 23:59:59.
543 		if (!(time_status & STA_DEL))
544 			time_state = TIME_OK;
545 		else if (((*newsec) + 1) % 86400 == 0) {
548 			time_state = TIME_WAIT;
553 		 * Insert second in progress.
556 		time_state = TIME_WAIT;
560 		 * Wait for status bits to clear.
563 		if (!(time_status & (STA_INS | STA_DEL)))
564 			time_state = TIME_OK;
568 	 * Compute the total time adjustment for the next second
569 	 * in ns. The offset is reduced by a factor depending on
570 	 * whether the PPS signal is operating. Note that the
571 	 * value is in effect scaled by the clock frequency,
572 	 * since the adjustment is added at each tick interrupt.
576 	/* XXX even if PPS signal dies we should finish adjustment ? */
577 	if (time_status & STA_PPSTIME && time_status &
579 		L_RSHIFT(ftemp, pps_shift);
581 		L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
583 	L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
584 #endif /* PPS_SYNC */
/* Amortize: remove this second's slice from the offset, add base freq. */
586 	L_SUB(time_offset, ftemp);
587 	L_ADD(time_adj, time_freq);
590 	 * Apply any correction from adjtime(2). If more than one second
591 	 * off we slew at a rate of 5ms/s (5000 PPM) else 500us/s (500PPM)
592 	 * until the last second is slewed the final < 500 usecs.
594 	if (time_adjtime != 0) {
595 		if (time_adjtime > 1000000)
597 		else if (time_adjtime < -1000000)
599 		else if (time_adjtime > 500)
601 		else if (time_adjtime < -500)
/* Remainder below 500 us is slewed out in one final second. */
604 			tickrate = time_adjtime;
605 		time_adjtime -= tickrate;
606 		L_LINT(ftemp, tickrate * 1000);
607 		L_ADD(time_adj, ftemp);
609 	*adjustment = time_adj;
/* PPS watchdog: declare the signal lost after PPS_VALID quiet seconds. */
615 		time_status &= ~STA_PPSSIGNAL;
616 #endif /* PPS_SYNC */
622 * ntp_init() - initialize variables and structures
624 * This routine must be called after the kernel variables hz and tick
625 * are set or changed and before the next tick interrupt. In this
626 * particular implementation, these values are assumed set elsewhere in
627 * the kernel. The design allows the clock frequency and tick interval
628 * to be changed while the system is running. So, this routine should
629 * probably be integrated with the code that does that.
/*
 * Body of ntp_init() (its header is on lines elided from this
 * extraction): clears the PPS median filter and related state at boot.
 */
636 	 * The following variables are initialized only at startup. Only
637 	 * those structures not cleared by the compiler need to be
638 	 * initialized, and these only in the simulator. In the actual
639 	 * kernel, any nonzero values here will quickly evaporate.
644 	pps_tf[0].tv_sec = pps_tf[0].tv_nsec = 0;
645 	pps_tf[1].tv_sec = pps_tf[1].tv_nsec = 0;
646 	pps_tf[2].tv_sec = pps_tf[2].tv_nsec = 0;
649 #endif /* PPS_SYNC */
/* Run ntp_init() once during boot, in the clock setup phase. */
652 SYSINIT(ntpclocks, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, ntp_init, NULL);
655 * hardupdate() - local clock update
657 * This routine is called by ntp_adjtime() to update the local clock
658 * phase and frequency. The implementation is of an adaptive-parameter,
659 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
660 * time and frequency offset estimates for each call. If the kernel PPS
661 * discipline code is configured (PPS_SYNC), the PPS signal itself
662 * determines the new time offset, instead of the calling argument.
663 * Presumably, calls to ntp_adjtime() occur only when the caller
664 * believes the local clock is valid within some bound (+-128 ms with
665 * NTP). If the caller's time is far different than the PPS time, an
666 * argument will ensue, and it's not clear who will lose.
668 * For uncompensated quartz crystal oscillators and nominal update
669 * intervals less than 256 s, operation should be in phase-lock mode,
670 * where the loop is disciplined to phase. For update intervals greater
671 * than 1024 s, operation should be in frequency-lock mode, where the
672 * loop is disciplined to frequency. Between 256 s and 1024 s, the mode
673 * is selected by the STA_MODE status bit.
/*
 * Body of hardupdate() (K&R-style header on elided lines): update the
 * local clock phase and frequency from the caller-supplied offset,
 * using a hybrid PLL/FLL.  Called from ntp_adjtime() with the NTP lock
 * held (assertion on an elided line).
 */
677 	long offset;		/* clock offset (ns) */
685 	 * Select how the phase is to be controlled and from which
686 	 * source. If the PPS signal is present and enabled to
687 	 * discipline the time, the PPS offset is used; otherwise, the
688 	 * argument offset is used.
690 	if (!(time_status & STA_PLL))
/* Phase from the argument only when the PPS is not disciplining time. */
692 	if (!(time_status & STA_PPSTIME && time_status &
/* Clamp the reported offset to +-MAXPHASE. */
694 		if (offset > MAXPHASE)
695 			time_monitor = MAXPHASE;
696 		else if (offset < -MAXPHASE)
697 			time_monitor = -MAXPHASE;
699 			time_monitor = offset;
700 		L_LINT(time_offset, time_monitor);
704 	 * Select how the frequency is to be controlled and in which
705 	 * mode (PLL or FLL). If the PPS signal is present and enabled
706 	 * to discipline the frequency, the PPS frequency is used;
707 	 * otherwise, the argument offset is used to compute it.
709 	if (time_status & STA_PPSFREQ && time_status & STA_PPSSIGNAL) {
710 		time_reftime = time_uptime;
713 	if (time_status & STA_FREQHOLD || time_reftime == 0)
714 		time_reftime = time_uptime;
/* mtemp: seconds since last update; drives FLL vs PLL mode selection. */
715 	mtemp = time_uptime - time_reftime;
716 	L_LINT(ftemp, time_monitor);
717 	L_RSHIFT(ftemp, (SHIFT_PLL + 2 + time_constant) << 1);
719 	L_ADD(time_freq, ftemp);
720 	time_status &= ~STA_MODE;
721 	if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp >
723 		L_LINT(ftemp, (time_monitor << 4) / mtemp);
724 		L_RSHIFT(ftemp, SHIFT_FLL + 4);
725 		L_ADD(time_freq, ftemp);
726 		time_status |= STA_MODE;
728 	time_reftime = time_uptime;
/* Clamp the accumulated frequency to +-MAXFREQ. */
729 	if (L_GINT(time_freq) > MAXFREQ)
730 		L_LINT(time_freq, MAXFREQ);
731 	else if (L_GINT(time_freq) < -MAXFREQ)
732 		L_LINT(time_freq, -MAXFREQ);
737 * hardpps() - discipline CPU clock oscillator to external PPS signal
739 * This routine is called at each PPS interrupt in order to discipline
740 * the CPU clock oscillator to the PPS signal. There are two independent
741 * first-order feedback loops, one for the phase, the other for the
742 * frequency. The phase loop measures and grooms the PPS phase offset
743 * and leaves it in a handy spot for the seconds overflow routine. The
744 * frequency loop averages successive PPS phase differences and
745 * calculates the PPS frequency offset, which is also processed by the
746 * seconds overflow routine. The code requires the caller to capture the
747 * time and architecture-dependent hardware counter values in
748 * nanoseconds at the on-time PPS signal transition.
750 * Note that, on some Unix systems this routine runs at an interrupt
751 * priority level higher than the timer interrupt routine hardclock().
752 * Therefore, the variables used are distinct from the hardclock()
753 * variables, except for the actual time and frequency variables, which
754 * are determined by this routine and updated atomically.
757 * nsec - hardware counter at PPS
/*
 * hardpps() - discipline the clock oscillator to an external PPS
 * signal.  'tsp' is the captured system time and 'nsec' the hardware
 * counter value at the on-time PPS edge.  Runs a phase loop (median
 * filter + jitter gate) and a frequency loop (calibration intervals of
 * 2^pps_shift seconds with wander gating).  NOTE(review): the NTP lock
 * assertion, several early 'return's and else-branches are on lines
 * elided from this extraction.
 */
760 hardpps(struct timespec *tsp, long nsec)
762 	long u_sec, u_nsec, v_nsec; /* temps */
768 	 * The signal is first processed by a range gate and frequency
769 	 * discriminator. The range gate rejects noise spikes outside
770 	 * the range +-500 us. The frequency discriminator rejects input
771 	 * signals with apparent frequency outside the range 1 +-500
772 	 * PPM. If two hits occur in the same second, we ignore the
773 	 * later hit; if not and a hit occurs outside the range gate,
774 	 * keep the later hit for later comparison, but do not process
/* Pet the watchdog and assume jitter until the sample proves clean. */
777 	time_status |= STA_PPSSIGNAL | STA_PPSJITTER;
778 	time_status &= ~(STA_PPSWANDER | STA_PPSERROR);
779 	pps_valid = PPS_VALID;
781 	u_nsec = tsp->tv_nsec;
/* Fold the timestamp to the nearest second boundary (+-0.5 s). */
782 	if (u_nsec >= (NANOSECOND >> 1)) {
783 		u_nsec -= NANOSECOND;
786 	v_nsec = u_nsec - pps_tf[0].tv_nsec;
787 	if (u_sec == pps_tf[0].tv_sec && v_nsec < NANOSECOND - MAXFREQ)
/* Shift the new sample into the three-stage median filter. */
789 	pps_tf[2] = pps_tf[1];
790 	pps_tf[1] = pps_tf[0];
791 	pps_tf[0].tv_sec = u_sec;
792 	pps_tf[0].tv_nsec = u_nsec;
795 	 * Compute the difference between the current and previous
796 	 * counter values. If the difference exceeds 0.5 s, assume it
797 	 * has wrapped around, so correct 1.0 s. If the result exceeds
798 	 * the tick interval, the sample point has crossed a tick
799 	 * boundary during the last second, so correct the tick. Very
803 	if (u_nsec > (NANOSECOND >> 1))
804 		u_nsec -= NANOSECOND;
805 	else if (u_nsec < -(NANOSECOND >> 1))
806 		u_nsec += NANOSECOND;
807 	pps_fcount += u_nsec;
/* Frequency discriminator: discard samples outside 1 +-500 PPM. */
808 	if (v_nsec > MAXFREQ || v_nsec < -MAXFREQ)
810 	time_status &= ~STA_PPSJITTER;
813 	 * A three-stage median filter is used to help denoise the PPS
814 	 * time. The median sample becomes the time offset estimate; the
815 	 * difference between the other two samples becomes the time
816 	 * dispersion (jitter) estimate.
818 	if (pps_tf[0].tv_nsec > pps_tf[1].tv_nsec) {
819 		if (pps_tf[1].tv_nsec > pps_tf[2].tv_nsec) {
820 			v_nsec = pps_tf[1].tv_nsec;	/* 0 1 2 */
821 			u_nsec = pps_tf[0].tv_nsec - pps_tf[2].tv_nsec;
822 		} else if (pps_tf[2].tv_nsec > pps_tf[0].tv_nsec) {
823 			v_nsec = pps_tf[0].tv_nsec;	/* 2 0 1 */
824 			u_nsec = pps_tf[2].tv_nsec - pps_tf[1].tv_nsec;
826 			v_nsec = pps_tf[2].tv_nsec;	/* 0 2 1 */
827 			u_nsec = pps_tf[0].tv_nsec - pps_tf[1].tv_nsec;
830 		if (pps_tf[1].tv_nsec < pps_tf[2].tv_nsec) {
831 			v_nsec = pps_tf[1].tv_nsec;	/* 2 1 0 */
832 			u_nsec = pps_tf[2].tv_nsec - pps_tf[0].tv_nsec;
833 		} else if (pps_tf[2].tv_nsec < pps_tf[0].tv_nsec) {
834 			v_nsec = pps_tf[0].tv_nsec;	/* 1 0 2 */
835 			u_nsec = pps_tf[1].tv_nsec - pps_tf[2].tv_nsec;
837 			v_nsec = pps_tf[2].tv_nsec;	/* 1 2 0 */
838 			u_nsec = pps_tf[1].tv_nsec - pps_tf[0].tv_nsec;
843 	 * Nominal jitter is due to PPS signal noise and interrupt
844 	 * latency. If it exceeds the popcorn threshold, the sample is
845 	 * discarded. otherwise, if so enabled, the time offset is
846 	 * updated. We can tolerate a modest loss of data here without
847 	 * much degrading time accuracy.
849 	 * The measurements being checked here were made with the system
850 	 * timecounter, so the popcorn threshold is not allowed to fall below
851 	 * the number of nanoseconds in two ticks of the timecounter. For a
852 	 * timecounter running faster than 1 GHz the lower bound is 2ns, just
853 	 * to avoid a nonsensical threshold of zero.
855 	if (u_nsec > lmax(pps_jitter << PPS_POPCORN,
856 	    2 * (NANOSECOND / (long)qmin(NANOSECOND, tc_getfrequency())))) {
857 		time_status |= STA_PPSJITTER;
859 	} else if (time_status & STA_PPSTIME) {
/* Feed the (negated) median phase into the time discipline. */
860 		time_monitor = -v_nsec;
861 		L_LINT(time_offset, time_monitor);
/* Exponential average of the jitter estimate. */
863 	pps_jitter += (u_nsec - pps_jitter) >> PPS_FAVG;
864 	u_sec = pps_tf[0].tv_sec - pps_lastsec;
/* Wait until a full 2^pps_shift second calibration interval elapsed. */
865 	if (u_sec < (1 << pps_shift))
869 	 * At the end of the calibration interval the difference between
870 	 * the first and last counter values becomes the scaled
871 	 * frequency. It will later be divided by the length of the
872 	 * interval to determine the frequency update. If the frequency
873 	 * exceeds a sanity threshold, or if the actual calibration
874 	 * interval is not equal to the expected length, the data are
875 	 * discarded. We can tolerate a modest loss of data here without
876 	 * much degrading frequency accuracy.
879 	v_nsec = -pps_fcount;
880 	pps_lastsec = pps_tf[0].tv_sec;
/* Sanity gate: reject over-threshold or wrong-length intervals. */
882 	u_nsec = MAXFREQ << pps_shift;
883 	if (v_nsec > u_nsec || v_nsec < -u_nsec || u_sec != (1 << pps_shift)) {
884 		time_status |= STA_PPSERROR;
890 	 * Here the raw frequency offset and wander (stability) is
891 	 * calculated. If the wander is less than the wander threshold
892 	 * for four consecutive averaging intervals, the interval is
893 	 * doubled; if it is greater than the threshold for four
894 	 * consecutive intervals, the interval is halved. The scaled
895 	 * frequency offset is converted to frequency offset. The
896 	 * stability metric is calculated as the average of recent
897 	 * frequency changes, but is used only for performance
900 	L_LINT(ftemp, v_nsec);
901 	L_RSHIFT(ftemp, pps_shift);
902 	L_SUB(ftemp, pps_freq);
903 	u_nsec = L_GINT(ftemp);
/* Clamp wander to +-PPS_MAXWANDER and count the excursions. */
904 	if (u_nsec > PPS_MAXWANDER) {
905 		L_LINT(ftemp, PPS_MAXWANDER);
907 		time_status |= STA_PPSWANDER;
909 	} else if (u_nsec < -PPS_MAXWANDER) {
910 		L_LINT(ftemp, -PPS_MAXWANDER);
912 		time_status |= STA_PPSWANDER;
/* Four quiet intervals in a row: double the averaging interval. */
917 	if (pps_intcnt >= 4) {
919 		if (pps_shift < pps_shiftmax) {
/* Four noisy intervals (or shiftmax lowered): halve the interval. */
923 	} else if (pps_intcnt <= -4 || pps_shift > pps_shiftmax) {
925 		if (pps_shift > PPS_FAVG) {
932 	pps_stabil += (u_nsec * SCALE_PPM - pps_stabil) >> PPS_FAVG;
935 	 * The PPS frequency is recalculated and clamped to the maximum
936 	 * MAXFREQ. If enabled, the system clock frequency is updated as
939 	L_ADD(pps_freq, ftemp);
940 	u_nsec = L_GINT(pps_freq);
941 	if (u_nsec > MAXFREQ)
942 		L_LINT(pps_freq, MAXFREQ);
943 	else if (u_nsec < -MAXFREQ)
944 		L_LINT(pps_freq, -MAXFREQ);
945 	if (time_status & STA_PPSFREQ)
946 		time_freq = pps_freq;
951 #endif /* PPS_SYNC */
/* Syscall argument structure, used only when sysproto.h doesn't supply it. */
953 #ifndef _SYS_SYSPROTO_H_
954 struct adjtime_args {
955 	struct timeval *delta;
956 	struct timeval *olddelta;
/*
 * sys_adjtime() - the adjtime(2) system call.  Copies the requested
 * delta in (when non-NULL), calls kern_adjtime() to apply it, and
 * copies the previously-pending correction out if the caller asked
 * for it.
 */
961 sys_adjtime(struct thread *td, struct adjtime_args *uap)
963 	struct timeval delta, olddelta, *deltap;
967 		error = copyin(uap->delta, &delta, sizeof(delta));
973 	error = kern_adjtime(td, deltap, &olddelta);
/* Only report the old delta on success and when a buffer was given. */
974 	if (uap->olddelta && error == 0)
975 		error = copyout(&olddelta, uap->olddelta, sizeof(olddelta));
/*
 * kern_adjtime() - kernel entry point for adjtime(2).  Converts the
 * requested delta to microseconds and installs it as the pending
 * time_adjtime correction (swap done on elided lines, under the NTP
 * lock); the previous pending amount is converted back to a timeval
 * for *olddelta.  Requires PRIV_ADJTIME when delta is non-NULL.
 */
980 kern_adjtime(struct thread *td, struct timeval *delta, struct timeval *olddelta)
987 		error = priv_check(td, PRIV_ADJTIME);
/* Total requested slew in microseconds. */
990 		ltw = (int64_t)delta->tv_sec * 1000000 + delta->tv_usec;
997 	if (olddelta != NULL) {
998 		atv.tv_sec = ltr / 1000000;
999 		atv.tv_usec = ltr % 1000000;
/* Normalize so tv_usec is non-negative. */
1000 		if (atv.tv_usec < 0) {
1001 			atv.tv_usec += 1000000;
/* Callout and period (seconds) for saving system time back to the RTC. */
1009 static struct callout resettodr_callout;
1010 static int resettodr_period = 1800;
/*
 * periodic_resettodr() - callout handler that writes the system time
 * to the real-time clock (the resettodr() call itself is on an elided
 * line) whenever the clock is synchronized, then reschedules itself.
 */
1013 periodic_resettodr(void *arg __unused)
1017 	 * Read of time_status is lock-less, which is fine since
1018 	 * ntp_is_time_error() operates on the consistent read value.
1020 	if (!ntp_is_time_error(time_status))
1022 	if (resettodr_period > 0)
1023 		callout_schedule(&resettodr_callout, resettodr_period * hz);
/*
 * shutdown_resettodr() - shutdown_pre_sync event handler: stop the
 * periodic callout and save the time to the RTC one last time if the
 * feature is enabled and the clock is trustworthy.
 */
1027 shutdown_resettodr(void *arg __unused, int howto __unused)
1030 	callout_drain(&resettodr_callout);
1031 	/* Another unlocked read of time_status */
1032 	if (resettodr_period > 0 && !ntp_is_time_error(time_status))
/*
 * sysctl_resettodr_period() - handler for machdep.rtc_save_period.
 * A value of 0 disables the periodic RTC save; any positive value
 * (re)arms the callout with the new period.
 */
1037 sysctl_resettodr_period(SYSCTL_HANDLER_ARGS)
1041 	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
/* Read-only access or handler error: nothing further to do. */
1042 	if (error || !req->newptr)
1046 	if (resettodr_period == 0)
1047 		callout_stop(&resettodr_callout);
1049 		callout_reset(&resettodr_callout, resettodr_period * hz,
1050 		    periodic_resettodr, NULL);
1055 SYSCTL_PROC(_machdep, OID_AUTO, rtc_save_period, CTLTYPE_INT | CTLFLAG_RWTUN |
1056     CTLFLAG_MPSAFE, &resettodr_period, 1800, sysctl_resettodr_period, "I",
1057     "Save system time to RTC with this period (in seconds)");
/*
 * start_periodic_resettodr() - boot-time setup: register the shutdown
 * hook and, unless disabled (period == 0), start the periodic RTC-save
 * callout.
 */
1060 start_periodic_resettodr(void *arg __unused)
1063 	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_resettodr, NULL,
1064 	    SHUTDOWN_PRI_FIRST);
1065 	callout_init(&resettodr_callout, 1);
1066 	if (resettodr_period == 0)
1068 	callout_reset(&resettodr_callout, resettodr_period * hz,
1069 	    periodic_resettodr, NULL);
/* Run late in boot, after the clocks and callout machinery are up. */
1072 SYSINIT(periodic_resettodr, SI_SUB_LAST, SI_ORDER_MIDDLE,
1073     start_periodic_resettodr, NULL);