2 * ntp_loopfilter.c - implements the NTP loop filter algorithm
4 * ATTENTION: Get approval from Dave Mills on all changes to this file!
16 #include "ntp_unixtime.h"
17 #include "ntp_stdlib.h"
28 #include "ntp_syscall.h"
29 #endif /* KERNEL_PLL */
32 * This is an implementation of the clock discipline algorithm described
33 * in UDel TR 97-4-3, as amended. It operates as an adaptive parameter,
34 * hybrid phase/frequency-lock loop. A number of sanity checks are
35 * included to protect against timewarps, timespikes and general mayhem.
36 * All units are in s and s/s, unless noted otherwise.
38 #define CLOCK_MAX .128 /* default step threshold (s) */
39 #define CLOCK_MINSTEP 300. /* default stepout threshold (s) */
40 #define CLOCK_PANIC 1000. /* default panic threshold (s) */
41 #define CLOCK_PHI 15e-6 /* max frequency error (s/s) */
42 #define CLOCK_PLL 16. /* PLL loop gain (log2) */
43 #define CLOCK_AVG 8. /* parameter averaging constant */
44 #define CLOCK_FLL .25 /* FLL loop gain */
45 #define CLOCK_FLOOR .0005 /* startup offset floor (s) */
46 #define CLOCK_ALLAN 11 /* Allan intercept (log2 s) */
47 #define CLOCK_LIMIT 30 /* poll-adjust threshold */
48 #define CLOCK_PGATE 4. /* poll-adjust gate */
49 #define PPS_MAXAGE 120 /* kernel pps signal timeout (s) */
50 #define FREQTOD(x) ((x) / 65536e6) /* NTP to double */
51 #define DTOFREQ(x) ((int32)((x) * 65536e6)) /* double to NTP */
54 * Clock discipline state machine. This is used to control the
55 * synchronization behavior during initialization and following a
58 * State < step > step Comments
59 * ========================================================
60 * NSET FREQ step, FREQ freq not set
62 * FSET SYNC step, SYNC freq set
64 * FREQ if (mu < 900) if (mu < 900) set freq direct
67 * freq, SYNC freq, step, SYNC
69 * SYNC SYNC SPIK, ignore adjust phase/freq
71 * SPIK SYNC if (mu < 900) adjust phase/freq
76 * Kernel PLL/PPS state machine. This is used with the kernel PLL
77 * modifications described in the documentation.
79 * If kernel support for the ntp_adjtime() system call is available, the
80 * ntp_control flag is set. The ntp_enable and kern_enable flags can be
81 * set at configuration time or run time using ntpdc. If ntp_enable is
82 * false, the discipline loop is unlocked and no corrections of any kind
83 * are made. If both ntp_control and kern_enable are set, the kernel
84 * support is used as described above; if false, the kernel is bypassed
85 * entirely and the daemon discipline used instead.
87 * There have been three versions of the kernel discipline code. The
88 * first (microkernel) now in Solaris disciplines the microseconds. The
89 * second and third (nanokernel) disciplines the clock in nanoseconds.
90 * These versions are identified if the symbol STA_PLL is present in the
91 * header file /usr/include/sys/timex.h. The third and current version
92 * includes TAI offset and is identified by the symbol NTP_API with
95 * Each PPS time/frequency discipline can be enabled by the atom driver
96 * or another driver. If enabled, the STA_PPSTIME and STA_FREQ bits are
97 * set in the kernel status word; otherwise, these bits are cleared.
98 * These bits are also cleared if the kernel reports an error.
100 * If an external clock is present, the clock driver sets STA_CLK in the
101 * status word. When the local clock driver sees this bit, it updates
102 * via this routine, which then calls ntp_adjtime() with the STA_PLL bit
103 * set to zero, in which case the system clock is not adjusted. This is
104 * also a signal for the external clock driver to discipline the system
105 * clock. Unless specified otherwise, all times are in seconds.
108 * Program variables that can be tinkered.
110 double clock_max_back = CLOCK_MAX; /* step threshold */
111 double clock_max_fwd = CLOCK_MAX; /* step threshold */
112 double clock_minstep = CLOCK_MINSTEP; /* stepout threshold */
113 double clock_panic = CLOCK_PANIC; /* panic threshold */
114 double clock_phi = CLOCK_PHI; /* dispersion rate (s/s) */
115 u_char allan_xpt = CLOCK_ALLAN; /* Allan intercept (log2 s) */
120 static double clock_offset; /* offset */
121 double clock_jitter; /* offset jitter */
122 double drift_comp; /* frequency (s/s) */
123 static double init_drift_comp; /* initial frequency (PPM) */
124 double clock_stability; /* frequency stability (wander) (s/s) */
125 double clock_codec; /* audio codec frequency (samples/s) */
126 static u_long clock_epoch; /* last update */
127 u_int sys_tai; /* TAI offset from UTC */
128 static int loop_started; /* TRUE after LOOP_DRIFTINIT */
129 static void rstclock (int, double); /* transition function */
130 static double direct_freq(double); /* direct set frequency */
131 static void set_freq(double); /* set frequency */
133 # define PATH_MAX MAX_PATH
135 static char relative_path[PATH_MAX + 1]; /* relative path per recursive make */
136 static char *this_file = NULL;
139 static struct timex ntv; /* ntp_adjtime() parameters */
140 int pll_status; /* last kernel status bits */
141 #if defined(STA_NANO) && NTP_API == 4
142 static u_int loop_tai; /* last TAI offset */
143 #endif /* STA_NANO */
144 static void start_kern_loop(void);
145 static void stop_kern_loop(void);
146 #endif /* KERNEL_PLL */
149 * Clock state machine control flags
151 int ntp_enable = TRUE; /* clock discipline enabled */
152 int pll_control; /* kernel support available */
153 int kern_enable = TRUE; /* kernel support enabled */
154 int hardpps_enable; /* kernel PPS discipline enabled */
155 int ext_enable; /* external clock enabled */
156 int pps_stratum; /* pps stratum */
157 int kernel_status; /* from ntp_adjtime */
158 int force_step_once = FALSE; /* always step time once at startup (-G) */
159 int mode_ntpdate = FALSE; /* exit on first clock set (-q) */
160 int freq_cnt; /* initial frequency clamp */
161 int freq_set; /* initial set frequency switch */
164 * Clock state machine variables
166 int state = 0; /* clock discipline state */
167 u_char sys_poll; /* time constant/poll (log2 s) */
168 int tc_counter; /* jiggle counter */
169 double last_offset; /* last offset (s) */
171 u_int tc_twinlo; /* TC step down not before this time */
172 u_int tc_twinhi; /* TC step up not before this time */
175 * Huff-n'-puff filter variables
177 static double *sys_huffpuff; /* huff-n'-puff filter */
178 static int sys_hufflen; /* huff-n'-puff filter stages */
179 static int sys_huffptr; /* huff-n'-puff filter pointer */
180 static double sys_mindly; /* huff-n'-puff filter min delay */
182 #if defined(KERNEL_PLL)
183 /* Emacs cc-mode goes nuts if we split the next line... */
184 #define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | \
185 MOD_STATUS | MOD_TIMECONST)
187 static void pll_trap (int); /* configuration trap */
188 static struct sigaction sigsys; /* current sigaction status */
189 static struct sigaction newsigsys; /* new sigaction status */
190 static sigjmp_buf env; /* environment var. for pll_trap() */
192 #endif /* KERNEL_PLL */
/*
 * sync_status - report a kernel status-word transition via report_event().
 * Formats the old and new status words (symbolically via snprintb() where
 * USE_SNPRINTB/STA_FMT are available, otherwise as raw hex) and logs a
 * "<what> status: old -> new" kernel event.
 */
195 sync_status(const char *what, int ostatus, int nstatus)
197 char obuf[256], nbuf[256], tbuf[1024];
198 #if defined(USE_SNPRINTB) && defined (STA_FMT)
/* decode the status bits symbolically when the platform supports it */
199 snprintb(obuf, sizeof(obuf), STA_FMT, ostatus);
200 snprintb(nbuf, sizeof(nbuf), STA_FMT, nstatus);
/* fallback: plain hexadecimal rendering of the status words */
202 snprintf(obuf, sizeof(obuf), "%04x", ostatus);
203 snprintf(nbuf, sizeof(nbuf), "%04x", nstatus);
205 snprintf(tbuf, sizeof(tbuf), "%s status: %s -> %s", what, obuf, nbuf);
206 report_event(EVNT_KERN, NULL, tbuf);
210 * file_name - return pointer to non-relative portion of this C file pathname
212 static char *file_name(void)
/* Lazily computed on first call; the result is cached in this_file. */
214 if (this_file == NULL) {
/*
 * relative_path is a static PATH_MAX+1 buffer (zero-initialized), and
 * strncpy copies at most PATH_MAX bytes, so the final byte stays NUL
 * and the buffer is always terminated.
 */
215 (void)strncpy(relative_path, __FILE__, PATH_MAX);
/* skip leading non-alphanumeric characters (path punctuation) */
216 for (this_file=relative_path;
217 *this_file && ! isalnum((unsigned char)*this_file);
224 * init_loopfilter - initialize loop filter data
227 init_loopfilter(void)
230 * Initialize state variables.
/* start at the configured minimum poll interval */
232 sys_poll = ntp_minpoll;
/* seed the jitter estimate from the system clock precision */
233 clock_jitter = LOGTOD(sys_precision);
/* startup frequency-clamp counter runs for one stepout interval */
234 freq_cnt = (int)clock_minstep;
239 * ntp_adjtime_error_handler - process errors from ntp_adjtime
/*
 * Decodes both the errno reported after a failed ntp_adjtime() call and
 * the TIME_* return codes of a successful one, logging a human-readable
 * explanation via msyslog()/report_event().  The caller supplies its own
 * name and source line so log messages point back at the call site.
 */
242 ntp_adjtime_error_handler(
243 const char *caller, /* name of calling function */
244 struct timex *ptimex, /* pointer to struct timex */
245 int ret, /* return value from ntp_adjtime */
246 int saved_errno, /* value of errno when ntp_adjtime returned */
247 int pps_call, /* ntp_adjtime call was PPS-related */
248 int tai_call, /* ntp_adjtime call was TAI-related */
249 int line /* line number of ntp_adjtime call */
252 char des[1024] = ""; /* Decoded Error Status */
/* ebp marks one past the end of des for the xsbprintf() appenders */
256 ebp = dbp + sizeof(des);
/* First dispatch on the saved errno from a failing call. */
260 switch (saved_errno) {
262 msyslog(LOG_ERR, "%s: %s line %d: invalid struct timex pointer: 0x%lx",
263 caller, file_name(), line,
264 (long)((void *)ptimex)
268 msyslog(LOG_ERR, "%s: %s line %d: invalid struct timex \"constant\" element value: %ld",
269 caller, file_name(), line,
270 (long)(ptimex->constant)
277 "%s: ntp_adjtime(TAI) failed: %m",
281 msyslog(LOG_ERR, "%s: %s line %d: ntp_adjtime: %m",
282 caller, file_name(), line
286 msyslog(LOG_NOTICE, "%s: %s line %d: unhandled errno value %d after failed ntp_adjtime call",
287 caller, file_name(), line,
/* Then dispatch on the TIME_* state returned by a successful call. */
294 case TIME_OK: /* 0: synchronized, no leap second warning */
295 /* msyslog(LOG_INFO, "kernel reports time is synchronized normally"); */
298 # warning TIME_OK is not defined
301 case TIME_INS: /* 1: positive leap second warning */
302 msyslog(LOG_INFO, "kernel reports leap second insertion scheduled");
305 # warning TIME_INS is not defined
308 case TIME_DEL: /* 2: negative leap second warning */
309 msyslog(LOG_INFO, "kernel reports leap second deletion scheduled");
312 # warning TIME_DEL is not defined
315 case TIME_OOP: /* 3: leap second in progress */
316 msyslog(LOG_INFO, "kernel reports leap second in progress");
319 # warning TIME_OOP is not defined
322 case TIME_WAIT: /* 4: leap second has occurred */
323 msyslog(LOG_INFO, "kernel reports leap second has occurred");
326 # warning TIME_WAIT is not defined
/*
 * Reference excerpts below document the kernel conditions under which
 * TIME_ERROR is reported; they are quotes, not live code.
 */
331 from the reference implementation of ntp_gettime():
333 // Hardware or software error
334 if ((time_status & (STA_UNSYNC | STA_CLOCKERR))
337 * PPS signal lost when either time or frequency synchronization
340 || (time_status & (STA_PPSFREQ | STA_PPSTIME)
341 && !(time_status & STA_PPSSIGNAL))
344 * PPS jitter exceeded when time synchronization requested
346 || (time_status & STA_PPSTIME &&
347 time_status & STA_PPSJITTER)
350 * PPS wander exceeded or calibration error when frequency
351 * synchronization requested
353 || (time_status & STA_PPSFREQ &&
354 time_status & (STA_PPSWANDER | STA_PPSERROR)))
357 or, from ntp_adjtime():
359 if ( (time_status & (STA_UNSYNC | STA_CLOCKERR))
360 || (time_status & (STA_PPSFREQ | STA_PPSTIME)
361 && !(time_status & STA_PPSSIGNAL))
362 || (time_status & STA_PPSTIME
363 && time_status & STA_PPSJITTER)
364 || (time_status & STA_PPSFREQ
365 && time_status & (STA_PPSWANDER | STA_PPSERROR))
370 case TIME_ERROR: /* 5: unsynchronized, or loss of synchronization */
371 /* error (see status word) */
/* Build a comma-free description of every raised status condition. */
373 if (ptimex->status & STA_UNSYNC)
374 xsbprintf(&dbp, ebp, "%sClock Unsynchronized",
377 if (ptimex->status & STA_CLOCKERR)
378 xsbprintf(&dbp, ebp, "%sClock Error",
381 if (!(ptimex->status & STA_PPSSIGNAL)
382 && ptimex->status & STA_PPSFREQ)
383 xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but no PPS",
386 if (!(ptimex->status & STA_PPSSIGNAL)
387 && ptimex->status & STA_PPSTIME)
388 xsbprintf(&dbp, ebp, "%sPPS Time Sync wanted but no PPS signal",
391 if ( ptimex->status & STA_PPSTIME
392 && ptimex->status & STA_PPSJITTER)
393 xsbprintf(&dbp, ebp, "%sPPS Time Sync wanted but PPS Jitter exceeded",
396 if ( ptimex->status & STA_PPSFREQ
397 && ptimex->status & STA_PPSWANDER)
398 xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but PPS Wander exceeded",
401 if ( ptimex->status & STA_PPSFREQ
402 && ptimex->status & STA_PPSERROR)
403 xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but Calibration error detected",
406 if (pps_call && !(ptimex->status & STA_PPSSIGNAL))
407 report_event(EVNT_KERN, NULL,
409 DPRINTF(1, ("kernel loop status %#x (%s)\n",
410 ptimex->status, des));
412 * This code may be returned when ntp_adjtime() has just
413 * been called for the first time, quite a while after
414 * startup, when ntpd just starts to discipline the kernel
415 * time. In this case the occurrence of this message
416 * can be pretty confusing.
418 * HMS: How about a message when we begin kernel processing:
419 * Determining kernel clock state...
420 * so an initial TIME_ERROR message is less confusing,
421 * or skipping the first message (ugh),
423 * msyslog(LOG_INFO, "kernel reports time synchronization lost");
425 msyslog(LOG_INFO, "kernel reports TIME_ERROR: %#x: %s",
426 ptimex->status, des);
429 # warning TIME_ERROR is not defined
432 msyslog(LOG_NOTICE, "%s: %s line %d: unhandled return value %d from ntp_adjtime() in %s at line %d",
433 caller, file_name(), line,
444 * local_clock - the NTP logical clock loop filter.
/*
 * Core of the clock discipline: given the latest measured offset from
 * the selected peer, run the state machine (NSET/FSET/FREQ/SPIK/SYNC),
 * decide between stepping and slewing, update frequency, jitter and
 * wander estimates, and (when available) hand the correction to the
 * kernel PLL via ntp_adjtime().
 *
 * Return values (per the surviving comments):
 */
447 * -1 update ignored: exceeds panic threshold
448 * 0 update ignored: popcorn or exceeds step threshold
450 * 2 clock was stepped
452 * LOCKCLOCK: The only thing this routine does is set the
453 * sys_rootdisp variable equal to the peer dispersion.
457 struct peer *peer, /* synch source peer structure */
458 double fp_offset /* clock offset (s) */
461 int rval; /* return code */
462 int osys_poll; /* old system poll */
463 int ntp_adj_ret; /* returned by ntp_adjtime */
464 double mu; /* interval since last update */
465 double clock_frequency; /* clock frequency */
466 double dtemp, etemp; /* double temps */
467 char tbuf[80]; /* report buffer */
469 (void)ntp_adj_ret; /* not always used below... */
471 * If the loop is opened or the NIST LOCKCLOCK is in use,
472 * monitor and record the offsets anyway in order to determine
473 * the open-loop response and then go home.
477 #endif /* not LOCKCLOCK */
479 record_loop_stats(fp_offset, drift_comp, clock_jitter,
480 clock_stability, sys_poll);
486 * If the clock is way off, panic is declared. The clock_panic
487 * defaults to 1000 s; if set to zero, the panic will never
488 * occur. The allow_panic defaults to FALSE, so the first panic
489 * will exit. It can be set TRUE by a command line option, in
490 * which case the clock will be set anyway and time marches on.
491 * But, allow_panic will be set FALSE when the update is less
492 * than the step threshold; so, subsequent panics will exit.
494 if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
496 snprintf(tbuf, sizeof(tbuf),
497 "%+.0f s; set clock manually within %.0f s.",
498 fp_offset, clock_panic);
499 report_event(EVNT_SYSFAULT, NULL, tbuf);
506 * This section simulates ntpdate. If the offset exceeds the
507 * step threshold (128 ms), step the clock to that time and
508 * exit. Otherwise, slew the clock to that time and exit. Note
509 * that the slew will persist and eventually complete beyond the
510 * life of this program. Note that while ntpdate is active, the
511 * terminal does not detach, so the termination message prints
512 * directly to the terminal.
515 if ( ( fp_offset > clock_max_fwd && clock_max_fwd > 0)
516 || (-fp_offset > clock_max_back && clock_max_back > 0)) {
517 step_systime(fp_offset);
518 msyslog(LOG_NOTICE, "ntpd: time set %+.6f s",
520 printf("ntpd: time set %+.6fs\n", fp_offset);
522 adj_systime(fp_offset);
523 msyslog(LOG_NOTICE, "ntpd: time slew %+.6f s",
525 printf("ntpd: time slew %+.6fs\n", fp_offset);
527 record_loop_stats(fp_offset, drift_comp, clock_jitter,
528 clock_stability, sys_poll);
533 * The huff-n'-puff filter finds the lowest delay in the recent
534 * interval. This is used to correct the offset by one-half the
535 * difference between the sample delay and minimum delay. This
536 * is most effective if the delays are highly asymmetric and
537 * clockhopping is avoided and the clock frequency wander is
540 if (sys_huffpuff != NULL) {
541 if (peer->delay < sys_huffpuff[sys_huffptr])
542 sys_huffpuff[sys_huffptr] = peer->delay;
543 if (peer->delay < sys_mindly)
544 sys_mindly = peer->delay;
/* sign of the correction follows the sign of the offset */
546 dtemp = -(peer->delay - sys_mindly) / 2;
548 dtemp = (peer->delay - sys_mindly) / 2;
550 DPRINTF(1, ("local_clock: size %d mindly %.6f huffpuff %.6f\n",
551 sys_hufflen, sys_mindly, dtemp));
555 * Clock state machine transition function which defines how the
556 * system reacts to large phase and frequency excursion. There
557 * are two main regimes: when the offset exceeds the step
558 * threshold (128 ms) and when it does not. Under certain
559 * conditions updates are suspended until the stepout threshold
560 * (900 s) is exceeded. See the documentation on how these
561 * thresholds interact with commands and command line options.
563 * Note the kernel is disabled if step is disabled or greater
564 * than 0.5 s or in ntpdate mode.
/* clamp the working poll interval to the peer's poll bounds */
566 osys_poll = sys_poll;
567 if (sys_poll < peer->minpoll)
568 sys_poll = peer->minpoll;
569 if (sys_poll > peer->maxpoll)
570 sys_poll = peer->maxpoll;
571 mu = current_time - clock_epoch;
572 clock_frequency = drift_comp;
574 if ( ( fp_offset > clock_max_fwd && clock_max_fwd > 0)
575 || (-fp_offset > clock_max_back && clock_max_back > 0)
576 || force_step_once ) {
577 if (force_step_once) {
578 force_step_once = FALSE; /* we want this only once after startup */
/* NOTE(review): "intital" is a typo in the log text; left as-is here
 * because changing it alters runtime output. */
579 msyslog(LOG_NOTICE, "Doing intital time step" );
585 * In SYNC state we ignore the first outlier and switch
589 snprintf(tbuf, sizeof(tbuf), "%+.6f s",
591 report_event(EVNT_SPIK, NULL, tbuf);
596 * In FREQ state we ignore outliers and inlyers. At the
597 * first outlier after the stepout threshold, compute
598 * the apparent frequency correction and step the phase.
601 if (mu < clock_minstep)
604 clock_frequency = direct_freq(fp_offset);
606 /* fall through to EVNT_SPIK */
609 * In SPIK state we ignore succeeding outliers until
610 * either an inlyer is found or the stepout threshold is
614 if (mu < clock_minstep)
617 /* fall through to default */
620 * We get here by default in NSET and FSET states and
621 * from above in FREQ or SPIK states.
623 * In NSET state an initial frequency correction is not
624 * available, usually because the frequency file has not
625 * yet been written. Since the time is outside the step
626 * threshold, the clock is stepped. The frequency will
627 * be set directly following the stepout interval.
629 * In FSET state the initial frequency has been set from
630 * the frequency file. Since the time is outside the
631 * step threshold, the clock is stepped immediately,
632 * rather than after the stepout interval. Guys get
633 * nervous if it takes 15 minutes to set the clock for
636 * In FREQ and SPIK states the stepout threshold has
637 * expired and the phase is still above the step
638 * threshold. Note that a single spike greater than the
639 * step threshold is always suppressed, even with a
640 * long time constant.
643 snprintf(tbuf, sizeof(tbuf), "%+.6f s",
645 report_event(EVNT_CLOCKRESET, NULL, tbuf);
646 step_systime(fp_offset);
/* after a step the jitter estimate restarts from clock precision */
649 clock_jitter = LOGTOD(sys_precision);
651 if (state == EVNT_NSET) {
652 rstclock(EVNT_FREQ, 0);
657 rstclock(EVNT_SYNC, 0);
660 * The offset is less than the step threshold. Calculate
661 * the jitter as the exponentially weighted offset
/* exponential average of squared offset differences (RMS jitter) */
664 etemp = SQUARE(clock_jitter);
665 dtemp = SQUARE(max(fabs(fp_offset - last_offset),
666 LOGTOD(sys_precision)));
667 clock_jitter = SQRT(etemp + (dtemp - etemp) /
672 * In NSET state this is the first update received and
673 * the frequency has not been initialized. Adjust the
674 * phase, but do not adjust the frequency until after
675 * the stepout threshold.
678 adj_systime(fp_offset);
679 rstclock(EVNT_FREQ, fp_offset);
683 * In FREQ state ignore updates until the stepout
684 * threshold. After that, compute the new frequency, but
685 * do not adjust the frequency until the holdoff counter
686 * decrements to zero.
689 if (mu < clock_minstep)
692 clock_frequency = direct_freq(fp_offset);
696 * We get here by default in FSET, SPIK and SYNC states.
697 * Here compute the frequency update due to PLL and FLL
698 * contributions. Note, we avoid frequency discipline at
699 * startup until the initial transient has subsided.
705 * The FLL and PLL frequency gain constants
706 * depend on the time constant and Allan
707 * intercept. The PLL is always used, but
708 * becomes ineffective above the Allan intercept
709 * where the FLL becomes effective.
711 if (sys_poll >= allan_xpt)
713 (fp_offset - clock_offset)
714 / ( max(ULOGTOD(sys_poll), mu)
718 * The PLL frequency gain (numerator) depends on
719 * the minimum of the update interval and Allan
720 * intercept. This reduces the PLL gain when the
721 * FLL becomes effective.
723 etemp = min(ULOGTOD(allan_xpt), mu);
724 dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll);
726 fp_offset * etemp / (dtemp * dtemp);
728 rstclock(EVNT_SYNC, fp_offset);
729 if (fabs(fp_offset) < CLOCK_FLOOR)
737 * This code segment works when clock adjustments are made using
738 * precision time kernel support and the ntp_adjtime() system
739 * call. This support is available in Solaris 2.6 and later,
740 * Digital Unix 4.0 and later, FreeBSD, Linux and specially
741 * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
742 * DECstation 5000/240 and Alpha AXP, additional kernel
743 * modifications provide a true microsecond clock and nanosecond
744 * clock, respectively.
746 * Important note: The kernel discipline is used only if the
747 * step threshold is less than 0.5 s, as anything higher can
748 * lead to overflow problems. This might occur if some misguided
749 * lad set the step threshold to something ridiculous.
751 if (pll_control && kern_enable && freq_cnt == 0) {
754 * We initialize the structure for the ntp_adjtime()
755 * system call. We have to convert everything to
756 * microseconds or nanoseconds first. Do not update the
757 * system variables if the ext_enable flag is set. In
758 * this case, the external clock driver will update the
759 * variables, which will be read later by the local
760 * clock driver. Afterwards, remember the time and
761 * frequency offsets for jitter and stability values and
762 * to update the frequency file.
766 ntv.modes = MOD_STATUS;
768 ntv.modes = MOD_BITS;
769 ntv.offset = var_long_from_dbl(
770 clock_offset, &ntv.modes);
772 ntv.constant = sys_poll;
/* non-nanosecond kernels take a smaller time constant */
774 ntv.constant = sys_poll - 4;
775 #endif /* STA_NANO */
776 if (ntv.constant < 0)
779 ntv.esterror = usec_long_from_dbl(
781 ntv.maxerror = usec_long_from_dbl(
782 sys_rootdelay / 2 + sys_rootdisp);
783 ntv.status = STA_PLL;
786 * Enable/disable the PPS if requested.
788 if (hardpps_enable) {
789 ntv.status |= (STA_PPSTIME | STA_PPSFREQ);
790 if (!(pll_status & STA_PPSTIME))
791 sync_status("PPS enabled",
795 ntv.status &= ~(STA_PPSTIME | STA_PPSFREQ);
796 if (pll_status & STA_PPSTIME)
797 sync_status("PPS disabled",
/* propagate any pending leap-second warning to the kernel */
801 if (sys_leap == LEAP_ADDSECOND)
802 ntv.status |= STA_INS;
803 else if (sys_leap == LEAP_DELSECOND)
804 ntv.status |= STA_DEL;
808 * Pass the stuff to the kernel. If it squeals, turn off
809 * the pps. In any case, fetch the kernel offset,
810 * frequency and jitter.
812 ntp_adj_ret = ntp_adjtime(&ntv);
814 * A squeal is a return status < 0, or a state change.
816 if ((0 > ntp_adj_ret) || (ntp_adj_ret != kernel_status)) {
817 kernel_status = ntp_adj_ret;
818 ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, hardpps_enable, 0, __LINE__ - 1);
820 pll_status = ntv.status;
821 clock_offset = dbl_from_var_long(ntv.offset, ntv.status);
822 clock_frequency = FREQTOD(ntv.freq);
825 * If the kernel PPS is lit, monitor its performance.
827 if (ntv.status & STA_PPSTIME) {
828 clock_jitter = dbl_from_var_long(
829 ntv.jitter, ntv.status);
832 #if defined(STA_NANO) && NTP_API == 4
834 * If the TAI changes, update the kernel TAI.
836 if (loop_tai != sys_tai) {
839 ntv.constant = sys_tai;
840 if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
841 ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 1, __LINE__ - 1);
844 #endif /* STA_NANO */
846 #endif /* KERNEL_PLL */
849 * Clamp the frequency within the tolerance range and calculate
850 * the frequency difference since the last update.
852 if (fabs(clock_frequency) > NTP_MAXFREQ)
854 "frequency error %.0f PPM exceeds tolerance %.0f PPM",
855 clock_frequency * 1e6, NTP_MAXFREQ * 1e6);
856 dtemp = SQUARE(clock_frequency - drift_comp);
857 if (clock_frequency > NTP_MAXFREQ)
858 drift_comp = NTP_MAXFREQ;
859 else if (clock_frequency < -NTP_MAXFREQ)
860 drift_comp = -NTP_MAXFREQ;
862 drift_comp = clock_frequency;
865 * Calculate the wander as the exponentially weighted RMS
866 * frequency differences. Record the change for the frequency
869 etemp = SQUARE(clock_stability);
870 clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);
873 * Here we adjust the time constant by comparing the current
874 * offset with the clock jitter. If the offset is less than the
875 * clock jitter times a constant, then the averaging interval is
876 * increased, otherwise it is decreased. A bit of hysteresis
877 * helps calm the dance. Works best using burst mode. Don't
878 * fiddle with the poll during the startup clamp period.
879 * [Bug 3615] also observe time gates to avoid eager stepping
883 tc_twinlo = current_time;
884 tc_twinhi = current_time;
885 } else if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
886 tc_counter += sys_poll;
887 if (tc_counter > CLOCK_LIMIT) {
888 tc_counter = CLOCK_LIMIT;
889 if (sys_poll < peer->maxpoll)
/* poll is bumped only after the step-up time gate passes */
890 sys_poll += (current_time >= tc_twinhi);
/* poll decreases are weighted twice as heavily as increases */
893 tc_counter -= sys_poll << 1;
894 if (tc_counter < -CLOCK_LIMIT) {
895 tc_counter = -CLOCK_LIMIT;
896 if (sys_poll > peer->minpoll)
897 sys_poll -= (current_time >= tc_twinlo);
902 * If the time constant has changed, update the poll variables.
904 * [bug 3615] also set new time gates
905 * The time limit for stepping down will be half the TC interval
906 * or 60 secs from now, whatever is bigger, and the step up time
907 * limit will be half the TC interval after the step down limit.
909 * The 'sys_poll' value affects the servo loop gain, and
910 * overshooting sys_poll slows it down unnecessarily. Stepping
911 * down too fast also has bad effects.
913 * The 'tc_counter' dance itself is something that *should*
914 * happen *once* every (1 << sys_poll) seconds, I think, but
915 * that's not how it works right now, and adding time guards
916 * seems the least intrusive way to handle this.
918 if (osys_poll != sys_poll) {
919 u_int deadband = 1u << (sys_poll - 1);
921 tc_twinlo = current_time + max(deadband, 60);
922 tc_twinhi = tc_twinlo + deadband;
923 poll_update(peer, sys_poll, 0);
927 * Yibbidy, yibbbidy, yibbidy; that'h all folks.
929 record_loop_stats(clock_offset, drift_comp, clock_jitter,
930 clock_stability, sys_poll);
931 DPRINTF(1, ("local_clock: offset %.9f jit %.9f freq %.3f stab %.3f poll %d\n",
932 clock_offset, clock_jitter, drift_comp * 1e6,
933 clock_stability * 1e6, sys_poll));
935 #endif /* not LOCKCLOCK */
940 * adj_host_clock - Called once every second to update the local clock.
/*
 * Applies the per-second phase decay (offset_adj) plus the frequency
 * correction (freq_adj) via adj_systime(), unless the kernel loop has
 * already taken over frequency control in set_freq().
 */
942 * LOCKCLOCK: The only thing this routine does is increment the
943 * sys_rootdisp variable.
954 * Update the dispersion since the last update. In contrast to
955 * NTPv3, NTPv4 does not declare unsynchronized after one day,
956 * since the dispersion check serves this function. Also,
957 * since the poll interval can exceed one day, the old test
958 * would be counterproductive. During the startup clamp period, the
959 * time constant is clamped at 2.
961 sys_rootdisp += clock_phi;
/* nothing to do if discipline is disabled or running as ntpdate */
963 if (!ntp_enable || mode_ntpdate)
966 * Determine the phase adjustment. The gain factor (denominator)
967 * increases with poll interval, so is dominated by the FLL
968 * above the Allan intercept. Note the reduced time constant at
971 if (state != EVNT_SYNC) {
973 } else if (freq_cnt > 0) {
/* startup clamp: effective time constant of 2 (ULOGTOD(1)) */
974 offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(1));
977 } else if (pll_control && kern_enable) {
979 #endif /* KERNEL_PLL */
981 offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(sys_poll));
985 * If the kernel discipline is enabled the frequency correction
986 * drift_comp has already been engaged via ntp_adjtime() in
987 * set_freq(). Otherwise it is a component of the adj_systime()
991 if (pll_control && kern_enable)
994 #endif /* KERNEL_PLL */
995 freq_adj = drift_comp;
997 /* Bound absolute value of total adjustment to NTP_MAXFREQ. */
998 if (offset_adj + freq_adj > NTP_MAXFREQ)
999 offset_adj = NTP_MAXFREQ - freq_adj;
1000 else if (offset_adj + freq_adj < -NTP_MAXFREQ)
1001 offset_adj = -NTP_MAXFREQ - freq_adj;
/* the residual offset decays by the amount applied this second */
1003 clock_offset -= offset_adj;
1005 * Windows port adj_systime() must be called each second,
1006 * even if the argument is zero, to ease emulation of
1007 * adjtime() using Windows' slew API which controls the rate
1008 * but does not automatically stop slewing when an offset
1009 * has decayed to zero.
1011 DEBUG_INSIST(enable_panic_check == TRUE);
1012 enable_panic_check = FALSE;
1013 adj_systime(offset_adj + freq_adj);
1014 enable_panic_check = TRUE;
1015 #endif /* LOCKCLOCK */
1020 * Clock state machine. Enter new state and set state variables.
/*
 * rstclock - transition to a new discipline state, record the event
 * (except for EVNT_FSET and no-op transitions), and reset the offset
 * and epoch bookkeeping used by the loop filter.
 */
1024 int trans, /* new state */
1025 double offset /* new offset */
1028 DPRINTF(2, ("rstclock: mu %lu state %d poll %d count %d\n",
1029 current_time - clock_epoch, trans, sys_poll,
1031 if (trans != state && trans != EVNT_FSET)
1032 report_event(trans, NULL, NULL);
/* remember this offset and mark the epoch of the transition */
1034 last_offset = clock_offset = offset;
1035 clock_epoch = current_time;
1040 * calc_freq - calculate frequency directly
1042 * This is very carefully done. When the offset is first computed at the
1043 * first update, a residual frequency component results. Subsequently,
1044 * updates are suppressed until the end of the measurement interval while
1045 * the offset is amortized. At the end of the interval the frequency is
1046 * calculated from the current offset, residual offset, length of the
1047 * interval and residual frequency component. At the same time the
1048 * frequency file is armed for update at the next hourly stats.
/* frequency = offset accumulated over the interval since clock_epoch */
1055 set_freq(fp_offset / (current_time - clock_epoch));
1062 * set_freq - set clock frequency correction
1064 * Used to step the frequency correction at startup, possibly again once
1065 * the frequency is measured (that is, transitioning from EVNT_NSET to
1066 * EVNT_FSET), and finally to switch between daemon and kernel loop
1067 * discipline at runtime.
1069 * When the kernel loop discipline is available but the daemon loop is
1070 * in use, the kernel frequency correction is disabled (set to 0) to
1071 * ensure drift_comp is applied by only one of the loops.
1075 double freq /* frequency update */
1078 const char * loop_desc;
1081 (void)ntp_adj_ret; /* not always used below... */
/* hand the frequency to the kernel PLL when it is in control */
1087 ntv.modes = MOD_FREQUENCY;
1089 loop_desc = "kernel";
1090 ntv.freq = DTOFREQ(drift_comp);
1092 if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
1093 ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
1096 #endif /* KERNEL_PLL */
/* announce which loop now owns the frequency and its value in PPM */
1097 mprintf_event(EVNT_FSET, NULL, "%s %.3f PPM", loop_desc,
/*
 * start_kern_loop - probe for and enable the kernel PLL discipline.
 * Issues a trial ntp_adjtime() under a SIGSYS trap so that a missing
 * syscall downgrades gracefully to the daemon loop (pll_control FALSE)
 * instead of killing the process.
 */
1104 start_kern_loop(void)
1106 static int atexit_done;
1111 ntv.modes = MOD_BITS;
1112 ntv.status = STA_PLL | STA_UNSYNC;
/* seed error bounds at maximum dispersion, in microseconds */
1113 ntv.maxerror = MAXDISPERSE * 1.0e6;
1114 ntv.esterror = MAXDISPERSE * 1.0e6;
1115 ntv.constant = sys_poll;
1116 /* ^^^^^^^^ why is it that here constant is
1117 * unconditionally set to sys_poll, whereas elsewhere it is
1118 * modified depending on nanosecond vs. microsecond kernel?
1122 * Use sigsetjmp() to save state and then call ntp_adjtime(); if
1123 * it fails, then pll_trap() will set pll_control FALSE before
1124 * returning control using siglongjmp().
1126 newsigsys.sa_handler = pll_trap;
1127 newsigsys.sa_flags = 0;
1128 if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
1129 msyslog(LOG_ERR, "sigaction() trap SIGSYS: %m");
1130 pll_control = FALSE;
1132 if (sigsetjmp(env, 1) == 0) {
1133 if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
1134 ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
/* restore the original SIGSYS disposition after the probe */
1137 if (sigaction(SIGSYS, &sigsys, NULL)) {
1139 "sigaction() restore SIGSYS: %m");
1140 pll_control = FALSE;
1144 if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
1145 ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
1150 * Save the result status and light up an external clock
1153 pll_status = ntv.status;
/* register cleanup once so kernel sync is disabled on exit */
1157 atexit(&stop_kern_loop);
1160 if (pll_status & STA_CLK)
1162 #endif /* STA_NANO */
1163 report_event(EVNT_KERN, NULL,
1164 "kernel time sync enabled");
1167 #endif /* KERNEL_PLL */
1172 stop_kern_loop(void)
/* Announce shutdown only when the kernel loop was actually in use. */
1174 if (pll_control && kern_enable)
1175 report_event(EVNT_KERN, NULL,
1176 "kernel time sync disabled");
1178 #endif /* KERNEL_PLL */
1182 * select_loop() - choose kernel or daemon loop discipline.
/* No change requested: the desired discipline is already selected. */
1189 if (kern_enable == use_kern_loop)
/* Switching away from the kernel loop while it is available. */
1192 if (pll_control && !use_kern_loop)
1195 kern_enable = use_kern_loop;
/* Switching to the kernel loop while it is available. */
1197 if (pll_control && use_kern_loop)
1201 * If this loop selection change occurs after initial startup,
1202 * call set_freq() to switch the frequency compensation to or
1203 * from the kernel loop.
1206 if (pll_control && loop_started)
1207 set_freq(drift_comp);
1213 * huff-n'-puff filter
/* Nothing to do until the filter is allocated (see LOOP_HUFFPUFF in
 * loop_config()). */
1220 if (sys_huffpuff == NULL)
/* Advance the ring pointer and reset the new slot to "infinity"
 * (1e9 s) so any real delay sample will replace it. */
1223 sys_huffptr = (sys_huffptr + 1) % sys_hufflen;
1224 sys_huffpuff[sys_huffptr] = 1e9;
/* Recompute the minimum delay over the whole window. */
1226 for (i = 0; i < sys_hufflen; i++) {
1227 if (sys_huffpuff[i] < sys_mindly)
1228 sys_mindly = sys_huffpuff[i];
1234 * loop_config - configure the loop filter
1236 * LOCKCLOCK: The LOOP_DRIFTINIT and LOOP_DRIFTCOMP cases are no-ops.
1247 DPRINTF(2, ("loop_config: item %d freq %f\n", item, freq));
1251 * We first assume the kernel supports the ntp_adjtime()
1252 * syscall. If that syscall works, initialize the kernel time
1253 * variables. Otherwise, continue leaving no harm behind.
1255 case LOOP_DRIFTINIT:
1262 #endif /* KERNEL_PLL */
1265 * Initialize frequency if given; otherwise, begin frequency
1266 * calibration phase.
/* Convert the configured drift from PPM to s/s and clamp it to the
 * legal range [-NTP_MAXFREQ, NTP_MAXFREQ]. */
1268 ftemp = init_drift_comp / 1e6;
1269 if (ftemp > NTP_MAXFREQ)
1270 ftemp = NTP_MAXFREQ;
1271 else if (ftemp < -NTP_MAXFREQ)
1272 ftemp = -NTP_MAXFREQ;
/* EVNT_FSET when a frequency was supplied; EVNT_NSET to begin the
 * calibration phase from scratch. */
1275 rstclock(EVNT_FSET, 0);
1277 rstclock(EVNT_NSET, 0);
1278 loop_started = TRUE;
1279 #endif /* LOCKCLOCK */
1282 case LOOP_KERN_CLEAR:
1283 #if 0 /* XXX: needs more review, and how can we get here? */
/* Disabled code path: would mark the kernel clock unsynchronized. */
1286 if (pll_control && kern_enable) {
1287 memset((char *)&ntv, 0, sizeof(ntv));
1288 ntv.modes = MOD_STATUS;
1289 ntv.status = STA_UNSYNC;
1291 sync_status("kernel time sync disabled",
1295 # endif /* KERNEL_PLL */
1296 #endif /* LOCKCLOCK */
1301 * Tinker command variables for Ulrich Windl. Very dangerous.
1303 case LOOP_ALLAN: /* Allan intercept (log2) (allan) */
1304 allan_xpt = (u_char)freq;
1307 case LOOP_CODEC: /* audio codec frequency (codec) */
/* PPM -> s/s */
1308 clock_codec = freq / 1e6;
1311 case LOOP_PHI: /* dispersion threshold (dispersion) */
/* PPM -> s/s */
1312 clock_phi = freq / 1e6;
1315 case LOOP_FREQ: /* initial frequency (freq) */
1316 init_drift_comp = freq;
1320 case LOOP_HUFFPUFF: /* huff-n'-puff length (huffpuff) */
/* Size the ring in units of HUFFPUFF seconds (undersized requests are
 * handled on the elided line below), then fill it with "infinity". */
1321 if (freq < HUFFPUFF)
1323 sys_hufflen = (int)(freq / HUFFPUFF);
1324 sys_huffpuff = eallocarray(sys_hufflen, sizeof(sys_huffpuff[0]));
1325 for (i = 0; i < sys_hufflen; i++)
1326 sys_huffpuff[i] = 1e9;
1330 case LOOP_PANIC: /* panic threshold (panic) */
1334 case LOOP_MAX: /* step threshold (step) */
/* Set both step limits at once; zero or large (> 0.5 s) limits make
 * the kernel discipline pointless (see LOOP_MAX_BACK comment). */
1335 clock_max_fwd = clock_max_back = freq;
1336 if (freq == 0 || freq > 0.5)
1340 case LOOP_MAX_BACK: /* step threshold (step) */
1341 clock_max_back = freq;
1343 * Leave using the kernel discipline code unless both
1344 * limits are massive. This assumes the reason to stop
1345 * using it is that it's pointless, not that it goes wrong.
1347 if ( (clock_max_back == 0 || clock_max_back > 0.5)
1348 || (clock_max_fwd == 0 || clock_max_fwd > 0.5))
1352 case LOOP_MAX_FWD: /* step threshold (step) */
1353 clock_max_fwd = freq;
1354 if ( (clock_max_back == 0 || clock_max_back > 0.5)
1355 || (clock_max_fwd == 0 || clock_max_fwd > 0.5))
1359 case LOOP_MINSTEP: /* stepout threshold (stepout) */
/* Enforce the CLOCK_MINSTEP floor (300 s). */
1360 if (freq < CLOCK_MINSTEP)
1361 clock_minstep = CLOCK_MINSTEP;
1363 clock_minstep = freq;
1366 case LOOP_TICK: /* tick increment (tick) */
1367 set_sys_tick_precision(freq);
1370 case LOOP_LEAP: /* not used, fall through */
1373 "loop_config: unsupported option %d", item);
1378 #if defined(KERNEL_PLL) && defined(SIGSYS)
1380 * _trap - trap processor for undefined syscalls
1382 * This nugget is called by the kernel when the SYS_ntp_adjtime()
1383 * syscall bombs because the silly thing has not been implemented in
1384 * the kernel. In this case the phase-lock loop is emulated by
1385 * the stock adjtime() syscall and a lot of indelicate abuse.
/* Mark the kernel PLL unavailable; control returns to the sigsetjmp()
 * in start_kern_loop(). */
1392 pll_control = FALSE;
1395 #endif /* KERNEL_PLL && SIGSYS */