3 #include <event2/util.h>
4 #include <event2/event.h>
6 #include "ntp_workimpl.h"
8 # include <event2/thread.h>
12 #include "ntp_libopts.h"
13 #include "kod_management.h"
14 #include "networking.h"
15 #include "utilities.h"
/*
** Global state for the sntp event loop: outstanding DNS/NTP query
** counters, address-family preference, step threshold, the unicast
** and broadcast sockets, and their libevent handles.
*/
23 int n_pending_dns = 0;
24 int n_pending_ntp = 0;
25 int ai_fam_pref = AF_UNSPEC;
/* step threshold in seconds; set in main() from OPT_VALUE_STEPLIMIT / 1e3 */
27 double steplimit = -1;
28 SOCKET sock4 = -1; /* Socket for IPv4 */
29 SOCKET sock6 = -1; /* Socket for IPv6 */
31 ** BCAST *must* listen on port 123 (by default), so we can only
32 ** use the UCST sockets (above) if they too are using port 123
34 SOCKET bsock4 = -1; /* Broadcast Socket for IPv4 */
35 SOCKET bsock6 = -1; /* Broadcast Socket for IPv6 */
/* libevent base plus per-socket and timer events, created in main()/open_sockets() */
36 struct event_base *base;
37 struct event *ev_sock4;
38 struct event *ev_sock6;
39 struct event *ev_worker_timeout;
40 struct event *ev_xmt_timer;
/*
** Context flags for a pending lookup: broadcast vs. unicast reply
** handling, and concurrent-resolution mode.
*/
45 #define CTX_BCST 0x0001
46 #define CTX_UCST 0x0002
47 #define CTX_xCST 0x0003
48 #define CTX_CONC 0x0004
/*
** BUGFIX: CTX_unused must be the complement of every defined flag bit
** (BCST | UCST | CONC == 0x0007), i.e. 0xfff8.  The previous value
** 0xfffd excluded only the CTX_UCST bit while still overlapping
** CTX_BCST and CTX_CONC.
*/
49 #define CTX_unused 0xfff8
/* timeout bookkeeping, key material, and the per-family sent-packet lists */
51 struct timeval timeout;
55 typedef struct sent_pkt_tag sent_pkt;
58 struct dns_ctx * dctx;
65 typedef struct xmt_ctx_tag xmt_ctx;
/* key list loaded by auth_init() when --keyfile is given */
75 struct key * keys = NULL;
77 struct timeval response_tv;
78 struct timeval start_tv;
79 /* check the timeout at least once per second */
80 struct timeval wakeup_tv = { 0, 888888 };
/* one sent-packet list head per address family; see the macros below */
82 sent_pkt * fam_listheads[2];
83 #define v4_pkts_list (fam_listheads[0])
84 #define v6_pkts_list (fam_listheads[1])
88 char buf[LEN_PKT_NOMAC + NTP_MAXEXTEN + MAX_MAC_LEN];
91 #define r_pkt rbuf.pkt
94 int droproot; /* intres imports these */
97 u_long current_time; /* libntp/authkeys.c */
/* Forward declarations for this translation unit. */
99 void open_sockets(void);
100 void handle_lookup(const char *name, int flags);
101 void sntp_addremove_fd(int fd, int is_pipe, int remove_it);
102 void worker_timeout(evutil_socket_t, short, void *);
103 void worker_resp_cb(evutil_socket_t, short, void *);
104 void sntp_name_resolved(int, int, void *, const char *, const char *,
105 const struct addrinfo *,
106 const struct addrinfo *);
107 void queue_xmt(SOCKET sock, struct dns_ctx *dctx, sent_pkt *spkt,
109 void xmt_timer_cb(evutil_socket_t, short, void *ptr);
110 void xmt(xmt_ctx *xctx);
111 int check_kod(const struct addrinfo *ai);
112 void timeout_query(sent_pkt *);
113 void timeout_queries(void);
114 void sock_cb(evutil_socket_t, short, void *);
115 void check_exit_conditions(void);
116 void sntp_libevent_log_cb(int, const char *);
117 void set_li_vn_mode(struct pkt *spkt, char leap, char version, char mode);
118 int set_time(double offset);
119 void dec_pending_ntp(const char *, sockaddr_u *);
120 int libevent_version_ok(void);
121 int gettimeofday_cached(struct event_base *b, struct timeval *tv);
125  * The actual main function.
/*
** Parses options, initializes logging/KoD/auth state, builds the
** libevent base, queues lookups for -b/-c/positional hosts, then runs
** event_base_dispatch() until check_exit_conditions() or
** timeout_queries() stops the loop.
** NOTE(review): this chunk is elided; several statements between the
** numbered lines are not visible here.
*/
131 const char *sntpVersion
137 struct event_config * evcfg;
139 /* Initialize logging system - sets up progname */
140 sntp_init_logging(argv[0]);
/* refuse to run against an ABI-incompatible libevent (major mismatch) */
142 if (!libevent_version_ok())
148 optct = ntpOptionProcess(&sntpOptions, argc, argv);
153 debug = OPT_VALUE_SET_DEBUG_LEVEL;
155 TRACE(2, ("init_lib() done, %s%s\n",
162 ntpver = OPT_VALUE_NTPVERSION;
/* option is in milliseconds; steplimit is kept in seconds */
163 steplimit = OPT_VALUE_STEPLIMIT / 1e3;
/* clamp inter-transmit gap to [0, 999999] microseconds */
164 gap.tv_usec = max(0, OPT_VALUE_GAP * 1000);
165 gap.tv_usec = min(gap.tv_usec, 999999);
167 if (HAVE_OPT(LOGFILE))
168 open_logfile(OPT_ARG(LOGFILE));
170 msyslog(LOG_INFO, "%s", sntpVersion);
172 if (0 == argc && !HAVE_OPT(BROADCAST) && !HAVE_OPT(CONCURRENT)) {
173 printf("%s: Must supply at least one of -b hostname, -c hostname, or hostname.\n",
180 ** Eventually, we probably want:
181 ** - separate bcst and ucst timeouts (why?)
182 ** - multiple --timeout values in the commandline
185 response_timeout = OPT_VALUE_TIMEOUT;
186 response_tv.tv_sec = response_timeout;
187 response_tv.tv_usec = 0;
189 /* IPv6 available? */
190 if (isc_net_probeipv6() != ISC_R_SUCCESS) {
191 ai_fam_pref = AF_INET;
192 TRACE(1, ("No ipv6 support available, forcing ipv4\n"));
194 /* Check for options -4 and -6 */
196 ai_fam_pref = AF_INET;
197 else if (HAVE_OPT(IPV6))
198 ai_fam_pref = AF_INET6;
201 /* TODO: Parse config file if declared */
204 ** Init the KOD system.
205 ** For embedded systems with no writable filesystem,
206 ** -K /dev/null can be used to disable KoD storage.
208 kod_init_kod_db(OPT_ARG(KOD), FALSE);
210 // HMS: Should we use arg-defalt for this too?
211 if (HAVE_OPT(KEYFILE))
212 auth_init(OPT_ARG(KEYFILE), &keys);
215 ** Considering employing a variable that prevents functions of doing
216 ** anything until everything is initialized properly
218 ** HMS: What exactly does the above mean?
220 event_set_log_callback(&sntp_libevent_log_cb);
222 event_enable_debug_mode();
/* we use pthread locking even though all calls are from the main thread */
224 evthread_use_pthreads();
225 /* we use libevent from main thread only, locks should be academic */
227 evthread_enable_lock_debuging();
229 evcfg = event_config_new();
231 printf("%s: event_config_new() failed!\n", progname);
234 #ifndef HAVE_SOCKETPAIR
235 event_config_require_features(evcfg, EV_FEATURE_FDS);
237 /* all libevent calls are from main thread */
238 /* event_config_set_flag(evcfg, EVENT_BASE_FLAG_NOLOCK); */
239 base = event_base_new_with_config(evcfg);
240 event_config_free(evcfg);
242 printf("%s: event_base_new() failed!\n", progname);
246 /* wire into intres resolver */
247 worker_per_query = TRUE;
248 addremove_io_fd = &sntp_addremove_fd;
/* queue a DNS lookup for each -b, -c, and positional hostname */
252 if (HAVE_OPT(BROADCAST)) {
253 int cn = STACKCT_OPT( BROADCAST );
254 const char ** cp = STACKLST_OPT( BROADCAST );
257 handle_lookup(*cp, CTX_BCST);
262 if (HAVE_OPT(CONCURRENT)) {
263 int cn = STACKCT_OPT( CONCURRENT );
264 const char ** cp = STACKLST_OPT( CONCURRENT );
267 handle_lookup(*cp, CTX_UCST | CTX_CONC);
272 for (i = 0; i < argc; ++i)
273 handle_lookup(argv[i], CTX_UCST);
/* record start time, then run the event loop until asked to exit */
275 gettimeofday_cached(base, &start_tv);
276 event_base_dispatch(base);
277 event_base_free(base);
279 if (!time_adjusted &&
280 (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
290 ** open sockets and make them non-blocking
/*
** Creates the IPv4 (and, when available, IPv6) UDP sockets, binds each
** to the wildcard address — port 123 with -r/--usereservedport, else an
** ephemeral port — and registers a persistent EV_READ|EV_TIMEOUT event
** (sock_cb) on each, woken at least once per second via wakeup_tv.
*/
300 sock4 = socket(PF_INET, SOCK_DGRAM, 0);
302 /* error getting a socket */
303 msyslog(LOG_ERR, "open_sockets: socket(PF_INET) failed: %m");
306 /* Make it non-blocking */
307 make_socket_nonblocking(sock4);
309 /* Let's try using a wildcard... */
312 SET_ADDR4N(&name, INADDR_ANY);
313 SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));
315 if (-1 == bind(sock4, &name.sa,
317 msyslog(LOG_ERR, "open_sockets: bind(sock4) failed: %m");
321 /* Register an NTP callback for recv/timeout */
322 ev_sock4 = event_new(base, sock4,
323 EV_TIMEOUT | EV_READ | EV_PERSIST,
325 if (NULL == ev_sock4) {
327 "open_sockets: event_new(base, sock4) failed!");
329 event_add(ev_sock4, &wakeup_tv);
333 /* We may not always have IPv6... */
334 if (-1 == sock6 && ipv6_works) {
335 sock6 = socket(PF_INET6, SOCK_DGRAM, 0);
336 if (-1 == sock6 && ipv6_works) {
337 /* error getting a socket */
338 msyslog(LOG_ERR, "open_sockets: socket(PF_INET6) failed: %m");
341 /* Make it non-blocking */
342 make_socket_nonblocking(sock6);
344 /* Let's try using a wildcard... */
346 AF(&name) = AF_INET6;
347 SET_ADDR6N(&name, in6addr_any);
348 SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));
350 if (-1 == bind(sock6, &name.sa,
352 msyslog(LOG_ERR, "open_sockets: bind(sock6) failed: %m");
355 /* Register an NTP callback for recv/timeout */
356 ev_sock6 = event_new(base, sock6,
357 EV_TIMEOUT | EV_READ | EV_PERSIST,
359 if (NULL == ev_sock6) {
361 "open_sockets: event_new(base, sock6) failed!");
363 event_add(ev_sock6, &wakeup_tv)
/*
** handle_lookup() - kick off an async DNS resolution for one hostname.
** Allocates a dns_ctx with the name copied inline after the struct,
** records the CTX_* flags and timeout, optionally resolves a -a key id,
** then hands off to getaddrinfo_sometime() with sntp_name_resolved as
** the completion callback.
*/
380 struct addrinfo hints; /* Local copy is OK */
387 TRACE(1, ("handle_lookup(%s,%#x)\n", name, flags));
390 hints.ai_family = ai_fam_pref;
391 hints.ai_flags = AI_CANONNAME | Z_AI_NUMERICSERV;
393 ** Unless we specify a socktype, we'll get at least two
394 ** entries for each address: one for TCP and one for
395 ** UDP. That's not what we want.
397 hints.ai_socktype = SOCK_DGRAM;
398 hints.ai_protocol = IPPROTO_UDP;
/* single allocation holds the ctx followed by a copy of the name */
400 name_sz = 1 + strlen(name);
401 octets = sizeof(*ctx) + name_sz; // Space for a ctx and the name
402 ctx = emalloc_zero(octets); // ctx at ctx[0]
403 name_copy = (char *)(ctx + 1); // Put the name at ctx[1]
404 memcpy(name_copy, name, name_sz); // copy the name to ctx[1]
405 ctx->name = name_copy; // point to it...
407 ctx->timeout = response_tv;
409 /* The following should arguably be passed in... */
410 if (ENABLED_OPT(AUTHENTICATION) &&
411 atoint(OPT_ARG(AUTHENTICATION), &l)) {
413 get_key(ctx->key_id, &ctx->key);
/* "123" is the NTP service; resolution completes in sntp_name_resolved() */
420 getaddrinfo_sometime(name, "123", &hints, 0,
421 &sntp_name_resolved, ctx);
429 ** - - increment n_pending_ntp
430 ** - - send a request if this is a Unicast callback
431 ** - - queue wait for response
432 ** - decrement n_pending_dns
/*
** sntp_name_resolved() - getaddrinfo_sometime() completion callback.
** On error, reports via stderr (%m for EAI_SYSTEM, gai_strerror()
** otherwise).  On success, walks the addrinfo list, picks the socket
** and transmit-spacing for the address family, and for CTX_UCST
** contexts queues a transmission per address via queue_xmt().
*/
440 const char * service,
441 const struct addrinfo * hints,
442 const struct addrinfo * addr
445 struct dns_ctx * dctx;
447 const struct addrinfo * ai;
459 if (EAI_SYSTEM == rescode) {
461 mfprintf(stderr, "%s lookup error %m\n",
465 fprintf(stderr, "%s lookup error %s\n",
466 dctx->name, gai_strerror(rescode));
468 TRACE(3, ("%s [%s]\n", dctx->name,
469 (addr->ai_canonname != NULL)
473 for (ai = addr; ai != NULL; ai = ai->ai_next) {
478 switch (ai->ai_family) {
482 xmt_delay = xmt_delay_v4;
491 xmt_delay = xmt_delay_v6;
496 msyslog(LOG_ERR, "sntp_name_resolved: unexpected ai_family: %d",
503 ** We're waiting for a response for either unicast
504 ** or broadcast, so...
508 /* If this is for a unicast IP, queue a request */
509 if (dctx->flags & CTX_UCST) {
510 spkt = emalloc_zero(sizeof(*spkt));
/* copy no more than sizeof(spkt->addr) of the resolved address */
512 octets = min(ai->ai_addrlen, sizeof(spkt->addr));
513 memcpy(&spkt->addr, ai->ai_addr, octets);
514 queue_xmt(sock, dctx, spkt, xmt_delay);
518 /* n_pending_dns really should be >0 here... */
520 check_exit_conditions();
/*
** queue_xmt() - schedule a transmission for one resolved address.
** Rejects duplicate address/port entries already on the per-family
** sent-packet list, then links a new xmt_ctx into the sorted xmt_q by
** scheduled time and (re)arms ev_xmt_timer when the new entry becomes
** the earliest.
*/
530 struct dns_ctx * dctx,
536 sent_pkt ** pkt_listp;
539 struct timeval start_cb;
540 struct timeval delay;
/* pick the sent-packet list matching the destination address family */
544 pkt_listp = &v6_pkts_list;
546 pkt_listp = &v4_pkts_list;
548 /* reject attempts to add address already listed */
549 for (match = *pkt_listp; match != NULL; match = match->link) {
550 if (ADDR_PORT_EQ(&spkt->addr, &match->addr)) {
551 if (strcasecmp(spkt->dctx->name,
553 printf("%s %s duplicate address from %s ignored.\n",
558 printf("%s %s, duplicate address ignored.\n",
/* duplicate dropped: release its pending-NTP accounting */
561 dec_pending_ntp(spkt->dctx->name, &spkt->addr);
567 LINK_SLIST(*pkt_listp, spkt, link);
569 xctx = emalloc_zero(sizeof(*xctx));
572 gettimeofday_cached(base, &start_cb);
/* stagger transmissions: each entry scheduled 2*xmt_delay out */
573 xctx->sched = start_cb.tv_sec + (2 * xmt_delay);
575 LINK_SORT_SLIST(xmt_q, xctx, (xctx->sched < L_S_S_CUR()->sched),
579 * The new entry is the first scheduled. The timer is
580 * either not active or is set for the second xmt
583 if (NULL == ev_xmt_timer)
584 ev_xmt_timer = event_new(base, INVALID_SOCKET,
586 &xmt_timer_cb, NULL);
587 if (NULL == ev_xmt_timer) {
589 "queue_xmt: event_new(base, -1, EV_TIMEOUT) failed!");
593 if (xctx->sched > start_cb.tv_sec)
594 delay.tv_sec = xctx->sched - start_cb.tv_sec;
595 event_add(ev_xmt_timer, &delay);
596 TRACE(2, ("queue_xmt: xmt timer for %u usec\n",
597 (u_int)delay.tv_usec));
/*
** xmt_timer_cb() - EV_TIMEOUT callback driving the transmit queue.
** Sends the head of xmt_q when its scheduled second has arrived, then
** re-arms the timer: by `gap` if the next entry is already due, or by
** the remaining whole seconds otherwise.  No-op when shutting down or
** the queue is empty.
*/
612 struct timeval start_cb;
613 struct timeval delay;
618 DEBUG_INSIST(EV_TIMEOUT == what);
620 if (NULL == xmt_q || shutting_down)
622 gettimeofday_cached(base, &start_cb);
623 if (xmt_q->sched <= start_cb.tv_sec) {
624 UNLINK_HEAD_SLIST(x, xmt_q, link);
625 TRACE(2, ("xmt_timer_cb: at .%6.6u -> %s\n",
626 (u_int)start_cb.tv_usec, stoa(&x->spkt->addr)));
/* next entry also due: pace follow-up transmission by `gap` */
632 if (xmt_q->sched <= start_cb.tv_sec) {
633 event_add(ev_xmt_timer, &gap);
634 TRACE(2, ("xmt_timer_cb: at .%6.6u gap %6.6u\n",
635 (u_int)start_cb.tv_usec,
636 (u_int)gap.tv_usec));
638 delay.tv_sec = xmt_q->sched - start_cb.tv_sec;
640 event_add(ev_xmt_timer, &delay);
641 TRACE(2, ("xmt_timer_cb: at .%6.6u next %ld seconds\n",
642 (u_int)start_cb.tv_usec,
643 (long)delay.tv_sec));
/*
** xmt() - build and send one NTP request for the given xmt_ctx.
** Timestamps the packet (NTP epoch: Unix time + JAN_1970), generates
** it with the context's key, sends via sendpkt(), and records the
** transmitted packet and send time in the sent_pkt for later matching
** in sock_cb().  On gettimeofday() failure the pending-NTP count is
** released instead.
*/
656 SOCKET sock = xctx->sock;
657 struct dns_ctx *dctx = xctx->spkt->dctx;
658 sent_pkt * spkt = xctx->spkt;
659 sockaddr_u * dst = &spkt->addr;
660 struct timeval tv_xmt;
665 if (0 != gettimeofday(&tv_xmt, NULL)) {
667 "xmt: gettimeofday() failed: %m");
/* convert Unix epoch to NTP epoch */
670 tv_xmt.tv_sec += JAN_1970;
672 pkt_len = generate_pkt(&x_pkt, &tv_xmt, dctx->key_id,
675 sent = sendpkt(sock, dst, &x_pkt, pkt_len);
677 /* Save the packet we sent... */
678 memcpy(&spkt->x_pkt, &x_pkt, min(sizeof(spkt->x_pkt),
/* stime is kept on the Unix scale for ageing in timeout_queries() */
680 spkt->stime = tv_xmt.tv_sec - JAN_1970;
682 TRACE(2, ("xmt: %lx.%6.6u %s %s\n", (u_long)tv_xmt.tv_sec,
683 (u_int)tv_xmt.tv_usec, dctx->name, stoa(dst)));
685 dec_pending_ntp(dctx->name, dst);
693  * timeout_queries() -- give up on unrequited NTP queries
696 timeout_queries(void)
698 struct timeval start_cb;
702 sent_pkt * spkt_next;
704 int didsomething = 0;
/* walk both per-family sent-packet lists, expiring aged entries */
706 TRACE(3, ("timeout_queries: called to check %u items\n",
707 (unsigned)COUNTOF(fam_listheads)));
709 gettimeofday_cached(base, &start_cb);
710 for (idx = 0; idx < COUNTOF(fam_listheads); idx++) {
711 head = fam_listheads[idx];
712 for (spkt = head; spkt != NULL; spkt = spkt_next) {
716 switch (spkt->dctx->flags & CTX_xCST) {
726 INSIST(!"spkt->dctx->flags neither UCST nor BCST");
/* capture link first: timeout_query() may unlink spkt */
730 spkt_next = spkt->link;
731 if (0 == spkt->stime || spkt->done)
733 age = start_cb.tv_sec - spkt->stime;
734 TRACE(3, ("%s %s %cCST age %ld\n",
736 spkt->dctx->name, xcst, age));
737 if (age > response_timeout)
741 // Do we care about didsomething?
742 TRACE(3, ("timeout_queries: didsomething is %d, age is %ld\n",
743 didsomething, (long) (start_cb.tv_sec - start_tv.tv_sec)));
/* global deadline: stop the event loop once overall time is exhausted */
744 if (start_cb.tv_sec - start_tv.tv_sec > response_timeout) {
745 TRACE(3, ("timeout_queries: bail!\n"));
746 event_base_loopexit(base, NULL);
747 shutting_down = TRUE;
/*
** dec_pending_ntp() - account for one NTP query finishing (answered,
** timed out, or discarded) and re-check exit conditions.  Decrementing
** below zero indicates a bookkeeping bug and is INSISTed against.
*/
752 void dec_pending_ntp(
757 if (n_pending_ntp > 0) {
759 check_exit_conditions();
761 INSIST(0 == n_pending_ntp);
762 TRACE(1, ("n_pending_ntp was zero before decrement for %s\n",
763 hostnameaddr(name, server)));
/*
** timeout_query() - log a single unanswered query and release its
** pending-NTP accounting.
*/
776 switch (spkt->dctx->flags & CTX_xCST) {
786 INSIST(!"spkt->dctx->flags neither UCST nor BCST");
790 server = &spkt->addr;
791 msyslog(LOG_INFO, "%s no %cCST response after %d seconds",
792 hostnameaddr(spkt->dctx->name, server), xcst,
794 dec_pending_ntp(spkt->dctx->name, server);
/*
** check_kod() - return nonzero (skip this server) when a prior
** Kiss-o'-Death entry is on file for the address, per the KoD database
** initialized in main().
*/
804 const struct addrinfo * ai
808 struct kod_entry *reason;
810 /* Is there a KoD on file for this address? */
811 hostname = addrinfo_to_str(ai);
812 TRACE(2, ("check_kod: checking <%s>\n", hostname));
813 if (search_entry(hostname, &reason)) {
814 printf("prior KoD for %s, skipping.\n",
828 ** Socket readable/timeout Callback:
829 ** Read in the packet
832 ** - decrement n_pending_ntp
833 ** - If packet is good, set the time and "exit"
835 ** - If packet is good, set the time and "exit"
/*
** sock_cb() - libevent callback on sock4/sock6.  EV_TIMEOUT wakeups
** only run timeout_queries(); EV_READ receives a packet, matches the
** sender against the per-family sent-packet list (dropping packets from
** unexpected sources), validates it with process_pkt() against the
** request we sent, releases pending-NTP accounting for unicast, then
** hands the result to handle_pkt().
*/
846 sent_pkt ** p_pktlist;
851 INSIST(sock4 == fd || sock6 == fd);
853 TRACE(3, ("sock_cb: event on sock%s:%s%s%s%s\n",
857 (what & EV_TIMEOUT) ? " timeout" : "",
858 (what & EV_READ) ? " read" : "",
859 (what & EV_WRITE) ? " write" : "",
860 (what & EV_SIGNAL) ? " signal" : ""));
862 if (!(EV_READ & what)) {
863 if (EV_TIMEOUT & what)
869 /* Read in the packet */
870 rpktl = recvdata(fd, &sender, &rbuf, sizeof(rbuf));
872 msyslog(LOG_DEBUG, "recvfrom error %m");
/* match the responder against our outstanding requests */
877 p_pktlist = &v6_pkts_list;
879 p_pktlist = &v4_pkts_list;
881 for (spkt = *p_pktlist; spkt != NULL; spkt = spkt->link) {
883 if (SOCK_EQ(&sender, psau))
888 "Packet from unexpected source %s dropped",
893 TRACE(1, ("sock_cb: %s %s\n", spkt->dctx->name,
896 rpktl = process_pkt(&r_pkt, &sender, rpktl, MODE_SERVER,
897 &spkt->x_pkt, "sock_cb");
899 TRACE(2, ("sock_cb: process_pkt returned %d\n", rpktl));
901 /* If this is a Unicast packet, one down ... */
902 if (!spkt->done && (CTX_UCST & spkt->dctx->flags)) {
903 dec_pending_ntp(spkt->dctx->name, &spkt->addr);
908 /* If the packet is good, set the time and we're all done */
909 rc = handle_pkt(rpktl, &r_pkt, &spkt->addr, spkt->dctx->name);
911 TRACE(1, ("sock_cb: handle_pkt() returned %d\n", rc));
912 check_exit_conditions();
917  * check_exit_conditions()
919  * If sntp has a reply, ask the event loop to stop after this round of
920  * callbacks, unless --wait was used.
/* Exit when no queries remain pending, or once time was derived and
** --wait was not requested; otherwise just trace the pending counts. */
923 check_exit_conditions(void)
925 if ((0 == n_pending_ntp && 0 == n_pending_dns) ||
926 (time_derived && !HAVE_OPT(WAIT))) {
927 event_base_loopexit(base, NULL);
928 shutting_down = TRUE;
930 TRACE(2, ("%d NTP and %d name queries pending\n",
931 n_pending_ntp, n_pending_dns));
937  * sntp_addremove_fd() is invoked by the intres blocking worker code
938  * to read from a pipe, or to stop same.
940 void sntp_addremove_fd(
/* pipes are only usable when libevent was built for fd backends;
** with socketpair() available we never ask for pipes at all */
950 #ifdef HAVE_SOCKETPAIR
952 /* sntp only asks for EV_FEATURE_FDS without HAVE_SOCKETPAIR */
953 msyslog(LOG_ERR, "fatal: pipes not supported on systems with socketpair()");
/* find the blocking child owning this descriptor */
959 for (idx = 0; idx < blocking_children_alloc; idx++) {
960 c = blocking_children[idx];
963 if (fd == c->resp_read_pipe)
966 if (idx == blocking_children_alloc)
/* removal: tear down the existing read event for this child */
970 ev = c->resp_read_ctx;
971 c->resp_read_ctx = NULL;
/* addition: watch the response pipe, dispatching worker_resp_cb */
978 ev = event_new(base, fd, EV_READ | EV_PERSIST,
982 "sntp_addremove_fd: event_new(base, fd) failed!");
985 c->resp_read_ctx = ev;
990 /* called by forked intres child to close open descriptors */
997 if (INVALID_SOCKET != sock4) {
999 sock4 = INVALID_SOCKET;
1001 if (INVALID_SOCKET != sock6) {
1003 sock6 = INVALID_SOCKET;
/*
** BUGFIX: the two broadcast branches below tested bsock4/bsock6 but
** then reset sock4/sock6 — leaking the broadcast descriptors' handles
** and redundantly clobbering the already-reset unicast ones.  Reset
** the broadcast sockets the conditions actually test.
*/
1005 if (INVALID_SOCKET != bsock4) {
1007 bsock4 = INVALID_SOCKET;
1009 if (INVALID_SOCKET != bsock6) {
1011 bsock6 = INVALID_SOCKET;
1018  * worker_resp_cb() is invoked when resp_read_pipe is readable.
/* ctx is the blocking_child whose response pipe fired; forward to the
** generic intres response processor */
1024 void * ctx /* blocking_child * */
1029 DEBUG_INSIST(EV_READ & what);
1031 DEBUG_INSIST(fd == c->resp_read_pipe);
1032 process_blocking_resp(c);
1037  * intres_timeout_req(s) is invoked in the parent to schedule an idle
1038  * timeout to fire in s seconds, if not reset earlier by a call to
1039  * intres_timeout_req(0), which clears any pending timeout. When the
1040  * timeout expires, worker_idle_timer_fired() is invoked (again, in the
1043  * sntp and ntpd each provide implementations adapted to their timers.
1047 u_int seconds /* 0 cancels */
1050 struct timeval tv_to;
/* lazily create the persistent timeout event on first use */
1052 if (NULL == ev_worker_timeout) {
1053 ev_worker_timeout = event_new(base, -1,
1054 EV_TIMEOUT | EV_PERSIST,
1055 &worker_timeout, NULL);
1056 DEBUG_INSIST(NULL != ev_worker_timeout);
1058 event_del(ev_worker_timeout);
1062 tv_to.tv_sec = seconds;
1064 event_add(ev_worker_timeout, &tv_to);
/* worker_timeout(): fires when the idle timer expires */
1078 DEBUG_REQUIRE(EV_TIMEOUT & what);
1079 worker_idle_timer_fired();
1084 sntp_libevent_log_cb(
/* map libevent severities onto syslog levels, then forward */
1094 case _EVENT_LOG_DEBUG:
1098 case _EVENT_LOG_MSG:
1102 case _EVENT_LOG_WARN:
1103 level = LOG_WARNING;
1106 case _EVENT_LOG_ERR:
1111 msyslog(level, "%s", msg);
/*
** generate_pkt() - fill in an NTP client request: transmit timestamp,
** unspecified stratum, LI/VN/mode byte (mode 3, client), and — when a
** key is supplied — a key id plus MAC appended after the base packet.
** Returns the total packet length.
*/
1118 const struct timeval *tv_xmt,
1127 pkt_len = LEN_PKT_NOMAC;
1129 TVTOTS(tv_xmt, &xmt_fp);
1130 HTONL_FP(&xmt_fp, &x_pkt->xmt);
1131 x_pkt->stratum = STRATUM_TO_PKT(STRATUM_UNSPEC);
1133 /* FIXME! Modus broadcast + adr. check -> bdr. pkt */
1134 set_li_vn_mode(x_pkt, LEAP_NOTINSYNC, ntpver, 3);
1135 if (pkt_key != NULL) {
1136 x_pkt->exten[0] = htonl(key_id);
1137 mac_size = 20; /* max room for MAC */
1138 mac_size = make_mac(x_pkt, pkt_len, mac_size,
1139 pkt_key, (char *)&x_pkt->exten[1]);
/* +4 accounts for the key id word preceding the MAC */
1141 pkt_len += mac_size + 4;
1152 const char * hostname
1156 const char * addrtxt;
1157 struct timeval tv_dst;
1164 const char * leaptxt;
1167 double synch_distance;
1168 char * p_SNTP_PRETEND_TIME;
1169 time_t pretend_time;
1170 #if SIZEOF_TIME_T == 8
1185 case SERVER_UNUSEABLE:
1189 case PACKET_UNUSEABLE:
1192 case SERVER_AUTH_FAIL:
1195 case KOD_DEMOBILIZE:
1196 /* Received a DENY or RESTR KOD packet */
1197 addrtxt = stoa(host);
1198 ref = (char *)&rpkt->refid;
1199 add_entry(addrtxt, ref);
1200 msyslog(LOG_WARNING, "KOD code %c%c%c%c from %s %s",
1201 ref[0], ref[1], ref[2], ref[3], addrtxt, hostname);
1207 ** We should probably call add_entry() with an
1208 ** expiration timestamp of several seconds in the future,
1209 ** and back-off even more if we get more RATE responses.
1214 TRACE(3, ("handle_pkt: %d bytes from %s %s\n",
1215 rpktl, stoa(host), hostname));
1217 gettimeofday_cached(base, &tv_dst);
1219 p_SNTP_PRETEND_TIME = getenv("SNTP_PRETEND_TIME");
1220 if (p_SNTP_PRETEND_TIME) {
1222 #if SIZEOF_TIME_T == 4
1223 if (1 == sscanf(p_SNTP_PRETEND_TIME, "%ld", &l))
1224 pretend_time = (time_t)l;
1225 #elif SIZEOF_TIME_T == 8
1226 if (1 == sscanf(p_SNTP_PRETEND_TIME, "%lld", &ll))
1227 pretend_time = (time_t)ll;
1229 # include "GRONK: unexpected value for SIZEOF_TIME_T"
1231 if (0 != pretend_time)
1232 tv_dst.tv_sec = pretend_time;
1235 offset_calculation(rpkt, rpktl, &tv_dst, &offset,
1236 &precision, &synch_distance);
1237 time_derived = TRUE;
1239 for (digits = 0; (precision *= 10.) < 1.; ++digits)
1244 ts_str = tv_to_str(&tv_dst);
1245 stratum = rpkt->stratum;
1249 if (synch_distance > 0.) {
1250 cnt = snprintf(disptxt, sizeof(disptxt),
1251 " +/- %f", synch_distance);
1252 if ((size_t)cnt >= sizeof(disptxt))
1253 snprintf(disptxt, sizeof(disptxt),
1254 "ERROR %d >= %d", cnt,
1255 (int)sizeof(disptxt));
1260 switch (PKT_LEAP(rpkt->li_vn_mode)) {
1261 case LEAP_NOWARNING:
1262 leaptxt = "no-leap";
1264 case LEAP_ADDSECOND:
1265 leaptxt = "add-leap";
1267 case LEAP_DELSECOND:
1268 leaptxt = "del-leap";
1270 case LEAP_NOTINSYNC:
1274 leaptxt = "LEAP-ERROR";
1278 msyslog(LOG_INFO, "%s %+.*f%s %s s%d %s%s", ts_str,
1279 digits, offset, disptxt,
1280 hostnameaddr(hostname, host), stratum,
1287 if (p_SNTP_PRETEND_TIME)
1290 if (!time_adjusted &&
1291 (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
1292 return set_time(offset);
/*
** offset_calculation() - standard NTP on-wire math.  Converts the
** packet timestamps to host order, computes t21 = rec - org and
** t34 = xmt - dst, and returns offset = (t21 + t34) / 2 along with the
** server's precision and an estimated synchronization distance.
*/
1305 struct timeval *tv_dst,
1308 double *synch_distance
1311 l_fp p_rec, p_xmt, p_ref, p_org, tmp, dst;
1312 u_fp p_rdly, p_rdsp;
1313 double t21, t34, delta;
1315 /* Convert timestamps from network to host byte order */
1316 p_rdly = NTOHS_FP(rpkt->rootdelay);
1317 p_rdsp = NTOHS_FP(rpkt->rootdisp);
1318 NTOHL_FP(&rpkt->reftime, &p_ref);
1319 NTOHL_FP(&rpkt->org, &p_org);
1320 NTOHL_FP(&rpkt->rec, &p_rec);
1321 NTOHL_FP(&rpkt->xmt, &p_xmt);
1323 *precision = LOGTOD(rpkt->precision);
1325 TRACE(3, ("offset_calculation: LOGTOD(rpkt->precision): %f\n", *precision));
1327 /* Compute offset etc. */
1329 L_SUB(&tmp, &p_org);
/* destination timestamp must be on the NTP epoch like the others */
1331 TVTOTS(tv_dst, &dst);
1332 dst.l_ui += JAN_1970;
1336 *offset = (t21 + t34) / 2.;
1339 // synch_distance is:
1340 // (peer->delay + peer->rootdelay) / 2 + peer->disp
1341 // + peer->rootdisp + clock_phi * (current_time - peer->update)
1344 // and peer->delay = fabs(peer->offset - p_offset) * 2;
1345 // and peer->offset needs history, so we're left with
1346 // p_offset = (t21 + t34) / 2.;
1347 // peer->disp = 0; (we have no history to augment this)
1348 // clock_phi = 15e-6;
1349 // peer->jitter = LOGTOD(sys_precision); (we have no history to augment this)
1350 // and ntp_proto.c:set_sys_tick_precision() should get us sys_precision.
1352 // so our answer seems to be:
1354 // (fabs(t21 + t34) + peer->rootdelay) / 3.
1357 // + 15e-6 (clock_phi)
1358 // + LOGTOD(sys_precision)
1360 INSIST( FPTOD(p_rdly) >= 0. );
1362 *synch_distance = (fabs(t21 + t34) + FPTOD(p_rdly)) / 3.
1366 + 0. /* LOGTOD(sys_precision) when we can get it */
1368 INSIST( *synch_distance >= 0. );
/* simpler fallback estimate from root delay/dispersion alone */
1370 *synch_distance = (FPTOD(p_rdly) + FPTOD(p_rdsp))/2.0;
1375 printf("sntp rootdelay: %f\n", FPTOD(p_rdly));
1376 printf("sntp rootdisp: %f\n", FPTOD(p_rdsp));
1377 printf("sntp syncdist: %f\n", *synch_distance);
1379 pkt_output(rpkt, rpktl, stdout);
1381 printf("sntp offset_calculation: rpkt->reftime:\n");
1382 l_fp_output(&p_ref, stdout);
1383 printf("sntp offset_calculation: rpkt->org:\n");
1384 l_fp_output(&p_org, stdout);
1385 printf("sntp offset_calculation: rpkt->rec:\n");
1386 l_fp_output(&p_rec, stdout);
1387 printf("sntp offset_calculation: rpkt->xmt:\n");
1388 l_fp_output(&p_xmt, stdout);
1392 TRACE(3, ("sntp offset_calculation:\trec - org t21: %.6f\n"
1393 "\txmt - dst t34: %.6f\tdelta: %.6f\toffset: %.6f\n",
1394 t21, t34, delta, *offset))
1401 /* Compute the 8 bits for li_vn_mode */
/* set_li_vn_mode(): clamp leap/version/mode into their field widths,
** logging when a value is out of range, then pack LI(2)|VN(3)|MODE(3) */
1411 msyslog(LOG_DEBUG, "set_li_vn_mode: leap > 3, using max. 3");
1415 if ((unsigned char)version > 7) {
1416 msyslog(LOG_DEBUG, "set_li_vn_mode: version < 0 or > 7, using 4");
1421 msyslog(LOG_DEBUG, "set_li_vn_mode: mode > 7, using client mode 3");
1425 spkt->li_vn_mode = leap << 6;
1426 spkt->li_vn_mode |= version << 3;
1427 spkt->li_vn_mode |= mode;
1432 ** set_time applies 'offset' to the local clock.
1445 ** If we can step but we cannot slew, then step.
1446 ** If we can step or slew and and |offset| > steplimit, then step.
1448 if (ENABLED_OPT(STEP) &&
1449 ( !ENABLED_OPT(SLEW)
1450 || (ENABLED_OPT(SLEW) && (fabs(offset) > steplimit))
1452 rc = step_systime(offset);
1454 /* If there was a problem, can we rely on errno? */
1456 time_adjusted = TRUE;
1457 return (time_adjusted)
1461 ** In case of error, what should we use?
/* slewing path: gradual adjustment via adj_systime() */
1468 if (ENABLED_OPT(SLEW)) {
1469 rc = adj_systime(offset);
1471 /* If there was a problem, can we rely on errno? */
1473 time_adjusted = TRUE;
1474 return (time_adjusted)
1478 ** In case of error, what should we use?
/* libevent_version_ok(): require the same major version at run time
** as at compile time (compare the top 16 bits of the version number) */
1490 libevent_version_ok(void)
1492 ev_uint32_t v_compile_maj;
1493 ev_uint32_t v_run_maj;
1495 v_compile_maj = LIBEVENT_VERSION_NUMBER & 0xffff0000;
1496 v_run_maj = event_get_version_number() & 0xffff0000;
1497 if (v_compile_maj != v_run_maj) {
1499 "Incompatible libevent versions: have %s, built with %s\n",
1500 event_get_version(),
1508  * gettimeofday_cached()
1510  * Clones the event_base_gettimeofday_cached() interface but ensures the
1511  * times are always on the gettimeofday() 1970 scale. Older libevent 2
1512  * sometimes used gettimeofday(), sometimes the since-system-start
1513  * clock_gettime(CLOCK_MONOTONIC), depending on the platform.
1515  * It is not cleanly possible to tell which timescale older libevent is
1518  * The strategy involves 1 hour thresholds chosen to be far longer than
1519  * the duration of a round of libevent callbacks, which share a cached
1520  * start-of-round time. First compare the last cached time with the
1521  * current gettimeofday() time. If they are within one hour, libevent
1522  * is using the proper timescale so leave the offset 0. Otherwise,
1523  * compare libevent's cached time and the current time on the monotonic
1524  * scale. If they are within an hour, libevent is using the monotonic
1525  * scale so calculate the offset to add to such times to bring them to
1526  * gettimeofday()'s scale.
1529 gettimeofday_cached(
1530 struct event_base * b,
1531 struct timeval * caller_tv
/* Only needed where CLOCK_MONOTONIC exists; otherwise fall straight
** through to event_base_gettimeofday_cached() at the bottom. */
1534 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1535 static struct event_base * cached_b;
1536 static struct timeval cached;
1537 static struct timeval adj_cached;
1538 static struct timeval offset;
1539 static int offset_ready;
1540 struct timeval latest;
1541 struct timeval systemt;
1543 struct timeval mono;
1544 struct timeval diff;
/* fast path: same base and unchanged cached time -> reuse adjustment */
1548 event_base_gettimeofday_cached(b, &latest);
1549 if (b == cached_b &&
1550 !memcmp(&latest, &cached, sizeof(latest))) {
1551 *caller_tv = adj_cached;
/* one-time timescale detection, per the block comment above */
1556 if (!offset_ready) {
1557 cgt_rc = clock_gettime(CLOCK_MONOTONIC, &ts);
1558 gtod_rc = gettimeofday(&systemt, NULL);
1561 "%s: gettimeofday() error %m",
1565 diff = sub_tval(systemt, latest);
1567 printf("system minus cached %+ld.%06ld\n",
1568 (long)diff.tv_sec, (long)diff.tv_usec);
1569 if (0 != cgt_rc || labs((long)diff.tv_sec) < 3600) {
1571 * Either use_monotonic == 0, or this libevent
1572 * has been repaired. Leave offset at zero.
1575 mono.tv_sec = ts.tv_sec;
1576 mono.tv_usec = ts.tv_nsec / 1000;
1577 diff = sub_tval(latest, mono);
1579 printf("cached minus monotonic %+ld.%06ld\n",
1580 (long)diff.tv_sec, (long)diff.tv_usec);
1581 if (labs((long)diff.tv_sec) < 3600) {
1582 /* older libevent2 using monotonic */
1583 offset = sub_tval(systemt, mono);
1584 TRACE(1, ("%s: Offsetting libevent CLOCK_MONOTONIC times by %+ld.%06ld\n",
1585 "gettimeofday_cached",
1586 (long)offset.tv_sec,
1587 (long)offset.tv_usec));
1590 offset_ready = TRUE;
1592 adj_cached = add_tval(cached, offset);
1593 *caller_tv = adj_cached;
1597 return event_base_gettimeofday_cached(b, caller_tv);