3 #include <event2/util.h>
4 #include <event2/event.h>
6 #include "ntp_workimpl.h"
8 # include <event2/thread.h>
11 #ifdef HAVE_SYSEXITS_H
12 # include <sysexits.h>
16 #include "ntp_libopts.h"
17 #include "kod_management.h"
18 #include "networking.h"
19 #include "utilities.h"
/* Outstanding-work counters: the event loop exits when both reach zero. */
27 int n_pending_dns = 0;
28 int n_pending_ntp = 0;
/* Address-family preference; forced to AF_INET when IPv6 probing fails. */
29 int ai_fam_pref = AF_UNSPEC;
31 double steplimit = -1;
/* Unicast query sockets, one per address family; -1 means "not open". */
32 SOCKET sock4 = -1; /* Socket for IPv4 */
33 SOCKET sock6 = -1; /* Socket for IPv6 */
35 ** BCAST *must* listen on port 123 (by default), so we can only
36 ** use the UCST sockets (above) if they too are using port 123
38 SOCKET bsock4 = -1; /* Broadcast Socket for IPv4 */
39 SOCKET bsock6 = -1; /* Broadcast Socket for IPv6 */
/* libevent plumbing: single base plus the persistent events hung off it. */
40 struct event_base *base;
41 struct event *ev_sock4;
42 struct event *ev_sock6;
43 struct event *ev_worker_timeout;
44 struct event *ev_xmt_timer;
/* Query-context mode bits (dns_ctx.flags): broadcast, unicast, concurrent. */
49 #define CTX_BCST 0x0001
50 #define CTX_UCST 0x0002
51 #define CTX_xCST 0x0003
52 #define CTX_CONC 0x0004
/* NOTE(review): complement of the defined bits would be 0xfff8, not 0xfffd
** — confirm 0xfffd is intentional before relying on CTX_unused. */
53 #define CTX_unused 0xfffd
55 struct timeval timeout;
59 typedef struct sent_pkt_tag sent_pkt;
62 struct dns_ctx * dctx;
69 typedef struct xmt_ctx_tag xmt_ctx;
/* Key list loaded by auth_init() from the -k keyfile. */
79 struct key * keys = NULL;
/* Per-query response timeout (seconds from -t), and program start time. */
81 struct timeval response_tv;
82 struct timeval start_tv;
83 /* check the timeout at least once per second */
84 struct timeval wakeup_tv = { 0, 888888 };
/* Per-family lists of packets sent and awaiting responses. */
86 sent_pkt * fam_listheads[2];
87 #define v4_pkts_list (fam_listheads[0])
88 #define v6_pkts_list (fam_listheads[1])
92 char buf[LEN_PKT_NOMAC + NTP_MAXEXTEN + MAX_MAC_LEN];
95 #define r_pkt rbuf.pkt
98 int droproot; /* intres imports these */
101 u_long current_time; /* libntp/authkeys.c */
/* Forward declarations for this file's internal functions; the evutil_socket_t
** callbacks below all follow libevent's (fd, what, arg) callback signature. */
103 void open_sockets(void);
104 void handle_lookup(const char *name, int flags);
105 void sntp_addremove_fd(int fd, int is_pipe, int remove_it);
106 void worker_timeout(evutil_socket_t, short, void *);
107 void worker_resp_cb(evutil_socket_t, short, void *);
108 void sntp_name_resolved(int, int, void *, const char *, const char *,
109 const struct addrinfo *,
110 const struct addrinfo *);
111 void queue_xmt(SOCKET sock, struct dns_ctx *dctx, sent_pkt *spkt,
113 void xmt_timer_cb(evutil_socket_t, short, void *ptr);
114 void xmt(xmt_ctx *xctx);
115 int check_kod(const struct addrinfo *ai);
116 void timeout_query(sent_pkt *);
117 void timeout_queries(void);
118 void sock_cb(evutil_socket_t, short, void *);
119 void check_exit_conditions(void);
120 void sntp_libevent_log_cb(int, const char *);
121 void set_li_vn_mode(struct pkt *spkt, char leap, char version, char mode);
122 int set_time(double offset);
123 void dec_pending_ntp(const char *, sockaddr_u *);
124 int libevent_version_ok(void);
125 int gettimeofday_cached(struct event_base *b, struct timeval *tv);
129 * The actual main function.
/* Sets up logging, option parsing, the KoD database, authentication keys and
** a libevent base, queues one DNS lookup per requested server, then runs the
** event loop until check_exit_conditions()/timeout_queries() stop it. */
135 const char *sntpVersion
141 struct event_config * evcfg;
143 /* Initialize logging system - sets up progname */
144 sntp_init_logging(argv[0]);
/* Refuse to run against a libevent with a different major version. */
146 if (!libevent_version_ok())
152 optct = ntpOptionProcess(&sntpOptions, argc, argv);
157 debug = OPT_VALUE_SET_DEBUG_LEVEL;
159 TRACE(2, ("init_lib() done, %s%s\n",
166 ntpver = OPT_VALUE_NTPVERSION;
/* --steplimit is given in milliseconds; store as seconds. */
167 steplimit = OPT_VALUE_STEPLIMIT / 1e3;
/* Clamp the inter-transmit gap into [0, 999999] microseconds. */
168 gap.tv_usec = max(0, OPT_VALUE_GAP * 1000);
169 gap.tv_usec = min(gap.tv_usec, 999999);
171 if (HAVE_OPT(LOGFILE))
172 open_logfile(OPT_ARG(LOGFILE));
174 msyslog(LOG_INFO, "%s", sntpVersion);
/* With no hostnames and neither -b nor -c there is nothing to do. */
176 if (0 == argc && !HAVE_OPT(BROADCAST) && !HAVE_OPT(CONCURRENT)) {
177 printf("%s: Must supply at least one of -b hostname, -c hostname, or hostname.\n",
184 ** Eventually, we probably want:
185 ** - separate bcst and ucst timeouts (why?)
186 ** - multiple --timeout values in the commandline
189 response_timeout = OPT_VALUE_TIMEOUT;
190 response_tv.tv_sec = response_timeout;
191 response_tv.tv_usec = 0;
193 /* IPv6 available? */
194 if (isc_net_probeipv6() != ISC_R_SUCCESS) {
195 ai_fam_pref = AF_INET;
196 TRACE(1, ("No ipv6 support available, forcing ipv4\n"));
198 /* Check for options -4 and -6 */
200 ai_fam_pref = AF_INET;
201 else if (HAVE_OPT(IPV6))
202 ai_fam_pref = AF_INET6;
205 /* TODO: Parse config file if declared */
208 ** Init the KOD system.
209 ** For embedded systems with no writable filesystem,
210 ** -K /dev/null can be used to disable KoD storage.
212 kod_init_kod_db(OPT_ARG(KOD), FALSE);
214 /* HMS: Check and see what happens if KEYFILE doesn't exist */
215 auth_init(OPT_ARG(KEYFILE), &keys);
218 ** Considering employing a variable that prevents functions of doing
219 ** anything until everything is initialized properly
221 ** HMS: What exactly does the above mean?
223 event_set_log_callback(&sntp_libevent_log_cb);
225 event_enable_debug_mode();
227 evthread_use_pthreads();
228 /* we use libevent from main thread only, locks should be academic */
230 evthread_enable_lock_debuging();
232 evcfg = event_config_new();
234 printf("%s: event_config_new() failed!\n", progname);
237 #ifndef HAVE_SOCKETPAIR
/* Without socketpair() the worker pipe needs a backend that accepts
** arbitrary fds, hence EV_FEATURE_FDS. */
238 event_config_require_features(evcfg, EV_FEATURE_FDS);
240 /* all libevent calls are from main thread */
241 /* event_config_set_flag(evcfg, EVENT_BASE_FLAG_NOLOCK); */
242 base = event_base_new_with_config(evcfg);
243 event_config_free(evcfg);
245 printf("%s: event_base_new() failed!\n", progname);
249 /* wire into intres resolver */
250 worker_per_query = TRUE;
251 addremove_io_fd = &sntp_addremove_fd;
/* Queue lookups: -b hosts as broadcast, -c hosts as concurrent unicast,
** remaining positional arguments as plain unicast. */
255 if (HAVE_OPT(BROADCAST)) {
256 int cn = STACKCT_OPT( BROADCAST );
257 const char ** cp = STACKLST_OPT( BROADCAST );
260 handle_lookup(*cp, CTX_BCST);
265 if (HAVE_OPT(CONCURRENT)) {
266 int cn = STACKCT_OPT( CONCURRENT );
267 const char ** cp = STACKLST_OPT( CONCURRENT );
270 handle_lookup(*cp, CTX_UCST | CTX_CONC);
275 for (i = 0; i < argc; ++i)
276 handle_lookup(argv[i], CTX_UCST);
278 gettimeofday_cached(base, &start_tv);
279 event_base_dispatch(base);
280 event_base_free(base);
/* Loop finished without adjusting the clock even though -S/-s was given
** — presumably reported as a failure below; tail not visible here. */
282 if (!time_adjusted &&
283 (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
293 ** open sockets and make them non-blocking
/* Creates the v4 (and, when available, v6) UDP sockets, binds them to the
** wildcard address (port 123 with -r, else ephemeral), and registers a
** persistent read/timeout event on each so sock_cb() gets invoked. */
303 sock4 = socket(PF_INET, SOCK_DGRAM, 0);
305 /* error getting a socket */
306 msyslog(LOG_ERR, "open_sockets: socket(PF_INET) failed: %m");
309 /* Make it non-blocking */
310 make_socket_nonblocking(sock4);
312 /* Let's try using a wildcard... */
315 SET_ADDR4N(&name, INADDR_ANY);
316 SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));
318 if (-1 == bind(sock4, &name.sa,
320 msyslog(LOG_ERR, "open_sockets: bind(sock4) failed: %m");
324 /* Register an NTP callback for recv/timeout */
325 ev_sock4 = event_new(base, sock4,
326 EV_TIMEOUT | EV_READ | EV_PERSIST,
328 if (NULL == ev_sock4) {
330 "open_sockets: event_new(base, sock4) failed!");
/* wakeup_tv bounds the timeout so timeout_queries() runs ~once a second. */
332 event_add(ev_sock4, &wakeup_tv);
336 /* We may not always have IPv6... */
337 if (-1 == sock6 && ipv6_works) {
338 sock6 = socket(PF_INET6, SOCK_DGRAM, 0);
339 if (-1 == sock6 && ipv6_works) {
340 /* error getting a socket */
341 msyslog(LOG_ERR, "open_sockets: socket(PF_INET6) failed: %m");
344 /* Make it non-blocking */
345 make_socket_nonblocking(sock6);
347 /* Let's try using a wildcard... */
349 AF(&name) = AF_INET6;
350 SET_ADDR6N(&name, in6addr_any);
351 SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));
353 if (-1 == bind(sock6, &name.sa,
355 msyslog(LOG_ERR, "open_sockets: bind(sock6) failed: %m");
358 /* Register an NTP callback for recv/timeout */
359 ev_sock6 = event_new(base, sock6,
360 EV_TIMEOUT | EV_READ | EV_PERSIST,
362 if (NULL == ev_sock6) {
364 "open_sockets: event_new(base, sock6) failed!");
366 event_add(ev_sock6, &wakeup_tv);
/* handle_lookup(name, flags): kick off an asynchronous DNS lookup for one
** server, allocating a dns_ctx (with the name copied inline after the struct
** in a single allocation) that sntp_name_resolved() receives as its arg. */
383 struct addrinfo hints; /* Local copy is OK */
389 TRACE(1, ("handle_lookup(%s,%#x)\n", name, flags));
392 hints.ai_family = ai_fam_pref;
393 hints.ai_flags = AI_CANONNAME | Z_AI_NUMERICSERV;
395 ** Unless we specify a socktype, we'll get at least two
396 ** entries for each address: one for TCP and one for
397 ** UDP. That's not what we want.
399 hints.ai_socktype = SOCK_DGRAM;
400 hints.ai_protocol = IPPROTO_UDP;
/* Single allocation holds the ctx followed by a NUL-terminated name copy. */
402 name_sz = 1 + strlen(name);
403 octets = sizeof(*ctx) + name_sz; // Space for a ctx and the name
404 ctx = emalloc_zero(octets); // ctx at ctx[0]
405 name_copy = (char *)(ctx + 1); // Put the name at ctx[1]
406 memcpy(name_copy, name, name_sz); // copy the name to ctx[1]
407 ctx->name = name_copy; // point to it...
409 ctx->timeout = response_tv;
412 /* The following should arguably be passed in... */
413 if (ENABLED_OPT(AUTHENTICATION)) {
414 ctx->key_id = OPT_VALUE_AUTHENTICATION;
415 get_key(ctx->key_id, &ctx->key);
416 if (NULL == ctx->key) {
417 fprintf(stderr, "%s: Authentication with keyID %d requested, but no matching keyID found in <%s>!\n",
418 progname, ctx->key_id, OPT_ARG(KEYFILE));
/* Service "123": NTP port; resolved asynchronously by the intres worker. */
426 getaddrinfo_sometime(name, "123", &hints, 0,
427 &sntp_name_resolved, ctx);
/* sntp_name_resolved(): DNS completion callback. For each resolved address,
** pick the right socket/delay for its family and, for unicast contexts,
** allocate a sent_pkt and queue a transmission. */
435 ** - - increment n_pending_ntp
436 ** - - send a request if this is a Unicast callback
437 ** - - queue wait for response
438 ** - decrement n_pending_dns
446 const char * service,
447 const struct addrinfo * hints,
448 const struct addrinfo * addr
451 struct dns_ctx * dctx;
453 const struct addrinfo * ai;
/* EAI_SYSTEM: real cause is in errno (%m); otherwise use gai_strerror(). */
465 if (EAI_SYSTEM == rescode) {
467 mfprintf(stderr, "%s lookup error %m\n",
471 fprintf(stderr, "%s lookup error %s\n",
472 dctx->name, gai_strerror(rescode));
474 TRACE(3, ("%s [%s]\n", dctx->name,
475 (addr->ai_canonname != NULL)
479 for (ai = addr; ai != NULL; ai = ai->ai_next) {
484 switch (ai->ai_family) {
488 xmt_delay = xmt_delay_v4;
497 xmt_delay = xmt_delay_v6;
502 msyslog(LOG_ERR, "sntp_name_resolved: unexpected ai_family: %d",
509 ** We're waiting for a response for either unicast
510 ** or broadcast, so...
514 /* If this is for a unicast IP, queue a request */
515 if (dctx->flags & CTX_UCST) {
516 spkt = emalloc_zero(sizeof(*spkt));
/* Bound the copy by the destination buffer, not just ai_addrlen. */
518 octets = min(ai->ai_addrlen, sizeof(spkt->addr));
519 memcpy(&spkt->addr, ai->ai_addr, octets);
520 queue_xmt(sock, dctx, spkt, xmt_delay);
524 /* n_pending_dns really should be >0 here... */
526 check_exit_conditions();
/* queue_xmt(): add spkt to the per-family sent list (rejecting duplicate
** addresses), then insert an xmt_ctx into the time-sorted xmt_q and (re)arm
** the one-shot transmit timer if this entry is now first. */
536 struct dns_ctx * dctx,
542 sent_pkt ** pkt_listp;
545 struct timeval start_cb;
546 struct timeval delay;
550 pkt_listp = &v6_pkts_list;
552 pkt_listp = &v4_pkts_list;
554 /* reject attempts to add address already listed */
555 for (match = *pkt_listp; match != NULL; match = match->link) {
556 if (ADDR_PORT_EQ(&spkt->addr, &match->addr)) {
/* Message differs depending on whether the duplicate came from the
** same hostname or a different one. */
557 if (strcasecmp(spkt->dctx->name,
559 printf("%s %s duplicate address from %s ignored.\n",
564 printf("%s %s, duplicate address ignored.\n",
567 dec_pending_ntp(spkt->dctx->name, &spkt->addr);
573 LINK_SLIST(*pkt_listp, spkt, link);
575 xctx = emalloc_zero(sizeof(*xctx));
578 gettimeofday_cached(base, &start_cb);
/* Stagger transmissions: each later entry fires 2*xmt_delay later. */
579 xctx->sched = start_cb.tv_sec + (2 * xmt_delay);
581 LINK_SORT_SLIST(xmt_q, xctx, (xctx->sched < L_S_S_CUR()->sched),
585 * The new entry is the first scheduled. The timer is
586 * either not active or is set for the second xmt
589 if (NULL == ev_xmt_timer)
590 ev_xmt_timer = event_new(base, INVALID_SOCKET,
592 &xmt_timer_cb, NULL);
593 if (NULL == ev_xmt_timer) {
595 "queue_xmt: event_new(base, -1, EV_TIMEOUT) failed!");
599 if (xctx->sched > start_cb.tv_sec)
600 delay.tv_sec = xctx->sched - start_cb.tv_sec;
601 event_add(ev_xmt_timer, &delay);
602 TRACE(2, ("queue_xmt: xmt timer for %u usec\n",
603 (u_int)delay.tv_usec));
/* xmt_timer_cb(): transmit-timer callback. Sends the head of xmt_q when its
** scheduled second has arrived, then re-arms the timer either with the short
** `gap` (next entry also due) or with the seconds until the next entry. */
618 struct timeval start_cb;
619 struct timeval delay;
624 DEBUG_INSIST(EV_TIMEOUT == what);
/* Nothing queued, or we are shutting down: let the timer lapse. */
626 if (NULL == xmt_q || shutting_down)
628 gettimeofday_cached(base, &start_cb);
629 if (xmt_q->sched <= start_cb.tv_sec) {
630 UNLINK_HEAD_SLIST(x, xmt_q, link);
631 TRACE(2, ("xmt_timer_cb: at .%6.6u -> %s\n",
632 (u_int)start_cb.tv_usec, stoa(&x->spkt->addr)));
638 if (xmt_q->sched <= start_cb.tv_sec) {
639 event_add(ev_xmt_timer, &gap);
640 TRACE(2, ("xmt_timer_cb: at .%6.6u gap %6.6u\n",
641 (u_int)start_cb.tv_usec,
642 (u_int)gap.tv_usec));
644 delay.tv_sec = xmt_q->sched - start_cb.tv_sec;
646 event_add(ev_xmt_timer, &delay);
647 TRACE(2, ("xmt_timer_cb: at .%6.6u next %ld seconds\n",
648 (u_int)start_cb.tv_usec,
649 (long)delay.tv_sec));
/* xmt(): build and send one NTP request for xctx->spkt, recording the sent
** packet and its (UNIX-scale) send time so sock_cb() can match the reply.
** On gettimeofday()/send failure the pending-NTP count is decremented. */
662 SOCKET sock = xctx->sock;
663 struct dns_ctx *dctx = xctx->spkt->dctx;
664 sent_pkt * spkt = xctx->spkt;
665 sockaddr_u * dst = &spkt->addr;
666 struct timeval tv_xmt;
671 if (0 != gettimeofday(&tv_xmt, NULL)) {
673 "xmt: gettimeofday() failed: %m");
/* Shift the transmit timestamp onto the NTP epoch (1900). */
676 tv_xmt.tv_sec += JAN_1970;
678 pkt_len = generate_pkt(&x_pkt, &tv_xmt, dctx->key_id,
681 sent = sendpkt(sock, dst, &x_pkt, pkt_len);
683 /* Save the packet we sent... */
684 memcpy(&spkt->x_pkt, &x_pkt, min(sizeof(spkt->x_pkt),
/* stime is stored back on the UNIX epoch for age computations. */
686 spkt->stime = tv_xmt.tv_sec - JAN_1970;
688 TRACE(2, ("xmt: %lx.%6.6u %s %s\n", (u_long)tv_xmt.tv_sec,
689 (u_int)tv_xmt.tv_usec, dctx->name, stoa(dst)));
691 dec_pending_ntp(dctx->name, dst);
699 * timeout_queries() -- give up on unrequited NTP queries
/* Walks both per-family sent lists, expiring entries older than
** response_timeout; if the whole run has exceeded response_timeout,
** asks the event loop to exit. */
702 timeout_queries(void)
704 struct timeval start_cb;
708 sent_pkt * spkt_next;
710 int didsomething = 0;
712 TRACE(3, ("timeout_queries: called to check %u items\n",
713 (unsigned)COUNTOF(fam_listheads)));
715 gettimeofday_cached(base, &start_cb);
716 for (idx = 0; idx < COUNTOF(fam_listheads); idx++) {
717 head = fam_listheads[idx];
718 for (spkt = head; spkt != NULL; spkt = spkt_next) {
722 switch (spkt->dctx->flags & CTX_xCST) {
732 INSIST(!"spkt->dctx->flags neither UCST nor BCST");
/* Capture link before timeout_query() may unlink/alter spkt. */
736 spkt_next = spkt->link;
737 if (0 == spkt->stime || spkt->done)
739 age = start_cb.tv_sec - spkt->stime;
740 TRACE(3, ("%s %s %cCST age %ld\n",
742 spkt->dctx->name, xcst, age));
743 if (age > response_timeout)
747 // Do we care about didsomething?
748 TRACE(3, ("timeout_queries: didsomething is %d, age is %ld\n",
749 didsomething, (long) (start_cb.tv_sec - start_tv.tv_sec)));
750 if (start_cb.tv_sec - start_tv.tv_sec > response_timeout) {
751 TRACE(3, ("timeout_queries: bail!\n"));
752 event_base_loopexit(base, NULL);
753 shutting_down = TRUE;
/* dec_pending_ntp(): decrement the outstanding-NTP counter and re-check exit
** conditions; complains (but does not go negative) if already zero. */
758 void dec_pending_ntp(
763 if (n_pending_ntp > 0) {
765 check_exit_conditions();
767 INSIST(0 == n_pending_ntp);
768 TRACE(1, ("n_pending_ntp was zero before decrement for %s\n",
769 hostnameaddr(name, server)));
/* timeout_query(): log that a single query got no response within
** response_timeout (xcst distinguishes B/U-cast) and release its slot. */
782 switch (spkt->dctx->flags & CTX_xCST) {
792 INSIST(!"spkt->dctx->flags neither UCST nor BCST");
796 server = &spkt->addr;
797 msyslog(LOG_INFO, "%s no %cCST response after %d seconds",
798 hostnameaddr(spkt->dctx->name, server), xcst,
800 dec_pending_ntp(spkt->dctx->name, server);
/* check_kod(): consult the KoD database for this address; a prior KoD entry
** means the server asked us to stop, so the address is skipped. */
810 const struct addrinfo * ai
814 struct kod_entry *reason;
816 /* Is there a KoD on file for this address? */
817 hostname = addrinfo_to_str(ai);
818 TRACE(2, ("check_kod: checking <%s>\n", hostname));
819 if (search_entry(hostname, &reason)) {
820 printf("prior KoD for %s, skipping.\n",
834 ** Socket readable/timeout Callback:
835 ** Read in the packet
/* Fired on read or timeout for sock4/sock6. On timeout, runs the query
** expiry path; on read, matches the sender against the sent-packet list,
** validates with process_pkt(), and hands good packets to handle_pkt(). */
838 ** - decrement n_pending_ntp
839 ** - If packet is good, set the time and "exit"
841 ** - If packet is good, set the time and "exit"
852 sent_pkt ** p_pktlist;
857 INSIST(sock4 == fd || sock6 == fd);
859 TRACE(3, ("sock_cb: event on sock%s:%s%s%s%s\n",
863 (what & EV_TIMEOUT) ? " timeout" : "",
864 (what & EV_READ) ? " read" : "",
865 (what & EV_WRITE) ? " write" : "",
866 (what & EV_SIGNAL) ? " signal" : ""));
/* Not readable: a timeout tick means it's time to expire stale queries. */
868 if (!(EV_READ & what)) {
869 if (EV_TIMEOUT & what)
875 /* Read in the packet */
876 rpktl = recvdata(fd, &sender, &rbuf, sizeof(rbuf));
878 msyslog(LOG_DEBUG, "recvfrom error %m");
883 p_pktlist = &v6_pkts_list;
885 p_pktlist = &v4_pkts_list;
/* Only accept replies from addresses we actually queried. */
887 for (spkt = *p_pktlist; spkt != NULL; spkt = spkt->link) {
889 if (SOCK_EQ(&sender, psau))
894 "Packet from unexpected source %s dropped",
899 TRACE(1, ("sock_cb: %s %s\n", spkt->dctx->name,
902 rpktl = process_pkt(&r_pkt, &sender, rpktl, MODE_SERVER,
903 &spkt->x_pkt, "sock_cb");
905 TRACE(2, ("sock_cb: process_pkt returned %d\n", rpktl));
907 /* If this is a Unicast packet, one down ... */
908 if (!spkt->done && (CTX_UCST & spkt->dctx->flags)) {
909 dec_pending_ntp(spkt->dctx->name, &spkt->addr);
914 /* If the packet is good, set the time and we're all done */
915 rc = handle_pkt(rpktl, &r_pkt, &spkt->addr, spkt->dctx->name);
917 TRACE(1, ("sock_cb: handle_pkt() returned %d\n", rc));
918 check_exit_conditions();
923 * check_exit_conditions()
925 * If sntp has a reply, ask the event loop to stop after this round of
926 * callbacks, unless --wait was used.
/* Exits when no DNS or NTP work remains, or a time was derived and --wait
** was not requested; otherwise just traces the remaining counts. */
929 check_exit_conditions(void)
931 if ((0 == n_pending_ntp && 0 == n_pending_dns) ||
932 (time_derived && !HAVE_OPT(WAIT))) {
933 event_base_loopexit(base, NULL);
934 shutting_down = TRUE;
936 TRACE(2, ("%d NTP and %d name queries pending\n",
937 n_pending_ntp, n_pending_dns));
943 * sntp_addremove_fd() is invoked by the intres blocking worker code
944 * to read from a pipe, or to stop same.
/* Adds or removes a persistent EV_READ event (worker_resp_cb) for a blocking
** child's response pipe; pipes are rejected where socketpair() exists. */
946 void sntp_addremove_fd(
956 #ifdef HAVE_SOCKETPAIR
958 /* sntp only asks for EV_FEATURE_FDS without HAVE_SOCKETPAIR */
959 msyslog(LOG_ERR, "fatal: pipes not supported on systems with socketpair()");
/* Locate the blocking child that owns this descriptor. */
965 for (idx = 0; idx < blocking_children_alloc; idx++) {
966 c = blocking_children[idx];
969 if (fd == c->resp_read_pipe)
972 if (idx == blocking_children_alloc)
/* Removal path: detach and (presumably) free the existing event. */
976 ev = c->resp_read_ctx;
977 c->resp_read_ctx = NULL;
/* Add path: one persistent read event per child response pipe. */
984 ev = event_new(base, fd, EV_READ | EV_PERSIST,
988 "sntp_addremove_fd: event_new(base, fd) failed!");
991 c->resp_read_ctx = ev;
996 /* called by forked intres child to close open descriptors */
/* Each open socket is reset to INVALID_SOCKET after being closed so a
** second invocation is harmless.
** FIX(review): the bsock4/bsock6 branches previously reset sock4/sock6
** again instead of the broadcast sockets they had just tested, leaving
** bsock4/bsock6 marked open (and the broadcast descriptors effectively
** leaked in the child). They now reset the matching broadcast handles. */
1003 	if (INVALID_SOCKET != sock4) {
1005 		sock4 = INVALID_SOCKET;
1007 	if (INVALID_SOCKET != sock6) {
1009 		sock6 = INVALID_SOCKET;
1011 	if (INVALID_SOCKET != bsock4) {
1013 		bsock4 = INVALID_SOCKET;
1015 	if (INVALID_SOCKET != bsock6) {
1017 		bsock6 = INVALID_SOCKET;
1024 * worker_resp_cb() is invoked when resp_read_pipe is readable.
/* Sanity-checks the event/fd, then lets the intres layer consume the
** blocking child's response. */
1030 void * ctx /* blocking_child * */
1035 DEBUG_INSIST(EV_READ & what);
1037 DEBUG_INSIST(fd == c->resp_read_pipe);
1038 process_blocking_resp(c);
1043 * intres_timeout_req(s) is invoked in the parent to schedule an idle
1044 * timeout to fire in s seconds, if not reset earlier by a call to
1045 * intres_timeout_req(0), which clears any pending timeout. When the
1046 * timeout expires, worker_idle_timer_fired() is invoked (again, in the
1049 * sntp and ntpd each provide implementations adapted to their timers.
/* Lazily creates the persistent worker timer on first use, cancels any
** pending arm, and re-arms it for `seconds` (0 leaves it cancelled). */
1053 u_int seconds /* 0 cancels */
1056 struct timeval tv_to;
1058 if (NULL == ev_worker_timeout) {
1059 ev_worker_timeout = event_new(base, -1,
1060 EV_TIMEOUT | EV_PERSIST,
1061 &worker_timeout, NULL);
1062 DEBUG_INSIST(NULL != ev_worker_timeout);
1064 event_del(ev_worker_timeout);
1068 tv_to.tv_sec = seconds;
1070 event_add(ev_worker_timeout, &tv_to);
/* worker_timeout(): timer callback — forwards to the shared intres idle
** handler. */
1084 DEBUG_REQUIRE(EV_TIMEOUT & what);
1085 worker_idle_timer_fired();
/* sntp_libevent_log_cb(): route libevent's internal log messages into
** msyslog() at a severity mapped from libevent's log level. */
1090 sntp_libevent_log_cb(
1100 case _EVENT_LOG_DEBUG:
1104 case _EVENT_LOG_MSG:
1108 case _EVENT_LOG_WARN:
1109 level = LOG_WARNING;
1112 case _EVENT_LOG_ERR:
/* Format string is fixed here; msg comes from libevent, never from us. */
1117 msyslog(level, "%s", msg);
/* generate_pkt(): fill an outgoing client-mode (mode 3) NTP packet with the
** transmit timestamp and, when a key is supplied, an appended keyID + MAC;
** returns the total packet length. */
1124 const struct timeval *tv_xmt,
1133 pkt_len = LEN_PKT_NOMAC;
/* Convert the timeval to NTP l_fp and store it network-order. */
1135 TVTOTS(tv_xmt, &xmt_fp);
1136 HTONL_FP(&xmt_fp, &x_pkt->xmt);
1137 x_pkt->stratum = STRATUM_TO_PKT(STRATUM_UNSPEC);
1139 /* FIXME! Modus broadcast + adr. check -> bdr. pkt */
1140 set_li_vn_mode(x_pkt, LEAP_NOTINSYNC, ntpver, 3);
1142 printf("generate_pkt: key_id %d, key pointer %p\n", key_id, pkt_key);
1144 if (pkt_key != NULL) {
/* MAC layout: keyID word followed by the digest in exten[1...]. */
1145 x_pkt->exten[0] = htonl(key_id);
1146 mac_size = make_mac(x_pkt, pkt_len, MAX_MDG_LEN,
1147 pkt_key, (char *)&x_pkt->exten[1]);
1149 pkt_len += mac_size + KEY_MAC_LEN;
1152 printf("generate_pkt: mac_size is %d\n", mac_size);
/* handle_pkt(): act on one validated server reply. KoD packets get recorded;
** otherwise compute offset/precision/synch-distance, log the result, and —
** when -S/-s was given and the clock is not yet adjusted — set the time. */
1166 const char * hostname
1170 const char * addrtxt;
1171 struct timeval tv_dst;
1178 const char * leaptxt;
1181 double synch_distance;
1182 char * p_SNTP_PRETEND_TIME;
1183 time_t pretend_time;
1184 #if SIZEOF_TIME_T == 8
/* Classify the process_pkt() result code. */
1199 case SERVER_UNUSEABLE:
1203 case PACKET_UNUSEABLE:
1206 case SERVER_AUTH_FAIL:
1209 case KOD_DEMOBILIZE:
1210 /* Received a DENY or RESTR KOD packet */
1211 addrtxt = stoa(host);
1212 ref = (char *)&rpkt->refid;
1213 add_entry(addrtxt, ref);
1214 msyslog(LOG_WARNING, "KOD code %c%c%c%c from %s %s",
1215 ref[0], ref[1], ref[2], ref[3], addrtxt, hostname);
1221 ** We should probably call add_entry() with an
1222 ** expiration timestamp of several seconds in the future,
1223 ** and back-off even more if we get more RATE responses.
1228 TRACE(3, ("handle_pkt: %d bytes from %s %s\n",
1229 rpktl, stoa(host), hostname));
1231 gettimeofday_cached(base, &tv_dst);
/* Test hook: SNTP_PRETEND_TIME overrides the destination timestamp's
** seconds, parsed per the platform's time_t width. */
1233 p_SNTP_PRETEND_TIME = getenv("SNTP_PRETEND_TIME");
1234 if (p_SNTP_PRETEND_TIME) {
1236 #if SIZEOF_TIME_T == 4
1237 if (1 == sscanf(p_SNTP_PRETEND_TIME, "%ld", &l))
1238 pretend_time = (time_t)l;
1239 #elif SIZEOF_TIME_T == 8
1240 if (1 == sscanf(p_SNTP_PRETEND_TIME, "%lld", &ll))
1241 pretend_time = (time_t)ll;
1243 # include "GRONK: unexpected value for SIZEOF_TIME_T"
1245 if (0 != pretend_time)
1246 tv_dst.tv_sec = pretend_time;
1249 offset_calculation(rpkt, rpktl, &tv_dst, &offset,
1250 &precision, &synch_distance);
1251 time_derived = TRUE;
/* Choose how many fractional digits to print from the precision. */
1253 for (digits = 0; (precision *= 10.) < 1.; ++digits)
1258 ts_str = tv_to_str(&tv_dst);
1259 stratum = rpkt->stratum;
1263 if (synch_distance > 0.) {
1264 cnt = snprintf(disptxt, sizeof(disptxt),
1265 " +/- %f", synch_distance);
1266 if ((size_t)cnt >= sizeof(disptxt))
1267 snprintf(disptxt, sizeof(disptxt),
1268 "ERROR %d >= %d", cnt,
1269 (int)sizeof(disptxt));
1274 switch (PKT_LEAP(rpkt->li_vn_mode)) {
1275 case LEAP_NOWARNING:
1276 leaptxt = "no-leap";
1278 case LEAP_ADDSECOND:
1279 leaptxt = "add-leap";
1281 case LEAP_DELSECOND:
1282 leaptxt = "del-leap";
1284 case LEAP_NOTINSYNC:
1288 leaptxt = "LEAP-ERROR";
1292 msyslog(LOG_INFO, "%s %+.*f%s %s s%d %s%s", ts_str,
1293 digits, offset, disptxt,
1294 hostnameaddr(hostname, host), stratum,
1301 if (p_SNTP_PRETEND_TIME)
1304 if (!time_adjusted &&
1305 (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
1306 return set_time(offset);
/* offset_calculation(): standard NTP on-wire math. Converts the four packet
** timestamps to host order, derives t21 (rec-org) and t34 (xmt-dst), and
** returns offset = (t21+t34)/2 plus precision and an approximated
** synchronization distance (no history available to a one-shot client). */
1319 struct timeval *tv_dst,
1322 double *synch_distance
1325 l_fp p_rec, p_xmt, p_ref, p_org, tmp, dst;
1326 u_fp p_rdly, p_rdsp;
1327 double t21, t34, delta;
1329 /* Convert timestamps from network to host byte order */
1330 p_rdly = NTOHS_FP(rpkt->rootdelay);
1331 p_rdsp = NTOHS_FP(rpkt->rootdisp);
1332 NTOHL_FP(&rpkt->reftime, &p_ref);
1333 NTOHL_FP(&rpkt->org, &p_org);
1334 NTOHL_FP(&rpkt->rec, &p_rec);
1335 NTOHL_FP(&rpkt->xmt, &p_xmt);
1337 *precision = LOGTOD(rpkt->precision);
1339 TRACE(3, ("offset_calculation: LOGTOD(rpkt->precision): %f\n", *precision));
1341 /* Compute offset etc. */
1343 L_SUB(&tmp, &p_org);
/* Destination time must be moved onto the NTP (1900) epoch first. */
1345 TVTOTS(tv_dst, &dst);
1346 dst.l_ui += JAN_1970;
1350 *offset = (t21 + t34) / 2.;
1353 // synch_distance is:
1354 // (peer->delay + peer->rootdelay) / 2 + peer->disp
1355 // + peer->rootdisp + clock_phi * (current_time - peer->update)
1358 // and peer->delay = fabs(peer->offset - p_offset) * 2;
1359 // and peer->offset needs history, so we're left with
1360 // p_offset = (t21 + t34) / 2.;
1361 // peer->disp = 0; (we have no history to augment this)
1362 // clock_phi = 15e-6;
1363 // peer->jitter = LOGTOD(sys_precision); (we have no history to augment this)
1364 // and ntp_proto.c:set_sys_tick_precision() should get us sys_precision.
1366 // so our answer seems to be:
1368 // (fabs(t21 + t34) + peer->rootdelay) / 3.
1371 // + 15e-6 (clock_phi)
1372 // + LOGTOD(sys_precision)
1374 INSIST( FPTOD(p_rdly) >= 0. );
1376 *synch_distance = (fabs(t21 + t34) + FPTOD(p_rdly)) / 3.
1380 + 0. /* LOGTOD(sys_precision) when we can get it */
1382 INSIST( *synch_distance >= 0. );
/* Alternate (simpler) synch-distance formula branch. */
1384 *synch_distance = (FPTOD(p_rdly) + FPTOD(p_rdsp))/2.0;
/* Debug dumps of the raw packet and each converted timestamp. */
1389 printf("sntp rootdelay: %f\n", FPTOD(p_rdly));
1390 printf("sntp rootdisp: %f\n", FPTOD(p_rdsp));
1391 printf("sntp syncdist: %f\n", *synch_distance);
1393 pkt_output(rpkt, rpktl, stdout);
1395 printf("sntp offset_calculation: rpkt->reftime:\n");
1396 l_fp_output(&p_ref, stdout);
1397 printf("sntp offset_calculation: rpkt->org:\n");
1398 l_fp_output(&p_org, stdout);
1399 printf("sntp offset_calculation: rpkt->rec:\n");
1400 l_fp_output(&p_rec, stdout);
1401 printf("sntp offset_calculation: rpkt->xmt:\n");
1402 l_fp_output(&p_xmt, stdout);
1406 TRACE(3, ("sntp offset_calculation:\trec - org t21: %.6f\n"
1407 "\txmt - dst t34: %.6f\tdelta: %.6f\toffset: %.6f\n",
1408 t21, t34, delta, *offset));
1415 /* Compute the 8 bits for li_vn_mode */
/* Packs leap (2 bits), version (3 bits), mode (3 bits) into li_vn_mode,
** clamping each field to its legal range with a debug log. */
1425 msyslog(LOG_DEBUG, "set_li_vn_mode: leap > 3, using max. 3");
1429 if ((unsigned char)version > 7) {
1430 msyslog(LOG_DEBUG, "set_li_vn_mode: version < 0 or > 7, using 4");
1435 msyslog(LOG_DEBUG, "set_li_vn_mode: mode > 7, using client mode 3");
/* Layout: LL VVV MMM (leap in bits 7-6, version 5-3, mode 2-0). */
1439 spkt->li_vn_mode = leap << 6;
1440 spkt->li_vn_mode |= version << 3;
1441 spkt->li_vn_mode |= mode;
1446 ** set_time applies 'offset' to the local clock.
/* Policy: step when stepping is enabled and either slewing is disabled or
** |offset| exceeds steplimit; otherwise slew when enabled. Sets
** time_adjusted on success and returns a status derived from it. */
1459 ** If we can step but we cannot slew, then step.
1460 ** If we can step or slew and |offset| > steplimit, then step.
1462 if (ENABLED_OPT(STEP) &&
1463 ( !ENABLED_OPT(SLEW)
1464 || (ENABLED_OPT(SLEW) && (fabs(offset) > steplimit))
1466 rc = step_systime(offset);
1468 /* If there was a problem, can we rely on errno? */
1470 time_adjusted = TRUE;
1471 return (time_adjusted)
1475 ** In case of error, what should we use?
1482 if (ENABLED_OPT(SLEW)) {
1483 rc = adj_systime(offset);
1485 /* If there was a problem, can we rely on errno? */
1487 time_adjusted = TRUE;
1488 return (time_adjusted)
1492 ** In case of error, what should we use?
/* libevent_version_ok(): compare the major version we were compiled against
** with the one linked at runtime (top 16 bits of the version number);
** mismatches are fatal for ABI-compatibility reasons. */
1504 libevent_version_ok(void)
1506 ev_uint32_t v_compile_maj;
1507 ev_uint32_t v_run_maj;
1509 v_compile_maj = LIBEVENT_VERSION_NUMBER & 0xffff0000;
1510 v_run_maj = event_get_version_number() & 0xffff0000;
1511 if (v_compile_maj != v_run_maj) {
1513 "Incompatible libevent versions: have %s, built with %s\n",
1514 event_get_version(),
1522 * gettimeofday_cached()
1524 * Clones the event_base_gettimeofday_cached() interface but ensures the
1525 * times are always on the gettimeofday() 1970 scale. Older libevent 2
1526 * sometimes used gettimeofday(), sometimes the since-system-start
1527 * clock_gettime(CLOCK_MONOTONIC), depending on the platform.
1529 * It is not cleanly possible to tell which timescale older libevent is
1532 * The strategy involves 1 hour thresholds chosen to be far longer than
1533 * the duration of a round of libevent callbacks, which share a cached
1534 * start-of-round time. First compare the last cached time with the
1535 * current gettimeofday() time. If they are within one hour, libevent
1536 * is using the proper timescale so leave the offset 0. Otherwise,
1537 * compare libevent's cached time and the current time on the monotonic
1538 * scale. If they are within an hour, libevent is using the monotonic
1539 * scale so calculate the offset to add to such times to bring them to
1540 * gettimeofday()'s scale.
1543 gettimeofday_cached(
1544 struct event_base * b,
1545 struct timeval * caller_tv
1548 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
/* One-base cache: the adjusted value is reused while libevent's cached
** time (and the base) are unchanged within a callback round. */
1549 static struct event_base * cached_b;
1550 static struct timeval cached;
1551 static struct timeval adj_cached;
1552 static struct timeval offset;
1553 static int offset_ready;
1554 struct timeval latest;
1555 struct timeval systemt;
1557 struct timeval mono;
1558 struct timeval diff;
1562 event_base_gettimeofday_cached(b, &latest);
1563 if (b == cached_b &&
1564 !memcmp(&latest, &cached, sizeof(latest))) {
1565 *caller_tv = adj_cached;
/* First use (or cache miss): decide once which timescale libevent is on. */
1570 if (!offset_ready) {
1571 cgt_rc = clock_gettime(CLOCK_MONOTONIC, &ts);
1572 gtod_rc = gettimeofday(&systemt, NULL);
1575 "%s: gettimeofday() error %m",
1579 diff = sub_tval(systemt, latest);
1581 printf("system minus cached %+ld.%06ld\n",
1582 (long)diff.tv_sec, (long)diff.tv_usec);
1583 if (0 != cgt_rc || labs((long)diff.tv_sec) < 3600) {
1585 * Either use_monotonic == 0, or this libevent
1586 * has been repaired. Leave offset at zero.
1589 mono.tv_sec = ts.tv_sec;
1590 mono.tv_usec = ts.tv_nsec / 1000;
1591 diff = sub_tval(latest, mono);
1593 printf("cached minus monotonic %+ld.%06ld\n",
1594 (long)diff.tv_sec, (long)diff.tv_usec);
1595 if (labs((long)diff.tv_sec) < 3600) {
1596 /* older libevent2 using monotonic */
1597 offset = sub_tval(systemt, mono);
1598 TRACE(1, ("%s: Offsetting libevent CLOCK_MONOTONIC times by %+ld.%06ld\n",
1599 "gettimeofday_cached",
1600 (long)offset.tv_sec,
1601 (long)offset.tv_usec));
1604 offset_ready = TRUE;
1606 adj_cached = add_tval(cached, offset);
1607 *caller_tv = adj_cached;
/* Without clock_gettime()/CLOCK_MONOTONIC just pass libevent's value
** straight through. */
1611 return event_base_gettimeofday_cached(b, caller_tv);
1615 /* Dummy function to satisfy libntp/work_fork.c */
1616 extern int set_user_group_ids(void);
1617 int set_user_group_ids(void)