3 #include <event2/util.h>
4 #include <event2/event.h>
6 #include "ntp_workimpl.h"
8 # include <event2/thread.h>
12 #include "ntp_libopts.h"
13 #include "kod_management.h"
14 #include "networking.h"
15 #include "utilities.h"
/* Counters of outstanding async work; the event loop exits when both
 * reach zero (see check_exit_conditions()). */
23 int n_pending_dns = 0;
24 int n_pending_ntp = 0;
/* Address-family preference; forced to AF_INET when IPv6 probing fails. */
25 int ai_fam_pref = AF_UNSPEC;
/* Step threshold in seconds; set from OPT_VALUE_STEPLIMIT / 1e3 in main(). */
27 double steplimit = -1;
28 SOCKET sock4 = -1; /* Socket for IPv4 */
29 SOCKET sock6 = -1; /* Socket for IPv6 */
31 ** BCAST *must* listen on port 123 (by default), so we can only
32 ** use the UCST sockets (above) if they too are using port 123
34 SOCKET bsock4 = -1; /* Broadcast Socket for IPv4 */
35 SOCKET bsock6 = -1; /* Broadcast Socket for IPv6 */
/* Single libevent base plus persistent events for the two sockets,
 * the intres worker idle timer, and the transmit-queue timer. */
36 struct event_base *base;
37 struct event *ev_sock4;
38 struct event *ev_sock6;
39 struct event *ev_worker_timeout;
40 struct event *ev_xmt_timer;
/* dns_ctx flag bits: broadcast vs. unicast lookup, plus concurrent mode.
 * CTX_xCST is the mask covering both BCST and UCST. */
45 #define CTX_BCST 0x0001
46 #define CTX_UCST 0x0002
47 #define CTX_xCST 0x0003
48 #define CTX_CONC 0x0004
49 #define CTX_unused 0xfffd
51 struct timeval timeout;
55 typedef struct sent_pkt_tag sent_pkt;
58 struct dns_ctx * dctx;
65 typedef struct xmt_ctx_tag xmt_ctx;
/* Key list loaded by auth_init() from the -k/KEYFILE option. */
75 struct key * keys = NULL;
77 struct timeval response_tv;
78 struct timeval start_tv;
79 /* check the timeout at least once per second */
80 struct timeval wakeup_tv = { 0, 888888 };
/* Per-family singly-linked lists of packets sent and awaiting replies;
 * index 0 is IPv4, index 1 is IPv6 (see the two macros below). */
82 sent_pkt * fam_listheads[2];
83 #define v4_pkts_list (fam_listheads[0])
84 #define v6_pkts_list (fam_listheads[1])
88 char buf[LEN_PKT_NOMAC + NTP_MAXEXTEN + MAX_MAC_LEN];
91 #define r_pkt rbuf.pkt
94 int droproot; /* intres imports these */
97 u_long current_time; /* libntp/authkeys.c */
/* Forward declarations for this file's event callbacks and helpers. */
99 void open_sockets(void);
100 void handle_lookup(const char *name, int flags);
101 void sntp_addremove_fd(int fd, int is_pipe, int remove_it);
102 void worker_timeout(evutil_socket_t, short, void *);
103 void worker_resp_cb(evutil_socket_t, short, void *);
104 void sntp_name_resolved(int, int, void *, const char *, const char *,
105 const struct addrinfo *,
106 const struct addrinfo *);
107 void queue_xmt(SOCKET sock, struct dns_ctx *dctx, sent_pkt *spkt,
109 void xmt_timer_cb(evutil_socket_t, short, void *ptr);
110 void xmt(xmt_ctx *xctx);
111 int check_kod(const struct addrinfo *ai);
112 void timeout_query(sent_pkt *);
113 void timeout_queries(void);
114 void sock_cb(evutil_socket_t, short, void *);
115 void check_exit_conditions(void);
116 void sntp_libevent_log_cb(int, const char *);
117 void set_li_vn_mode(struct pkt *spkt, char leap, char version, char mode);
118 int set_time(double offset);
119 void dec_pending_ntp(const char *, sockaddr_u *);
120 int libevent_version_ok(void);
121 int gettimeofday_cached(struct event_base *b, struct timeval *tv);
125 * The actual main function.
/* Sequence: init logging -> verify libevent ABI -> parse options ->
 * init KoD db and auth keys -> build event base -> queue lookups ->
 * run the dispatch loop -> optionally step/slew the clock. */
131 const char *sntpVersion
137 struct event_config * evcfg;
139 /* Initialize logging system - sets up progname */
140 sntp_init_logging(argv[0]);
/* Refuse to run against a libevent whose runtime version differs from
 * the compile-time version (see libevent_version_ok()). */
142 if (!libevent_version_ok())
148 optct = ntpOptionProcess(&sntpOptions, argc, argv);
153 debug = OPT_VALUE_SET_DEBUG_LEVEL;
155 TRACE(2, ("init_lib() done, %s%s\n",
162 ntpver = OPT_VALUE_NTPVERSION;
/* steplimit option is in milliseconds; internal unit is seconds. */
163 steplimit = OPT_VALUE_STEPLIMIT / 1e3;
/* Clamp inter-transmit gap to [0, 999999] usec (one second max). */
164 gap.tv_usec = max(0, OPT_VALUE_GAP * 1000);
165 gap.tv_usec = min(gap.tv_usec, 999999);
167 if (HAVE_OPT(LOGFILE))
168 open_logfile(OPT_ARG(LOGFILE));
170 msyslog(LOG_INFO, "%s", sntpVersion);
172 if (0 == argc && !HAVE_OPT(BROADCAST) && !HAVE_OPT(CONCURRENT)) {
173 printf("%s: Must supply at least one of -b hostname, -c hostname, or hostname.\n",
180 ** Eventually, we probably want:
181 ** - separate bcst and ucst timeouts (why?)
182 ** - multiple --timeout values in the commandline
185 response_timeout = OPT_VALUE_TIMEOUT;
186 response_tv.tv_sec = response_timeout;
187 response_tv.tv_usec = 0;
189 /* IPv6 available? */
190 if (isc_net_probeipv6() != ISC_R_SUCCESS) {
191 ai_fam_pref = AF_INET;
192 TRACE(1, ("No ipv6 support available, forcing ipv4\n"));
194 /* Check for options -4 and -6 */
196 ai_fam_pref = AF_INET;
197 else if (HAVE_OPT(IPV6))
198 ai_fam_pref = AF_INET6;
201 /* TODO: Parse config file if declared */
204 ** Init the KOD system.
205 ** For embedded systems with no writable filesystem,
206 ** -K /dev/null can be used to disable KoD storage.
208 kod_init_kod_db(OPT_ARG(KOD), FALSE);
210 /* HMS: Check and see what happens if KEYFILE doesn't exist */
211 auth_init(OPT_ARG(KEYFILE), &keys);
214 ** Considering employing a variable that prevents functions of doing
215 ** anything until everything is initialized properly
217 ** HMS: What exactly does the above mean?
219 event_set_log_callback(&sntp_libevent_log_cb);
221 event_enable_debug_mode();
223 evthread_use_pthreads();
224 /* we use libevent from main thread only, locks should be academic */
/* NOTE: "debuging" is libevent's own (misspelled) API name, not a typo here. */
226 evthread_enable_lock_debuging();
228 evcfg = event_config_new();
230 printf("%s: event_config_new() failed!\n", progname);
/* Without socketpair() the intres pipe needs real-fd support from libevent. */
233 #ifndef HAVE_SOCKETPAIR
234 event_config_require_features(evcfg, EV_FEATURE_FDS);
236 /* all libevent calls are from main thread */
237 /* event_config_set_flag(evcfg, EVENT_BASE_FLAG_NOLOCK); */
238 base = event_base_new_with_config(evcfg);
239 event_config_free(evcfg);
241 printf("%s: event_base_new() failed!\n", progname);
245 /* wire into intres resolver */
246 worker_per_query = TRUE;
247 addremove_io_fd = &sntp_addremove_fd;
/* Queue one async lookup per -b, -c, and positional hostname argument. */
251 if (HAVE_OPT(BROADCAST)) {
252 int cn = STACKCT_OPT( BROADCAST );
253 const char ** cp = STACKLST_OPT( BROADCAST );
256 handle_lookup(*cp, CTX_BCST);
261 if (HAVE_OPT(CONCURRENT)) {
262 int cn = STACKCT_OPT( CONCURRENT );
263 const char ** cp = STACKLST_OPT( CONCURRENT );
266 handle_lookup(*cp, CTX_UCST | CTX_CONC);
271 for (i = 0; i < argc; ++i)
272 handle_lookup(argv[i], CTX_UCST);
/* Record the start time, then run until check_exit_conditions() or
 * timeout_queries() calls event_base_loopexit(). */
274 gettimeofday_cached(base, &start_tv);
275 event_base_dispatch(base);
276 event_base_free(base);
278 if (!time_adjusted &&
279 (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
289 ** open sockets and make them non-blocking
/* Creates the IPv4 (and, when available, IPv6) UDP sockets, binds each
 * to the wildcard address (port 123 with --usereservedport, else an
 * ephemeral port), and registers a persistent READ|TIMEOUT event
 * (sock_cb) with the sub-second wakeup_tv interval on each. */
299 sock4 = socket(PF_INET, SOCK_DGRAM, 0);
301 /* error getting a socket */
302 msyslog(LOG_ERR, "open_sockets: socket(PF_INET) failed: %m");
305 /* Make it non-blocking */
306 make_socket_nonblocking(sock4);
308 /* Let's try using a wildcard... */
311 SET_ADDR4N(&name, INADDR_ANY);
312 SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));
314 if (-1 == bind(sock4, &name.sa,
316 msyslog(LOG_ERR, "open_sockets: bind(sock4) failed: %m");
320 /* Register an NTP callback for recv/timeout */
321 ev_sock4 = event_new(base, sock4,
322 EV_TIMEOUT | EV_READ | EV_PERSIST,
324 if (NULL == ev_sock4) {
326 "open_sockets: event_new(base, sock4) failed!");
328 event_add(ev_sock4, &wakeup_tv);
332 /* We may not always have IPv6... */
333 if (-1 == sock6 && ipv6_works) {
334 sock6 = socket(PF_INET6, SOCK_DGRAM, 0);
/* Same condition re-checked after socket(): -1 here means creation failed. */
335 if (-1 == sock6 && ipv6_works) {
336 /* error getting a socket */
337 msyslog(LOG_ERR, "open_sockets: socket(PF_INET6) failed: %m");
340 /* Make it non-blocking */
341 make_socket_nonblocking(sock6);
343 /* Let's try using a wildcard... */
345 AF(&name) = AF_INET6;
346 SET_ADDR6N(&name, in6addr_any);
347 SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));
349 if (-1 == bind(sock6, &name.sa,
351 msyslog(LOG_ERR, "open_sockets: bind(sock6) failed: %m");
354 /* Register an NTP callback for recv/timeout */
355 ev_sock6 = event_new(base, sock6,
356 EV_TIMEOUT | EV_READ | EV_PERSIST,
358 if (NULL == ev_sock6) {
360 "open_sockets: event_new(base, sock6) failed!");
362 event_add(ev_sock6, &wakeup_tv);
/* handle_lookup(name, flags): start an async DNS lookup for one server.
 * Allocates a dns_ctx with the name copied into the same allocation
 * (single emalloc, single free), then hands it to getaddrinfo_sometime()
 * with sntp_name_resolved() as completion callback. */
379 struct addrinfo hints; /* Local copy is OK */
385 TRACE(1, ("handle_lookup(%s,%#x)\n", name, flags));
388 hints.ai_family = ai_fam_pref;
389 hints.ai_flags = AI_CANONNAME | Z_AI_NUMERICSERV;
391 ** Unless we specify a socktype, we'll get at least two
392 ** entries for each address: one for TCP and one for
393 ** UDP. That's not what we want.
395 hints.ai_socktype = SOCK_DGRAM;
396 hints.ai_protocol = IPPROTO_UDP;
/* name_sz includes the NUL terminator. */
398 name_sz = 1 + strlen(name);
399 octets = sizeof(*ctx) + name_sz; // Space for a ctx and the name
400 ctx = emalloc_zero(octets); // ctx at ctx[0]
401 name_copy = (char *)(ctx + 1); // Put the name at ctx[1]
402 memcpy(name_copy, name, name_sz); // copy the name to ctx[1]
403 ctx->name = name_copy; // point to it...
405 ctx->timeout = response_tv;
408 /* The following should arguably be passed in... */
409 if (ENABLED_OPT(AUTHENTICATION)) {
410 ctx->key_id = OPT_VALUE_AUTHENTICATION;
411 get_key(ctx->key_id, &ctx->key);
412 if (NULL == ctx->key) {
413 fprintf(stderr, "%s: Authentication with keyID %d requested, but no matching keyID found in <%s>!\n",
414 progname, ctx->key_id, OPT_ARG(KEYFILE));
/* Service "123" is numeric thanks to Z_AI_NUMERICSERV above. */
422 getaddrinfo_sometime(name, "123", &hints, 0,
423 &sntp_name_resolved, ctx);
431 ** - - increment n_pending_ntp
432 ** - - send a request if this is a Unicast callback
433 ** - - queue wait for response
434 ** - decrement n_pending_dns
/* DNS completion callback: walk the addrinfo list, queue a unicast
 * transmit for each usable address, then drop the pending-DNS count
 * and re-check exit conditions. */
442 const char * service,
443 const struct addrinfo * hints,
444 const struct addrinfo * addr
447 struct dns_ctx * dctx;
449 const struct addrinfo * ai;
/* EAI_SYSTEM means the real error is in errno (%m); otherwise use
 * gai_strerror() on the resolver result code. */
461 if (EAI_SYSTEM == rescode) {
463 mfprintf(stderr, "%s lookup error %m\n",
467 fprintf(stderr, "%s lookup error %s\n",
468 dctx->name, gai_strerror(rescode));
470 TRACE(3, ("%s [%s]\n", dctx->name,
471 (addr->ai_canonname != NULL)
475 for (ai = addr; ai != NULL; ai = ai->ai_next) {
/* Per-family transmit stagger; xmt_delay_v4/v6 presumably alternate
 * spacing between families — confirm against the elided lines. */
480 switch (ai->ai_family) {
484 xmt_delay = xmt_delay_v4;
493 xmt_delay = xmt_delay_v6;
498 msyslog(LOG_ERR, "sntp_name_resolved: unexpected ai_family: %d",
505 ** We're waiting for a response for either unicast
506 ** or broadcast, so...
510 /* If this is for a unicast IP, queue a request */
511 if (dctx->flags & CTX_UCST) {
512 spkt = emalloc_zero(sizeof(*spkt));
/* Copy no more than sizeof(spkt->addr) to avoid overrunning the union. */
514 octets = min(ai->ai_addrlen, sizeof(spkt->addr));
515 memcpy(&spkt->addr, ai->ai_addr, octets);
516 queue_xmt(sock, dctx, spkt, xmt_delay);
520 /* n_pending_dns really should be >0 here... */
522 check_exit_conditions();
/* queue_xmt(): add one sent_pkt to the per-family pending list and
 * schedule its transmit via the shared xmt_q sorted by absolute send
 * time. Duplicate addresses already on the list are rejected. */
532 struct dns_ctx * dctx,
538 sent_pkt ** pkt_listp;
541 struct timeval start_cb;
542 struct timeval delay;
546 pkt_listp = &v6_pkts_list;
548 pkt_listp = &v4_pkts_list;
550 /* reject attempts to add address already listed */
551 for (match = *pkt_listp; match != NULL; match = match->link) {
552 if (ADDR_PORT_EQ(&spkt->addr, &match->addr)) {
/* Distinguish "same host resolved twice" from two different names
 * resolving to the same address, for the user-facing message. */
553 if (strcasecmp(spkt->dctx->name,
555 printf("%s %s duplicate address from %s ignored.\n",
560 printf("%s %s, duplicate address ignored.\n",
/* A rejected duplicate still accounted for a pending NTP query. */
563 dec_pending_ntp(spkt->dctx->name, &spkt->addr);
569 LINK_SLIST(*pkt_listp, spkt, link);
571 xctx = emalloc_zero(sizeof(*xctx));
574 gettimeofday_cached(base, &start_cb);
/* Send time = now + 2*xmt_delay (seconds); queue is kept sorted. */
575 xctx->sched = start_cb.tv_sec + (2 * xmt_delay);
577 LINK_SORT_SLIST(xmt_q, xctx, (xctx->sched < L_S_S_CUR()->sched),
581 * The new entry is the first scheduled. The timer is
582 * either not active or is set for the second xmt
/* Lazily create the one-shot transmit timer (no fd, timeout only). */
585 if (NULL == ev_xmt_timer)
586 ev_xmt_timer = event_new(base, INVALID_SOCKET,
588 &xmt_timer_cb, NULL);
589 if (NULL == ev_xmt_timer) {
591 "queue_xmt: event_new(base, -1, EV_TIMEOUT) failed!");
595 if (xctx->sched > start_cb.tv_sec)
596 delay.tv_sec = xctx->sched - start_cb.tv_sec;
597 event_add(ev_xmt_timer, &delay);
598 TRACE(2, ("queue_xmt: xmt timer for %u usec\n",
599 (u_int)delay.tv_usec));
/* xmt_timer_cb(): fires when the head of xmt_q is due. Sends the due
 * entry, then re-arms the timer either with the short `gap` (another
 * entry already due) or with the delay until the next scheduled send. */
614 struct timeval start_cb;
615 struct timeval delay;
620 DEBUG_INSIST(EV_TIMEOUT == what);
/* Nothing queued, or loopexit already requested: do not re-arm. */
622 if (NULL == xmt_q || shutting_down)
624 gettimeofday_cached(base, &start_cb);
625 if (xmt_q->sched <= start_cb.tv_sec) {
626 UNLINK_HEAD_SLIST(x, xmt_q, link);
627 TRACE(2, ("xmt_timer_cb: at .%6.6u -> %s\n",
628 (u_int)start_cb.tv_usec, stoa(&x->spkt->addr)));
/* Second check: is the (new) head also already due? */
634 if (xmt_q->sched <= start_cb.tv_sec) {
635 event_add(ev_xmt_timer, &gap);
636 TRACE(2, ("xmt_timer_cb: at .%6.6u gap %6.6u\n",
637 (u_int)start_cb.tv_usec,
638 (u_int)gap.tv_usec));
640 delay.tv_sec = xmt_q->sched - start_cb.tv_sec;
642 event_add(ev_xmt_timer, &delay);
643 TRACE(2, ("xmt_timer_cb: at .%6.6u next %ld seconds\n",
644 (u_int)start_cb.tv_usec,
645 (long)delay.tv_sec));
/* xmt(): build and send one NTP client packet for the given transmit
 * context, recording the sent packet and send time in the sent_pkt so
 * sock_cb() can match and validate the reply. */
658 SOCKET sock = xctx->sock;
659 struct dns_ctx *dctx = xctx->spkt->dctx;
660 sent_pkt * spkt = xctx->spkt;
661 sockaddr_u * dst = &spkt->addr;
662 struct timeval tv_xmt;
667 if (0 != gettimeofday(&tv_xmt, NULL)) {
669 "xmt: gettimeofday() failed: %m");
/* Convert Unix epoch (1970) to NTP epoch (1900) for the wire format. */
672 tv_xmt.tv_sec += JAN_1970;
674 pkt_len = generate_pkt(&x_pkt, &tv_xmt, dctx->key_id,
677 sent = sendpkt(sock, dst, &x_pkt, pkt_len);
679 /* Save the packet we sent... */
680 memcpy(&spkt->x_pkt, &x_pkt, min(sizeof(spkt->x_pkt),
/* stime is stored back on the Unix timescale for age comparisons. */
682 spkt->stime = tv_xmt.tv_sec - JAN_1970;
684 TRACE(2, ("xmt: %lx.%6.6u %s %s\n", (u_long)tv_xmt.tv_sec,
685 (u_int)tv_xmt.tv_usec, dctx->name, stoa(dst)));
687 dec_pending_ntp(dctx->name, dst);
695 * timeout_queries() -- give up on unrequited NTP queries
/* Walks both per-family pending lists, times out entries older than
 * response_timeout, and bails out of the event loop entirely once the
 * whole run has exceeded response_timeout. */
698 timeout_queries(void)
700 struct timeval start_cb;
704 sent_pkt * spkt_next;
706 int didsomething = 0;
708 TRACE(3, ("timeout_queries: called to check %u items\n",
709 (unsigned)COUNTOF(fam_listheads)));
711 gettimeofday_cached(base, &start_cb);
712 for (idx = 0; idx < COUNTOF(fam_listheads); idx++) {
713 head = fam_listheads[idx];
714 for (spkt = head; spkt != NULL; spkt = spkt_next) {
/* xcst letter ('B'/'U') for trace output, derived from flags. */
718 switch (spkt->dctx->flags & CTX_xCST) {
728 INSIST(!"spkt->dctx->flags neither UCST nor BCST");
/* Capture link before any timeout_query() may unlink spkt. */
732 spkt_next = spkt->link;
/* Skip entries not yet sent (stime==0) or already completed. */
733 if (0 == spkt->stime || spkt->done)
735 age = start_cb.tv_sec - spkt->stime;
736 TRACE(3, ("%s %s %cCST age %ld\n",
738 spkt->dctx->name, xcst, age));
739 if (age > response_timeout)
743 // Do we care about didsomething?
744 TRACE(3, ("timeout_queries: didsomething is %d, age is %ld\n",
745 didsomething, (long) (start_cb.tv_sec - start_tv.tv_sec)));
746 if (start_cb.tv_sec - start_tv.tv_sec > response_timeout) {
747 TRACE(3, ("timeout_queries: bail!\n"));
748 event_base_loopexit(base, NULL);
749 shutting_down = TRUE;
/* dec_pending_ntp(): decrement the outstanding-NTP-query count and
 * re-check exit conditions; logs (rather than crashing in production)
 * if the counter would underflow. */
754 void dec_pending_ntp(
759 if (n_pending_ntp > 0) {
761 check_exit_conditions();
763 INSIST(0 == n_pending_ntp);
764 TRACE(1, ("n_pending_ntp was zero before decrement for %s\n",
765 hostnameaddr(name, server)));
/* timeout_query(): report one unanswered query and release its
 * pending-NTP slot. */
778 switch (spkt->dctx->flags & CTX_xCST) {
788 INSIST(!"spkt->dctx->flags neither UCST nor BCST");
792 server = &spkt->addr;
793 msyslog(LOG_INFO, "%s no %cCST response after %d seconds",
794 hostnameaddr(spkt->dctx->name, server), xcst,
796 dec_pending_ntp(spkt->dctx->name, server);
/* check_kod(): return nonzero (presumably) if a Kiss-o'-Death entry is
 * on file for this address, in which case the server is skipped. */
806 const struct addrinfo * ai
810 struct kod_entry *reason;
812 /* Is there a KoD on file for this address? */
813 hostname = addrinfo_to_str(ai);
814 TRACE(2, ("check_kod: checking <%s>\n", hostname));
815 if (search_entry(hostname, &reason)) {
816 printf("prior KoD for %s, skipping.\n",
830 ** Socket readable/timeout Callback:
831 ** Read in the packet
834 ** - decrement n_pending_ntp
835 ** - If packet is good, set the time and "exit"
837 ** - If packet is good, set the time and "exit"
/* sock_cb(): persistent READ|TIMEOUT handler for sock4/sock6. On
 * timeout it only re-checks query ages; on read it matches the sender
 * against the per-family sent list, validates via process_pkt(), and
 * hands good packets to handle_pkt(). */
848 sent_pkt ** p_pktlist;
853 INSIST(sock4 == fd || sock6 == fd);
855 TRACE(3, ("sock_cb: event on sock%s:%s%s%s%s\n",
859 (what & EV_TIMEOUT) ? " timeout" : "",
860 (what & EV_READ) ? " read" : "",
861 (what & EV_WRITE) ? " write" : "",
862 (what & EV_SIGNAL) ? " signal" : ""));
/* Pure timeout tick: run the age checks and return. */
864 if (!(EV_READ & what)) {
865 if (EV_TIMEOUT & what)
871 /* Read in the packet */
872 rpktl = recvdata(fd, &sender, &rbuf, sizeof(rbuf));
874 msyslog(LOG_DEBUG, "recvfrom error %m");
879 p_pktlist = &v6_pkts_list;
881 p_pktlist = &v4_pkts_list;
/* Drop packets from addresses we never queried (spoof/stray guard). */
883 for (spkt = *p_pktlist; spkt != NULL; spkt = spkt->link) {
885 if (SOCK_EQ(&sender, psau))
890 "Packet from unexpected source %s dropped",
895 TRACE(1, ("sock_cb: %s %s\n", spkt->dctx->name,
/* process_pkt() cross-checks the reply against the saved request. */
898 rpktl = process_pkt(&r_pkt, &sender, rpktl, MODE_SERVER,
899 &spkt->x_pkt, "sock_cb");
901 TRACE(2, ("sock_cb: process_pkt returned %d\n", rpktl));
903 /* If this is a Unicast packet, one down ... */
904 if (!spkt->done && (CTX_UCST & spkt->dctx->flags)) {
905 dec_pending_ntp(spkt->dctx->name, &spkt->addr);
910 /* If the packet is good, set the time and we're all done */
911 rc = handle_pkt(rpktl, &r_pkt, &spkt->addr, spkt->dctx->name);
913 TRACE(1, ("sock_cb: handle_pkt() returned %d\n", rc));
914 check_exit_conditions();
919 * check_exit_conditions()
921 * If sntp has a reply, ask the event loop to stop after this round of
922 * callbacks, unless --wait was used.
925 check_exit_conditions(void)
/* Exit when all work is drained, OR when we already derived a time and
 * the user did not request waiting for every server. */
927 if ((0 == n_pending_ntp && 0 == n_pending_dns) ||
928 (time_derived && !HAVE_OPT(WAIT))) {
929 event_base_loopexit(base, NULL);
930 shutting_down = TRUE;
932 TRACE(2, ("%d NTP and %d name queries pending\n",
933 n_pending_ntp, n_pending_dns));
939 * sntp_addremove_fd() is invoked by the intres blocking worker code
940 * to read from a pipe, or to stop same.
/* Registers (or tears down) a persistent EV_READ event on the worker's
 * response descriptor; the matching blocking_child is found by fd. */
942 void sntp_addremove_fd(
952 #ifdef HAVE_SOCKETPAIR
/* With socketpair() available, a pipe fd here indicates a build
 * misconfiguration — EV_FEATURE_FDS was only requested otherwise. */
954 /* sntp only asks for EV_FEATURE_FDS without HAVE_SOCKETPAIR */
955 msyslog(LOG_ERR, "fatal: pipes not supported on systems with socketpair()");
961 for (idx = 0; idx < blocking_children_alloc; idx++) {
962 c = blocking_children[idx];
965 if (fd == c->resp_read_pipe)
968 if (idx == blocking_children_alloc)
/* Removal path: detach and (presumably, in elided lines) free the event. */
972 ev = c->resp_read_ctx;
973 c->resp_read_ctx = NULL;
980 ev = event_new(base, fd, EV_READ | EV_PERSIST,
984 "sntp_addremove_fd: event_new(base, fd) failed!");
987 c->resp_read_ctx = ev;
992 /* called by forked intres child to close open descriptors */
999 if (INVALID_SOCKET != sock4) {
1001 sock4 = INVALID_SOCKET;
1003 if (INVALID_SOCKET != sock6) {
1005 sock6 = INVALID_SOCKET;
1007 if (INVALID_SOCKET != bsock4) {
/* NOTE(review): BUG — this branch tests bsock4 but resets sock4; it
 * should set bsock4 = INVALID_SOCKET. Same copy/paste slip in the
 * bsock6 branch below. Confirm against the full source and fix. */
1009 sock4 = INVALID_SOCKET;
1011 if (INVALID_SOCKET != bsock6) {
1013 sock6 = INVALID_SOCKET;
1020 * worker_resp_cb() is invoked when resp_read_pipe is readable.
/* Dispatches one completed intres worker response to
 * process_blocking_resp(); ctx is the owning blocking_child. */
1026 void * ctx /* blocking_child * */
1031 DEBUG_INSIST(EV_READ & what);
1033 DEBUG_INSIST(fd == c->resp_read_pipe);
1034 process_blocking_resp(c);
1039 * intres_timeout_req(s) is invoked in the parent to schedule an idle
1040 * timeout to fire in s seconds, if not reset earlier by a call to
1041 * intres_timeout_req(0), which clears any pending timeout. When the
1042 * timeout expires, worker_idle_timer_fired() is invoked (again, in the
1045 * sntp and ntpd each provide implementations adapted to their timers.
1049 u_int seconds /* 0 cancels */
1052 struct timeval tv_to;
/* Lazily create a persistent timer (no fd); re-arming first deletes
 * any pending instance so only one timeout is ever outstanding. */
1054 if (NULL == ev_worker_timeout) {
1055 ev_worker_timeout = event_new(base, -1,
1056 EV_TIMEOUT | EV_PERSIST,
1057 &worker_timeout, NULL);
1058 DEBUG_INSIST(NULL != ev_worker_timeout);
1060 event_del(ev_worker_timeout);
1064 tv_to.tv_sec = seconds;
1066 event_add(ev_worker_timeout, &tv_to);
/* worker_timeout(): timer callback — forwards to the shared intres
 * idle-timer handler. */
1080 DEBUG_REQUIRE(EV_TIMEOUT & what);
1081 worker_idle_timer_fired();
/* sntp_libevent_log_cb(): route libevent's internal log messages into
 * msyslog(), mapping libevent severities to syslog levels. */
1086 sntp_libevent_log_cb(
1096 case _EVENT_LOG_DEBUG:
1100 case _EVENT_LOG_MSG:
1104 case _EVENT_LOG_WARN:
1105 level = LOG_WARNING;
1108 case _EVENT_LOG_ERR:
1113 msyslog(level, "%s", msg);
/* generate_pkt(): fill in an NTP client-mode packet — transmit
 * timestamp, stratum, li/vn/mode — and, when a key is supplied, append
 * keyID plus MAC. Returns the total packet length. */
1120 const struct timeval *tv_xmt,
1129 pkt_len = LEN_PKT_NOMAC;
/* tv_xmt is already on the NTP (1900) timescale; convert to l_fp and
 * store in network byte order. */
1131 TVTOTS(tv_xmt, &xmt_fp);
1132 HTONL_FP(&xmt_fp, &x_pkt->xmt);
1133 x_pkt->stratum = STRATUM_TO_PKT(STRATUM_UNSPEC);
1135 /* FIXME! Modus broadcast + adr. check -> bdr. pkt */
1136 set_li_vn_mode(x_pkt, LEAP_NOTINSYNC, ntpver, 3);
/* NOTE(review): unconditional debug printf — consider TRACE()/guard. */
1138 printf("generate_pkt: key_id %d, key pointer %p\n", key_id, pkt_key);
1140 if (pkt_key != NULL) {
/* exten[0] carries the keyID; the MAC digest follows it. */
1141 x_pkt->exten[0] = htonl(key_id);
1142 mac_size = make_mac(x_pkt, pkt_len, MAX_MDG_LEN,
1143 pkt_key, (char *)&x_pkt->exten[1]);
1145 pkt_len += mac_size + KEY_MAC_LEN;
1148 printf("generate_pkt: mac_size is %d\n", mac_size);
/* handle_pkt(): act on a validated server reply — record KoD entries,
 * compute offset/precision/synch distance, log the result, and (unless
 * SNTP_PRETEND_TIME is set) step or slew the clock if so optioned. */
1162 const char * hostname
1166 const char * addrtxt;
1167 struct timeval tv_dst;
1174 const char * leaptxt;
1177 double synch_distance;
1178 char * p_SNTP_PRETEND_TIME;
1179 time_t pretend_time;
1180 #if SIZEOF_TIME_T == 8
/* Dispatch on the process_pkt() result code. */
1195 case SERVER_UNUSEABLE:
1199 case PACKET_UNUSEABLE:
1202 case SERVER_AUTH_FAIL:
1205 case KOD_DEMOBILIZE:
1206 /* Received a DENY or RESTR KOD packet */
1207 addrtxt = stoa(host);
1208 ref = (char *)&rpkt->refid;
1209 add_entry(addrtxt, ref);
1210 msyslog(LOG_WARNING, "KOD code %c%c%c%c from %s %s",
1211 ref[0], ref[1], ref[2], ref[3], addrtxt, hostname);
1217 ** We should probably call add_entry() with an
1218 ** expiration timestamp of several seconds in the future,
1219 ** and back-off even more if we get more RATE responses.
1224 TRACE(3, ("handle_pkt: %d bytes from %s %s\n",
1225 rpktl, stoa(host), hostname));
1227 gettimeofday_cached(base, &tv_dst);
/* Test hook: SNTP_PRETEND_TIME overrides the destination timestamp
 * so results are reproducible; parsed per sizeof(time_t). */
1229 p_SNTP_PRETEND_TIME = getenv("SNTP_PRETEND_TIME");
1230 if (p_SNTP_PRETEND_TIME) {
1232 #if SIZEOF_TIME_T == 4
1233 if (1 == sscanf(p_SNTP_PRETEND_TIME, "%ld", &l))
1234 pretend_time = (time_t)l;
1235 #elif SIZEOF_TIME_T == 8
1236 if (1 == sscanf(p_SNTP_PRETEND_TIME, "%lld", &ll))
1237 pretend_time = (time_t)ll;
/* Deliberate compile-time failure for unexpected time_t sizes. */
1239 # include "GRONK: unexpected value for SIZEOF_TIME_T"
1241 if (0 != pretend_time)
1242 tv_dst.tv_sec = pretend_time;
1245 offset_calculation(rpkt, rpktl, &tv_dst, &offset,
1246 &precision, &synch_distance);
1247 time_derived = TRUE;
/* Derive how many decimal digits the offset printout needs from the
 * server-advertised precision. */
1249 for (digits = 0; (precision *= 10.) < 1.; ++digits)
1254 ts_str = tv_to_str(&tv_dst);
1255 stratum = rpkt->stratum;
1259 if (synch_distance > 0.) {
1260 cnt = snprintf(disptxt, sizeof(disptxt),
1261 " +/- %f", synch_distance);
1262 if ((size_t)cnt >= sizeof(disptxt))
1263 snprintf(disptxt, sizeof(disptxt),
1264 "ERROR %d >= %d", cnt,
1265 (int)sizeof(disptxt));
/* Human-readable leap indicator for the log line. */
1270 switch (PKT_LEAP(rpkt->li_vn_mode)) {
1271 case LEAP_NOWARNING:
1272 leaptxt = "no-leap";
1274 case LEAP_ADDSECOND:
1275 leaptxt = "add-leap";
1277 case LEAP_DELSECOND:
1278 leaptxt = "del-leap";
1280 case LEAP_NOTINSYNC:
1284 leaptxt = "LEAP-ERROR";
1288 msyslog(LOG_INFO, "%s %+.*f%s %s s%d %s%s", ts_str,
1289 digits, offset, disptxt,
1290 hostnameaddr(hostname, host), stratum,
/* Never adjust the real clock when the pretend-time hook is active. */
1297 if (p_SNTP_PRETEND_TIME)
1300 if (!time_adjusted &&
1301 (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
1302 return set_time(offset);
/* offset_calculation(): classic four-timestamp NTP math. Computes
 * t21 = rec - org and t34 = xmt - dst, giving
 * offset = (t21 + t34) / 2, plus the server precision and an estimate
 * of the synchronization distance. */
1315 struct timeval *tv_dst,
1318 double *synch_distance
1321 l_fp p_rec, p_xmt, p_ref, p_org, tmp, dst;
1322 u_fp p_rdly, p_rdsp;
1323 double t21, t34, delta;
1325 /* Convert timestamps from network to host byte order */
1326 p_rdly = NTOHS_FP(rpkt->rootdelay);
1327 p_rdsp = NTOHS_FP(rpkt->rootdisp);
1328 NTOHL_FP(&rpkt->reftime, &p_ref);
1329 NTOHL_FP(&rpkt->org, &p_org);
1330 NTOHL_FP(&rpkt->rec, &p_rec);
1331 NTOHL_FP(&rpkt->xmt, &p_xmt);
/* precision field is a signed log2 exponent; LOGTOD converts to seconds. */
1333 *precision = LOGTOD(rpkt->precision);
1335 TRACE(3, ("offset_calculation: LOGTOD(rpkt->precision): %f\n", *precision));
1337 /* Compute offset etc. */
1339 L_SUB(&tmp, &p_org);
/* Destination time converted to NTP epoch to match the wire timestamps. */
1341 TVTOTS(tv_dst, &dst);
1342 dst.l_ui += JAN_1970;
1346 *offset = (t21 + t34) / 2.;
1349 // synch_distance is:
1350 // (peer->delay + peer->rootdelay) / 2 + peer->disp
1351 // + peer->rootdisp + clock_phi * (current_time - peer->update)
1354 // and peer->delay = fabs(peer->offset - p_offset) * 2;
1355 // and peer->offset needs history, so we're left with
1356 // p_offset = (t21 + t34) / 2.;
1357 // peer->disp = 0; (we have no history to augment this)
1358 // clock_phi = 15e-6;
1359 // peer->jitter = LOGTOD(sys_precision); (we have no history to augment this)
1360 // and ntp_proto.c:set_sys_tick_precision() should get us sys_precision.
1362 // so our answer seems to be:
1364 // (fabs(t21 + t34) + peer->rootdelay) / 3.
1367 // + 15e-6 (clock_phi)
1368 // + LOGTOD(sys_precision)
/* Sanity: root delay from the packet must be non-negative. */
1370 INSIST( FPTOD(p_rdly) >= 0. );
1372 *synch_distance = (fabs(t21 + t34) + FPTOD(p_rdly)) / 3.
1376 + 0. /* LOGTOD(sys_precision) when we can get it */
1378 INSIST( *synch_distance >= 0. );
/* Alternate (simpler) estimate — presumably the #else branch of a
 * conditional whose #if line is elided in this view. */
1380 *synch_distance = (FPTOD(p_rdly) + FPTOD(p_rdsp))/2.0;
1385 printf("sntp rootdelay: %f\n", FPTOD(p_rdly));
1386 printf("sntp rootdisp: %f\n", FPTOD(p_rdsp));
1387 printf("sntp syncdist: %f\n", *synch_distance);
1389 pkt_output(rpkt, rpktl, stdout);
1391 printf("sntp offset_calculation: rpkt->reftime:\n");
1392 l_fp_output(&p_ref, stdout);
1393 printf("sntp offset_calculation: rpkt->org:\n");
1394 l_fp_output(&p_org, stdout);
1395 printf("sntp offset_calculation: rpkt->rec:\n");
1396 l_fp_output(&p_rec, stdout);
1397 printf("sntp offset_calculation: rpkt->xmt:\n");
1398 l_fp_output(&p_xmt, stdout);
1402 TRACE(3, ("sntp offset_calculation:\trec - org t21: %.6f\n"
1403 "\txmt - dst t34: %.6f\tdelta: %.6f\toffset: %.6f\n",
1404 t21, t34, delta, *offset));
1411 /* Compute the 8 bits for li_vn_mode */
/* Clamps leap to <=3, version to the 3-bit range, and mode to <=7,
 * then packs them as LL VVV MMM into the first packet byte. */
1421 msyslog(LOG_DEBUG, "set_li_vn_mode: leap > 3, using max. 3");
/* Cast guards against negative char values on signed-char platforms. */
1425 if ((unsigned char)version > 7) {
1426 msyslog(LOG_DEBUG, "set_li_vn_mode: version < 0 or > 7, using 4");
1431 msyslog(LOG_DEBUG, "set_li_vn_mode: mode > 7, using client mode 3");
1435 spkt->li_vn_mode = leap << 6;
1436 spkt->li_vn_mode |= version << 3;
1437 spkt->li_vn_mode |= mode;
1442 ** set_time applies 'offset' to the local clock.
/* Returns an exit-status-style int; sets time_adjusted on success so
 * later paths do not adjust twice. */
1455 ** If we can step but we cannot slew, then step.
1456 ** If we can step or slew and and |offset| > steplimit, then step.
1458 if (ENABLED_OPT(STEP) &&
1459 ( !ENABLED_OPT(SLEW)
1460 || (ENABLED_OPT(SLEW) && (fabs(offset) > steplimit))
1462 rc = step_systime(offset);
1464 /* If there was a problem, can we rely on errno? */
1466 time_adjusted = TRUE;
1467 return (time_adjusted)
1471 ** In case of error, what should we use?
/* Slew path: gradual adjustment via adjtime()-style API. */
1478 if (ENABLED_OPT(SLEW)) {
1479 rc = adj_systime(offset);
1481 /* If there was a problem, can we rely on errno? */
1483 time_adjusted = TRUE;
1484 return (time_adjusted)
1488 ** In case of error, what should we use?
/* libevent_version_ok(): compare the compile-time and runtime libevent
 * version numbers, masked to the upper 16 bits (major.minor), and
 * complain on mismatch. Presumably returns nonzero when compatible. */
1500 libevent_version_ok(void)
1502 ev_uint32_t v_compile_maj;
1503 ev_uint32_t v_run_maj;
1505 v_compile_maj = LIBEVENT_VERSION_NUMBER & 0xffff0000;
1506 v_run_maj = event_get_version_number() & 0xffff0000;
1507 if (v_compile_maj != v_run_maj) {
1509 "Incompatible libevent versions: have %s, built with %s\n",
1510 event_get_version(),
1518 * gettimeofday_cached()
1520 * Clones the event_base_gettimeofday_cached() interface but ensures the
1521 * times are always on the gettimeofday() 1970 scale. Older libevent 2
1522 * sometimes used gettimeofday(), sometimes the since-system-start
1523 * clock_gettime(CLOCK_MONOTONIC), depending on the platform.
1525 * It is not cleanly possible to tell which timescale older libevent is
1528 * The strategy involves 1 hour thresholds chosen to be far longer than
1529 * the duration of a round of libevent callbacks, which share a cached
1530 * start-of-round time. First compare the last cached time with the
1531 * current gettimeofday() time. If they are within one hour, libevent
1532 * is using the proper timescale so leave the offset 0. Otherwise,
1533 * compare libevent's cached time and the current time on the monotonic
1534 * scale. If they are within an hour, libevent is using the monotonic
1535 * scale so calculate the offset to add to such times to bring them to
1536 * gettimeofday()'s scale.
1539 gettimeofday_cached(
1540 struct event_base * b,
1541 struct timeval * caller_tv
/* The correction logic only applies where CLOCK_MONOTONIC exists;
 * otherwise (fall through to the bottom) libevent's cached time is
 * passed straight through. */
1544 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
/* One-base memoization: cached is libevent's raw value, adj_cached the
 * corrected result, offset the monotonic->wall correction. */
1545 static struct event_base * cached_b;
1546 static struct timeval cached;
1547 static struct timeval adj_cached;
1548 static struct timeval offset;
1549 static int offset_ready;
1550 struct timeval latest;
1551 struct timeval systemt;
1553 struct timeval mono;
1554 struct timeval diff;
1558 event_base_gettimeofday_cached(b, &latest);
/* Fast path: same base and libevent's cached value unchanged since the
 * last call — reuse the previously adjusted result. */
1559 if (b == cached_b &&
1560 !memcmp(&latest, &cached, sizeof(latest))) {
1561 *caller_tv = adj_cached;
/* One-time timescale detection, per the strategy described above. */
1566 if (!offset_ready) {
1567 cgt_rc = clock_gettime(CLOCK_MONOTONIC, &ts);
1568 gtod_rc = gettimeofday(&systemt, NULL);
1571 "%s: gettimeofday() error %m",
1575 diff = sub_tval(systemt, latest);
1577 printf("system minus cached %+ld.%06ld\n",
1578 (long)diff.tv_sec, (long)diff.tv_usec);
/* Within an hour of wall time (or no monotonic clock): no offset. */
1579 if (0 != cgt_rc || labs((long)diff.tv_sec) < 3600) {
1581 * Either use_monotonic == 0, or this libevent
1582 * has been repaired. Leave offset at zero.
1585 mono.tv_sec = ts.tv_sec;
1586 mono.tv_usec = ts.tv_nsec / 1000;
1587 diff = sub_tval(latest, mono);
1589 printf("cached minus monotonic %+ld.%06ld\n",
1590 (long)diff.tv_sec, (long)diff.tv_usec);
1591 if (labs((long)diff.tv_sec) < 3600) {
1592 /* older libevent2 using monotonic */
1593 offset = sub_tval(systemt, mono);
1594 TRACE(1, ("%s: Offsetting libevent CLOCK_MONOTONIC times by %+ld.%06ld\n",
1595 "gettimeofday_cached",
1596 (long)offset.tv_sec,
1597 (long)offset.tv_usec));
1600 offset_ready = TRUE;
1602 adj_cached = add_tval(cached, offset);
1603 *caller_tv = adj_cached;
/* Non-monotonic builds: delegate directly to libevent. */
1607 return event_base_gettimeofday_cached(b, caller_tv);