2 * ntp_request.c - respond to information requests
11 #include "ntp_request.h"
12 #include "ntp_control.h"
13 #include "ntp_refclock.h"
15 #include "ntp_stdlib.h"
16 #include "ntp_assert.h"
21 #ifdef HAVE_NETINET_IN_H
22 #include <netinet/in.h>
24 #include <arpa/inet.h>
29 #include "ntp_syscall.h"
30 #endif /* KERNEL_PLL */
33 * Structure to hold request procedure information
38 #define NO_REQUEST (-1)
40 * Because we now have v6 addresses in the messages, we need to compensate
41 * for the larger size. Therefore, we introduce the alternate size to
42 * keep us friendly with older implementations. A little ugly.
44 static int client_v6_capable = 0; /* the client can handle longer messages */
46 #define v6sizeof(type) (client_v6_capable ? sizeof(type) : v4sizeof(type))
49 short request_code; /* defined request code */
50 short needs_auth; /* true when authentication needed */
51 short sizeofitem; /* size of request data item (older size)*/
52 short v6_sizeofitem; /* size of request data item (new size)*/
53 void (*handler) (sockaddr_u *, endpt *,
54 struct req_pkt *); /* routine to handle request */
58 * Universal request codes
60 static const struct req_proc univ_codes[] = {
61 { NO_REQUEST, NOAUTH, 0, 0, NULL }
/*
 * Forward declarations for the mode-7 request machinery.  The first
 * group manages the shared response packet buffer (rpkt); the rest are
 * per-request handlers referenced from the dispatch tables below.
 */
64 static void req_ack (sockaddr_u *, endpt *, struct req_pkt *, int);
65 static void * prepare_pkt (sockaddr_u *, endpt *,
66 struct req_pkt *, size_t);
67 static void * more_pkt (void);
68 static void flush_pkt (void);
/* peer listing / query handlers */
69 static void list_peers (sockaddr_u *, endpt *, struct req_pkt *);
70 static void list_peers_sum (sockaddr_u *, endpt *, struct req_pkt *);
71 static void peer_info (sockaddr_u *, endpt *, struct req_pkt *);
72 static void peer_stats (sockaddr_u *, endpt *, struct req_pkt *);
/* system and subsystem statistics handlers */
73 static void sys_info (sockaddr_u *, endpt *, struct req_pkt *);
74 static void sys_stats (sockaddr_u *, endpt *, struct req_pkt *);
75 static void mem_stats (sockaddr_u *, endpt *, struct req_pkt *);
76 static void io_stats (sockaddr_u *, endpt *, struct req_pkt *);
77 static void timer_stats (sockaddr_u *, endpt *, struct req_pkt *);
78 static void loop_info (sockaddr_u *, endpt *, struct req_pkt *);
/* runtime (re)configuration handlers */
79 static void do_conf (sockaddr_u *, endpt *, struct req_pkt *);
80 static void do_unconf (sockaddr_u *, endpt *, struct req_pkt *);
81 static void set_sys_flag (sockaddr_u *, endpt *, struct req_pkt *);
82 static void clr_sys_flag (sockaddr_u *, endpt *, struct req_pkt *);
83 static void setclr_flags (sockaddr_u *, endpt *, struct req_pkt *, u_long);
/* access-restriction list handlers */
84 static void list_restrict4 (const restrict_u *, struct info_restrict **);
85 static void list_restrict6 (const restrict_u *, struct info_restrict **);
86 static void list_restrict (sockaddr_u *, endpt *, struct req_pkt *);
87 static void do_resaddflags (sockaddr_u *, endpt *, struct req_pkt *);
88 static void do_ressubflags (sockaddr_u *, endpt *, struct req_pkt *);
89 static void do_unrestrict (sockaddr_u *, endpt *, struct req_pkt *);
90 static void do_restrict (sockaddr_u *, endpt *, struct req_pkt *, restrict_op);
/* monitoring, counter-reset and key-management handlers */
91 static void mon_getlist (sockaddr_u *, endpt *, struct req_pkt *);
92 static void reset_stats (sockaddr_u *, endpt *, struct req_pkt *);
93 static void reset_peer (sockaddr_u *, endpt *, struct req_pkt *);
94 static void do_key_reread (sockaddr_u *, endpt *, struct req_pkt *);
95 static void trust_key (sockaddr_u *, endpt *, struct req_pkt *);
96 static void untrust_key (sockaddr_u *, endpt *, struct req_pkt *);
97 static void do_trustkey (sockaddr_u *, endpt *, struct req_pkt *, u_long);
98 static void get_auth_info (sockaddr_u *, endpt *, struct req_pkt *);
/* trap (async notification) and control/interface handlers */
99 static void req_get_traps (sockaddr_u *, endpt *, struct req_pkt *);
100 static void req_set_trap (sockaddr_u *, endpt *, struct req_pkt *);
101 static void req_clr_trap (sockaddr_u *, endpt *, struct req_pkt *);
102 static void do_setclr_trap (sockaddr_u *, endpt *, struct req_pkt *, int);
103 static void set_request_keyid (sockaddr_u *, endpt *, struct req_pkt *);
104 static void set_control_keyid (sockaddr_u *, endpt *, struct req_pkt *);
105 static void get_ctl_stats (sockaddr_u *, endpt *, struct req_pkt *);
106 static void get_if_stats (sockaddr_u *, endpt *, struct req_pkt *);
107 static void do_if_reload (sockaddr_u *, endpt *, struct req_pkt *);
/* conditionally compiled handlers (kernel PLL / reference clocks) */
109 static void get_kernel_info (sockaddr_u *, endpt *, struct req_pkt *);
110 #endif /* KERNEL_PLL */
112 static void get_clock_info (sockaddr_u *, endpt *, struct req_pkt *);
113 static void set_clock_fudge (sockaddr_u *, endpt *, struct req_pkt *);
114 #endif /* REFCLOCK */
116 static void get_clkbug_info (sockaddr_u *, endpt *, struct req_pkt *);
117 #endif /* REFCLOCK */
/*
 * Dispatch table for the XNTPD implementation's request codes.  Each
 * row gives: request code, whether authentication is required, the
 * older (v4) per-item size, the newer (v6-capable) per-item size, and
 * the handler.  The NO_REQUEST row terminates the table.
 * NOTE(review): several continuation lines of this table (handler
 * fields, #ifdef guards) are missing from this excerpt.
 */
122 static const struct req_proc ntp_codes[] = {
123 { REQ_PEER_LIST, NOAUTH, 0, 0, list_peers },
124 { REQ_PEER_LIST_SUM, NOAUTH, 0, 0, list_peers_sum },
125 { REQ_PEER_INFO, NOAUTH, v4sizeof(struct info_peer_list),
126 sizeof(struct info_peer_list), peer_info},
127 { REQ_PEER_STATS, NOAUTH, v4sizeof(struct info_peer_list),
128 sizeof(struct info_peer_list), peer_stats},
129 { REQ_SYS_INFO, NOAUTH, 0, 0, sys_info },
130 { REQ_SYS_STATS, NOAUTH, 0, 0, sys_stats },
131 { REQ_IO_STATS, NOAUTH, 0, 0, io_stats },
132 { REQ_MEM_STATS, NOAUTH, 0, 0, mem_stats },
133 { REQ_LOOP_INFO, NOAUTH, 0, 0, loop_info },
134 { REQ_TIMER_STATS, NOAUTH, 0, 0, timer_stats },
135 { REQ_CONFIG, AUTH, v4sizeof(struct conf_peer),
136 sizeof(struct conf_peer), do_conf },
137 { REQ_UNCONFIG, AUTH, v4sizeof(struct conf_unpeer),
138 sizeof(struct conf_unpeer), do_unconf },
139 { REQ_SET_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
140 sizeof(struct conf_sys_flags), set_sys_flag },
141 { REQ_CLR_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
142 sizeof(struct conf_sys_flags), clr_sys_flag },
143 { REQ_GET_RESTRICT, NOAUTH, 0, 0, list_restrict },
144 { REQ_RESADDFLAGS, AUTH, v4sizeof(struct conf_restrict),
145 sizeof(struct conf_restrict), do_resaddflags },
146 { REQ_RESSUBFLAGS, AUTH, v4sizeof(struct conf_restrict),
147 sizeof(struct conf_restrict), do_ressubflags },
148 { REQ_UNRESTRICT, AUTH, v4sizeof(struct conf_restrict),
149 sizeof(struct conf_restrict), do_unrestrict },
150 { REQ_MON_GETLIST, NOAUTH, 0, 0, mon_getlist },
151 { REQ_MON_GETLIST_1, NOAUTH, 0, 0, mon_getlist },
152 { REQ_RESET_STATS, AUTH, sizeof(struct reset_flags), 0, reset_stats },
153 { REQ_RESET_PEER, AUTH, v4sizeof(struct conf_unpeer),
154 sizeof(struct conf_unpeer), reset_peer },
155 { REQ_REREAD_KEYS, AUTH, 0, 0, do_key_reread },
156 { REQ_TRUSTKEY, AUTH, sizeof(u_long), sizeof(u_long), trust_key },
157 { REQ_UNTRUSTKEY, AUTH, sizeof(u_long), sizeof(u_long), untrust_key },
158 { REQ_AUTHINFO, NOAUTH, 0, 0, get_auth_info },
159 { REQ_TRAPS, NOAUTH, 0, 0, req_get_traps },
160 { REQ_ADD_TRAP, AUTH, v4sizeof(struct conf_trap),
161 sizeof(struct conf_trap), req_set_trap },
162 { REQ_CLR_TRAP, AUTH, v4sizeof(struct conf_trap),
163 sizeof(struct conf_trap), req_clr_trap },
164 { REQ_REQUEST_KEY, AUTH, sizeof(u_long), sizeof(u_long),
166 { REQ_CONTROL_KEY, AUTH, sizeof(u_long), sizeof(u_long),
168 { REQ_GET_CTLSTATS, NOAUTH, 0, 0, get_ctl_stats },
170 { REQ_GET_KERNEL, NOAUTH, 0, 0, get_kernel_info },
173 { REQ_GET_CLOCKINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
175 { REQ_SET_CLKFUDGE, AUTH, sizeof(struct conf_fudge),
176 sizeof(struct conf_fudge), set_clock_fudge },
177 { REQ_GET_CLKBUGINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
180 { REQ_IF_STATS, AUTH, 0, 0, get_if_stats },
181 { REQ_IF_RELOAD, AUTH, 0, 0, do_if_reload },
183 { NO_REQUEST, NOAUTH, 0, 0, 0 }
188 * Authentication keyid used to authenticate requests. Zero means we
189 * don't allow writing anything.
191 keyid_t info_auth_keyid;
194 * Statistic counters to keep track of requests and responses.
196 u_long numrequests; /* number of requests we've received */
197 u_long numresppkts; /* number of resp packets sent with data */
200 * lazy way to count errors, indexed by the error code
202 u_long errorcounter[MAX_INFO_ERR + 1];
205 * A hack. To keep the authentication module clear of ntp-ism's, we
206 * include a time reset variable for its stats here.
208 u_long auth_timereset;
211 * Response packet used by these routines. Also some state information
212 * so that we can handle packet formatting within a common set of
213 * subroutines. Note we try to enter data in place whenever possible,
214 * but the need to set the more bit correctly means we occasionally
215 * use the extra buffer and copy.
217 static struct resp_pkt rpkt; /* response packet under construction */
222 static int databytes; /* bytes of item data currently in rpkt */
223 static char exbuf[RESP_DATA_SIZE]; /* overflow buffer for one data item */
224 static int usingexbuf; /* nonzero while exbuf holds the current item */
225 static sockaddr_u *toaddr; /* saved destination for deferred sends */
226 static endpt *frominter; /* saved local endpoint for deferred sends */
229 * init_request - initialize request data
239 info_auth_keyid = 0; /* by default, can't do this */
241 for (i = 0; i < sizeof(errorcounter)/sizeof(errorcounter[0]); i++)
247 * req_ack - acknowledge request with no data
253 struct req_pkt *inpkt,
/*
 * Build a header-only response that echoes the request's
 * implementation and request codes, carries the given error code with
 * zero data items, then send it and bump the per-error counter.
 */
260 rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
261 rpkt.auth_seq = AUTH_SEQ(0, 0); /* unauthenticated, sequence 0 */
262 rpkt.implementation = inpkt->implementation;
263 rpkt.request = inpkt->request;
264 rpkt.err_nitems = ERR_NITEMS(errcode, 0); /* error code, zero items */
265 rpkt.mbz_itemsize = MBZ_ITEMSIZE(0);
268 * send packet and bump counters
270 sendpkt(srcadr, inter, -1, (struct pkt *)&rpkt, RESP_HEADER_SIZE);
271 errorcounter[errcode]++;
276 * prepare_pkt - prepare response packet for transmission, return pointer
277 * to storage for data item.
287 DPRINTF(4, ("request: preparing pkt\n"));
290 * Fill in the implementation, request and itemsize fields
291 * since these won't change.
293 rpkt.implementation = pkt->implementation;
294 rpkt.request = pkt->request;
295 rpkt.mbz_itemsize = MBZ_ITEMSIZE(structsize);
298 * Compute the static data needed to carry on.
304 itemsize = structsize;
309 * return the beginning of the packet buffer.
316 * more_pkt - return a data pointer for a new item.
322 * If we were using the extra buffer, send the packet.
325 DPRINTF(3, ("request: sending pkt\n"));
326 rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, MORE_BIT, reqver);
327 rpkt.auth_seq = AUTH_SEQ(0, seqno);
328 rpkt.err_nitems = htons((u_short)nitems);
329 sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
330 RESP_HEADER_SIZE + databytes);
334 * Copy data out of exbuf into the packet.
336 memcpy(&rpkt.u.data[0], exbuf, (unsigned)itemsize);
343 databytes += itemsize;
345 if (databytes + itemsize <= RESP_DATA_SIZE) {
346 DPRINTF(4, ("request: giving him more data\n"));
348 * More room in packet. Give him the
351 return &rpkt.u.data[databytes];
354 * No room in packet. Give him the extra
355 * buffer unless this was the last in the sequence.
357 DPRINTF(4, ("request: into extra buffer\n"));
369 * flush_pkt - we're done, return remaining information.
374 DPRINTF(3, ("request: flushing packet, %d items\n", nitems));
376 * Must send the last packet. If nothing in here and nothing
377 * has been sent, send an error saying no data to be found.
379 if (seqno == 0 && nitems == 0)
380 req_ack(toaddr, frominter, (struct req_pkt *)&rpkt,
/*
 * Final packet of the sequence: MORE bit clear (second RM_VN_MODE
 * argument is 0), current sequence number and item count in the
 * header, accumulated data bytes appended after the header.
 */
383 rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
384 rpkt.auth_seq = AUTH_SEQ(0, seqno);
385 rpkt.err_nitems = htons((u_short)nitems);
386 sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
387 RESP_HEADER_SIZE+databytes);
395 * Given a buffer, return the packet mode
398 get_packet_mode(struct recvbuf *rbufp)
/* extract the mode bits from the packet's rm_vn_mode octet */
400 struct req_pkt *inpkt = (struct req_pkt *)&rbufp->recv_pkt;
401 return (INFO_MODE(inpkt->rm_vn_mode));
406 * process_private - process private mode (7) packets
410 struct recvbuf *rbufp,
414 static u_long quiet_until;
415 struct req_pkt *inpkt;
416 struct req_pkt_tail *tailinpkt;
419 const struct req_proc *proc;
429 * Initialize pointers, for convenience
431 recv_len = rbufp->recv_length;
432 inpkt = (struct req_pkt *)&rbufp->recv_pkt;
433 srcadr = &rbufp->recv_srcadr;
434 inter = rbufp->dstadr;
436 DPRINTF(3, ("process_private: impl %d req %d\n",
437 inpkt->implementation, inpkt->request));
440 * Do some sanity checks on the packet. Return a format
444 if ( (++ec, ISRESPONSE(inpkt->rm_vn_mode))
445 || (++ec, ISMORE(inpkt->rm_vn_mode))
446 || (++ec, INFO_VERSION(inpkt->rm_vn_mode) > NTP_VERSION)
447 || (++ec, INFO_VERSION(inpkt->rm_vn_mode) < NTP_OLDVERSION)
448 || (++ec, INFO_SEQ(inpkt->auth_seq) != 0)
449 || (++ec, INFO_ERR(inpkt->err_nitems) != 0)
450 || (++ec, INFO_MBZ(inpkt->mbz_itemsize) != 0)
451 || (++ec, rbufp->recv_length < (int)REQ_LEN_HDR)
454 if (current_time >= quiet_until) {
456 "process_private: drop test %d"
457 " failed, pkt from %s",
459 quiet_until = current_time + 60;
464 reqver = INFO_VERSION(inpkt->rm_vn_mode);
467 * Get the appropriate procedure list to search.
469 if (inpkt->implementation == IMPL_UNIV)
471 else if ((inpkt->implementation == IMPL_XNTPD) ||
472 (inpkt->implementation == IMPL_XNTPD_OLD))
475 req_ack(srcadr, inter, inpkt, INFO_ERR_IMPL);
480 * Search the list for the request codes. If it isn't one
481 * we know, return an error.
483 while (proc->request_code != NO_REQUEST) {
484 if (proc->request_code == (short) inpkt->request)
488 if (proc->request_code == NO_REQUEST) {
489 req_ack(srcadr, inter, inpkt, INFO_ERR_REQ);
493 DPRINTF(4, ("found request in tables\n"));
496 * If we need data, check to see if we have some. If we
497 * don't, check to see that there is none (picky, picky).
500 /* This part is a bit tricky, we want to be sure that the size
501 * returned is either the old or the new size. We also can find
502 * out if the client can accept both types of messages this way.
504 * Handle the exception of REQ_CONFIG. It can have two data sizes.
506 temp_size = INFO_ITEMSIZE(inpkt->mbz_itemsize);
507 if ((temp_size != proc->sizeofitem &&
508 temp_size != proc->v6_sizeofitem) &&
509 !(inpkt->implementation == IMPL_XNTPD &&
510 inpkt->request == REQ_CONFIG &&
511 temp_size == sizeof(struct old_conf_peer))) {
512 DPRINTF(3, ("process_private: wrong item size, received %d, should be %d or %d\n",
513 temp_size, proc->sizeofitem, proc->v6_sizeofitem));
514 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
517 if ((proc->sizeofitem != 0) &&
518 ((size_t)(temp_size * INFO_NITEMS(inpkt->err_nitems)) >
519 (recv_len - REQ_LEN_HDR))) {
520 DPRINTF(3, ("process_private: not enough data\n"));
521 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
525 switch (inpkt->implementation) {
527 client_v6_capable = 1;
530 client_v6_capable = 0;
533 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
538 * If we need to authenticate, do so. Note that an
539 * authenticatable packet must include a mac field, must
540 * have used key info_auth_keyid and must have included
541 * a time stamp in the appropriate field. The time stamp
542 * must be within INFO_TS_MAXSKEW of the receive
545 if (proc->needs_auth && sys_authenticate) {
547 if (recv_len < (REQ_LEN_HDR +
548 (INFO_ITEMSIZE(inpkt->mbz_itemsize) *
549 INFO_NITEMS(inpkt->err_nitems)) +
551 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
556 * For 16-octet digests, regardless of itemsize and
557 * nitems, authenticated requests are a fixed size
558 * with the timestamp, key ID, and digest located
559 * at the end of the packet. Because the key ID
560 * determining the digest size precedes the digest,
561 * for larger digests the fixed size request scheme
562 * is abandoned and the timestamp, key ID, and digest
563 * are located relative to the start of the packet,
564 * with the digest size determined by the packet size.
566 noslop_len = REQ_LEN_HDR
567 + INFO_ITEMSIZE(inpkt->mbz_itemsize) *
568 INFO_NITEMS(inpkt->err_nitems)
569 + sizeof(inpkt->tstamp);
570 /* 32-bit alignment */
571 noslop_len = (noslop_len + 3) & ~3;
572 if (recv_len > (noslop_len + MAX_MAC_LEN))
575 mac_len = recv_len - noslop_len;
577 tailinpkt = (void *)((char *)inpkt + recv_len -
578 (mac_len + sizeof(inpkt->tstamp)));
581 * If this guy is restricted from doing this, don't let
582 * him. If the wrong key was used, or packet doesn't
585 /* XXX: Use authistrustedip(), or equivalent. */
586 if (!INFO_IS_AUTH(inpkt->auth_seq) || !info_auth_keyid
587 || ntohl(tailinpkt->keyid) != info_auth_keyid) {
588 DPRINTF(5, ("failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
589 INFO_IS_AUTH(inpkt->auth_seq),
591 ntohl(tailinpkt->keyid), (u_long)mac_len));
594 "process_private: failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
595 INFO_IS_AUTH(inpkt->auth_seq),
597 ntohl(tailinpkt->keyid), (u_long)mac_len);
599 req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
602 if (recv_len > REQ_LEN_NOMAC + MAX_MAC_LEN) {
603 DPRINTF(5, ("bad pkt length %zu\n", recv_len));
605 "process_private: bad pkt length %zu",
607 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
610 if (!mod_okay || !authhavekey(info_auth_keyid)) {
611 DPRINTF(5, ("failed auth mod_okay %d\n",
615 "process_private: failed auth mod_okay %d\n",
621 req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
626 * calculate absolute time difference between xmit time stamp
627 * and receive time stamp. If too large, too bad.
629 NTOHL_FP(&tailinpkt->tstamp, &ftmp);
630 L_SUB(&ftmp, &rbufp->recv_time);
631 LFPTOD(&ftmp, dtemp);
632 if (fabs(dtemp) > INFO_TS_MAXSKEW) {
634 * He's a loser. Tell him.
636 DPRINTF(5, ("xmit/rcv timestamp delta %g > INFO_TS_MAXSKEW %g\n",
637 dtemp, INFO_TS_MAXSKEW));
638 req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
643 * So far so good. See if decryption works out okay.
645 if (!authdecrypt(info_auth_keyid, (u_int32 *)inpkt,
646 recv_len - mac_len, mac_len)) {
647 DPRINTF(5, ("authdecrypt failed\n"));
648 req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
653 DPRINTF(3, ("process_private: all okay, into handler\n"));
655 * Packet is okay. Call the handler to send him data.
657 (proc->handler)(srcadr, inter, inpkt);
662 * list_peers - send a list of the peers
668 struct req_pkt *inpkt
671 struct info_peer_list * ip;
672 const struct peer * pp;
/*
 * Emit one info_peer_list entry per known peer.  IPv6 peers are
 * skipped for clients that cannot accept the longer v6-capable item
 * format (see client_v6_capable / v6sizeof).
 */
674 ip = (struct info_peer_list *)prepare_pkt(srcadr, inter, inpkt,
675 v6sizeof(struct info_peer_list));
676 for (pp = peer_list; pp != NULL && ip != NULL; pp = pp->p_link) {
677 if (IS_IPV6(&pp->srcadr)) {
678 if (!client_v6_capable)
680 ip->addr6 = SOCK_ADDR6(&pp->srcadr);
683 ip->addr = NSRCADR(&pp->srcadr);
684 if (client_v6_capable)
688 ip->port = NSRCPORT(&pp->srcadr);
689 ip->hmode = pp->hmode;
/* translate peer state into the INFO_FLAG_* bits for the client */
691 if (pp->flags & FLAG_CONFIG)
692 ip->flags |= INFO_FLAG_CONFIG;
694 ip->flags |= INFO_FLAG_SYSPEER;
695 if (pp->status == CTL_PST_SEL_SYNCCAND)
696 ip->flags |= INFO_FLAG_SEL_CANDIDATE;
697 if (pp->status >= CTL_PST_SEL_SYSPEER)
698 ip->flags |= INFO_FLAG_SHORTLIST;
699 ip = (struct info_peer_list *)more_pkt(); /* next output slot */
707 * list_peers_sum - return extended peer list
713 struct req_pkt *inpkt
716 struct info_peer_summary * ips;
717 const struct peer * pp;
720 DPRINTF(3, ("wants peer list summary\n"));
722 ips = (struct info_peer_summary *)prepare_pkt(srcadr, inter, inpkt,
723 v6sizeof(struct info_peer_summary));
724 for (pp = peer_list; pp != NULL && ips != NULL; pp = pp->p_link) {
725 DPRINTF(4, ("sum: got one\n"));
727 * Be careful here not to return v6 peers when we
730 if (IS_IPV6(&pp->srcadr)) {
731 if (!client_v6_capable)
733 ips->srcadr6 = SOCK_ADDR6(&pp->srcadr);
736 ips->dstadr6 = SOCK_ADDR6(&pp->dstadr->sin);
740 ips->srcadr = NSRCADR(&pp->srcadr);
741 if (client_v6_capable)
746 ips->dstadr = NSRCADR(&pp->dstadr->sin);
748 if (MDF_BCAST == pp->cast_flags)
749 ips->dstadr = NSRCADR(&pp->dstadr->bcast);
750 else if (pp->cast_flags) {
751 ips->dstadr = NSRCADR(&pp->dstadr->sin);
753 ips->dstadr = NSRCADR(&pp->dstadr->bcast);
761 ips->srcport = NSRCPORT(&pp->srcadr);
762 ips->stratum = pp->stratum;
763 ips->hpoll = pp->hpoll;
764 ips->ppoll = pp->ppoll;
765 ips->reach = pp->reach;
768 ips->flags |= INFO_FLAG_SYSPEER;
769 if (pp->flags & FLAG_CONFIG)
770 ips->flags |= INFO_FLAG_CONFIG;
771 if (pp->flags & FLAG_REFCLOCK)
772 ips->flags |= INFO_FLAG_REFCLOCK;
773 if (pp->flags & FLAG_PREFER)
774 ips->flags |= INFO_FLAG_PREFER;
775 if (pp->flags & FLAG_BURST)
776 ips->flags |= INFO_FLAG_BURST;
777 if (pp->status == CTL_PST_SEL_SYNCCAND)
778 ips->flags |= INFO_FLAG_SEL_CANDIDATE;
779 if (pp->status >= CTL_PST_SEL_SYSPEER)
780 ips->flags |= INFO_FLAG_SHORTLIST;
781 ips->hmode = pp->hmode;
782 ips->delay = HTONS_FP(DTOFP(pp->delay));
783 DTOLFP(pp->offset, <mp);
784 HTONL_FP(<mp, &ips->offset);
785 ips->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));
787 ips = (struct info_peer_summary *)more_pkt();
795 * peer_info - send information for one or more peers
801 struct req_pkt *inpkt
807 struct info_peer_list ipl;
809 struct info_peer * ip;
815 items = INFO_NITEMS(inpkt->err_nitems);
816 item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
817 datap = inpkt->u.data;
818 if (item_sz != sizeof(ipl)) {
819 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
822 ip = prepare_pkt(srcadr, inter, inpkt,
823 v6sizeof(struct info_peer));
824 while (items-- > 0 && ip != NULL) {
826 memcpy(&ipl, datap, item_sz);
828 NSRCPORT(&addr) = ipl.port;
829 if (client_v6_capable && ipl.v6_flag) {
830 AF(&addr) = AF_INET6;
831 SOCK_ADDR6(&addr) = ipl.addr6;
834 NSRCADR(&addr) = ipl.addr;
836 #ifdef ISC_PLATFORM_HAVESALEN
837 addr.sa.sa_len = SOCKLEN(&addr);
841 pp = findexistingpeer(&addr, NULL, NULL, -1, 0, NULL);
844 if (IS_IPV6(srcadr)) {
847 (MDF_BCAST == pp->cast_flags)
848 ? SOCK_ADDR6(&pp->dstadr->bcast)
849 : SOCK_ADDR6(&pp->dstadr->sin);
853 ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
858 ip->dstadr = NSRCADR(&pp->dstadr->sin);
860 if (MDF_BCAST == pp->cast_flags)
861 ip->dstadr = NSRCADR(&pp->dstadr->bcast);
862 else if (pp->cast_flags) {
863 ip->dstadr = NSRCADR(&pp->dstadr->sin);
865 ip->dstadr = NSRCADR(&pp->dstadr->bcast);
871 ip->srcadr = NSRCADR(&pp->srcadr);
872 if (client_v6_capable)
875 ip->srcport = NSRCPORT(&pp->srcadr);
878 ip->flags |= INFO_FLAG_SYSPEER;
879 if (pp->flags & FLAG_CONFIG)
880 ip->flags |= INFO_FLAG_CONFIG;
881 if (pp->flags & FLAG_REFCLOCK)
882 ip->flags |= INFO_FLAG_REFCLOCK;
883 if (pp->flags & FLAG_PREFER)
884 ip->flags |= INFO_FLAG_PREFER;
885 if (pp->flags & FLAG_BURST)
886 ip->flags |= INFO_FLAG_BURST;
887 if (pp->status == CTL_PST_SEL_SYNCCAND)
888 ip->flags |= INFO_FLAG_SEL_CANDIDATE;
889 if (pp->status >= CTL_PST_SEL_SYSPEER)
890 ip->flags |= INFO_FLAG_SHORTLIST;
892 ip->hmode = pp->hmode;
893 ip->keyid = pp->keyid;
894 ip->stratum = pp->stratum;
895 ip->ppoll = pp->ppoll;
896 ip->hpoll = pp->hpoll;
897 ip->precision = pp->precision;
898 ip->version = pp->version;
899 ip->reach = pp->reach;
900 ip->unreach = (u_char)pp->unreach;
901 ip->flash = (u_char)pp->flash;
902 ip->flash2 = (u_short)pp->flash;
903 ip->estbdelay = HTONS_FP(DTOFP(pp->delay));
904 ip->ttl = (u_char)pp->ttl;
905 ip->associd = htons(pp->associd);
906 ip->rootdelay = HTONS_FP(DTOUFP(pp->rootdelay));
907 ip->rootdispersion = HTONS_FP(DTOUFP(pp->rootdisp));
908 ip->refid = pp->refid;
909 HTONL_FP(&pp->reftime, &ip->reftime);
910 HTONL_FP(&pp->aorg, &ip->org);
911 HTONL_FP(&pp->rec, &ip->rec);
912 HTONL_FP(&pp->xmt, &ip->xmt);
913 j = pp->filter_nextpt - 1;
914 for (i = 0; i < NTP_SHIFT; i++, j--) {
917 ip->filtdelay[i] = HTONS_FP(DTOFP(pp->filter_delay[j]));
918 DTOLFP(pp->filter_offset[j], <mp);
919 HTONL_FP(<mp, &ip->filtoffset[i]);
920 ip->order[i] = (u_char)((pp->filter_nextpt +
922 pp->filter_order[i]);
923 if (ip->order[i] >= NTP_SHIFT)
924 ip->order[i] -= NTP_SHIFT;
926 DTOLFP(pp->offset, <mp);
927 HTONL_FP(<mp, &ip->offset);
928 ip->delay = HTONS_FP(DTOFP(pp->delay));
929 ip->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));
930 ip->selectdisp = HTONS_FP(DTOUFP(SQRT(pp->jitter)));
938 * peer_stats - send statistics for one or more peers
944 struct req_pkt *inpkt
950 struct info_peer_list ipl;
952 struct info_peer_stats *ip;
955 DPRINTF(1, ("peer_stats: called\n"));
956 items = INFO_NITEMS(inpkt->err_nitems);
957 item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
958 datap = inpkt->u.data;
959 if (item_sz > sizeof(ipl)) {
960 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
963 ip = prepare_pkt(srcadr, inter, inpkt,
964 v6sizeof(struct info_peer_stats));
965 while (items-- > 0 && ip != NULL) {
967 memcpy(&ipl, datap, item_sz);
969 NSRCPORT(&addr) = ipl.port;
970 if (client_v6_capable && ipl.v6_flag) {
971 AF(&addr) = AF_INET6;
972 SOCK_ADDR6(&addr) = ipl.addr6;
975 NSRCADR(&addr) = ipl.addr;
977 #ifdef ISC_PLATFORM_HAVESALEN
978 addr.sa.sa_len = SOCKLEN(&addr);
980 DPRINTF(1, ("peer_stats: looking for %s, %d, %d\n",
981 stoa(&addr), ipl.port, NSRCPORT(&addr)));
985 pp = findexistingpeer(&addr, NULL, NULL, -1, 0, NULL);
989 DPRINTF(1, ("peer_stats: found %s\n", stoa(&addr)));
991 if (IS_IPV4(&pp->srcadr)) {
994 ip->dstadr = NSRCADR(&pp->dstadr->sin);
996 if (MDF_BCAST == pp->cast_flags)
997 ip->dstadr = NSRCADR(&pp->dstadr->bcast);
998 else if (pp->cast_flags) {
999 ip->dstadr = NSRCADR(&pp->dstadr->sin);
1001 ip->dstadr = NSRCADR(&pp->dstadr->bcast);
1007 ip->srcadr = NSRCADR(&pp->srcadr);
1008 if (client_v6_capable)
1013 (MDF_BCAST == pp->cast_flags)
1014 ? SOCK_ADDR6(&pp->dstadr->bcast)
1015 : SOCK_ADDR6(&pp->dstadr->sin);
1019 ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
1022 ip->srcport = NSRCPORT(&pp->srcadr);
1025 ip->flags |= INFO_FLAG_SYSPEER;
1026 if (pp->flags & FLAG_CONFIG)
1027 ip->flags |= INFO_FLAG_CONFIG;
1028 if (pp->flags & FLAG_REFCLOCK)
1029 ip->flags |= INFO_FLAG_REFCLOCK;
1030 if (pp->flags & FLAG_PREFER)
1031 ip->flags |= INFO_FLAG_PREFER;
1032 if (pp->flags & FLAG_BURST)
1033 ip->flags |= INFO_FLAG_BURST;
1034 if (pp->flags & FLAG_IBURST)
1035 ip->flags |= INFO_FLAG_IBURST;
1036 if (pp->status == CTL_PST_SEL_SYNCCAND)
1037 ip->flags |= INFO_FLAG_SEL_CANDIDATE;
1038 if (pp->status >= CTL_PST_SEL_SYSPEER)
1039 ip->flags |= INFO_FLAG_SHORTLIST;
1040 ip->flags = htons(ip->flags);
1041 ip->timereceived = htonl((u_int32)(current_time - pp->timereceived));
1042 ip->timetosend = htonl(pp->nextdate - current_time);
1043 ip->timereachable = htonl((u_int32)(current_time - pp->timereachable));
1044 ip->sent = htonl((u_int32)(pp->sent));
1045 ip->processed = htonl((u_int32)(pp->processed));
1046 ip->badauth = htonl((u_int32)(pp->badauth));
1047 ip->bogusorg = htonl((u_int32)(pp->bogusorg));
1048 ip->oldpkt = htonl((u_int32)(pp->oldpkt));
1049 ip->seldisp = htonl((u_int32)(pp->seldisptoolarge));
1050 ip->selbroken = htonl((u_int32)(pp->selbroken));
1051 ip->candidate = pp->status;
1052 ip = (struct info_peer_stats *)more_pkt();
1059 * sys_info - return system info
1065 struct req_pkt *inpkt
1068 register struct info_sys *is;
1070 is = (struct info_sys *)prepare_pkt(srcadr, inter, inpkt,
1071 v6sizeof(struct info_sys));
/*
 * Report the current system peer's address (v4, or v6 only for
 * v6-capable clients) and its host mode.
 */
1074 if (IS_IPV4(&sys_peer->srcadr)) {
1075 is->peer = NSRCADR(&sys_peer->srcadr);
1076 if (client_v6_capable)
1078 } else if (client_v6_capable) {
1079 is->peer6 = SOCK_ADDR6(&sys_peer->srcadr);
1082 is->peer_mode = sys_peer->hmode;
1085 if (client_v6_capable) {
/* snapshot of the core system variables, multi-byte fields in
 * network byte order */
1091 is->leap = sys_leap;
1092 is->stratum = sys_stratum;
1093 is->precision = sys_precision;
1094 is->rootdelay = htonl(DTOFP(sys_rootdelay));
1095 is->rootdispersion = htonl(DTOUFP(sys_rootdisp));
1096 is->frequency = htonl(DTOFP(sys_jitter));
1097 is->stability = htonl(DTOUFP(clock_stability * 1e6)); /* scaled by 1e6 */
1098 is->refid = sys_refid;
1099 HTONL_FP(&sys_reftime, &is->reftime);
1101 is->poll = sys_poll;
/* feature flags describing this server's configuration */
1104 if (sys_authenticate)
1105 is->flags |= INFO_FLAG_AUTHENTICATE;
1107 is->flags |= INFO_FLAG_BCLIENT;
1110 is->flags |= INFO_FLAG_CAL;
1111 #endif /* REFCLOCK */
1113 is->flags |= INFO_FLAG_KERNEL;
1114 if (mon_enabled != MON_OFF)
1115 is->flags |= INFO_FLAG_MONITOR;
1117 is->flags |= INFO_FLAG_NTP;
1119 is->flags |= INFO_FLAG_PPS_SYNC;
1121 is->flags |= INFO_FLAG_FILEGEN;
1122 is->bdelay = HTONS_FP(DTOFP(sys_bdelay));
1123 HTONL_UF(sys_authdelay.l_uf, &is->authdelay);
1130 * sys_stats - return system statistics
1136 struct req_pkt *inpkt
1139 register struct info_sys_stats *ss;
/* single fixed-size item: the global packet/processing counters */
1141 ss = (struct info_sys_stats *)prepare_pkt(srcadr, inter, inpkt,
1142 sizeof(struct info_sys_stats));
1143 ss->timeup = htonl((u_int32)current_time);
1144 ss->timereset = htonl((u_int32)(current_time - sys_stattime));
1145 ss->denied = htonl((u_int32)sys_restricted);
1146 ss->oldversionpkt = htonl((u_int32)sys_oldversion);
1147 ss->newversionpkt = htonl((u_int32)sys_newversion);
1148 ss->unknownversion = htonl((u_int32)sys_declined);
1149 ss->badlength = htonl((u_int32)sys_badlength);
1150 ss->processed = htonl((u_int32)sys_processed);
1151 ss->badauth = htonl((u_int32)sys_badauth);
1152 ss->limitrejected = htonl((u_int32)sys_limitrejected);
1153 ss->received = htonl((u_int32)sys_received);
1154 ss->lamport = htonl((u_int32)sys_lamport);
1155 ss->tsrounding = htonl((u_int32)sys_tsrounding);
1162 * mem_stats - return memory statistics
1168 struct req_pkt *inpkt
1171 register struct info_mem_stats *ms;
/* single fixed-size item: peer-structure allocation statistics */
1174 ms = (struct info_mem_stats *)prepare_pkt(srcadr, inter, inpkt,
1175 sizeof(struct info_mem_stats));
1177 ms->timereset = htonl((u_int32)(current_time - peer_timereset));
1178 ms->totalpeermem = htons((u_short)total_peer_structs);
1179 ms->freepeermem = htons((u_short)peer_free_count);
1180 ms->findpeer_calls = htonl((u_int32)findpeer_calls);
1181 ms->allocations = htonl((u_int32)peer_allocations);
1182 ms->demobilizations = htonl((u_int32)peer_demobilizations);
/* NOTE(review): max() here makes every reported hashcount at least
 * UCHAR_MAX, and larger counts then truncate in the u_char cast; a
 * saturating clamp would be min((u_int)peer_hash_count[i], UCHAR_MAX).
 * Confirm intent against the pristine source before changing. */
1184 for (i = 0; i < NTP_HASH_SIZE; i++)
1185 ms->hashcount[i] = (u_char)
1186 max((u_int)peer_hash_count[i], UCHAR_MAX);
1194 * io_stats - return io statistics
1200 struct req_pkt *inpkt
1203 struct info_io_stats *io;
/* single fixed-size item: receive-buffer pool state and I/O counters */
1205 io = (struct info_io_stats *)prepare_pkt(srcadr, inter, inpkt,
1206 sizeof(struct info_io_stats));
1208 io->timereset = htonl((u_int32)(current_time - io_timereset));
1209 io->totalrecvbufs = htons((u_short) total_recvbuffs());
1210 io->freerecvbufs = htons((u_short) free_recvbuffs());
1211 io->fullrecvbufs = htons((u_short) full_recvbuffs());
1212 io->lowwater = htons((u_short) lowater_additions());
1213 io->dropped = htonl((u_int32)packets_dropped);
1214 io->ignored = htonl((u_int32)packets_ignored);
1215 io->received = htonl((u_int32)packets_received);
1216 io->sent = htonl((u_int32)packets_sent);
1217 io->notsent = htonl((u_int32)packets_notsent);
1218 io->interrupts = htonl((u_int32)handler_calls);
1219 io->int_received = htonl((u_int32)handler_pkts);
1227 * timer_stats - return timer statistics
1231 sockaddr_u * srcadr,
1233 struct req_pkt * inpkt
1236 struct info_timer_stats * ts;
/* single fixed-size item: event-timer counters since last reset */
1239 ts = (struct info_timer_stats *)prepare_pkt(srcadr, inter,
1240 inpkt, sizeof(*ts));
1242 sincereset = current_time - timer_timereset;
1243 ts->timereset = htonl((u_int32)sincereset);
/* alarms is reported identical to timereset -- presumably one alarm
 * tick per elapsed second since the reset; confirm upstream */
1244 ts->alarms = ts->timereset;
1245 ts->overflows = htonl((u_int32)alarm_overflow);
1246 ts->xmtcalls = htonl((u_int32)timer_xmtcalls);
1254 * loop_info - return the current state of the loop filter
1260 struct req_pkt *inpkt
1263 struct info_loop *li;
1266 li = (struct info_loop *)prepare_pkt(srcadr, inter, inpkt,
1267 sizeof(struct info_loop));
/* NOTE(review): "<mp" in the four lines below looks like an
 * HTML-mangled "&ltmp" (address of an l_fp temporary) in this
 * listing -- verify against the pristine source before building. */
1269 DTOLFP(last_offset, <mp);
1270 HTONL_FP(<mp, &li->last_offset);
1271 DTOLFP(drift_comp * 1e6, <mp);
1272 HTONL_FP(<mp, &li->drift_comp);
1273 li->compliance = htonl((u_int32)(tc_counter));
1274 li->watchdog_timer = htonl((u_int32)(current_time - sys_epoch));
/*
 * do_conf - add one or more peers to the configuration from a mode-7
 * request.  Each item is copied into a bounds-checked local conf_peer,
 * translated to a sockaddr (v4 or v6), validated, and handed to
 * peer_config().  Replies INFO_ERR_FMT on malformed input, otherwise
 * INFO_OKAY / INFO_ERR_NODATA per peer_config result.
 * NOTE(review): fragmentary listing; several lines (flag assignments,
 * else-arms, closing braces) are elided.
 */
1282 * do_conf - add a peer to the configuration list
1288 struct req_pkt *inpkt
1295 struct conf_peer temp_cp;
1296 sockaddr_u peeraddr;
1299 * Do a check of everything to see that it looks
1300 * okay. If not, complain about it. Note we are
1303 items = INFO_NITEMS(inpkt->err_nitems);
1304 item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1305 datap = inpkt->u.data;
/* reject items larger than our local struct before any memcpy */
1306 if (item_sz > sizeof(temp_cp)) {
1307 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1311 while (items-- > 0) {
1313 memcpy(&temp_cp, datap, item_sz);
1314 ZERO_SOCK(&peeraddr);
1317 if (temp_cp.flags & CONF_FLAG_PREFER)
1319 if (temp_cp.flags & CONF_FLAG_BURST)
1321 if (temp_cp.flags & CONF_FLAG_IBURST)
1324 if (temp_cp.flags & CONF_FLAG_SKEY)
1326 #endif /* AUTOKEY */
1327 if (client_v6_capable && temp_cp.v6_flag) {
1328 AF(&peeraddr) = AF_INET6;
1329 SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
1331 AF(&peeraddr) = AF_INET;
1332 NSRCADR(&peeraddr) = temp_cp.peeraddr;
1334 * Make sure the address is valid
1336 if (!ISREFCLOCKADR(&peeraddr) &&
1337 ISBADADR(&peeraddr)) {
1338 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1343 NSRCPORT(&peeraddr) = htons(NTP_PORT);
1344 #ifdef ISC_PLATFORM_HAVESALEN
1345 peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1348 /* check mode value: 0 <= hmode <= 6
1350 * There's no good global define for that limit, and
1351 * using a magic define is as good (or bad, actually) as
1352 * a magic number. So we use the highest possible peer
1353 * mode, and that is MODE_BCLIENT.
1355 * [Bug 3009] claims that a problem occurs for hmode > 7,
1356 * but the code in ntp_peer.c indicates trouble for any
1357 * hmode > 6 ( --> MODE_BCLIENT).
1359 if (temp_cp.hmode > MODE_BCLIENT) {
1360 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1364 /* Any more checks on the values? Unchecked at this
1370 * - minpoll/maxpoll, but they are treated properly
1371 * for all cases internally. Checking not necessary.
1373 * Note that we ignore any previously-specified ippeerlimit.
1374 * If we're told to create the peer, we create the peer.
1377 /* finally create the peer */
1378 if (peer_config(&peeraddr, NULL, NULL, -1,
1379 temp_cp.hmode, temp_cp.version, temp_cp.minpoll,
1380 temp_cp.maxpoll, fl, temp_cp.ttl, temp_cp.keyid,
1383 req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
1389 req_ack(srcadr, inter, inpkt, INFO_OKAY);
/*
 * do_unconf - remove configured peers named in a mode-7 request.
 * Runs the item list twice: a dry run that verifies every requested
 * peer exists and is FLAG_CONFIG, then a live run that unpeers them.
 * The two-pass reload of items/datap guards against request replay
 * tricks (see Talos-CAN-0052 note in reset_peer).
 * NOTE(review): fragmentary listing; else-arms and braces elided.
 */
1394 * do_unconf - remove a peer from the configuration list
1398 sockaddr_u * srcadr,
1400 struct req_pkt *inpkt
1406 struct conf_unpeer temp_cp;
1408 sockaddr_u peeraddr;
1412 * This is a bit unstructured, but I like to be careful.
1413 * We check to see that every peer exists and is actually
1414 * configured. If so, we remove them. If not, we return
1417 * [Bug 3011] Even if we checked all peers given in the request
1418 * in a dry run, there's still a chance that the caller played
1419 * unfair and gave the same peer multiple times. So we still
1420 * have to be prepared for nasty surprises in the second run ;)
1423 /* basic consistency checks */
1424 item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1425 if (item_sz > sizeof(temp_cp)) {
1426 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1430 /* now do two runs: first a dry run, then a busy one */
1431 for (loops = 0; loops != 2; ++loops) {
1432 items = INFO_NITEMS(inpkt->err_nitems);
1433 datap = inpkt->u.data;
1434 while (items-- > 0) {
1435 /* copy from request to local */
1437 memcpy(&temp_cp, datap, item_sz);
1438 /* get address structure */
1439 ZERO_SOCK(&peeraddr);
1440 if (client_v6_capable && temp_cp.v6_flag) {
1441 AF(&peeraddr) = AF_INET6;
1442 SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
1444 AF(&peeraddr) = AF_INET;
1445 NSRCADR(&peeraddr) = temp_cp.peeraddr;
1447 SET_PORT(&peeraddr, NTP_PORT);
1448 #ifdef ISC_PLATFORM_HAVESALEN
1449 peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1451 DPRINTF(1, ("searching for %s\n",
1454 /* search for matching configred(!) peer */
1457 p = findexistingpeer(
1458 &peeraddr, NULL, p, -1, 0, NULL);
1459 } while (p && !(FLAG_CONFIG & p->flags));
1462 /* Item not found in dry run -- bail! */
1463 req_ack(srcadr, inter, inpkt,
1466 } else if (loops && p) {
1467 /* Item found in busy run -- remove! */
1468 peer_clear(p, "GONE");
1475 /* report success */
1476 req_ack(srcadr, inter, inpkt, INFO_OKAY);
/*
 * set_sys_flag - thin wrapper: set the requested system flags
 * (delegates to setclr_flags with set = 1).
 */
1481 * set_sys_flag - set system flags
1487 struct req_pkt *inpkt
1490 setclr_flags(srcadr, inter, inpkt, 1);
/*
 * clr_sys_flag - thin wrapper: clear the requested system flags
 * (delegates to setclr_flags with set = 0).
 */
1495 * clr_sys_flag - clear system flags
1501 struct req_pkt *inpkt
1504 setclr_flags(srcadr, inter, inpkt, 0);
/*
 * setclr_flags - common worker for set_sys_flag/clr_sys_flag.
 * Validates that exactly one item was sent and that only known
 * SYS_FLAG_* bits are present, then applies each requested flag via
 * proto_config() with the given set/clear value.
 * NOTE(review): fragmentary listing; declarations and braces elided.
 */
1509 * setclr_flags - do the grunge work of flag setting/clearing
1515 struct req_pkt *inpkt,
1519 struct conf_sys_flags *sf;
1522 if (INFO_NITEMS(inpkt->err_nitems) > 1) {
1523 msyslog(LOG_ERR, "setclr_flags: err_nitems > 1");
1524 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1528 sf = (struct conf_sys_flags *)&inpkt->u;
1529 flags = ntohl(sf->flags);
/* reject any bits outside the supported flag mask */
1531 if (flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
1532 SYS_FLAG_NTP | SYS_FLAG_KERNEL | SYS_FLAG_MONITOR |
1533 SYS_FLAG_FILEGEN | SYS_FLAG_AUTH | SYS_FLAG_CAL)) {
1534 msyslog(LOG_ERR, "setclr_flags: extra flags: %#x",
1535 flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
1536 SYS_FLAG_NTP | SYS_FLAG_KERNEL |
1537 SYS_FLAG_MONITOR | SYS_FLAG_FILEGEN |
1538 SYS_FLAG_AUTH | SYS_FLAG_CAL));
1539 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1543 if (flags & SYS_FLAG_BCLIENT)
1544 proto_config(PROTO_BROADCLIENT, set, 0., NULL);
1545 if (flags & SYS_FLAG_PPS)
1546 proto_config(PROTO_PPS, set, 0., NULL);
1547 if (flags & SYS_FLAG_NTP)
1548 proto_config(PROTO_NTP, set, 0., NULL);
1549 if (flags & SYS_FLAG_KERNEL)
1550 proto_config(PROTO_KERNEL, set, 0., NULL);
1551 if (flags & SYS_FLAG_MONITOR)
1552 proto_config(PROTO_MONITOR, set, 0., NULL);
1553 if (flags & SYS_FLAG_FILEGEN)
1554 proto_config(PROTO_FILEGEN, set, 0., NULL);
1555 if (flags & SYS_FLAG_AUTH)
1556 proto_config(PROTO_AUTHENTICATE, set, 0., NULL);
1557 if (flags & SYS_FLAG_CAL)
1558 proto_config(PROTO_CAL, set, 0., NULL);
1559 req_ack(srcadr, inter, inpkt, INFO_OKAY);
1562 /* There have been some issues with the restrict list processing,
1563 * ranging from problems with deep recursion (resulting in stack
1564 * overflows) and overfull reply buffers.
1566 * To avoid this trouble the list reversal is done iteratively using a
/*
 * RestrictStack - a linked list of fixed-size "sheets" of restrict_u
 * pointers, used as a scratch pad to reverse restriction lists
 * iteratively (avoids the deep recursion mentioned above).
 * getStackSheetSize() returns the per-sheet capacity (63 slots).
 */
1569 typedef struct RestrictStack RestrictStackT;
1570 struct RestrictStack {
1571 RestrictStackT *link; /* next sheet in the pad */
1573 const restrict_u *pres[63]; /* pointer slots, filled top-down */
1582 return sizeof(sp->pres)/sizeof(sp->pres[0]);
/*
 * pushRestriction - push one restriction-entry pointer onto the scratch
 * pad, allocating a fresh sheet when the current one is full (fcnt
 * counts remaining free slots; slots are filled from the top down).
 * NOTE(review): fragmentary listing; linking of the new sheet and the
 * return statement are elided.
 */
1588 RestrictStackT **spp,
1589 const restrict_u *ptr
1594 if (NULL == (sp = *spp) || 0 == sp->fcnt) {
1595 /* need another sheet in the scratch pad */
1596 sp = emalloc(sizeof(*sp));
1598 sp->fcnt = getStackSheetSize(sp);
1601 sp->pres[--sp->fcnt] = ptr;
/*
 * popRestriction - pop the next restriction pointer from the scratch
 * pad into *opp; frees a sheet once it has been drained.  Returns
 * false (via the elided early return) when the pad is empty.
 * NOTE(review): fragmentary listing; return statements elided.
 */
1607 RestrictStackT **spp,
1608 const restrict_u **opp
1613 if (NULL == (sp = *spp) || sp->fcnt >= getStackSheetSize(sp))
1616 *opp = sp->pres[sp->fcnt++];
1617 if (sp->fcnt >= getStackSheetSize(sp)) {
1618 /* discard sheet from scratch pad */
/*
 * flushRestrictionStack - release all remaining sheets of the scratch
 * pad (used on the normal path and when a reply buffer runs short).
 */
1626 flushRestrictionStack(
1627 RestrictStackT **spp
1632 while (NULL != (sp = *spp)) {
/*
 * list_restrict4 - push the IPv4 restriction list onto the scratch pad,
 * then pop it to emit info_restrict records in reverse (original) order.
 * Stops when more_pkt() can no longer supply reply space.
 */
1639 * list_restrict4 - iterative helper for list_restrict dumps IPv4
1640 * restriction list in reverse order.
1644 const restrict_u * res,
1645 struct info_restrict ** ppir
1648 RestrictStackT * rpad;
1649 struct info_restrict * pir;
1652 for (rpad = NULL; res; res = res->link)
1653 if (!pushRestriction(&rpad, res))
1656 while (pir && popRestriction(&rpad, &res)) {
1657 pir->addr = htonl(res->u.v4.addr);
1658 if (client_v6_capable)
1660 pir->mask = htonl(res->u.v4.mask);
1661 pir->count = htonl(res->count);
1662 pir->rflags = htons(res->rflags);
1663 pir->mflags = htons(res->mflags);
1664 pir = (struct info_restrict *)more_pkt();
1666 flushRestrictionStack(&rpad);
/*
 * list_restrict6 - IPv6 twin of list_restrict4: reverse-dump the v6
 * restriction list via the scratch pad.  v6 addresses/masks are copied
 * as raw in6_addr structs (already in network order).
 */
1671 * list_restrict6 - iterative helper for list_restrict dumps IPv6
1672 * restriction list in reverse order.
1676 const restrict_u * res,
1677 struct info_restrict ** ppir
1680 RestrictStackT * rpad;
1681 struct info_restrict * pir;
1684 for (rpad = NULL; res; res = res->link)
1685 if (!pushRestriction(&rpad, res))
1688 while (pir && popRestriction(&rpad, &res)) {
1689 pir->addr6 = res->u.v6.addr;
1690 pir->mask6 = res->u.v6.mask;
1692 pir->count = htonl(res->count);
1693 pir->rflags = htons(res->rflags);
1694 pir->mflags = htons(res->mflags);
1695 pir = (struct info_restrict *)more_pkt();
1697 flushRestrictionStack(&rpad);
/*
 * list_restrict - reply with the full restriction list: IPv4 always,
 * IPv6 only when the client negotiated v6-capable (longer) records.
 */
1703 * list_restrict - return the restrict list
1709 struct req_pkt *inpkt
1712 struct info_restrict *ir;
1714 DPRINTF(3, ("wants restrict list summary\n"));
1716 ir = (struct info_restrict *)prepare_pkt(srcadr, inter, inpkt,
1717 v6sizeof(struct info_restrict));
1720 * The restriction lists are kept sorted in the reverse order
1721 * than they were originally. To preserve the output semantics,
1722 * dump each list in reverse order. The workers take care of that.
1724 list_restrict4(restrictlist4, &ir);
1725 if (client_v6_capable)
1726 list_restrict6(restrictlist6, &ir);
/*
 * do_resaddflags - wrapper: add flags to (or create) a restrict entry
 * via do_restrict(RESTRICT_FLAGS).
 */
1732 * do_resaddflags - add flags to a restrict entry (or create one)
1738 struct req_pkt *inpkt
1741 do_restrict(srcadr, inter, inpkt, RESTRICT_FLAGS);
/*
 * do_ressubflags - wrapper: remove flags from a restrict entry via
 * do_restrict(RESTRICT_UNFLAG).
 */
1747 * do_ressubflags - remove flags from a restrict entry
1753 struct req_pkt *inpkt
1756 do_restrict(srcadr, inter, inpkt, RESTRICT_UNFLAG);
/*
 * do_unrestrict - wrapper: delete a restrict entry via
 * do_restrict(RESTRICT_REMOVE).
 */
1761 * do_unrestrict - remove a restrict entry from the list
1767 struct req_pkt *inpkt
1770 do_restrict(srcadr, inter, inpkt, RESTRICT_REMOVE);
/*
 * do_restrict - common worker for the restrict add/sub/remove wrappers.
 * Pass 1 validates every item (op code, item size, flag bits, non-empty
 * address when a mask is given); pass 2 reloads the data pointer and
 * item count (Talos-CAN-0052) and applies each entry via hack_restrict().
 * NOTE(review): fragmentary listing; else-arms, "bad" bookkeeping and
 * braces are elided.
 */
1775 * do_restrict - do the dirty stuff of dealing with restrictions
1781 struct req_pkt *inpkt,
1786 struct conf_restrict cr;
1789 sockaddr_u matchaddr;
1790 sockaddr_u matchmask;
/* only these op codes are legal; anything else is a format error */
1794 case RESTRICT_FLAGS:
1795 case RESTRICT_UNFLAG:
1796 case RESTRICT_REMOVE:
1797 case RESTRICT_REMOVEIF:
1801 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1806 * Do a check of the flags to make sure that only
1807 * the NTPPORT flag is set, if any. If not, complain
1808 * about it. Note we are very picky here.
1810 items = INFO_NITEMS(inpkt->err_nitems);
1811 item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1812 datap = inpkt->u.data;
1813 if (item_sz > sizeof(cr)) {
1814 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1819 while (items-- > 0 && !bad) {
1820 memcpy(&cr, datap, item_sz);
1821 cr.flags = ntohs(cr.flags);
1822 cr.mflags = ntohs(cr.mflags);
1823 if (~RESM_NTPONLY & cr.mflags)
1825 if (~RES_ALLFLAGS & cr.flags)
1827 if (INADDR_ANY != cr.mask) {
1828 if (client_v6_capable && cr.v6_flag) {
1829 if (IN6_IS_ADDR_UNSPECIFIED(&cr.addr6))
1832 if (INADDR_ANY == cr.addr)
1840 msyslog(LOG_ERR, "do_restrict: bad = %#x", bad);
1841 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1846 * Looks okay, try it out. Needs to reload data pointer and
1847 * item counter. (Talos-CAN-0052)
1849 ZERO_SOCK(&matchaddr);
1850 ZERO_SOCK(&matchmask);
1851 items = INFO_NITEMS(inpkt->err_nitems);
1852 datap = inpkt->u.data;
1854 while (items-- > 0) {
1855 memcpy(&cr, datap, item_sz);
1856 cr.flags = ntohs(cr.flags);
1857 cr.mflags = ntohs(cr.mflags);
1858 cr.ippeerlimit = ntohs(cr.ippeerlimit);
1859 if (client_v6_capable && cr.v6_flag) {
1860 AF(&matchaddr) = AF_INET6;
1861 AF(&matchmask) = AF_INET6;
1862 SOCK_ADDR6(&matchaddr) = cr.addr6;
1863 SOCK_ADDR6(&matchmask) = cr.mask6;
1865 AF(&matchaddr) = AF_INET;
1866 AF(&matchmask) = AF_INET;
1867 NSRCADR(&matchaddr) = cr.addr;
1868 NSRCADR(&matchmask) = cr.mask;
1870 hack_restrict(op, &matchaddr, &matchmask, cr.mflags,
1871 cr.ippeerlimit, cr.flags, 0);
1875 req_ack(srcadr, inter, inpkt, INFO_OKAY);
/*
 * mon_getlist - monitor-list query; this build always answers
 * INFO_ERR_NODATA (no monitor data is returned here).
 */
1880 * mon_getlist - return monitor data
1886 struct req_pkt *inpkt
1889 req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
/*
 * reset_entries - table mapping RESET_FLAG_* bits to the module
 * reset routine each one triggers; scanned by reset_stats() below.
 */
1894 * Module entry points and the flags they correspond with
1896 struct reset_entry {
1897 int flag; /* flag this corresponds to */
1898 void (*handler)(void); /* routine to handle request */
1901 struct reset_entry reset_entries[] = {
1902 { RESET_FLAG_ALLPEERS, peer_all_reset },
1903 { RESET_FLAG_IO, io_clr_stats },
1904 { RESET_FLAG_SYS, proto_clr_stats },
1905 { RESET_FLAG_MEM, peer_clr_stats },
1906 { RESET_FLAG_TIMER, timer_clr_stats },
1907 { RESET_FLAG_AUTH, reset_auth_stats },
1908 { RESET_FLAG_CTL, ctl_clr_stats },
/*
 * reset_stats - clear the statistics counters selected by the request's
 * RESET_FLAG_* bitmask.  Rejects multi-item requests and unknown bits,
 * then walks reset_entries[] invoking the matching handlers.
 * NOTE(review): fragmentary listing; the handler call inside the loop
 * is elided.
 */
1913 * reset_stats - reset statistic counters here and there
1919 struct req_pkt *inpkt
1922 struct reset_flags *rflags;
1924 struct reset_entry *rent;
1926 if (INFO_NITEMS(inpkt->err_nitems) > 1) {
1927 msyslog(LOG_ERR, "reset_stats: err_nitems > 1");
1928 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1932 rflags = (struct reset_flags *)&inpkt->u;
1933 flags = ntohl(rflags->flags);
1935 if (flags & ~RESET_ALLFLAGS) {
1936 msyslog(LOG_ERR, "reset_stats: reset leaves %#lx",
1937 flags & ~RESET_ALLFLAGS);
1938 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1942 for (rent = reset_entries; rent->flag != 0; rent++) {
1943 if (flags & rent->flag)
1946 req_ack(srcadr, inter, inpkt, INFO_OKAY);
/*
 * reset_peer - clear statistics for the peers named in the request.
 * Pass 1 verifies every named peer exists (bails with INFO_ERR_NODATA
 * otherwise); pass 2 reloads the item count and data pointer
 * (Talos-CAN-0052) and resets each matching peer.
 * Fix: the second pass used a magic port number 123; use NTP_PORT like
 * every other address-building site in this file (do_conf, do_unconf,
 * get_clock_info, ...).
 * NOTE(review): fragmentary listing; else-arms, SET_PORT in pass 1 and
 * the reset call itself are elided here.
 */
1951 * reset_peer - clear a peer's statistics
1957 struct req_pkt *inpkt
1963 struct conf_unpeer cp;
1965 sockaddr_u peeraddr;
1969 * We check first to see that every peer exists. If not,
1970 * we return an error.
1973 items = INFO_NITEMS(inpkt->err_nitems);
1974 item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1975 datap = inpkt->u.data;
1976 if (item_sz > sizeof(cp)) {
1977 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1982 while (items-- > 0 && !bad) {
1984 memcpy(&cp, datap, item_sz);
1985 ZERO_SOCK(&peeraddr);
1986 if (client_v6_capable && cp.v6_flag) {
1987 AF(&peeraddr) = AF_INET6;
1988 SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
1990 AF(&peeraddr) = AF_INET;
1991 NSRCADR(&peeraddr) = cp.peeraddr;
1994 #ifdef ISC_PLATFORM_HAVESALEN
1995 peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1997 p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0, NULL);
2004 req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2009 * Now do it in earnest. Needs to reload data pointer and item
2010 * counter. (Talos-CAN-0052)
2013 items = INFO_NITEMS(inpkt->err_nitems);
2014 datap = inpkt->u.data;
2015 while (items-- > 0) {
2017 memcpy(&cp, datap, item_sz);
2018 ZERO_SOCK(&peeraddr);
2019 if (client_v6_capable && cp.v6_flag) {
2020 AF(&peeraddr) = AF_INET6;
2021 SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
2023 AF(&peeraddr) = AF_INET;
2024 NSRCADR(&peeraddr) = cp.peeraddr;
2026 SET_PORT(&peeraddr, NTP_PORT); /* was magic 123; keep consistent with rest of file */
2027 #ifdef ISC_PLATFORM_HAVESALEN
2028 peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
2030 p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0, NULL);
2033 p = findexistingpeer(&peeraddr, NULL, p, -1, 0, NULL);
2038 req_ack(srcadr, inter, inpkt, INFO_OKAY);
/*
 * do_key_reread - trigger a reread of the key file and acknowledge.
 * NOTE(review): the actual reread call is elided in this listing;
 * only the INFO_OKAY ack is visible.
 */
2043 * do_key_reread - reread the encryption key file
2049 struct req_pkt *inpkt
2053 req_ack(srcadr, inter, inpkt, INFO_OKAY);
/*
 * trust_key - wrapper: mark requested keys trusted
 * (do_trustkey with trust = 1).
 */
2058 * trust_key - make one or more keys trusted
2064 struct req_pkt *inpkt
2067 do_trustkey(srcadr, inter, inpkt, 1);
/*
 * untrust_key - wrapper: mark requested keys untrusted
 * (do_trustkey with trust = 0).
 */
2072 * untrust_key - make one or more keys untrusted
2078 struct req_pkt *inpkt
2081 do_trustkey(srcadr, inter, inpkt, 0);
/*
 * do_trustkey - apply authtrust(key, trust) to every key id in the
 * request payload, then acknowledge.
 * NOTE(review): the kp increment inside the loop is elided here.
 */
2086 * do_trustkey - make keys either trustable or untrustable
2092 struct req_pkt *inpkt,
2096 register uint32_t *kp;
2099 items = INFO_NITEMS(inpkt->err_nitems);
2100 kp = (uint32_t *)&inpkt->u;
2101 while (items-- > 0) {
2102 authtrust(*kp, trust);
2106 req_ack(srcadr, inter, inpkt, INFO_OKAY);
/*
 * get_auth_info - fill an info_auth record with the authentication
 * module's counters (key counts, lookups, crypto ops, cache misses,
 * expiries) plus seconds since the last reset, in network order.
 */
2111 * get_auth_info - return some stats concerning the authentication module
2117 struct req_pkt *inpkt
2120 register struct info_auth *ia;
2122 ia = (struct info_auth *)prepare_pkt(srcadr, inter, inpkt,
2123 sizeof(struct info_auth));
2125 ia->numkeys = htonl((u_int32)authnumkeys);
2126 ia->numfreekeys = htonl((u_int32)authnumfreekeys);
2127 ia->keylookups = htonl((u_int32)authkeylookups);
2128 ia->keynotfound = htonl((u_int32)authkeynotfound);
2129 ia->encryptions = htonl((u_int32)authencryptions);
2130 ia->decryptions = htonl((u_int32)authdecryptions);
2131 ia->keyuncached = htonl((u_int32)authkeyuncached);
2132 ia->expired = htonl((u_int32)authkeyexpired);
2133 ia->timereset = htonl((u_int32)(current_time - auth_timereset));
/*
 * reset_auth_stats - zero the auth counters and restamp the reset
 * epoch.  Lives here (not in the auth module) to keep ntpd-specific
 * globals like current_time out of libntp.
 * NOTE(review): some counter resets (e.g. authkeylookups) appear to be
 * elided in this listing.
 */
2142 * reset_auth_stats - reset the authentication stat counters. Done here
2143 * to keep ntp-isms out of the authentication module
2146 reset_auth_stats(void)
2149 authkeynotfound = 0;
2150 authencryptions = 0;
2151 authdecryptions = 0;
2152 authkeyuncached = 0;
2153 auth_timereset = current_time;
/*
 * req_get_traps - reply with one info_trap record per in-use trap slot
 * (addresses in v4 or v6 form depending on the negotiated record size,
 * plus sequence/timing/reset counters).  INFO_ERR_NODATA when no traps
 * are registered.
 * NOTE(review): fragmentary listing; the IPv6 else-branch and loop
 * braces are elided.
 */
2158 * req_get_traps - return information about current trap holders
2164 struct req_pkt *inpkt
2167 struct info_trap *it;
2168 struct ctl_trap *tr;
2171 if (num_ctl_traps == 0) {
2172 req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2176 it = (struct info_trap *)prepare_pkt(srcadr, inter, inpkt,
2177 v6sizeof(struct info_trap));
2179 for (i = 0, tr = ctl_traps; it && i < COUNTOF(ctl_traps); i++, tr++) {
2180 if (tr->tr_flags & TRAP_INUSE) {
2181 if (IS_IPV4(&tr->tr_addr)) {
/* wildcard local interface reported as address 0 */
2182 if (tr->tr_localaddr == any_interface)
2183 it->local_address = 0;
2186 = NSRCADR(&tr->tr_localaddr->sin);
2187 it->trap_address = NSRCADR(&tr->tr_addr);
2188 if (client_v6_capable)
2191 if (!client_v6_capable)
2194 = SOCK_ADDR6(&tr->tr_localaddr->sin);
2195 it->trap_address6 = SOCK_ADDR6(&tr->tr_addr);
2198 it->trap_port = NSRCPORT(&tr->tr_addr);
2199 it->sequence = htons(tr->tr_sequence);
2200 it->settime = htonl((u_int32)(current_time - tr->tr_settime));
2201 it->origtime = htonl((u_int32)(current_time - tr->tr_origtime));
2202 it->resets = htonl((u_int32)tr->tr_resets);
2203 it->flags = htonl((u_int32)tr->tr_flags);
2204 it = (struct info_trap *)more_pkt();
/*
 * req_set_trap - wrapper: install a trap (do_setclr_trap with set = 1).
 */
2212 * req_set_trap - configure a trap
2218 struct req_pkt *inpkt
2221 do_setclr_trap(srcadr, inter, inpkt, 1);
/*
 * req_clr_trap - wrapper: remove a trap (do_setclr_trap with set = 0).
 */
2227 * req_clr_trap - unconfigure a trap
2233 struct req_pkt *inpkt
2236 do_setclr_trap(srcadr, inter, inpkt, 0);
/*
 * do_setclr_trap - worker for req_set_trap/req_clr_trap.  Accepts a
 * single conf_trap item, resolves the local interface (default when the
 * request's local address is 0), builds the trap address (defaulting
 * the port to TRAPPORT), then calls ctlsettrap()/ctlclrtrap() and acks
 * the outcome.
 * NOTE(review): fragmentary listing; the set/clr branch structure and
 * several braces are elided.
 */
2242 * do_setclr_trap - do the grunge work of (un)configuring a trap
2248 struct req_pkt *inpkt,
2252 register struct conf_trap *ct;
2253 register endpt *linter;
2261 AF(&laddr) = AF(srcadr);
2262 SET_PORT(&laddr, NTP_PORT);
2265 * Restrict ourselves to one item only. This eliminates
2266 * the error reporting problem.
2268 if (INFO_NITEMS(inpkt->err_nitems) > 1) {
2269 msyslog(LOG_ERR, "do_setclr_trap: err_nitems > 1");
2270 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2273 ct = (struct conf_trap *)&inpkt->u;
2276 * Look for the local interface. If none, use the default.
2278 if (ct->local_address == 0) {
2279 linter = any_interface;
2281 if (IS_IPV4(&laddr))
2282 NSRCADR(&laddr) = ct->local_address;
2284 SOCK_ADDR6(&laddr) = ct->local_address6;
2285 linter = findinterface(&laddr);
2286 if (NULL == linter) {
2287 req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2292 if (IS_IPV4(&laddr))
2293 NSRCADR(&laddr) = ct->trap_address;
2295 SOCK_ADDR6(&laddr) = ct->trap_address6;
2297 NSRCPORT(&laddr) = ct->trap_port;
2299 SET_PORT(&laddr, TRAPPORT); /* default trap port when none given */
2302 res = ctlsettrap(&laddr, linter, 0,
2303 INFO_VERSION(inpkt->rm_vn_mode));
2305 res = ctlclrtrap(&laddr, linter, 0);
2309 req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2311 req_ack(srcadr, inter, inpkt, INFO_OKAY);
/*
 * set_keyid_checked - validate and install a new request/control key id
 * from a single-item request: the id must be 1..NTP_MAXKEY (i.e. not in
 * the autokey range) and already known to auth_havekey().  On success
 * the id is stored (store elided in this listing) and INFO_OKAY sent.
 */
2317 * Validate a request packet for a new request or control key:
2318 * - only one item allowed
2319 * - key must be valid (that is, known, and not in the autokey range)
2327 struct req_pkt *inpkt
2333 /* restrict ourselves to one item only */
2334 if (INFO_NITEMS(inpkt->err_nitems) > 1) {
2335 msyslog(LOG_ERR, "set_keyid_checked[%s]: err_nitems > 1",
2337 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2341 /* plug the new key from the packet */
2342 pkeyid = (keyid_t *)&inpkt->u;
2343 tmpkey = ntohl(*pkeyid);
2345 /* validate the new key id, claim data error on failure */
2346 if (tmpkey < 1 || tmpkey > NTP_MAXKEY || !auth_havekey(tmpkey)) {
2347 msyslog(LOG_ERR, "set_keyid_checked[%s]: invalid key id: %ld",
2348 what, (long)tmpkey);
2349 req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2353 /* if we arrive here, the key is good -- use it */
2355 req_ack(srcadr, inter, inpkt, INFO_OKAY);
/*
 * set_request_keyid - wrapper: install a new mode-7 request key via
 * set_keyid_checked(&info_auth_keyid, ...).
 */
2359 * set_request_keyid - set the keyid used to authenticate requests
2365 struct req_pkt *inpkt
2368 set_keyid_checked(&info_auth_keyid, "request",
2369 srcadr, inter, inpkt);
/*
 * set_control_keyid - wrapper: install a new mode-6 control key via
 * set_keyid_checked(&ctl_auth_keyid, ...).
 */
2375 * set_control_keyid - set the keyid used to authenticate requests
2381 struct req_pkt *inpkt
2384 set_keyid_checked(&ctl_auth_keyid, "control",
2385 srcadr, inter, inpkt);
/*
 * get_ctl_stats - fill an info_control record with the control-message
 * module's request/response/error counters, all in network byte order.
 */
2391 * get_ctl_stats - return some stats concerning the control message module
2397 struct req_pkt *inpkt
2400 register struct info_control *ic;
2402 ic = (struct info_control *)prepare_pkt(srcadr, inter, inpkt,
2403 sizeof(struct info_control));
2405 ic->ctltimereset = htonl((u_int32)(current_time - ctltimereset));
2406 ic->numctlreq = htonl((u_int32)numctlreq);
2407 ic->numctlbadpkts = htonl((u_int32)numctlbadpkts);
2408 ic->numctlresponses = htonl((u_int32)numctlresponses);
2409 ic->numctlfrags = htonl((u_int32)numctlfrags);
2410 ic->numctlerrors = htonl((u_int32)numctlerrors);
2411 ic->numctltooshort = htonl((u_int32)numctltooshort);
2412 ic->numctlinputresp = htonl((u_int32)numctlinputresp);
2413 ic->numctlinputfrag = htonl((u_int32)numctlinputfrag);
2414 ic->numctlinputerr = htonl((u_int32)numctlinputerr);
2415 ic->numctlbadoffset = htonl((u_int32)numctlbadoffset);
2416 ic->numctlbadversion = htonl((u_int32)numctlbadversion);
2417 ic->numctldatatooshort = htonl((u_int32)numctldatatooshort);
2418 ic->numctlbadop = htonl((u_int32)numctlbadop);
2419 ic->numasyncmsgs = htonl((u_int32)numasyncmsgs);
/*
 * get_kernel_info - query the kernel PLL/PPS discipline via
 * ntp_adjtime() and copy the timex fields into an info_kernel record.
 * Answers INFO_ERR_NODATA when the kernel discipline is unavailable
 * (the guarding condition is elided in this listing).  KERNEL_PLL only.
 */
2428 * get_kernel_info - get kernel pll/pps information
2434 struct req_pkt *inpkt
2437 register struct info_kernel *ik;
2441 req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
/* read-only query; a failure is logged but the (stale) ntx is still reported */
2446 if (ntp_adjtime(&ntx) < 0)
2447 msyslog(LOG_ERR, "get_kernel_info: ntp_adjtime() failed: %m");
2448 ik = (struct info_kernel *)prepare_pkt(srcadr, inter, inpkt,
2449 sizeof(struct info_kernel));
2454 ik->offset = htonl((u_int32)ntx.offset);
2455 ik->freq = htonl((u_int32)ntx.freq);
2456 ik->maxerror = htonl((u_int32)ntx.maxerror);
2457 ik->esterror = htonl((u_int32)ntx.esterror);
2458 ik->status = htons(ntx.status);
2459 ik->constant = htonl((u_int32)ntx.constant);
2460 ik->precision = htonl((u_int32)ntx.precision);
2461 ik->tolerance = htonl((u_int32)ntx.tolerance);
/* PPS-discipline fields */
2466 ik->ppsfreq = htonl((u_int32)ntx.ppsfreq);
2467 ik->jitter = htonl((u_int32)ntx.jitter);
2468 ik->shift = htons(ntx.shift);
2469 ik->stabil = htonl((u_int32)ntx.stabil);
2470 ik->jitcnt = htonl((u_int32)ntx.jitcnt);
2471 ik->calcnt = htonl((u_int32)ntx.calcnt);
2472 ik->errcnt = htonl((u_int32)ntx.errcnt);
2473 ik->stbcnt = htonl((u_int32)ntx.stbcnt);
/*
 * get_clock_info - for each refclock address in the request, verify it
 * is a refclock address with an existing peer, query refclock_control()
 * for its status, and emit an info_clock record.  Fudge times are
 * converted double -> l_fp -> network order.
 * NOTE(review): fragmentary listing; braces and the continue/exit paths
 * after req_ack are elided.
 */
2483 * get_clock_info - get info about a clock
2489 struct req_pkt *inpkt
2492 register struct info_clock *ic;
2493 register u_int32 *clkaddr;
2495 struct refclockstat clock_stat;
2500 AF(&addr) = AF_INET;
2501 #ifdef ISC_PLATFORM_HAVESALEN
2502 addr.sa.sa_len = SOCKLEN(&addr);
2504 SET_PORT(&addr, NTP_PORT);
2505 items = INFO_NITEMS(inpkt->err_nitems);
2506 clkaddr = &inpkt->u.u32[0];
2508 ic = (struct info_clock *)prepare_pkt(srcadr, inter, inpkt,
2509 sizeof(struct info_clock));
2511 while (items-- > 0 && ic) {
2512 NSRCADR(&addr) = *clkaddr++;
2513 if (!ISREFCLOCKADR(&addr) || NULL ==
2514 findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
2515 req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2519 clock_stat.kv_list = (struct ctl_var *)0;
2521 refclock_control(&addr, NULL, &clock_stat);
2523 ic->clockadr = NSRCADR(&addr);
2524 ic->type = clock_stat.type;
2525 ic->flags = clock_stat.flags;
2526 ic->lastevent = clock_stat.lastevent;
2527 ic->currentstatus = clock_stat.currentstatus;
2528 ic->polls = htonl((u_int32)clock_stat.polls);
2529 ic->noresponse = htonl((u_int32)clock_stat.noresponse);
2530 ic->badformat = htonl((u_int32)clock_stat.badformat);
2531 ic->baddata = htonl((u_int32)clock_stat.baddata);
2532 ic->timestarted = htonl((u_int32)clock_stat.timereset);
2533 DTOLFP(clock_stat.fudgetime1, &ltmp);
2534 HTONL_FP(&ltmp, &ic->fudgetime1);
2535 DTOLFP(clock_stat.fudgetime2, &ltmp);
2536 HTONL_FP(&ltmp, &ic->fudgetime2);
2537 ic->fudgeval1 = htonl((u_int32)clock_stat.fudgeval1);
2538 ic->fudgeval2 = htonl(clock_stat.fudgeval2);
/* refclock_control allocated kv_list; release it before the next item */
2540 free_varlist(clock_stat.kv_list);
2542 ic = (struct info_clock *)more_pkt();
/*
 * set_clock_fudge - apply fudge-factor changes to refclocks.  Each item
 * names a refclock address plus a "which" selector choosing fudgetime1/
 * fudgetime2/fudgeval1/fudgeval2/flags; the chosen field and matching
 * CLK_HAVE* bit are set and pushed via refclock_control().
 * NOTE(review): fragmentary listing; the switch case labels and break
 * statements are elided.  (Despite the original one-line summary, this
 * SETS fudge factors rather than getting them.)
 */
2550 * set_clock_fudge - get a clock's fudge factors
2556 struct req_pkt *inpkt
2559 register struct conf_fudge *cf;
2561 struct refclockstat clock_stat;
2567 items = INFO_NITEMS(inpkt->err_nitems);
2568 cf = (struct conf_fudge *)&inpkt->u;
2570 while (items-- > 0) {
2571 AF(&addr) = AF_INET;
2572 NSRCADR(&addr) = cf->clockadr;
2573 #ifdef ISC_PLATFORM_HAVESALEN
2574 addr.sa.sa_len = SOCKLEN(&addr);
2576 SET_PORT(&addr, NTP_PORT);
2577 if (!ISREFCLOCKADR(&addr) || NULL ==
2578 findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
2579 req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2583 switch(ntohl(cf->which)) {
2585 NTOHL_FP(&cf->fudgetime, &ltmp);
2586 LFPTOD(&ltmp, clock_stat.fudgetime1);
2587 clock_stat.haveflags = CLK_HAVETIME1;
2590 NTOHL_FP(&cf->fudgetime, &ltmp);
2591 LFPTOD(&ltmp, clock_stat.fudgetime2);
2592 clock_stat.haveflags = CLK_HAVETIME2;
2595 clock_stat.fudgeval1 = ntohl(cf->fudgeval_flags);
2596 clock_stat.haveflags = CLK_HAVEVAL1;
2599 clock_stat.fudgeval2 = ntohl(cf->fudgeval_flags);
2600 clock_stat.haveflags = CLK_HAVEVAL2;
2603 clock_stat.flags = (u_char) (ntohl(cf->fudgeval_flags) & 0xf);
2604 clock_stat.haveflags =
2605 (CLK_HAVEFLAG1|CLK_HAVEFLAG2|CLK_HAVEFLAG3|CLK_HAVEFLAG4);
2608 msyslog(LOG_ERR, "set_clock_fudge: default!");
2609 req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2613 refclock_control(&addr, &clock_stat, (struct refclockstat *)0);
2616 req_ack(srcadr, inter, inpkt, INFO_OKAY);
/*
 * get_clkbug_info - collect refclock debugging snapshots.  For each
 * requested refclock address, refclock_buginfo() fills a refclockbug;
 * values and times are clamped to NUMCBUGVALUES/NUMCBUGTIMES and copied
 * into an info_clkbug record in network order.
 * NOTE(review): fragmentary listing; the copy loops' for-headers and
 * several braces are elided.
 */
2622 * get_clkbug_info - get debugging info about a clock
2628 struct req_pkt *inpkt
2632 register struct info_clkbug *ic;
2633 register u_int32 *clkaddr;
2635 struct refclockbug bug;
2639 AF(&addr) = AF_INET;
2640 #ifdef ISC_PLATFORM_HAVESALEN
2641 addr.sa.sa_len = SOCKLEN(&addr);
2643 SET_PORT(&addr, NTP_PORT);
2644 items = INFO_NITEMS(inpkt->err_nitems);
2645 clkaddr = (u_int32 *)&inpkt->u;
2647 ic = (struct info_clkbug *)prepare_pkt(srcadr, inter, inpkt,
2648 sizeof(struct info_clkbug));
2650 while (items-- > 0 && ic) {
2651 NSRCADR(&addr) = *clkaddr++;
2652 if (!ISREFCLOCKADR(&addr) || NULL ==
2653 findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
2654 req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2659 refclock_buginfo(&addr, &bug);
2660 if (bug.nvalues == 0 && bug.ntimes == 0) {
2661 req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2665 ic->clockadr = NSRCADR(&addr);
2667 if (i > NUMCBUGVALUES)
2669 ic->nvalues = (u_char)i;
/* keep only the status bits for the values actually reported */
2670 ic->svalues = htons((u_short) (bug.svalues & ((1<<i)-1)));
2672 ic->values[i] = htonl(bug.values[i]);
2675 if (i > NUMCBUGTIMES)
2677 ic->ntimes = (u_char)i;
2678 ic->stimes = htonl(bug.stimes);
2680 HTONL_FP(&bug.times[i], &ic->times[i]);
2683 ic = (struct info_clkbug *)more_pkt();
/*
 * fill_info_if_stats - interface_enumerate() callback: copy one endpt's
 * addresses and traffic counters into the next info_if_stats record,
 * then advance the caller's record pointer via more_pkt().  IPv6
 * interfaces are skipped when the client cannot handle long records.
 * NOTE(review): fragmentary listing; the NULL-check on ifs and some
 * braces are elided.
 */
2690 * receiver of interface structures
2693 fill_info_if_stats(void *data, interface_info_t *interface_info)
2695 struct info_if_stats **ifsp = (struct info_if_stats **)data;
2696 struct info_if_stats *ifs = *ifsp;
2697 endpt *ep = interface_info->ep;
2704 if (IS_IPV6(&ep->sin)) {
2705 if (!client_v6_capable)
2708 ifs->unaddr.addr6 = SOCK_ADDR6(&ep->sin);
2709 ifs->unbcast.addr6 = SOCK_ADDR6(&ep->bcast);
2710 ifs->unmask.addr6 = SOCK_ADDR6(&ep->mask);
2713 ifs->unaddr.addr = SOCK_ADDR4(&ep->sin);
2714 ifs->unbcast.addr = SOCK_ADDR4(&ep->bcast);
2715 ifs->unmask.addr = SOCK_ADDR4(&ep->mask);
2717 ifs->v6_flag = htonl(ifs->v6_flag);
2718 strlcpy(ifs->name, ep->name, sizeof(ifs->name));
2719 ifs->family = htons(ep->family);
2720 ifs->flags = htonl(ep->flags);
2721 ifs->last_ttl = htonl(ep->last_ttl);
2722 ifs->num_mcast = htonl(ep->num_mcast);
2723 ifs->received = htonl(ep->received);
2724 ifs->sent = htonl(ep->sent);
2725 ifs->notsent = htonl(ep->notsent);
2726 ifs->ifindex = htonl(ep->ifindex);
2727 /* scope no longer in endpt, in in6_addr typically */
2728 ifs->scopeid = ifs->ifindex;
2729 ifs->ifnum = htonl(ep->ifnum);
2730 ifs->uptime = htonl(current_time - ep->starttime);
2731 ifs->ignore_packets = ep->ignore_packets;
2732 ifs->peercnt = htonl(ep->peercnt);
2733 ifs->action = interface_info->action;
2735 *ifsp = (struct info_if_stats *)more_pkt();
/*
 * get_if_stats - reply with per-interface statistics by enumerating all
 * endpoints through fill_info_if_stats().
 */
2739 * get_if_stats - get interface statistics
2745 struct req_pkt *inpkt
2748 struct info_if_stats *ifs;
2750 DPRINTF(3, ("wants interface statistics\n"));
2752 ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
2753 v6sizeof(struct info_if_stats));
2755 interface_enumerate(fill_info_if_stats, &ifs);
/*
 * Interface-reload handler (function name line elided in this listing;
 * presumably do_if_reload -- TODO confirm against the full source).
 * Rescans interfaces via interface_update(), reporting each change
 * through the same fill_info_if_stats() record filler.
 */
2764 struct req_pkt *inpkt
2767 struct info_if_stats *ifs;
2769 DPRINTF(3, ("wants interface reload\n"));
2771 ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
2772 v6sizeof(struct info_if_stats));
2774 interface_update(fill_info_if_stats, &ifs);