2 * ntp_restrict.c - determine host restrictions
13 #include "ntp_lists.h"
14 #include "ntp_stdlib.h"
15 #include "ntp_assert.h"
18 * This code keeps a simple address-and-mask list of hosts we want
19 * to place restrictions on (or remove them from). The restrictions
20 * are implemented as a set of flags which tell you what the host
21 * can't do. There is a subroutine entry to return the flags. The
22 * list is kept sorted to reduce the average number of comparisons
23 * and make sure you get the set of restrictions most specific to
26 * The algorithm is that, when looking up a host, it is first assumed
27 * that the default set of restrictions will apply. It then searches
28 * down through the list. Whenever it finds a match it adopts the
29 * match's flags instead. When you hit the point where the sorted
30 * address is greater than the target, you return with the last set of
31 * flags you found. Because of the ordering of the list, the most
32 * specific match will provide the final set of flags.
34 * This was originally intended to restrict you from sync'ing to your
35 * own broadcasts when you are doing that, by restricting yourself from
36 * your own interfaces. It was also thought it would sometimes be useful
37 * to keep a misbehaving host or two from abusing your primary clock. It
38 * has been expanded, however, to suit the needs of those with more
39 * restrictive access policies.
42 * We will use two lists, one for IPv4 addresses and one for IPv6
43 * addresses. This is not protocol-independent but for now I can't
44 * find a way to respect this. We'll check this later... JFB 07/2001
/*
 * NOTE(review): extraction fragment — the macro's statement wrapper
 * and the declaration of 'idx' are not visible in this view.
 * Visible logic: byte-wise AND of *src with *msk stored into *dst,
 * i.e. masking a struct in6_addr with a prefix mask.
 */
46 #define MASK_IPV6_ADDR(dst, src, msk) \
49 for (idx = 0; idx < (int)COUNTOF((dst)->s6_addr); idx++) { \
50 (dst)->s6_addr[idx] = (src)->s6_addr[idx] \
51 & (msk)->s6_addr[idx]; \
56 * We allocate INC_RESLIST{4|6} entries to the free list whenever empty.
57 * Auto-tune these to be just less than 1KB (leaving at least 16 bytes
58 * for allocator overhead).
/* Batch sizes for refilling the v4/v6 free lists (used by the
 * alloc_res4()/alloc_res6() slab allocations below). */
60 #define INC_RESLIST4 ((1024 - 16) / V4_SIZEOF_RESTRICT_U)
61 #define INC_RESLIST6 ((1024 - 16) / V6_SIZEOF_RESTRICT_U)
64 * The restriction list
/* Heads of the active restriction lists, one per address family.
 * Per init_restrict()'s comment, each is kept sorted by descending
 * address/mask/mflags with an all-zero default entry as the tail. */
66 restrict_u *restrictlist4;
67 restrict_u *restrictlist6;
68 static int restrictcount; /* count in the restrict lists */
71 * The free list and associated counters. Also some uninteresting
74 static restrict_u *resfree4; /* available entries (free list) */
75 static restrict_u *resfree6; /* available IPv6 entries (free list) */
/* lookup statistics; res_not_found counts hits on the final default
 * entry only (see the comment in restrictions() below) */
77 static u_long res_calls;
78 static u_long res_found;
79 static u_long res_not_found;
82 * Count number of restriction entries referring to RES_LIMITED, to
83 * control implicit activation/deactivation of the MRU monlist.
85 static u_long res_limited_refcnt;
88 * Our default entries.
/* Always-present entries with address and mask 0, matching anything. */
90 static restrict_u restrict_def4;
91 static restrict_u restrict_def6;
94 * "restrict source ..." enabled knob and restriction bits.
/* set (with the flags below) when hack_restrict() is called with a
 * NULL resaddr; consumed by restrict_source() */
96 static int restrict_source_enabled;
97 static u_short restrict_source_flags;
98 static u_short restrict_source_mflags;
/* Internal helpers; definitions follow below. */
103 static restrict_u * alloc_res4(void);
104 static restrict_u * alloc_res6(void);
105 static void free_res(restrict_u *, int);
106 static void inc_res_limited(void);
107 static void dec_res_limited(void);
108 static restrict_u * match_restrict4_addr(u_int32, u_short);
/* NOTE(review): the continuation line of this prototype (its second
 * parameter) is missing from this extraction. */
109 static restrict_u * match_restrict6_addr(const struct in6_addr *,
111 static restrict_u * match_restrict_entry(const restrict_u *, int);
112 static int res_sorts_before4(restrict_u *, restrict_u *);
113 static int res_sorts_before6(restrict_u *, restrict_u *);
117 * init_restrict - initialize the restriction data structures
123 * The restriction lists begin with a default entry with address
124 * and mask 0, which will match any entry. The lists are kept
125 * sorted by descending address followed by descending mask:
128 * 192.168.0.0 255.255.255.0 kod limited noquery nopeer
129 * 192.168.0.0 255.255.0.0 kod limited
130 * 0.0.0.0 0.0.0.0 kod limited noquery
132 * The first entry which matches an address is used. With the
133 * example restrictions above, 192.168.0.0/24 matches the first
134 * entry, the rest of 192.168.0.0/16 matches the second, and
135 * everything else matches the third (default).
137 * Note this achieves the same result a little more efficiently
138 * than the documented behavior, which is to keep the lists
139 * sorted by ascending address followed by ascending mask, with
140 * the _last_ matching entry used.
142 * An additional wrinkle is we may have multiple entries with
143 * the same address and mask but differing match flags (mflags).
144 * At present there is only one, RESM_NTPONLY. Entries with
145 * RESM_NTPONLY are sorted earlier so they take precedence over
146 * any otherwise similar entry without. Again, this is the same
147 * behavior as but reversed implementation compared to the docs.
/* NOTE(review): fragment — the function signature and braces are not
 * visible here.  Seed each active list with its all-zero default
 * entry so every lookup has a match of last resort. */
150 LINK_SLIST(restrictlist4, &restrict_def4, link);
151 LINK_SLIST(restrictlist6, &restrict_def6, link);
/*
 * NOTE(review): fragment of alloc_res4() — the signature, local
 * declarations and return statement are not visible.  Visible logic:
 * try the free list first; if empty, allocate one zeroed slab of
 * INC_RESLIST4 entries, link all but the first (slab base rl) onto
 * resfree4 walking back-to-front, and hand out the first entry.
 */
159 const size_t cb = V4_SIZEOF_RESTRICT_U;
160 const size_t count = INC_RESLIST4;
/* fast path: reuse an entry from the free list when available */
165 UNLINK_HEAD_SLIST(res, resfree4, link);
169 rl = emalloc_zero(count * cb);
170 /* link all but the first onto free list */
171 res = (void *)((char *)rl + (count - 1) * cb);
172 for (i = count - 1; i > 0; i--) {
173 LINK_SLIST(resfree4, res, link);
174 res = (void *)((char *)res - cb);
/* after stepping back count-1 slots res must be the slab base */
176 NTP_INSIST(rl == res);
177 /* allocate the first */
/*
 * NOTE(review): fragment of alloc_res6() — mirror of alloc_res4() for
 * the IPv6 free list; signature, declarations and return are not
 * visible.  One zeroed slab of INC_RESLIST6 entries is carved up and
 * all but the first entry are pushed onto resfree6.
 */
185 const size_t cb = V6_SIZEOF_RESTRICT_U;
186 const size_t count = INC_RESLIST6;
/* fast path: reuse an entry from the free list when available */
191 UNLINK_HEAD_SLIST(res, resfree6, link);
195 rl = emalloc_zero(count * cb);
196 /* link all but the first onto free list */
197 res = (void *)((char *)rl + (count - 1) * cb);
198 for (i = count - 1; i > 0; i--) {
199 LINK_SLIST(resfree6, res, link);
200 res = (void *)((char *)res - cb);
/* after stepping back count-1 slots res must be the slab base */
202 NTP_INSIST(rl == res);
203 /* allocate the first */
/*
 * NOTE(review): fragment of free_res() — the signature and the
 * if/else framing around the list selection are missing from this
 * view.  Visible logic: unlink the entry from its active list, scrub
 * it with zero_mem(), and push it onto the matching free list.  The
 * RES_LIMITED test at line 218 presumably guards a dec_res_limited()
 * call on a line not shown — confirm against the full source.
 */
214 restrict_u ** plisthead;
215 restrict_u * unlinked;
218 if (RES_LIMITED & res->flags)
/* choose the active list the entry currently lives on */
222 plisthead = &restrictlist6;
224 plisthead = &restrictlist4;
225 UNLINK_SLIST(unlinked, *plisthead, res, link, restrict_u);
226 NTP_INSIST(unlinked == res);
/* scrub the entry and return it to the appropriate free list */
229 zero_mem(res, V6_SIZEOF_RESTRICT_U);
230 plisthead = &resfree6;
232 zero_mem(res, V4_SIZEOF_RESTRICT_U);
233 plisthead = &resfree4;
235 LINK_SLIST(*plisthead, res, link);
/*
 * inc_res_limited - bump the count of RES_LIMITED entries.
 * NOTE(review): fragment — the body of the 0 -> 1 transition branch
 * is missing (per the refcount's comment it presumably enables MRU
 * monlist monitoring; confirm against the full source).
 */
240 inc_res_limited(void)
242 if (!res_limited_refcnt)
244 res_limited_refcnt++;
/*
 * dec_res_limited - drop the count of RES_LIMITED entries.
 * NOTE(review): fragment — the body of the 1 -> 0 transition branch
 * is missing (presumably disables MRU monlist monitoring; confirm).
 */
249 dec_res_limited(void)
251 res_limited_refcnt--;
252 if (!res_limited_refcnt)
/*
 * NOTE(review): fragment of match_restrict4_addr(addr, port) — the
 * declarations, the expiry-guard's first line, entry removal and the
 * return are not visible.  Visible logic: walk the sorted v4 list,
 * skip entries whose expire time has passed, and match when the
 * masked address equals the entry's address; RESM_NTPONLY entries
 * additionally require the source port to be NTP_PORT.
 */
258 match_restrict4_addr(
267 for (res = restrictlist4; res != NULL; res = next) {
270 res->expire <= current_time)
272 if (res->u.v4.addr == (addr & res->u.v4.mask)
273 && (!(RESM_NTPONLY & res->mflags)
274 || NTP_PORT == port))
/*
 * NOTE(review): fragment of match_restrict6_addr(addr, port) — IPv6
 * counterpart of match_restrict4_addr(); declarations, expiry-guard
 * head and return are not visible.  The NTP_INSIST(next != res)
 * guards against a self-referential list link (infinite loop).
 */
282 match_restrict6_addr(
283 const struct in6_addr * addr,
290 struct in6_addr masked;
292 for (res = restrictlist6; res != NULL; res = next) {
294 NTP_INSIST(next != res);
296 res->expire <= current_time)
/* apply the entry's prefix mask, then compare all 128 bits */
298 MASK_IPV6_ADDR(&masked, addr, &res->u.v6.mask);
299 if (ADDR6_EQ(&masked, &res->u.v6.addr)
300 && (!(RESM_NTPONLY & res->mflags)
301 || NTP_PORT == (int)port))
309 * match_restrict_entry - find an exact match on a restrict list.
311 * Exact match is addr, mask, and mflags all equal.
312 * In order to use more common code for IPv4 and IPv6, this routine
313 * requires the caller to populate a restrict_u with mflags and either
314 * the v4 or v6 address and mask as appropriate. Other fields in the
315 * input restrict_u are ignored.
/* NOTE(review): fragment — the signature tail (v6 flag parameter),
 * declarations, if/else framing and return are not visible. */
318 match_restrict_entry(
319 const restrict_u * pmatch,
/* pick the list and the compare width by address family */
328 rlist = restrictlist6;
329 cb = sizeof(pmatch->u.v6);
331 rlist = restrictlist4;
332 cb = sizeof(pmatch->u.v4);
/* linear scan: mflags must match and addr+mask bytes must be equal */
335 for (res = rlist; res != NULL; res = res->link)
336 if (res->mflags == pmatch->mflags &&
337 !memcmp(&res->u, &pmatch->u, cb))
344 * res_sorts_before4 - compare two restrict4 entries
346 * Returns nonzero if r1 sorts before r2. We sort by descending
347 * address, then descending mask, then descending mflags, so sorting
348 * before means having a higher value.
/* NOTE(review): fragment — the signature and the assignments/return
 * of the result variable on the dropped lines are not visible.  The
 * visible chain compares addr, then mask, then mflags. */
358 if (r1->u.v4.addr > r2->u.v4.addr)
360 else if (r1->u.v4.addr < r2->u.v4.addr)
362 else if (r1->u.v4.mask > r2->u.v4.mask)
364 else if (r1->u.v4.mask < r2->u.v4.mask)
366 else if (r1->mflags > r2->mflags)
376 * res_sorts_before6 - compare two restrict6 entries
378 * Returns nonzero if r1 sorts before r2. We sort by descending
379 * address, then descending mask, then descending mflags, so sorting
380 * before means having a higher value.
/* NOTE(review): fragment — signature and result assignments/return
 * are not visible.  Uses ADDR6_CMP for the 128-bit comparisons in
 * place of the scalar compares of res_sorts_before4(). */
391 cmp = ADDR6_CMP(&r1->u.v6.addr, &r2->u.v6.addr);
392 if (cmp > 0) /* r1->addr > r2->addr */
394 else if (cmp < 0) /* r2->addr > r1->addr */
397 cmp = ADDR6_CMP(&r1->u.v6.mask, &r2->u.v6.mask);
398 if (cmp > 0) /* r1->mask > r2->mask*/
400 else if (cmp < 0) /* r2->mask > r1->mask */
402 else if (r1->mflags > r2->mflags)
413 * restrictions - return restrictions for this host
/* NOTE(review): fragment — the signature, declarations, res_calls /
 * res_found / res_not_found bookkeeping lines and the final return
 * are not visible.  Visible logic: multicast sources are ignored
 * outright; otherwise the per-family best match supplies the flags. */
421 struct in6_addr *pin6;
426 /* IPv4 source address */
427 if (IS_IPV4(srcadr)) {
429 * Ignore any packets with a multicast source address
430 * (this should be done early in the receive process,
433 if (IN_CLASSD(SRCADR(srcadr)))
434 return (int)RES_IGNORE;
436 match = match_restrict4_addr(SRCADR(srcadr),
440 * res_not_found counts only use of the final default
441 * entry, not any "restrict default ntpport ...", which
442 * would be just before the final default.
444 if (&restrict_def4 == match)
448 flags = match->flags;
451 /* IPv6 source address */
452 if (IS_IPV6(srcadr)) {
453 pin6 = PSOCK_ADDR6(srcadr);
456 * Ignore any packets with a multicast source address
457 * (this should be done early in the receive process,
460 if (IN6_IS_ADDR_MULTICAST(pin6))
461 return (int)RES_IGNORE;
463 match = match_restrict6_addr(pin6, SRCPORT(srcadr));
465 if (&restrict_def6 == match)
469 flags = match->flags;
476 * hack_restrict - add/subtract/manipulate entries on the restrict list
/* NOTE(review): fragment — the signature head, remaining parameters,
 * the switch statement head, allocation calls, LINK_SORT_SLIST head
 * and several control-flow lines are not visible.  Visible logic:
 * a NULL resaddr records the "restrict source" template flags; an
 * address is normalized (masked) per family, looked up exactly, and
 * then flags are added, removed, or the entry deleted per op. */
481 sockaddr_u * resaddr,
482 sockaddr_u * resmask,
491 restrict_u ** plisthead;
493 DPRINTF(1, ("restrict: op %d addr %s mask %s mflags %08x flags %08x\n",
494 op, stoa(resaddr), stoa(resmask), mflags, flags));
/* NULL address: stash the flags for later restrict_source() use */
496 if (NULL == resaddr) {
497 NTP_REQUIRE(NULL == resmask);
498 NTP_REQUIRE(RESTRICT_FLAGS == op);
499 restrict_source_flags = flags;
500 restrict_source_mflags = mflags;
501 restrict_source_enabled = 1;
506 /* silence VC9 potentially uninit warnings */
510 if (IS_IPV4(resaddr)) {
513 * Get address and mask in host byte order for easy
514 * comparison as u_int32
516 match.u.v4.addr = SRCADR(resaddr);
517 match.u.v4.mask = SRCADR(resmask);
/* canonicalize: keep only the bits covered by the mask */
518 match.u.v4.addr &= match.u.v4.mask;
520 } else if (IS_IPV6(resaddr)) {
523 * Get address and mask in network byte order for easy
524 * comparison as byte sequences (e.g. memcmp())
526 match.u.v6.mask = SOCK_ADDR6(resmask);
527 MASK_IPV6_ADDR(&match.u.v6.addr, PSOCK_ADDR6(resaddr),
530 } else /* not IPv4 nor IPv6 */
534 match.mflags = mflags;
535 match.expire = expire;
/* look for an existing entry with identical addr/mask/mflags */
536 res = match_restrict_entry(&match, v6);
542 * Here we add bits to the flags. If this is a
543 * new restriction add it.
549 V6_SIZEOF_RESTRICT_U);
550 plisthead = &restrictlist6;
554 V4_SIZEOF_RESTRICT_U);
555 plisthead = &restrictlist4;
/* insert keeping the list's descending sort order */
560 ? res_sorts_before6(res, L_S_S_CUR())
561 : res_sorts_before4(res, L_S_S_CUR()),
564 if (RES_LIMITED & flags)
/* existing entry gaining RES_LIMITED also bumps the refcount */
567 if ((RES_LIMITED & flags) &&
568 !(RES_LIMITED & res->flags))
574 case RESTRICT_UNFLAG:
576 * Remove some bits from the flags. If we didn't
577 * find this one, just return.
/* dropping RES_LIMITED must also drop the refcount */
580 if ((RES_LIMITED & res->flags)
581 && (RES_LIMITED & flags))
583 res->flags &= ~flags;
587 case RESTRICT_REMOVE:
588 case RESTRICT_REMOVEIF:
590 * Remove an entry from the table entirely if we
591 * found one. Don't remove the default entry and
592 * don't remove an interface entry.
595 && (RESTRICT_REMOVEIF == op
596 || !(RESM_INTERFACE & res->mflags))
597 && res != &restrict_def4
598 && res != &restrict_def6)
602 default: /* unknown op */
611 * restrict_source - maintains dynamic "restrict source ..." entries as
/* NOTE(review): fragment — the signature head (addr parameter), the
 * early returns, the if/else framing around the per-family lookups,
 * and the trailing argument lines of the hack_restrict() calls are
 * not visible in this view. */
617 int farewell, /* 0 to add, 1 to remove */
618 u_long expire /* 0 is infinite, valid until */
/* nothing to do unless "restrict source" is configured and the
 * address is a usable unicast, non-refclock address */
625 if (!restrict_source_enabled || SOCK_UNSPEC(addr) ||
626 IS_MCAST(addr) || ISREFCLOCKADR(addr))
629 NTP_REQUIRE(AF_INET == AF(addr) || AF_INET6 == AF(addr));
/* build the all-ones host mask for this address family */
631 SET_HOSTMASK(&onesmask, AF(addr));
633 hack_restrict(RESTRICT_REMOVE, addr, &onesmask,
/* NOTE(review): this DPRINTF lacks the trailing \n its sibling at
 * line 667 has — confirm whether that is intentional upstream */
635 DPRINTF(1, ("restrict_source: %s removed", stoa(addr)));
640 * If there is a specific entry for this address, hands
641 * off, as it is considered more specific than "restrict
643 * However, if the specific entry found is a fleeting one
644 * added by pool_xmit() before soliciting, replace it
645 * immediately regardless of the expire value to make way
646 * for the more persistent entry.
649 res = match_restrict4_addr(SRCADR(addr), SRCPORT(addr));
/* "specific" means the match's mask is the full host mask */
650 found_specific = (SRCADR(&onesmask) == res->u.v4.mask);
652 res = match_restrict6_addr(&SOCK_ADDR6(addr),
654 found_specific = ADDR6_EQ(&res->u.v6.mask,
655 &SOCK_ADDR6(&onesmask));
/* evict a fleeting (expiring) entry to make way for a lasting one */
657 if (!expire && found_specific && res->expire) {
659 free_res(res, IS_IPV6(addr));
664 hack_restrict(RESTRICT_FLAGS, addr, &onesmask,
665 restrict_source_mflags, restrict_source_flags,
667 DPRINTF(1, ("restrict_source: %s host restriction added\n",