/*-
 * SPDX-License-Identifier: (BSD-2-Clause-FreeBSD AND ISC)
 *
 * Copyright (c) 2002 Michael Shalayeff
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $
 *
 * Revisions picked from OpenBSD after revision 1.110 import:
 * 1.119 - don't m_copydata() beyond the len of mbuf in pfsync_input()
 * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates
 * 1.120, 1.175 - use monotonic time_uptime
 * 1.122 - reduce number of updates for non-TCP sessions
 * 1.125, 1.127 - rewrite merge or stale processing
 * 1.146 - bzero() mbuf before sparsely filling it with data
 * 1.170 - SIOCSIFMTU checks
 * 1.126, 1.142 - deferred packets processing
 * 1.173 - correct expire time processing
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>

#include <netinet/if_ether.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_carp.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

#define PFSYNC_MINPKT ( \
	sizeof(struct ip) + \
	sizeof(struct pfsync_header) + \
	sizeof(struct pfsync_subheader) )
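
/*
 * Every pfsync datagram starts at PFSYNC_MINPKT bytes and grows as
 * actions are queued; on the wire it looks like:
 *
 *	struct ip		(IPPROTO_PFSYNC, TTL = PFSYNC_DFLTTL)
 *	struct pfsync_header	(version, total length, ruleset checksum)
 *	struct pfsync_subheader	(action, count)
 *	  <count messages for that action>
 *	... more subheader + message runs, terminated by PFSYNC_ACT_EOF.
 */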

static int	pfsync_upd_tcp(struct pf_state *, struct pfsync_state_peer *,
		    struct pfsync_state_peer *);
static int	pfsync_in_clr(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_ins(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_iack(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_upd(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_upd_c(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_ureq(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_del(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_del_c(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_bus(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_tdb(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_eof(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_error(struct pfsync_pkt *, struct mbuf *, int, int);

static int (*pfsync_acts[])(struct pfsync_pkt *, struct mbuf *, int, int) = {
	pfsync_in_clr,			/* PFSYNC_ACT_CLR */
	pfsync_in_ins,			/* PFSYNC_ACT_INS */
	pfsync_in_iack,			/* PFSYNC_ACT_INS_ACK */
	pfsync_in_upd,			/* PFSYNC_ACT_UPD */
	pfsync_in_upd_c,		/* PFSYNC_ACT_UPD_C */
	pfsync_in_ureq,			/* PFSYNC_ACT_UPD_REQ */
	pfsync_in_del,			/* PFSYNC_ACT_DEL */
	pfsync_in_del_c,		/* PFSYNC_ACT_DEL_C */
	pfsync_in_error,		/* PFSYNC_ACT_INS_F */
	pfsync_in_error,		/* PFSYNC_ACT_DEL_F */
	pfsync_in_bus,			/* PFSYNC_ACT_BUS */
	pfsync_in_tdb,			/* PFSYNC_ACT_TDB */
	pfsync_in_eof			/* PFSYNC_ACT_EOF */
};
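
/*
 * The receive path in pfsync_input() indexes this table directly with the
 * subheader action byte; anything >= PFSYNC_ACT_MAX is counted in
 * pfsyncs_badact and the packet is dropped.  The two fragment actions,
 * which are not implemented here, are wired to pfsync_in_error().
 */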

struct pfsync_q {
	void		(*write)(struct pf_state *, void *);
	size_t		len;
	u_int8_t	action;
};

/* we have one of these for every PFSYNC_S_ */
static void	pfsync_out_state(struct pf_state *, void *);
static void	pfsync_out_iack(struct pf_state *, void *);
static void	pfsync_out_upd_c(struct pf_state *, void *);
static void	pfsync_out_del(struct pf_state *, void *);

static struct pfsync_q pfsync_qs[] = {
	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_INS },
	{ pfsync_out_iack,  sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_UPD },
	{ pfsync_out_upd_c, sizeof(struct pfsync_upd_c),   PFSYNC_ACT_UPD_C },
	{ pfsync_out_del,   sizeof(struct pfsync_del_c),   PFSYNC_ACT_DEL_C }
};
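
/*
 * The five entries correspond to the PFSYNC_S_INS, PFSYNC_S_IACK,
 * PFSYNC_S_UPD, PFSYNC_S_UPD_C and PFSYNC_S_DEL queues in sc_qs[];
 * pfsync_sendout() drains each queue in order, calling the write
 * method once per state and stamping the run with the matching
 * action byte and on-wire message length.
 */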

static void	pfsync_q_ins(struct pf_state *, int, bool);
static void	pfsync_q_del(struct pf_state *, bool);

static void	pfsync_update_state(struct pf_state *);

struct pfsync_upd_req_item {
	TAILQ_ENTRY(pfsync_upd_req_item)	ur_entry;
	struct pfsync_upd_req			ur_msg;
};

struct pfsync_deferral {
	struct pfsync_softc		*pd_sc;
	TAILQ_ENTRY(pfsync_deferral)	pd_entry;
	u_int				pd_refs;
	struct callout			pd_tmo;

	struct pf_state			*pd_st;
	struct mbuf			*pd_m;
};
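
/*
 * A deferral holds back the packet that created a state until the peer
 * acknowledges the insert (see pfsync_undefer_state()) or pd_tmo fires
 * (pfsync_defer_tmo()), at which point the held mbuf is transmitted.
 */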

struct pfsync_softc {
	/* Configuration */
	struct ifnet		*sc_ifp;
	struct ifnet		*sc_sync_if;
	struct ip_moptions	sc_imo;
	struct in_addr		sc_sync_peer;
	uint32_t		sc_flags;
#define	PFSYNCF_OK		0x00000001
#define	PFSYNCF_DEFER		0x00000002
#define	PFSYNCF_PUSH		0x00000004
	uint8_t			sc_maxupdates;
	struct ip		sc_template;
	struct callout		sc_tmo;
	struct mtx		sc_mtx;

	/* Queued data */
	size_t			sc_len;
	TAILQ_HEAD(, pf_state)			sc_qs[PFSYNC_S_COUNT];
	TAILQ_HEAD(, pfsync_upd_req_item)	sc_upd_req_list;
	TAILQ_HEAD(, pfsync_deferral)		sc_deferrals;
	u_int			sc_deferred;
	void			*sc_plus;
	size_t			sc_pluslen;

	/* Bulk update info */
	struct mtx		sc_bulk_mtx;
	uint32_t		sc_ureq_sent;
	int			sc_bulk_tries;
	uint32_t		sc_ureq_received;
	uint32_t		sc_bulk_hashid;
	uint64_t		sc_bulk_stateid;
	uint32_t		sc_bulk_creatorid;
	struct callout		sc_bulk_tmo;
	struct callout		sc_bulkfail_tmo;
};

#define	PFSYNC_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define	PFSYNC_UNLOCK(sc)	mtx_unlock(&(sc)->sc_mtx)
#define	PFSYNC_LOCK_ASSERT(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)

#define	PFSYNC_BLOCK(sc)	mtx_lock(&(sc)->sc_bulk_mtx)
#define	PFSYNC_BUNLOCK(sc)	mtx_unlock(&(sc)->sc_bulk_mtx)
#define	PFSYNC_BLOCK_ASSERT(sc)	mtx_assert(&(sc)->sc_bulk_mtx, MA_OWNED)
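
/*
 * sc_mtx protects the pending-packet state (sc_qs[], sc_len, update
 * requests and deferrals), while sc_bulk_mtx serializes only the
 * bulk-transfer bookkeeping, so a bulk walk does not have to stall
 * the normal update path.
 */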

static const char pfsyncname[] = "pfsync";
static MALLOC_DEFINE(M_PFSYNC, pfsyncname, "pfsync(4) data");
static VNET_DEFINE(struct pfsync_softc *, pfsyncif) = NULL;
#define	V_pfsyncif		VNET(pfsyncif)
static VNET_DEFINE(void *, pfsync_swi_cookie) = NULL;
#define	V_pfsync_swi_cookie	VNET(pfsync_swi_cookie)
static VNET_DEFINE(struct pfsyncstats, pfsyncstats);
#define	V_pfsyncstats		VNET(pfsyncstats)
static VNET_DEFINE(int, pfsync_carp_adj) = CARP_MAXSKEW;
#define	V_pfsync_carp_adj	VNET(pfsync_carp_adj)

static void	pfsync_timeout(void *);
static void	pfsync_push(struct pfsync_softc *);
static void	pfsyncintr(void *);
static int	pfsync_multicast_setup(struct pfsync_softc *, struct ifnet *,
		    void *);
static void	pfsync_multicast_cleanup(struct pfsync_softc *);
static void	pfsync_pointers_init(void);
static void	pfsync_pointers_uninit(void);
static int	pfsync_init(void);
static void	pfsync_uninit(void);

SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW, 0, "PFSYNC");
SYSCTL_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pfsyncstats), pfsyncstats,
    "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_RW,
    &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");

static int	pfsync_clone_create(struct if_clone *, int, caddr_t);
static void	pfsync_clone_destroy(struct ifnet *);
static int	pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
		    struct pf_state_peer *);
static int	pfsyncoutput(struct ifnet *, struct mbuf *,
		    const struct sockaddr *, struct route *);
static int	pfsyncioctl(struct ifnet *, u_long, caddr_t);

static int	pfsync_defer(struct pf_state *, struct mbuf *);
static void	pfsync_undefer(struct pfsync_deferral *, int);
static void	pfsync_undefer_state(struct pf_state *, int);
static void	pfsync_defer_tmo(void *);

static void	pfsync_request_update(u_int32_t, u_int64_t);
static void	pfsync_update_state_req(struct pf_state *);

static void	pfsync_drop(struct pfsync_softc *);
static void	pfsync_sendout(int);
static void	pfsync_send_plus(void *, size_t);

static void	pfsync_bulk_start(void);
static void	pfsync_bulk_status(u_int8_t);
static void	pfsync_bulk_update(void *);
static void	pfsync_bulk_fail(void *);

static void	pfsync_update_net_tdb(struct pfsync_tdb *);

#define	PFSYNC_MAX_BULKTRIES	12
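
/*
 * A failed bulk transfer is retried up to PFSYNC_MAX_BULKTRIES times;
 * when the retries are exhausted, pfsync_bulk_fail() gives up, sets
 * PFSYNCF_OK anyway and lifts the CARP demotion, so the node may become
 * master despite the incomplete state table.
 */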

VNET_DEFINE(struct if_clone *, pfsync_cloner);
#define	V_pfsync_cloner	VNET(pfsync_cloner)

pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
	struct pfsync_softc *sc;

	sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO);
	sc->sc_flags |= PFSYNCF_OK;

	for (q = 0; q < PFSYNC_S_COUNT; q++)
		TAILQ_INIT(&sc->sc_qs[q]);

	TAILQ_INIT(&sc->sc_upd_req_list);
	TAILQ_INIT(&sc->sc_deferrals);

	sc->sc_len = PFSYNC_MINPKT;
	sc->sc_maxupdates = 128;
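
	/*
	 * sc_maxupdates bounds how many times a single TCP state may be
	 * updated in the pending packet before the packet is pushed to
	 * the wire (see pfsync_update_state()); it is configurable via
	 * pfsyncr_maxupdates and capped at 255 in pfsyncioctl().
	 */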

	ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);

	if_initname(ifp, pfsyncname, unit);
	ifp->if_softc = sc;
	ifp->if_ioctl = pfsyncioctl;
	ifp->if_output = pfsyncoutput;
	ifp->if_type = IFT_PFSYNC;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_hdrlen = sizeof(struct pfsync_header);
	ifp->if_mtu = ETHERMTU;
	mtx_init(&sc->sc_mtx, pfsyncname, NULL, MTX_DEF);
	mtx_init(&sc->sc_bulk_mtx, "pfsync bulk", NULL, MTX_DEF);
	callout_init(&sc->sc_tmo, 1);
	callout_init_mtx(&sc->sc_bulk_tmo, &sc->sc_bulk_mtx, 0);
	callout_init_mtx(&sc->sc_bulkfail_tmo, &sc->sc_bulk_mtx, 0);

	if_attach(ifp);

	bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);

pfsync_clone_destroy(struct ifnet *ifp)
	struct pfsync_softc *sc = ifp->if_softc;

	/*
	 * At this stage, everything should have already been
	 * cleared by pfsync_uninit(), and we have only to
	 * drain callouts.
	 */
	while (sc->sc_deferred > 0) {
		struct pfsync_deferral *pd = TAILQ_FIRST(&sc->sc_deferrals);

		TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
		sc->sc_deferred--;
		if (callout_stop(&pd->pd_tmo) > 0) {
			pf_release_state(pd->pd_st);
			m_freem(pd->pd_m);
			free(pd, M_PFSYNC);
		} else {
			pd->pd_refs++;
			callout_drain(&pd->pd_tmo);
			free(pd, M_PFSYNC);
		}
	}

	callout_drain(&sc->sc_tmo);
	callout_drain(&sc->sc_bulkfail_tmo);
	callout_drain(&sc->sc_bulk_tmo);

	if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
		(*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy");

	if (sc->sc_imo.imo_membership)
		pfsync_multicast_cleanup(sc);
	mtx_destroy(&sc->sc_mtx);
	mtx_destroy(&sc->sc_bulk_mtx);

pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
    struct pf_state_peer *d)
	if (s->scrub.scrub_flag && d->scrub == NULL) {
		d->scrub = uma_zalloc(V_pf_state_scrub_z, M_NOWAIT | M_ZERO);
		if (d->scrub == NULL)
			return (ENOMEM);
	}

	return (0);

pfsync_state_import(struct pfsync_state *sp, u_int8_t flags)
	struct pfsync_softc *sc = V_pfsyncif;
#ifndef	__NO_STRICT_ALIGNMENT
	struct pfsync_state_key key[2];
#endif
	struct pfsync_state_key *kw, *ks;
	struct pf_state	*st = NULL;
	struct pf_state_key *skw = NULL, *sks = NULL;
	struct pf_rule *r = NULL;

	if (sp->creatorid == 0) {
		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("%s: invalid creator id: %08x\n", __func__,
			    ntohl(sp->creatorid));
		return (EINVAL);
	}

	if ((kif = pfi_kif_find(sp->ifname)) == NULL) {
		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("%s: unknown interface: %s\n", __func__,
			    sp->ifname);
		if (flags & PFSYNC_SI_IOCTL)
			return (EINVAL);
		return (0);	/* skip this state */
	}

	/*
	 * If the ruleset checksums match or the state is coming from the ioctl,
	 * it's safe to associate the state with the rule of that number.
	 */
	if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
	    (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) <
	    pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
		r = pf_main_ruleset.rules[
		    PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
	else
		r = &V_pf_default_rule;

	if ((r->max_states &&
	    counter_u64_fetch(r->states_cur) >= r->max_states))
		goto cleanup;

	/*
	 * XXXGL: consider M_WAITOK in ioctl path after.
	 */
	if ((st = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO)) == NULL)
		goto cleanup;

	if ((skw = uma_zalloc(V_pf_state_key_z, M_NOWAIT)) == NULL)
		goto cleanup;

#ifndef	__NO_STRICT_ALIGNMENT
	bcopy(&sp->key, key, sizeof(struct pfsync_state_key) * 2);
	kw = &key[PF_SK_WIRE];
	ks = &key[PF_SK_STACK];
#else
	kw = &sp->key[PF_SK_WIRE];
	ks = &sp->key[PF_SK_STACK];
#endif

	if (PF_ANEQ(&kw->addr[0], &ks->addr[0], sp->af) ||
	    PF_ANEQ(&kw->addr[1], &ks->addr[1], sp->af) ||
	    kw->port[0] != ks->port[0] ||
	    kw->port[1] != ks->port[1]) {
		sks = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
		if (sks == NULL)
			goto cleanup;
	} else
		sks = skw;

	/* allocate memory for scrub info */
	if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
	    pfsync_alloc_scrub_memory(&sp->dst, &st->dst))
		goto cleanup;

	/* Copy to state key(s). */
	skw->addr[0] = kw->addr[0];
	skw->addr[1] = kw->addr[1];
	skw->port[0] = kw->port[0];
	skw->port[1] = kw->port[1];
	skw->proto = sp->proto;
	skw->af = sp->af;
	if (sks != skw) {
		sks->addr[0] = ks->addr[0];
		sks->addr[1] = ks->addr[1];
		sks->port[0] = ks->port[0];
		sks->port[1] = ks->port[1];
		sks->proto = sp->proto;
		sks->af = sp->af;
	}

	/* copy to state */
	bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr));
	st->creation = time_uptime - ntohl(sp->creation);
	st->expire = time_uptime;
	if (sp->expire) {
		uint32_t timeout;

		timeout = r->timeout[sp->timeout];
		if (!timeout)
			timeout = V_pf_default_rule.timeout[sp->timeout];

		/* sp->expire may have been adaptively scaled by export. */
		st->expire -= timeout - ntohl(sp->expire);
	}
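
	/*
	 * Worked example: with a 60 second rule timeout, a peer that
	 * still saw 45 seconds of lifetime exports sp->expire == 45, so
	 * the adjustment above rewinds st->expire by 60 - 45 = 15
	 * seconds and the state expires at the same moment on both
	 * peers.
	 */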

	st->direction = sp->direction;
	st->log = sp->log;
	st->timeout = sp->timeout;
	st->state_flags = sp->state_flags;

	st->id = sp->id;
	st->creatorid = sp->creatorid;
	pf_state_peer_ntoh(&sp->src, &st->src);
	pf_state_peer_ntoh(&sp->dst, &st->dst);

	st->rule.ptr = r;
	st->nat_rule.ptr = NULL;
	st->anchor.ptr = NULL;
	st->rt_kif = NULL;

	st->pfsync_time = time_uptime;
	st->sync_state = PFSYNC_S_NONE;

	if (!(flags & PFSYNC_SI_IOCTL))
		st->state_flags |= PFSTATE_NOSYNC;

	if ((error = pf_state_insert(kif, skw, sks, st)) != 0)
		goto cleanup_state;

	/* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
	counter_u64_add(r->states_cur, 1);
	counter_u64_add(r->states_tot, 1);

	if (!(flags & PFSYNC_SI_IOCTL)) {
		st->state_flags &= ~PFSTATE_NOSYNC;
		if (st->state_flags & PFSTATE_ACK) {
			pfsync_q_ins(st, PFSYNC_S_IACK, true);
			pfsync_push(sc);
		}
	}
	st->state_flags &= ~PFSTATE_ACK;

	return (0);

cleanup:
	error = ENOMEM;
	if (skw == sks)
		sks = NULL;
	uma_zfree(V_pf_state_key_z, skw);
	uma_zfree(V_pf_state_key_z, sks);

cleanup_state:	/* pf_state_insert() frees the state keys. */
	if (st) {
		if (st->dst.scrub)
			uma_zfree(V_pf_state_scrub_z, st->dst.scrub);
		if (st->src.scrub)
			uma_zfree(V_pf_state_scrub_z, st->src.scrub);
		uma_zfree(V_pf_state_z, st);
	}
	return (error);

pfsync_input(struct mbuf **mp, int *offp __unused, int proto __unused)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_pkt pkt;
	struct mbuf *m = *mp;
	struct ip *ip = mtod(m, struct ip *);
	struct pfsync_header *ph;
	struct pfsync_subheader subh;
	int offset, len, count, rv;

	*mp = NULL;
	V_pfsyncstats.pfsyncs_ipackets++;

	/* Verify that we have a sync interface configured. */
	if (!sc || !sc->sc_sync_if || !V_pf_status.running ||
	    (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done;

	/* verify that the packet came in on the right interface */
	if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
		V_pfsyncstats.pfsyncs_badif++;
		goto done;
	}

	if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1);
	if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
	/* verify that the IP TTL is 255. */
	if (ip->ip_ttl != PFSYNC_DFLTTL) {
		V_pfsyncstats.pfsyncs_badttl++;
		goto done;
	}

	offset = ip->ip_hl << 2;
	if (m->m_pkthdr.len < offset + sizeof(*ph)) {
		V_pfsyncstats.pfsyncs_hdrops++;
		goto done;
	}

	if (offset + sizeof(*ph) > m->m_len) {
		if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
			V_pfsyncstats.pfsyncs_hdrops++;
			return (IPPROTO_DONE);
		}
		ip = mtod(m, struct ip *);
	}
	ph = (struct pfsync_header *)((char *)ip + offset);

	/* verify the version */
	if (ph->version != PFSYNC_VERSION) {
		V_pfsyncstats.pfsyncs_badver++;
		goto done;
	}

	len = ntohs(ph->len) + offset;
	if (m->m_pkthdr.len < len) {
		V_pfsyncstats.pfsyncs_badlen++;
		goto done;
	}

	/* Cheaper to grab this now than having to mess with mbufs later */
	pkt.ip = ip;
	pkt.src = ip->ip_src;
	pkt.flags = 0;

	/*
	 * Trusting pf_chksum during packet processing, as well as
	 * searching the interface name tree, requires holding
	 * PF_RULES_RLOCK().
	 */
	PF_RULES_RLOCK();
	if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
		pkt.flags |= PFSYNC_SI_CKSUM;
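
	/*
	 * A matching MD5 ruleset checksum means both peers run an
	 * identical ruleset, so pfsync_state_import() may safely bind an
	 * imported state to the rule number carried in the message
	 * (PFSYNC_SI_CKSUM); otherwise the state falls back to
	 * V_pf_default_rule.
	 */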

	offset += sizeof(*ph);
	while (offset <= len - sizeof(subh)) {
		m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
		offset += sizeof(subh);

		if (subh.action >= PFSYNC_ACT_MAX) {
			V_pfsyncstats.pfsyncs_badact++;
			PF_RULES_RUNLOCK();
			goto done;
		}

		count = ntohs(subh.count);
		V_pfsyncstats.pfsyncs_iacts[subh.action] += count;
		rv = (*pfsync_acts[subh.action])(&pkt, m, offset, count);
		if (rv == -1) {
			PF_RULES_RUNLOCK();
			return (IPPROTO_DONE);
		}

		offset += rv;
	}
	PF_RULES_RUNLOCK();

done:
	m_freem(m);
	return (IPPROTO_DONE);

pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_clr *clr;
	int len = sizeof(*clr) * count;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	clr = (struct pfsync_clr *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		creatorid = clr[i].creatorid;

		if (clr[i].ifname[0] != '\0' &&
		    pfi_kif_find(clr[i].ifname) == NULL)
			continue;

		for (int i = 0; i <= pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];
			struct pf_state *s;
relock:
			PF_HASHROW_LOCK(ih);
			LIST_FOREACH(s, &ih->states, entry) {
				if (s->creatorid == creatorid) {
					s->state_flags |= PFSTATE_NOSYNC;
					pf_unlink_state(s, PF_ENTER_LOCKED);
					goto relock;
				}
			}
			PF_HASHROW_UNLOCK(ih);
		}
	}

	return (len);

pfsync_in_ins(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_state *sa, *sp;
	int len = sizeof(*sp) * count;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		sp = &sa[i];

		/* Check for invalid values. */
		if (sp->timeout >= PFTM_MAX ||
		    sp->src.state > PF_TCPS_PROXY_DST ||
		    sp->dst.state > PF_TCPS_PROXY_DST ||
		    sp->direction > PF_OUT ||
		    (sp->af != AF_INET && sp->af != AF_INET6)) {
			if (V_pf_status.debug >= PF_DEBUG_MISC)
				printf("%s: invalid value\n", __func__);
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		if (pfsync_state_import(sp, pkt->flags) == ENOMEM)
			/* Drop out, but process the rest of the actions. */
			break;
	}

	return (len);

pfsync_in_iack(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_ins_ack *ia, *iaa;
	int len = count * sizeof(*ia);

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	iaa = (struct pfsync_ins_ack *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		ia = &iaa[i];

		st = pf_find_state_byid(ia->id, ia->creatorid);
		if (st == NULL)
			continue;

		if (st->state_flags & PFSTATE_ACK) {
			PFSYNC_LOCK(V_pfsyncif);
			pfsync_undefer_state(st, 0);
			PFSYNC_UNLOCK(V_pfsyncif);
		}
		PF_STATE_UNLOCK(st);
	}
	/*
	 * XXX this is not yet implemented, but we know the size of the
	 * message so we can skip it.
	 */

	return (count * sizeof(struct pfsync_ins_ack));

pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src,
    struct pfsync_state_peer *dst)
	int sync = 0;

	PF_STATE_LOCK_ASSERT(st);

	/*
	 * The state should never go backwards except
	 * for syn-proxy states. Neither should the
	 * sequence window slide backwards.
	 */
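	/*
	 * For example, a peer still reporting TCPS_ESTABLISHED after we
	 * have reached TCPS_FIN_WAIT_2, or an update whose seqlo lags
	 * the one we track, is stale: it is not applied, and the caller
	 * re-announces our newer copy of the state instead.
	 */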
	if ((st->src.state > src->state &&
	    (st->src.state < PF_TCPS_PROXY_SRC ||
	    src->state >= PF_TCPS_PROXY_SRC)) ||

	    (st->src.state == src->state &&
	    SEQ_GT(st->src.seqlo, ntohl(src->seqlo))))
		sync++;
	else
		pf_state_peer_ntoh(src, &st->src);

	if ((st->dst.state > dst->state) ||

	    (st->dst.state >= TCPS_SYN_SENT &&
	    SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo))))
		sync++;
	else
		pf_state_peer_ntoh(dst, &st->dst);

	return (sync);

pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_state *sa, *sp;
	int len = count * sizeof(*sp);

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		sp = &sa[i];

		/* check for invalid values */
		if (sp->timeout >= PFTM_MAX ||
		    sp->src.state > PF_TCPS_PROXY_DST ||
		    sp->dst.state > PF_TCPS_PROXY_DST) {
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				printf("pfsync_input: PFSYNC_ACT_UPD: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		st = pf_find_state_byid(sp->id, sp->creatorid);
		if (st == NULL) {
			/* insert the update */
			if (pfsync_state_import(sp, 0))
				V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}

		if (st->state_flags & PFSTATE_ACK) {
			PFSYNC_LOCK(sc);
			pfsync_undefer_state(st, 1);
			PFSYNC_UNLOCK(sc);
		}

		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
			sync = pfsync_upd_tcp(st, &sp->src, &sp->dst);
		else {
			sync = 0;

			/*
			 * Non-TCP protocol state machines always go
			 * forwards.
			 */
			if (st->src.state > sp->src.state)
				sync++;
			else
				pf_state_peer_ntoh(&sp->src, &st->src);
			if (st->dst.state > sp->dst.state)
				sync++;
			else
				pf_state_peer_ntoh(&sp->dst, &st->dst);
		}
		if (sync < 2) {
			pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
			pf_state_peer_ntoh(&sp->dst, &st->dst);
			st->expire = time_uptime;
			st->timeout = sp->timeout;
		}
		st->pfsync_time = time_uptime;

		if (sync) {
			V_pfsyncstats.pfsyncs_stale++;

			pfsync_update_state(st);
			PF_STATE_UNLOCK(st);
			pfsync_push(sc);
			continue;
		}
		PF_STATE_UNLOCK(st);
	}

	return (len);

pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_upd_c *ua, *up;
	int len = count * sizeof(*up);

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	ua = (struct pfsync_upd_c *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		up = &ua[i];

		/* check for invalid values */
		if (up->timeout >= PFTM_MAX ||
		    up->src.state > PF_TCPS_PROXY_DST ||
		    up->dst.state > PF_TCPS_PROXY_DST) {
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				printf("pfsync_input: "
				    "PFSYNC_ACT_UPD_C: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		st = pf_find_state_byid(up->id, up->creatorid);
		if (st == NULL) {
			/* We don't have this state. Ask for it. */
			PFSYNC_LOCK(sc);
			pfsync_request_update(up->creatorid, up->id);
			PFSYNC_UNLOCK(sc);
			continue;
		}

		if (st->state_flags & PFSTATE_ACK) {
			PFSYNC_LOCK(sc);
			pfsync_undefer_state(st, 1);
			PFSYNC_UNLOCK(sc);
		}

		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
			sync = pfsync_upd_tcp(st, &up->src, &up->dst);
		else {
			sync = 0;

			/*
			 * Non-TCP protocol state machines always go
			 * forwards.
			 */
			if (st->src.state > up->src.state)
				sync++;
			else
				pf_state_peer_ntoh(&up->src, &st->src);
			if (st->dst.state > up->dst.state)
				sync++;
			else
				pf_state_peer_ntoh(&up->dst, &st->dst);
		}
		if (sync < 2) {
			pfsync_alloc_scrub_memory(&up->dst, &st->dst);
			pf_state_peer_ntoh(&up->dst, &st->dst);
			st->expire = time_uptime;
			st->timeout = up->timeout;
		}
		st->pfsync_time = time_uptime;

		if (sync) {
			V_pfsyncstats.pfsyncs_stale++;

			pfsync_update_state(st);
			PF_STATE_UNLOCK(st);
			pfsync_push(sc);
			continue;
		}
		PF_STATE_UNLOCK(st);
	}

	return (len);

pfsync_in_ureq(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_upd_req *ur, *ura;
	int len = count * sizeof(*ur);
	struct pf_state *st;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	ura = (struct pfsync_upd_req *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		ur = &ura[i];

		if (ur->id == 0 && ur->creatorid == 0)
			pfsync_bulk_start();
		else {
			st = pf_find_state_byid(ur->id, ur->creatorid);
			if (st == NULL) {
				V_pfsyncstats.pfsyncs_badstate++;
				continue;
			}
			if (st->state_flags & PFSTATE_NOSYNC) {
				PF_STATE_UNLOCK(st);
				continue;
			}

			pfsync_update_state_req(st);
			PF_STATE_UNLOCK(st);
		}
	}

	return (len);

pfsync_in_del(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_state *sa, *sp;
	struct pf_state *st;
	int len = count * sizeof(*sp);

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		sp = &sa[i];

		st = pf_find_state_byid(sp->id, sp->creatorid);
		if (st == NULL) {
			V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}
		st->state_flags |= PFSTATE_NOSYNC;
		pf_unlink_state(st, PF_ENTER_LOCKED);
	}

	return (len);

pfsync_in_del_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_del_c *sa, *sp;
	struct pf_state *st;
	int len = count * sizeof(*sp);

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_del_c *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		sp = &sa[i];

		st = pf_find_state_byid(sp->id, sp->creatorid);
		if (st == NULL) {
			V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}

		st->state_flags |= PFSTATE_NOSYNC;
		pf_unlink_state(st, PF_ENTER_LOCKED);
	}

	return (len);

pfsync_in_bus(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_bus *bus;
	int len = count * sizeof(*bus);

	PFSYNC_BLOCK(sc);

	/* If we're not waiting for a bulk update, who cares. */
	if (sc->sc_ureq_sent == 0) {
		PFSYNC_BUNLOCK(sc);
		return (len);
	}

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		PFSYNC_BUNLOCK(sc);
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	bus = (struct pfsync_bus *)(mp->m_data + offp);

	switch (bus->status) {
	case PFSYNC_BUS_START:
		callout_reset(&sc->sc_bulkfail_tmo, 4 * hz +
		    V_pf_limits[PF_LIMIT_STATES].limit /
		    ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) /
		    sizeof(struct pfsync_state)),
		    pfsync_bulk_fail, sc);
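		/*
		 * The failsafe timeout scales with the size of a full
		 * transfer: a fixed 4 seconds plus one tick for every
		 * packet the transfer needs, i.e. the state limit
		 * divided by how many pfsync_state records fit into one
		 * MTU-sized pfsync datagram.
		 */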
		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync: received bulk update start\n");
		break;

	case PFSYNC_BUS_END:
		if (time_uptime - ntohl(bus->endtime) >=
		    sc->sc_ureq_sent) {
			/* that's it, we're happy */
			sc->sc_ureq_sent = 0;
			sc->sc_bulk_tries = 0;
			callout_stop(&sc->sc_bulkfail_tmo);
			if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
				(*carp_demote_adj_p)(-V_pfsync_carp_adj,
				    "pfsync bulk done");
			sc->sc_flags |= PFSYNCF_OK;
			if (V_pf_status.debug >= PF_DEBUG_MISC)
				printf("pfsync: received valid "
				    "bulk update end\n");
		} else {
			if (V_pf_status.debug >= PF_DEBUG_MISC)
				printf("pfsync: received invalid "
				    "bulk update end: bad timestamp\n");
		}
		break;
	}
	PFSYNC_BUNLOCK(sc);

	return (len);

pfsync_in_tdb(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	int len = count * sizeof(struct pfsync_tdb);
	struct pfsync_tdb *tp;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	tp = (struct pfsync_tdb *)(mp->m_data + offp);

	for (i = 0; i < count; i++)
		pfsync_update_net_tdb(&tp[i]);

	return (len);

/* Update an in-kernel tdb. Silently fail if no tdb is found. */
pfsync_update_net_tdb(struct pfsync_tdb *pt)

	/* check for invalid values */
	if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
	    (pt->dst.sa.sa_family != AF_INET &&
	    pt->dst.sa.sa_family != AF_INET6))
		goto bad;

	tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
	if (tdb) {
		pt->rpl = ntohl(pt->rpl);
		pt->cur_bytes = (unsigned long long)be64toh(pt->cur_bytes);

		/* Neither replay nor byte counter should ever decrease. */
		if (pt->rpl < tdb->tdb_rpl ||
		    pt->cur_bytes < tdb->tdb_cur_bytes) {
			goto bad;
		}

		tdb->tdb_rpl = pt->rpl;
		tdb->tdb_cur_bytes = pt->cur_bytes;
	}
	return;

bad:
	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
		    "invalid value\n");
	V_pfsyncstats.pfsyncs_badstate++;

pfsync_in_eof(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	/* check if we are at the right place in the packet */
	if (offset != m->m_pkthdr.len)
		V_pfsyncstats.pfsyncs_badlen++;

	/* we're done. free and let the caller return */
	m_freem(m);
	return (-1);

pfsync_in_error(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	V_pfsyncstats.pfsyncs_badact++;
	return (-1);

pfsyncoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct route *rt)
	m_freem(m);
	return (0);

pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
	struct pfsync_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct pfsyncreq pfsyncr;
	int error;

	switch (cmd) {
	case SIOCSIFFLAGS:
		PFSYNC_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			ifp->if_drv_flags |= IFF_DRV_RUNNING;
			PFSYNC_UNLOCK(sc);
			pfsync_pointers_init();
		} else {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			PFSYNC_UNLOCK(sc);
			pfsync_pointers_uninit();
		}
		break;
	case SIOCSIFMTU:
		if (!sc->sc_sync_if ||
		    ifr->ifr_mtu <= PFSYNC_MINPKT ||
		    ifr->ifr_mtu > sc->sc_sync_if->if_mtu)
			return (EINVAL);
		if (ifr->ifr_mtu < ifp->if_mtu) {
			PFSYNC_LOCK(sc);
			if (sc->sc_len > PFSYNC_MINPKT)
				pfsync_sendout(1);
			PFSYNC_UNLOCK(sc);
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCGETPFSYNC:
		bzero(&pfsyncr, sizeof(pfsyncr));
		PFSYNC_LOCK(sc);
		if (sc->sc_sync_if) {
			strlcpy(pfsyncr.pfsyncr_syncdev,
			    sc->sc_sync_if->if_xname, IFNAMSIZ);
		}
		pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
		pfsyncr.pfsyncr_defer = (PFSYNCF_DEFER ==
		    (sc->sc_flags & PFSYNCF_DEFER));
		PFSYNC_UNLOCK(sc);
		return (copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr)));

	case SIOCSETPFSYNC:
	    {
		struct ip_moptions *imo = &sc->sc_imo;
		struct ifnet *sifp;
		struct ip *ip;
		void *mship = NULL;

		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
			return (error);
		if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))
			return (error);

		if (pfsyncr.pfsyncr_maxupdates > 255)
			return (EINVAL);

		if (pfsyncr.pfsyncr_syncdev[0] == 0)
			sifp = NULL;
		else if ((sifp = ifunit_ref(pfsyncr.pfsyncr_syncdev)) == NULL)
			return (EINVAL);

		if (sifp != NULL && (
		    pfsyncr.pfsyncr_syncpeer.s_addr == 0 ||
		    pfsyncr.pfsyncr_syncpeer.s_addr ==
		    htonl(INADDR_PFSYNC_GROUP)))
			mship = malloc((sizeof(struct in_multi *) *
			    IP_MIN_MEMBERSHIPS), M_PFSYNC, M_WAITOK | M_ZERO);

		PFSYNC_LOCK(sc);
		if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
			sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
		else
			sc->sc_sync_peer.s_addr =
			    pfsyncr.pfsyncr_syncpeer.s_addr;

		sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;
		if (pfsyncr.pfsyncr_defer) {
			sc->sc_flags |= PFSYNCF_DEFER;
			pfsync_defer_ptr = pfsync_defer;
		} else {
			sc->sc_flags &= ~PFSYNCF_DEFER;
			pfsync_defer_ptr = NULL;
		}

		if (sifp == NULL) {
			if (sc->sc_sync_if)
				if_rele(sc->sc_sync_if);
			sc->sc_sync_if = NULL;
			if (imo->imo_membership)
				pfsync_multicast_cleanup(sc);
			PFSYNC_UNLOCK(sc);
			break;
		}

		if (sc->sc_len > PFSYNC_MINPKT &&
		    (sifp->if_mtu < sc->sc_ifp->if_mtu ||
		    (sc->sc_sync_if != NULL &&
		    sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
		    sifp->if_mtu < MCLBYTES - sizeof(struct ip)))
			pfsync_sendout(1);

		if (imo->imo_membership)
			pfsync_multicast_cleanup(sc);

		if (sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
			error = pfsync_multicast_setup(sc, sifp, mship);
			if (error) {
				if_rele(sifp);
				free(mship, M_PFSYNC);
				return (error);
			}
		}
		if (sc->sc_sync_if)
			if_rele(sc->sc_sync_if);
		sc->sc_sync_if = sifp;

		ip = &sc->sc_template;
		bzero(ip, sizeof(*ip));
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(sc->sc_template) >> 2;
		ip->ip_tos = IPTOS_LOWDELAY;
		/* len and id are set later. */
		ip->ip_off = htons(IP_DF);
		ip->ip_ttl = PFSYNC_DFLTTL;
		ip->ip_p = IPPROTO_PFSYNC;
		ip->ip_src.s_addr = INADDR_ANY;
		ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr;

		/* Request a full state table update. */
		if ((sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
			(*carp_demote_adj_p)(V_pfsync_carp_adj,
			    "pfsync bulk start");
		sc->sc_flags &= ~PFSYNCF_OK;
		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync: requesting bulk update\n");
		pfsync_request_update(0, 0);
		PFSYNC_UNLOCK(sc);

		sc->sc_ureq_sent = time_uptime;
		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail,
		    sc);
		break;
	    }
	default:
		return (ENOTTY);
	}

	return (0);

pfsync_out_state(struct pf_state *st, void *buf)
	struct pfsync_state *sp = buf;

	pfsync_state_export(sp, st);

pfsync_out_iack(struct pf_state *st, void *buf)
	struct pfsync_ins_ack *iack = buf;

	iack->id = st->id;
	iack->creatorid = st->creatorid;

pfsync_out_upd_c(struct pf_state *st, void *buf)
	struct pfsync_upd_c *up = buf;

	bzero(up, sizeof(*up));
	up->id = st->id;
	pf_state_peer_hton(&st->src, &up->src);
	pf_state_peer_hton(&st->dst, &up->dst);
	up->creatorid = st->creatorid;
	up->timeout = st->timeout;

pfsync_out_del(struct pf_state *st, void *buf)
	struct pfsync_del_c *dp = buf;

	dp->id = st->id;
	dp->creatorid = st->creatorid;
	st->state_flags |= PFSTATE_NOSYNC;

pfsync_drop(struct pfsync_softc *sc)
	struct pf_state *st, *next;
	struct pfsync_upd_req_item *ur;
	int q;

	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))
			continue;

		TAILQ_FOREACH_SAFE(st, &sc->sc_qs[q], sync_list, next) {
			KASSERT(st->sync_state == q,
				("%s: st->sync_state == q",
				    __func__));
			st->sync_state = PFSYNC_S_NONE;
			pf_release_state(st);
		}
		TAILQ_INIT(&sc->sc_qs[q]);
	}

	while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
		TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
		free(ur, M_PFSYNC);
	}

	sc->sc_plus = NULL;
	sc->sc_len = PFSYNC_MINPKT;

pfsync_sendout(int schedswi)
	struct pfsync_softc *sc = V_pfsyncif;
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;
	struct ip *ip;
	struct pfsync_header *ph;
	struct pfsync_subheader *subh;
	struct pf_state *st, *st_next;
	struct pfsync_upd_req_item *ur;
	int offset, count, q;

	KASSERT(sc != NULL, ("%s: null sc", __func__));
	KASSERT(sc->sc_len > PFSYNC_MINPKT,
	    ("%s: sc_len %zu", __func__, sc->sc_len));
	PFSYNC_LOCK_ASSERT(sc);

	if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
		pfsync_drop(sc);
		return;
	}

	m = m_get2(max_linkhdr + sc->sc_len, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		V_pfsyncstats.pfsyncs_onomem++;
		return;
	}
	m->m_data += max_linkhdr;
	m->m_len = m->m_pkthdr.len = sc->sc_len;

	/* build the ip header */
	ip = (struct ip *)m->m_data;
	bcopy(&sc->sc_template, ip, sizeof(*ip));
	offset = sizeof(*ip);

	ip->ip_len = htons(m->m_pkthdr.len);
	ip->ip_id = htons(ip_randomid());

	/* build the pfsync header */
	ph = (struct pfsync_header *)(m->m_data + offset);
	bzero(ph, sizeof(*ph));
	offset += sizeof(*ph);

	ph->version = PFSYNC_VERSION;
	ph->len = htons(sc->sc_len - sizeof(*ip));
	bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);

	/* walk the queues */
	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))
			continue;

		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		TAILQ_FOREACH_SAFE(st, &sc->sc_qs[q], sync_list, st_next) {
			KASSERT(st->sync_state == q,
				("%s: st->sync_state == q",
				    __func__));
			/*
			 * XXXGL: some write methods do unlocked reads
			 * of state data :(
			 */
			pfsync_qs[q].write(st, m->m_data + offset);
			offset += pfsync_qs[q].len;
			st->sync_state = PFSYNC_S_NONE;
			pf_release_state(st);
			count++;
		}
		TAILQ_INIT(&sc->sc_qs[q]);

		bzero(subh, sizeof(*subh));
		subh->action = pfsync_qs[q].action;
		subh->count = htons(count);
		V_pfsyncstats.pfsyncs_oacts[pfsync_qs[q].action] += count;
	}

	if (!TAILQ_EMPTY(&sc->sc_upd_req_list)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
			TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);

			bcopy(&ur->ur_msg, m->m_data + offset,
			    sizeof(ur->ur_msg));
			offset += sizeof(ur->ur_msg);
			free(ur, M_PFSYNC);
			count++;
		}

		bzero(subh, sizeof(*subh));
		subh->action = PFSYNC_ACT_UPD_REQ;
		subh->count = htons(count);
		V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_UPD_REQ] += count;
	}

	/* has someone built a custom region for us to add? */
	if (sc->sc_plus != NULL) {
		bcopy(sc->sc_plus, m->m_data + offset, sc->sc_pluslen);
		offset += sc->sc_pluslen;
		sc->sc_plus = NULL;
	}

	subh = (struct pfsync_subheader *)(m->m_data + offset);
	offset += sizeof(*subh);

	bzero(subh, sizeof(*subh));
	subh->action = PFSYNC_ACT_EOF;
	subh->count = htons(1);
	V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_EOF]++;

	/* we're done, let's put it on the wire */
	if (ifp->if_bpf) {
		m->m_data += sizeof(*ip);
		m->m_len = m->m_pkthdr.len = sc->sc_len - sizeof(*ip);
		bpf_mtap(ifp->if_bpf, m);
		m->m_data -= sizeof(*ip);
		m->m_len = m->m_pkthdr.len = sc->sc_len;
	}

	if (sc->sc_sync_if == NULL) {
		sc->sc_len = PFSYNC_MINPKT;
		m_freem(m);
		return;
	}

	if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
	if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
	sc->sc_len = PFSYNC_MINPKT;

	if (!_IF_QFULL(&sc->sc_ifp->if_snd))
		_IF_ENQUEUE(&sc->sc_ifp->if_snd, m);
	else {
		m_freem(m);
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OQDROPS, 1);
	}
	if (schedswi)
		swi_sched(V_pfsync_swi_cookie, 0);
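
/*
 * Note that pfsync_sendout() only finishes the headers and leaves the
 * mbuf on sc_ifp's send queue (after an optional BPF tap); the actual
 * ip_output() runs in pfsyncintr() via the software interrupt, so a
 * packet can be built while the pfsync lock is held.
 */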

pfsync_insert_state(struct pf_state *st)
	struct pfsync_softc *sc = V_pfsyncif;

	if (st->state_flags & PFSTATE_NOSYNC)
		return;

	if ((st->rule.ptr->rule_flag & PFRULE_NOSYNC) ||
	    st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
		st->state_flags |= PFSTATE_NOSYNC;
		return;
	}

	KASSERT(st->sync_state == PFSYNC_S_NONE,
		("%s: st->sync_state %u", __func__, st->sync_state));

	PFSYNC_LOCK(sc);
	if (sc->sc_len == PFSYNC_MINPKT)
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout, V_pfsyncif);

	pfsync_q_ins(st, PFSYNC_S_INS, true);
	PFSYNC_UNLOCK(sc);

	st->sync_updates = 0;

pfsync_defer(struct pf_state *st, struct mbuf *m)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_deferral *pd;

	if (m->m_flags & (M_BCAST|M_MCAST))
		return (0);

	PFSYNC_LOCK(sc);

	if (sc == NULL || !(sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) ||
	    !(sc->sc_flags & PFSYNCF_DEFER)) {
		PFSYNC_UNLOCK(sc);
		return (0);
	}

	if (sc->sc_deferred >= 128)
		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);

	pd = malloc(sizeof(*pd), M_PFSYNC, M_NOWAIT);
	if (pd == NULL)
		return (0);
	sc->sc_deferred++;

	m->m_flags |= M_SKIP_FIREWALL;
	st->state_flags |= PFSTATE_ACK;

	pd->pd_sc = sc;
	pd->pd_refs = 0;
	pd->pd_st = st;
	pf_ref_state(st);
	pd->pd_m = m;

	TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, pd_entry);
	callout_init_mtx(&pd->pd_tmo, &sc->sc_mtx, CALLOUT_RETURNUNLOCKED);
	callout_reset(&pd->pd_tmo, 10, pfsync_defer_tmo, pd);
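
	/*
	 * The deferral timer is 10 ticks, i.e. roughly 10 ms at the
	 * usual hz=1000; if no PFSYNC_ACT_INS_ACK arrives by then,
	 * pfsync_defer_tmo() transmits the held packet anyway.
	 */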

pfsync_undefer(struct pfsync_deferral *pd, int drop)
	struct pfsync_softc *sc = pd->pd_sc;
	struct mbuf *m = pd->pd_m;
	struct pf_state *st = pd->pd_st;

	PFSYNC_LOCK_ASSERT(sc);

	TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
	sc->sc_deferred--;
	pd->pd_st->state_flags &= ~PFSTATE_ACK;	/* XXX: locking! */
	free(pd, M_PFSYNC);
	pf_release_state(st);

	if (drop)
		m_freem(m);
	else {
		_IF_ENQUEUE(&sc->sc_ifp->if_snd, m);
		pfsync_push(sc);
	}

pfsync_defer_tmo(void *arg)
	struct pfsync_deferral *pd = arg;
	struct pfsync_softc *sc = pd->pd_sc;
	struct mbuf *m = pd->pd_m;
	struct pf_state *st = pd->pd_st;

	PFSYNC_LOCK_ASSERT(sc);

	CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);

	TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
	sc->sc_deferred--;
	pd->pd_st->state_flags &= ~PFSTATE_ACK;	/* XXX: locking! */
	if (pd->pd_refs == 0)
		free(pd, M_PFSYNC);
	PFSYNC_UNLOCK(sc);

	ip_output(m, NULL, NULL, 0, NULL, NULL);

	pf_release_state(st);

	CURVNET_RESTORE();

pfsync_undefer_state(struct pf_state *st, int drop)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_deferral *pd;

	PFSYNC_LOCK_ASSERT(sc);

	TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) {
		if (pd->pd_st == st) {
			if (callout_stop(&pd->pd_tmo) > 0)
				pfsync_undefer(pd, drop);
			return;
		}
	}

	panic("%s: unable to find deferred state", __func__);

pfsync_update_state(struct pf_state *st)
	struct pfsync_softc *sc = V_pfsyncif;
	bool sync = false, ref = true;

	PF_STATE_LOCK_ASSERT(st);
	PFSYNC_LOCK(sc);

	if (st->state_flags & PFSTATE_ACK)
		pfsync_undefer_state(st, 0);
	if (st->state_flags & PFSTATE_NOSYNC) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st, true);
		PFSYNC_UNLOCK(sc);
		return;
	}

	if (sc->sc_len == PFSYNC_MINPKT)
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout, V_pfsyncif);

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case PFSYNC_S_UPD:
	case PFSYNC_S_INS:
		/* we're already handling it */

		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
			st->sync_updates++;
			if (st->sync_updates >= sc->sc_maxupdates)
				sync = true;
		}
		break;

	case PFSYNC_S_IACK:
		pfsync_q_del(st, false);
		ref = false;
		/* FALLTHROUGH */

	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD_C, ref);
		st->sync_updates = 0;
		break;

	default:
		panic("%s: unexpected sync state %d", __func__, st->sync_state);
	}
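
	/*
	 * Push the packet out immediately when the state exceeded
	 * sc_maxupdates compressed updates (sync) or when it last hit
	 * the wire less than two seconds ago; otherwise the queued
	 * update waits for the one-second pfsync_timeout() flush.
	 */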
	if (sync || (time_uptime - st->pfsync_time) < 2)
		pfsync_push(sc);

	PFSYNC_UNLOCK(sc);

pfsync_request_update(u_int32_t creatorid, u_int64_t id)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_upd_req_item *item;
	size_t nlen = sizeof(struct pfsync_upd_req);

	PFSYNC_LOCK_ASSERT(sc);

	/*
	 * This code does a bit to prevent multiple update requests for
	 * the same state being generated. It searches the current
	 * subheader queue, but it does not look into the queue of
	 * already packed datagrams.
	 */
	TAILQ_FOREACH(item, &sc->sc_upd_req_list, ur_entry)
		if (item->ur_msg.id == id &&
		    item->ur_msg.creatorid == creatorid)
			return;

	item = malloc(sizeof(*item), M_PFSYNC, M_NOWAIT);
	if (item == NULL)
		return; /* XXX stats */

	item->ur_msg.id = id;
	item->ur_msg.creatorid = creatorid;

	if (TAILQ_EMPTY(&sc->sc_upd_req_list))
		nlen += sizeof(struct pfsync_subheader);

	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
		pfsync_sendout(1);

		nlen = sizeof(struct pfsync_subheader) +
		    sizeof(struct pfsync_upd_req);
	}

	TAILQ_INSERT_TAIL(&sc->sc_upd_req_list, item, ur_entry);
	sc->sc_len += nlen;

pfsync_update_state_req(struct pf_state *st)
	struct pfsync_softc *sc = V_pfsyncif;
	bool ref = true;

	PF_STATE_LOCK_ASSERT(st);
	PFSYNC_LOCK(sc);

	if (st->state_flags & PFSTATE_NOSYNC) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st, true);
		PFSYNC_UNLOCK(sc);
		return;
	}

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case PFSYNC_S_IACK:
		pfsync_q_del(st, false);
		ref = false;
		/* FALLTHROUGH */

	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD, ref);
		pfsync_push(sc);
		break;

	case PFSYNC_S_INS:
	case PFSYNC_S_UPD:
	case PFSYNC_S_DEL:
		/* we're already handling it */
		break;

	default:
		panic("%s: unexpected sync state %d", __func__, st->sync_state);
	}

	PFSYNC_UNLOCK(sc);

pfsync_delete_state(struct pf_state *st)
	struct pfsync_softc *sc = V_pfsyncif;
	bool ref = true;

	PFSYNC_LOCK(sc);
	if (st->state_flags & PFSTATE_ACK)
		pfsync_undefer_state(st, 1);
	if (st->state_flags & PFSTATE_NOSYNC) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st, true);
		PFSYNC_UNLOCK(sc);
		return;
	}

	if (sc->sc_len == PFSYNC_MINPKT)
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout, V_pfsyncif);

	switch (st->sync_state) {
	case PFSYNC_S_INS:
		/* We never got to tell the world so just forget about it. */
		pfsync_q_del(st, true);
		break;

	case PFSYNC_S_UPD_C:
	case PFSYNC_S_UPD:
	case PFSYNC_S_IACK:
		pfsync_q_del(st, false);
		ref = false;
		/* FALLTHROUGH */

	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_DEL, ref);
		break;

	default:
		panic("%s: unexpected sync state %d", __func__, st->sync_state);
	}

	PFSYNC_UNLOCK(sc);

pfsync_clear_states(u_int32_t creatorid, const char *ifname)
	struct pfsync_softc *sc = V_pfsyncif;
	struct {
		struct pfsync_subheader subh;
		struct pfsync_clr clr;
	} __packed r;

	bzero(&r, sizeof(r));

	r.subh.action = PFSYNC_ACT_CLR;
	r.subh.count = htons(1);
	V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_CLR]++;

	strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
	r.clr.creatorid = creatorid;

	PFSYNC_LOCK(sc);
	pfsync_send_plus(&r, sizeof(r));
	PFSYNC_UNLOCK(sc);

pfsync_q_ins(struct pf_state *st, int q, bool ref)
	struct pfsync_softc *sc = V_pfsyncif;
	size_t nlen = pfsync_qs[q].len;

	PFSYNC_LOCK_ASSERT(sc);

	KASSERT(st->sync_state == PFSYNC_S_NONE,
		("%s: st->sync_state %u", __func__, st->sync_state));
	KASSERT(sc->sc_len >= PFSYNC_MINPKT, ("pfsync pkt len is too low %zu",
	    sc->sc_len));

	if (TAILQ_EMPTY(&sc->sc_qs[q]))
		nlen += sizeof(struct pfsync_subheader);

	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
		pfsync_sendout(1);

		nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
	}

	sc->sc_len += nlen;
	TAILQ_INSERT_TAIL(&sc->sc_qs[q], st, sync_list);
	st->sync_state = q;
	if (ref)
		pf_ref_state(st);

pfsync_q_del(struct pf_state *st, bool unref)
	struct pfsync_softc *sc = V_pfsyncif;
	int q = st->sync_state;

	PFSYNC_LOCK_ASSERT(sc);
	KASSERT(st->sync_state != PFSYNC_S_NONE,
		("%s: st->sync_state != PFSYNC_S_NONE", __func__));

	sc->sc_len -= pfsync_qs[q].len;
	TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list);
	st->sync_state = PFSYNC_S_NONE;
	if (unref)
		pf_release_state(st);

	if (TAILQ_EMPTY(&sc->sc_qs[q]))
		sc->sc_len -= sizeof(struct pfsync_subheader);

pfsync_bulk_start(void)
	struct pfsync_softc *sc = V_pfsyncif;

	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("pfsync: received bulk update request\n");

	PFSYNC_BLOCK(sc);

	sc->sc_ureq_received = time_uptime;
	sc->sc_bulk_hashid = 0;
	sc->sc_bulk_stateid = 0;
	pfsync_bulk_status(PFSYNC_BUS_START);
	callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc);
	PFSYNC_BUNLOCK(sc);

pfsync_bulk_update(void *arg)
	struct pfsync_softc *sc = arg;
	struct pf_state *s;
	int i, sent = 0;

	PFSYNC_BLOCK_ASSERT(sc);
	CURVNET_SET(sc->sc_ifp->if_vnet);

	/*
	 * Start with the last state from the previous invocation.
	 * It may have gone away; in that case start from the
	 * beginning of its hash slot.
	 */
	s = pf_find_state_byid(sc->sc_bulk_stateid, sc->sc_bulk_creatorid);
	if (s != NULL)
		i = PF_IDHASH(s);
	else
		i = sc->sc_bulk_hashid;

	for (; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

		if (s != NULL)
			PF_HASHROW_ASSERT(ih);
		else {
			PF_HASHROW_LOCK(ih);
			s = LIST_FIRST(&ih->states);
		}

		for (; s; s = LIST_NEXT(s, entry)) {
			if (sent > 1 && (sc->sc_ifp->if_mtu - sc->sc_len) <
			    sizeof(struct pfsync_state)) {
				/* We've filled a packet. */
				sc->sc_bulk_hashid = i;
				sc->sc_bulk_stateid = s->id;
				sc->sc_bulk_creatorid = s->creatorid;
				PF_HASHROW_UNLOCK(ih);
				callout_reset(&sc->sc_bulk_tmo, 1,
				    pfsync_bulk_update, sc);
				goto full;
			}

			if (s->sync_state == PFSYNC_S_NONE &&
			    s->timeout < PFTM_MAX &&
			    s->pfsync_time <= sc->sc_ureq_received) {
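				/*
				 * Resend only states that are not
				 * already queued (PFSYNC_S_NONE), not
				 * about to be purged (timeout <
				 * PFTM_MAX), and not touched since the
				 * bulk request arrived; newer changes
				 * travel via the regular update path.
				 */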
				pfsync_update_state_req(s);
				sent++;
			}
		}
		PF_HASHROW_UNLOCK(ih);
	}

	pfsync_bulk_status(PFSYNC_BUS_END);
full:
	CURVNET_RESTORE();

pfsync_bulk_status(u_int8_t status)
	struct {
		struct pfsync_subheader subh;
		struct pfsync_bus bus;
	} __packed r;

	struct pfsync_softc *sc = V_pfsyncif;

	bzero(&r, sizeof(r));

	r.subh.action = PFSYNC_ACT_BUS;
	r.subh.count = htons(1);
	V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_BUS]++;

	r.bus.creatorid = V_pf_status.hostid;
	r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
	r.bus.status = status;

	PFSYNC_LOCK(sc);
	pfsync_send_plus(&r, sizeof(r));
	PFSYNC_UNLOCK(sc);

pfsync_bulk_fail(void *arg)
	struct pfsync_softc *sc = arg;

	CURVNET_SET(sc->sc_ifp->if_vnet);

	PFSYNC_BLOCK_ASSERT(sc);

	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
		/* Try again */
		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
		    pfsync_bulk_fail, V_pfsyncif);

		PFSYNC_LOCK(sc);
		pfsync_request_update(0, 0);
		PFSYNC_UNLOCK(sc);
	} else {
		/* Pretend like the transfer was ok. */
		sc->sc_ureq_sent = 0;
		sc->sc_bulk_tries = 0;

		if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
			(*carp_demote_adj_p)(-V_pfsync_carp_adj,
			    "pfsync bulk fail");
		sc->sc_flags |= PFSYNCF_OK;

		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync: failed to receive bulk update\n");
	}

	CURVNET_RESTORE();

pfsync_send_plus(void *plus, size_t pluslen)
	struct pfsync_softc *sc = V_pfsyncif;

	PFSYNC_LOCK_ASSERT(sc);

	if (sc->sc_len + pluslen > sc->sc_ifp->if_mtu)
		pfsync_sendout(1);

	sc->sc_plus = plus;
	sc->sc_len += (sc->sc_pluslen = pluslen);

	pfsync_sendout(1);

pfsync_timeout(void *arg)
	struct pfsync_softc *sc = arg;

	CURVNET_SET(sc->sc_ifp->if_vnet);
	PFSYNC_LOCK(sc);
	pfsync_push(sc);
	PFSYNC_UNLOCK(sc);
	CURVNET_RESTORE();

pfsync_push(struct pfsync_softc *sc)

	PFSYNC_LOCK_ASSERT(sc);

	sc->sc_flags |= PFSYNCF_PUSH;
	swi_sched(V_pfsync_swi_cookie, 0);

pfsyncintr(void *arg)
	struct pfsync_softc *sc = arg;
	struct mbuf *m, *n;

	CURVNET_SET(sc->sc_ifp->if_vnet);

	PFSYNC_LOCK(sc);
	if ((sc->sc_flags & PFSYNCF_PUSH) && sc->sc_len > PFSYNC_MINPKT) {
		pfsync_sendout(0);
		sc->sc_flags &= ~PFSYNCF_PUSH;
	}
	_IF_DEQUEUE_ALL(&sc->sc_ifp->if_snd, m);
	PFSYNC_UNLOCK(sc);

	for (; m != NULL; m = n) {
		n = m->m_nextpkt;
		m->m_nextpkt = NULL;

		/*
		 * We distinguish between a deferral packet and our
		 * own pfsync packet based on the M_SKIP_FIREWALL
		 * flag. This is XXX.
		 */
		if (m->m_flags & M_SKIP_FIREWALL)
			ip_output(m, NULL, NULL, 0, NULL, NULL);
		else if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo,
		    NULL) == 0)
			V_pfsyncstats.pfsyncs_opackets++;
		else
			V_pfsyncstats.pfsyncs_oerrors++;
	}
	CURVNET_RESTORE();

pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp, void *mship)
	struct ip_moptions *imo = &sc->sc_imo;
	int error;

	if (!(ifp->if_flags & IFF_MULTICAST))
		return (EADDRNOTAVAIL);

	imo->imo_membership = (struct in_multi **)mship;
	imo->imo_max_memberships = IP_MIN_MEMBERSHIPS;
	imo->imo_multicast_vif = -1;

	if ((error = in_joingroup(ifp, &sc->sc_sync_peer, NULL,
	    &imo->imo_membership[0])) != 0) {
		imo->imo_membership = NULL;
		return (error);
	}
	imo->imo_num_memberships++;
	imo->imo_multicast_ifp = ifp;
	imo->imo_multicast_ttl = PFSYNC_DFLTTL;
	imo->imo_multicast_loop = 0;

	return (0);

pfsync_multicast_cleanup(struct pfsync_softc *sc)
	struct ip_moptions *imo = &sc->sc_imo;

	in_leavegroup(imo->imo_membership[0], NULL);
	free(imo->imo_membership, M_PFSYNC);
	imo->imo_membership = NULL;
	imo->imo_multicast_ifp = NULL;

extern struct domain inetdomain;
static struct protosw in_pfsync_protosw = {
	.pr_type =		SOCK_RAW,
	.pr_domain =		&inetdomain,
	.pr_protocol =		IPPROTO_PFSYNC,
	.pr_flags =		PR_ATOMIC|PR_ADDR,
	.pr_input =		pfsync_input,
	.pr_output =		rip_output,
	.pr_ctloutput =		rip_ctloutput,
	.pr_usrreqs =		&rip_usrreqs
};
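
/*
 * pfsync speaks its own IP protocol (IPPROTO_PFSYNC, 240): this protosw
 * hooks pfsync_input() into the inet domain for the receive side, while
 * output never goes through the socket path; packets are built in
 * pfsync_sendout() and pushed via ip_output() from pfsyncintr().
 */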

pfsync_pointers_init()
	PF_RULES_WLOCK();
	pfsync_state_import_ptr = pfsync_state_import;
	pfsync_insert_state_ptr = pfsync_insert_state;
	pfsync_update_state_ptr = pfsync_update_state;
	pfsync_delete_state_ptr = pfsync_delete_state;
	pfsync_clear_states_ptr = pfsync_clear_states;
	pfsync_defer_ptr = pfsync_defer;
	PF_RULES_WUNLOCK();

pfsync_pointers_uninit()
	PF_RULES_WLOCK();
	pfsync_state_import_ptr = NULL;
	pfsync_insert_state_ptr = NULL;
	pfsync_update_state_ptr = NULL;
	pfsync_delete_state_ptr = NULL;
	pfsync_clear_states_ptr = NULL;
	pfsync_defer_ptr = NULL;
	PF_RULES_WUNLOCK();

vnet_pfsync_init(const void *unused __unused)
	int error;

	V_pfsync_cloner = if_clone_simple(pfsyncname,
	    pfsync_clone_create, pfsync_clone_destroy, 1);
	error = swi_add(NULL, pfsyncname, pfsyncintr, V_pfsyncif,
	    SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie);
	if (error) {
		if_clone_detach(V_pfsync_cloner);
		log(LOG_INFO, "swi_add() failed in %s\n", __func__);
	}
VNET_SYSINIT(vnet_pfsync_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY,
    vnet_pfsync_init, NULL);

vnet_pfsync_uninit(const void *unused __unused)
	if_clone_detach(V_pfsync_cloner);
	swi_remove(V_pfsync_swi_cookie);

/*
 * Detach after pf is gone; otherwise we might touch pfsync memory
 * from within pf after freeing pfsync.
 */
VNET_SYSUNINIT(vnet_pfsync_uninit, SI_SUB_INIT_IF, SI_ORDER_SECOND,
    vnet_pfsync_uninit, NULL);

pfsync_init(void)
	int error;

	error = pf_proto_register(PF_INET, &in_pfsync_protosw);
	if (error)
		return (error);
	error = ipproto_register(IPPROTO_PFSYNC);
	if (error) {
		pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW);
		return (error);
	}

	pfsync_pointers_init();

pfsync_uninit(void)
	pfsync_pointers_uninit();

	ipproto_unregister(IPPROTO_PFSYNC);
	pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW);

pfsync_modevent(module_t mod, int type, void *data)
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pfsync_init();
		break;
	case MOD_QUIESCE:
		/*
		 * Module should not be unloaded due to race conditions.
		 */
		error = EBUSY;
		break;
	case MOD_UNLOAD:
		pfsync_uninit();
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);

static moduledata_t pfsync_mod = {
	pfsyncname,
	pfsync_modevent,
	0
};

#define PFSYNC_MODVER 1

/* Stay on FIREWALL as we depend on pf being initialized and on inetdomain. */
DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY);
MODULE_VERSION(pfsync, PFSYNC_MODVER);
MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);