/*	$OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $	*/

/*
 * Copyright (c) 2002 Michael Shalayeff
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Revisions picked from OpenBSD after revision 1.110 import:
 * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates
 * 1.120, 1.175 - use monotonic time_uptime
 * 1.122 - reduce number of updates for non-TCP sessions
 * 1.146 - bzero() mbuf before sparsely filling it with data
 * 1.170 - SIOCSIFMTU checks
 * 1.126, 1.142 - deferred packets processing
 * 1.173 - correct expire time processing
 */
#ifdef __FreeBSD__
#include "opt_inet6.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#endif /* __FreeBSD__ */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/ioctl.h>
#include <sys/timeout.h>
#include <sys/sysctl.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/nd6.h>
#include <netinet/ip_carp.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>

#include "bpfilter.h"

#define PFSYNC_MINPKT ( \
	sizeof(struct ip) + \
	sizeof(struct pfsync_header) + \
	sizeof(struct pfsync_subheader) + \
	sizeof(struct pfsync_eof))
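
/*
 * Note on the wire format (as assembled by pfsync_sendout() below): a
 * pfsync packet is an IP datagram carrying a pfsync_header, followed by
 * one pfsync_subheader per message type with `count' fixed-size messages
 * after each, and a final PFSYNC_ACT_EOF subheader.  PFSYNC_MINPKT is
 * therefore the size of an otherwise empty packet.
 */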
int	pfsync_input_hmac(struct mbuf *, int);

int	pfsync_upd_tcp(struct pf_state *, struct pfsync_state_peer *,
	    struct pfsync_state_peer *);

int	pfsync_in_clr(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_ins(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_iack(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_upd(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_upd_c(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_ureq(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_del(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_del_c(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_bus(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_tdb(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_eof(struct pfsync_pkt *, struct mbuf *, int, int);

int	pfsync_in_error(struct pfsync_pkt *, struct mbuf *, int, int);

int	(*pfsync_acts[])(struct pfsync_pkt *, struct mbuf *, int, int) = {
	pfsync_in_clr,			/* PFSYNC_ACT_CLR */
	pfsync_in_ins,			/* PFSYNC_ACT_INS */
	pfsync_in_iack,			/* PFSYNC_ACT_INS_ACK */
	pfsync_in_upd,			/* PFSYNC_ACT_UPD */
	pfsync_in_upd_c,		/* PFSYNC_ACT_UPD_C */
	pfsync_in_ureq,			/* PFSYNC_ACT_UPD_REQ */
	pfsync_in_del,			/* PFSYNC_ACT_DEL */
	pfsync_in_del_c,		/* PFSYNC_ACT_DEL_C */
	pfsync_in_error,		/* PFSYNC_ACT_INS_F */
	pfsync_in_error,		/* PFSYNC_ACT_DEL_F */
	pfsync_in_bus,			/* PFSYNC_ACT_BUS */
	pfsync_in_tdb,			/* PFSYNC_ACT_TDB */
	pfsync_in_eof			/* PFSYNC_ACT_EOF */
};
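
/*
 * pfsync_input() walks an incoming packet and dispatches each subheader
 * through this table, indexed by the subheader action code.  The
 * fragment actions (INS_F/DEL_F) are not implemented and deliberately
 * land on pfsync_in_error.
 */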
struct pfsync_q {
	int		(*write)(struct pf_state *, struct mbuf *, int);
	size_t		 len;
	u_int8_t	 action;
};

/* we have one of these for every PFSYNC_S_ */
int	pfsync_out_state(struct pf_state *, struct mbuf *, int);
int	pfsync_out_iack(struct pf_state *, struct mbuf *, int);
int	pfsync_out_upd_c(struct pf_state *, struct mbuf *, int);
int	pfsync_out_del(struct pf_state *, struct mbuf *, int);
struct pfsync_q pfsync_qs[] = {
	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_INS },
	{ pfsync_out_iack,  sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_UPD },
	{ pfsync_out_upd_c, sizeof(struct pfsync_upd_c),   PFSYNC_ACT_UPD_C },
	{ pfsync_out_del,   sizeof(struct pfsync_del_c),   PFSYNC_ACT_DEL_C }
};
void	pfsync_q_ins(struct pf_state *, int);
void	pfsync_q_del(struct pf_state *);

struct pfsync_upd_req_item {
	TAILQ_ENTRY(pfsync_upd_req_item)	 ur_entry;
	struct pfsync_upd_req			 ur_msg;
};
TAILQ_HEAD(pfsync_upd_reqs, pfsync_upd_req_item);
struct pfsync_deferral {
	TAILQ_ENTRY(pfsync_deferral)		 pd_entry;
	struct pf_state				*pd_st;
	struct mbuf				*pd_m;
#ifdef __FreeBSD__
	struct callout				 pd_tmo;
#else
	struct timeout				 pd_tmo;
#endif
};
TAILQ_HEAD(pfsync_deferrals, pfsync_deferral);

#define PFSYNC_PLSIZE	MAX(sizeof(struct pfsync_upd_req_item), \
	    sizeof(struct pfsync_deferral))
int	pfsync_out_tdb(struct tdb *, struct mbuf *, int);

struct pfsync_softc {
#ifdef __FreeBSD__
	struct ifnet		*sc_ifp;
#endif
	struct ifnet		*sc_sync_if;

	struct ip_moptions	 sc_imo;

	struct in_addr		 sc_sync_peer;
	u_int8_t		 sc_maxupdates;

	struct ip		 sc_template;

	struct pf_state_queue	 sc_qs[PFSYNC_S_COUNT];

	struct pfsync_upd_reqs	 sc_upd_req_list;

	struct pfsync_deferrals	 sc_deferrals;

	u_int32_t		 sc_ureq_sent;
#ifdef __FreeBSD__
	struct callout		 sc_bulkfail_tmo;
#else
	struct timeout		 sc_bulkfail_tmo;
#endif

	u_int32_t		 sc_ureq_received;
	struct pf_state		*sc_bulk_next;
	struct pf_state		*sc_bulk_last;
#ifdef __FreeBSD__
	struct callout		 sc_bulk_tmo;
#else
	struct timeout		 sc_bulk_tmo;
#endif

	TAILQ_HEAD(, tdb)	 sc_tdb_q;

#ifdef __FreeBSD__
	struct callout		 sc_tmo;
#else
	struct timeout		 sc_tmo;
#endif
};
static MALLOC_DEFINE(M_PFSYNC, "pfsync", "pfsync data");
static VNET_DEFINE(struct pfsync_softc *, pfsyncif) = NULL;
#define	V_pfsyncif		VNET(pfsyncif)
static VNET_DEFINE(void *, pfsync_swi_cookie) = NULL;
#define	V_pfsync_swi_cookie	VNET(pfsync_swi_cookie)
static VNET_DEFINE(struct pfsyncstats, pfsyncstats);
#define	V_pfsyncstats		VNET(pfsyncstats)
static VNET_DEFINE(int, pfsync_carp_adj) = CARP_MAXSKEW;
#define	V_pfsync_carp_adj	VNET(pfsync_carp_adj)
static void	pfsyncintr(void *);
static int	pfsync_multicast_setup(struct pfsync_softc *);
static void	pfsync_multicast_cleanup(struct pfsync_softc *);
static int	pfsync_init(void);
static void	pfsync_uninit(void);
static void	pfsync_sendout1(int);

#define	schednetisr(NETISR_PFSYNC)	swi_sched(V_pfsync_swi_cookie, 0)

SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW, 0, "PFSYNC");
SYSCTL_VNET_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_RW,
    &VNET_NAME(pfsyncstats), pfsyncstats,
    "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_RW,
    &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");
struct pfsync_softc	*pfsyncif = NULL;
struct pfsyncstats	 pfsyncstats;
#define	V_pfsyncstats	 pfsyncstats
void	pfsyncattach(int);
#ifdef __FreeBSD__
int	pfsync_clone_create(struct if_clone *, int, caddr_t);
void	pfsync_clone_destroy(struct ifnet *);
#else
int	pfsync_clone_create(struct if_clone *, int);
int	pfsync_clone_destroy(struct ifnet *);
#endif
int	pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
	    struct pf_state_peer *);
void	pfsync_update_net_tdb(struct pfsync_tdb *);
int	pfsyncoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
	    struct rtentry *);
int	pfsyncioctl(struct ifnet *, u_long, caddr_t);
void	pfsyncstart(struct ifnet *);

struct mbuf *pfsync_if_dequeue(struct ifnet *);

void	pfsync_deferred(struct pf_state *, int);
void	pfsync_undefer(struct pfsync_deferral *, int);
void	pfsync_defer_tmo(void *);

void	pfsync_request_update(u_int32_t, u_int64_t);
void	pfsync_update_state_req(struct pf_state *);

void	pfsync_drop(struct pfsync_softc *);
void	pfsync_sendout(void);
void	pfsync_send_plus(void *, size_t);
void	pfsync_timeout(void *);
void	pfsync_tdb_timeout(void *);

void	pfsync_bulk_start(void);
void	pfsync_bulk_status(u_int8_t);
void	pfsync_bulk_update(void *);
void	pfsync_bulk_fail(void *);

#ifdef __FreeBSD__
#define	betoh64		(unsigned long long)be64toh
#define	timeout_del	callout_stop
#endif

#define	PFSYNC_MAX_BULKTRIES	12

#ifdef __FreeBSD__
VNET_DEFINE(struct ifc_simple_data, pfsync_cloner_data);
VNET_DEFINE(struct if_clone, pfsync_cloner);
#define	V_pfsync_cloner_data	VNET(pfsync_cloner_data)
#define	V_pfsync_cloner		VNET(pfsync_cloner)
IFC_SIMPLE_DECLARE(pfsync, 1);
#else
struct if_clone	pfsync_cloner =
    IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create, pfsync_clone_destroy);
#endif
void
pfsyncattach(int npfsync)
{
	if_clone_attach(&pfsync_cloner);
}

#ifdef __FreeBSD__
int
pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
#else
int
pfsync_clone_create(struct if_clone *ifc, int unit)
#endif
{
	struct pfsync_softc *sc;
	struct ifnet *ifp;
	int q;

#ifdef __FreeBSD__
	sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO);
	sc->pfsync_sync_ok = 1;
#else
	sc = malloc(sizeof(*pfsyncif), M_DEVBUF, M_NOWAIT | M_ZERO);
#endif

	for (q = 0; q < PFSYNC_S_COUNT; q++)
		TAILQ_INIT(&sc->sc_qs[q]);

#ifdef __FreeBSD__
	sc->sc_pool = uma_zcreate("pfsync", PFSYNC_PLSIZE, NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
#else
	pool_init(&sc->sc_pool, PFSYNC_PLSIZE, 0, 0, 0, "pfsync", NULL);
#endif

	TAILQ_INIT(&sc->sc_upd_req_list);
	TAILQ_INIT(&sc->sc_deferrals);

	TAILQ_INIT(&sc->sc_tdb_q);

	sc->sc_len = PFSYNC_MINPKT;
	sc->sc_maxupdates = 128;

	sc->sc_imo.imo_membership = (struct in_multi **)malloc(
	    (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_IPMOPTS,
	    M_WAITOK | M_ZERO);
	sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;

	ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);
	if (ifp == NULL) {
		uma_zdestroy(sc->sc_pool);
		free(sc->sc_imo.imo_membership, M_IPMOPTS);
		free(sc, M_PFSYNC);
		return (ENOSPC);
	}

#ifdef __FreeBSD__
	if_initname(ifp, ifc->ifc_name, unit);
#else
	snprintf(ifp->if_xname, sizeof ifp->if_xname, "pfsync%d", unit);
#endif
	ifp->if_softc = sc;
	ifp->if_ioctl = pfsyncioctl;
	ifp->if_output = pfsyncoutput;
	ifp->if_start = pfsyncstart;
	ifp->if_type = IFT_PFSYNC;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_hdrlen = sizeof(struct pfsync_header);
	ifp->if_mtu = ETHERMTU;
#ifdef __FreeBSD__
	callout_init(&sc->sc_tmo, CALLOUT_MPSAFE);
	callout_init_mtx(&sc->sc_bulk_tmo, &pf_task_mtx, 0);
	callout_init(&sc->sc_bulkfail_tmo, CALLOUT_MPSAFE);
#else
	timeout_set(&sc->sc_tmo, pfsync_timeout, sc);
	timeout_set(&sc->sc_bulk_tmo, pfsync_bulk_update, sc);
	timeout_set(&sc->sc_bulkfail_tmo, pfsync_bulk_fail, sc);
#endif

	if_attach(ifp);
	if_addgroup(ifp, "carp");

#ifdef __FreeBSD__
	bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
#else
	bpfattach(&sc->sc_if.if_bpf, ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
#endif

#ifdef __FreeBSD__
	V_pfsyncif = sc;
#else
	pfsyncif = sc;
#endif

	return (0);
}
void
pfsync_clone_destroy(struct ifnet *ifp)
{
	struct pfsync_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_bulkfail_tmo);
	timeout_del(&sc->sc_bulk_tmo);
	timeout_del(&sc->sc_tmo);

#ifdef __FreeBSD__
	if (!sc->pfsync_sync_ok && carp_demote_adj_p)
		(*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy");
#else
	if (!pfsync_sync_ok)
		carp_group_demote_adj(&sc->sc_if, -1);
#endif

	while (sc->sc_deferred > 0)
		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);

#ifdef __FreeBSD__
	UMA_DESTROY(sc->sc_pool);
#else
	pool_destroy(&sc->sc_pool);
#endif

#ifdef __FreeBSD__
	if (sc->sc_imo.imo_membership)
		pfsync_multicast_cleanup(sc);
#else
	free(sc->sc_imo.imo_membership, M_IPMOPTS);
#endif
}
struct mbuf *
pfsync_if_dequeue(struct ifnet *ifp)
{
	struct mbuf *m;

#ifdef __FreeBSD__
	IF_LOCK(&ifp->if_snd);
	_IF_DROP(&ifp->if_snd);
	_IF_DEQUEUE(&ifp->if_snd, m);
	IF_UNLOCK(&ifp->if_snd);
#else
	IF_DEQUEUE(&ifp->if_snd, m);
#endif

	return (m);
}

/*
 * Start output on the pfsync interface.
 */
void
pfsyncstart(struct ifnet *ifp)
{
	struct mbuf *m;

	while ((m = pfsync_if_dequeue(ifp)) != NULL) {
		IF_DROP(&ifp->if_snd);
		m_freem(m);
	}
}
int
pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
    struct pf_state_peer *d)
{
	if (s->scrub.scrub_flag && d->scrub == NULL) {
#ifdef __FreeBSD__
		d->scrub = pool_get(&V_pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
#else
		d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
#endif
		if (d->scrub == NULL)
			return (ENOMEM);
	}

	return (0);
}
void
pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
{
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_uptime - st->creation);
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_second)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_second);

	sp->direction = st->direction;
	sp->timeout = st->timeout;
	sp->state_flags = st->state_flags;
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	bcopy(&st->id, &sp->id, sizeof(sp->id));
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	pf_state_counter_hton(st->packets[0], sp->packets[0]);
	pf_state_counter_hton(st->packets[1], sp->packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
}
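
/*
 * All multi-byte fields exported above are in network byte order, and
 * sp->expire carries the seconds remaining until expiry rather than an
 * absolute timestamp, so a peer with an unsynchronized clock can still
 * age the imported state correctly.
 */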
int
pfsync_state_import(struct pfsync_state *sp, u_int8_t flags)
{
	struct pf_state	*st = NULL;
	struct pf_state_key *skw = NULL, *sks = NULL;
	struct pf_rule *r = NULL;
	struct pfi_kif *kif;
	int pool_flags;
	int error;

#ifdef __FreeBSD__
	if (sp->creatorid == 0 && V_pf_status.debug >= PF_DEBUG_MISC) {
#else
	if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) {
#endif
		printf("pfsync_state_import: invalid creator id:"
		    " %08x\n", ntohl(sp->creatorid));
		return (EINVAL);
	}

	if ((kif = pfi_kif_get(sp->ifname)) == NULL) {
#ifdef __FreeBSD__
		if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
		if (pf_status.debug >= PF_DEBUG_MISC)
#endif
			printf("pfsync_state_import: "
			    "unknown interface: %s\n", sp->ifname);
		if (flags & PFSYNC_SI_IOCTL)
			return (EINVAL);
		return (0);	/* skip this state */
	}

	/*
	 * If the ruleset checksums match or the state is coming from the ioctl,
	 * it's safe to associate the state with the rule of that number.
	 */
	if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
	    (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) <
	    pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
		r = pf_main_ruleset.rules[
		    PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
	else
#ifdef __FreeBSD__
		r = &V_pf_default_rule;
#else
		r = &pf_default_rule;
#endif

	if ((r->max_states && r->states_cur >= r->max_states))
		goto cleanup;

#ifdef __FreeBSD__
	if (flags & PFSYNC_SI_IOCTL)
		pool_flags = PR_WAITOK | PR_ZERO;
	else
		pool_flags = PR_NOWAIT | PR_ZERO;

	if ((st = pool_get(&V_pf_state_pl, pool_flags)) == NULL)
		goto cleanup;
#else
	if (flags & PFSYNC_SI_IOCTL)
		pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO;
	else
		pool_flags = PR_LIMITFAIL | PR_ZERO;

	if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL)
		goto cleanup;
#endif

	if ((skw = pf_alloc_state_key(pool_flags)) == NULL)
		goto cleanup;

	if (PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0],
	    &sp->key[PF_SK_STACK].addr[0], sp->af) ||
	    PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1],
	    &sp->key[PF_SK_STACK].addr[1], sp->af) ||
	    sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] ||
	    sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1]) {
		if ((sks = pf_alloc_state_key(pool_flags)) == NULL)
			goto cleanup;
	} else
		sks = skw;

	/* allocate memory for scrub info */
	if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
	    pfsync_alloc_scrub_memory(&sp->dst, &st->dst))
		goto cleanup;

	/* copy to state key(s) */
	skw->addr[0] = sp->key[PF_SK_WIRE].addr[0];
	skw->addr[1] = sp->key[PF_SK_WIRE].addr[1];
	skw->port[0] = sp->key[PF_SK_WIRE].port[0];
	skw->port[1] = sp->key[PF_SK_WIRE].port[1];
	skw->proto = sp->proto;
	skw->af = sp->af;
	if (sks != skw) {
		sks->addr[0] = sp->key[PF_SK_STACK].addr[0];
		sks->addr[1] = sp->key[PF_SK_STACK].addr[1];
		sks->port[0] = sp->key[PF_SK_STACK].port[0];
		sks->port[1] = sp->key[PF_SK_STACK].port[1];
		sks->proto = sp->proto;
		sks->af = sp->af;
	}

	/* copy to state */
	bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr));
	st->creation = time_uptime - ntohl(sp->creation);
	st->expire = time_second;
	if (sp->expire) {
		u_int32_t timeout;

		timeout = r->timeout[sp->timeout];
		if (!timeout)
			timeout = pf_default_rule.timeout[sp->timeout];

		/* sp->expire may have been adaptively scaled by export. */
		st->expire -= timeout - ntohl(sp->expire);
	}

	st->direction = sp->direction;
	st->timeout = sp->timeout;
	st->state_flags = sp->state_flags;

	bcopy(sp->id, &st->id, sizeof(st->id));
	st->creatorid = sp->creatorid;
	pf_state_peer_ntoh(&sp->src, &st->src);
	pf_state_peer_ntoh(&sp->dst, &st->dst);

	st->rule.ptr = r;
	st->nat_rule.ptr = NULL;
	st->anchor.ptr = NULL;

	st->pfsync_time = time_uptime;
	st->sync_state = PFSYNC_S_NONE;

	/* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
	r->states_cur++;
	r->states_tot++;

	if (!ISSET(flags, PFSYNC_SI_IOCTL))
		SET(st->state_flags, PFSTATE_NOSYNC);

	if ((error = pf_state_insert(kif, skw, sks, st)) != 0) {
		/* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */
		r->states_cur--;
		goto cleanup_state;
	}

	if (!ISSET(flags, PFSYNC_SI_IOCTL)) {
		CLR(st->state_flags, PFSTATE_NOSYNC);
		if (ISSET(st->state_flags, PFSTATE_ACK)) {
			pfsync_q_ins(st, PFSYNC_S_IACK);
			schednetisr(NETISR_PFSYNC);
		}
	}
	CLR(st->state_flags, PFSTATE_ACK);

	return (0);

cleanup:
	error = ENOMEM;
	if (skw == sks)
		sks = NULL;
#ifdef __FreeBSD__
	if (skw != NULL)
		pool_put(&V_pf_state_key_pl, skw);
	if (sks != NULL)
		pool_put(&V_pf_state_key_pl, sks);
#else
	if (skw != NULL)
		pool_put(&pf_state_key_pl, skw);
	if (sks != NULL)
		pool_put(&pf_state_key_pl, sks);
#endif

cleanup_state:	/* pf_state_insert frees the state keys */
	if (st) {
#ifdef __FreeBSD__
		if (st->dst.scrub)
			pool_put(&V_pf_state_scrub_pl, st->dst.scrub);
		if (st->src.scrub)
			pool_put(&V_pf_state_scrub_pl, st->src.scrub);
		pool_put(&V_pf_state_pl, st);
#else
		if (st->dst.scrub)
			pool_put(&pf_state_scrub_pl, st->dst.scrub);
		if (st->src.scrub)
			pool_put(&pf_state_scrub_pl, st->src.scrub);
		pool_put(&pf_state_pl, st);
#endif
	}
	return (error);
}
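
/*
 * pfsync_state_import() is also reached through the pf ioctl path
 * (PFSYNC_SI_IOCTL), which is why allocations may sleep there but must
 * not when called from the packet parsers; states created from the wire
 * are flagged PFSTATE_NOSYNC until insertion succeeds, so inserting a
 * peer's state does not immediately echo it back to that peer.
 */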
#ifdef __FreeBSD__
void
pfsync_input(struct mbuf *m, __unused int off)
#else
void
pfsync_input(struct mbuf *m, ...)
#endif
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_pkt pkt;
	struct ip *ip = mtod(m, struct ip *);
	struct pfsync_header *ph;
	struct pfsync_subheader subh;
	int offset;
	int rv;

	V_pfsyncstats.pfsyncs_ipackets++;

	/* verify that we have a sync interface configured */
#ifdef __FreeBSD__
	if (!sc || !sc->sc_sync_if || !V_pf_status.running)
#else
	if (!sc || !sc->sc_sync_if || !pf_status.running)
#endif
		goto done;

	/* verify that the packet came in on the right interface */
	if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
		V_pfsyncstats.pfsyncs_badif++;
		goto done;
	}

#ifdef __FreeBSD__
	sc->sc_ifp->if_ipackets++;
	sc->sc_ifp->if_ibytes += m->m_pkthdr.len;
#else
	sc->sc_if.if_ipackets++;
	sc->sc_if.if_ibytes += m->m_pkthdr.len;
#endif

	/* verify that the IP TTL is 255. */
	if (ip->ip_ttl != PFSYNC_DFLTTL) {
		V_pfsyncstats.pfsyncs_badttl++;
		goto done;
	}

	offset = ip->ip_hl << 2;
	if (m->m_pkthdr.len < offset + sizeof(*ph)) {
		V_pfsyncstats.pfsyncs_hdrops++;
		goto done;
	}

	if (offset + sizeof(*ph) > m->m_len) {
		if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
			V_pfsyncstats.pfsyncs_hdrops++;
			return;
		}
		ip = mtod(m, struct ip *);
	}
	ph = (struct pfsync_header *)((char *)ip + offset);

	/* verify the version */
	if (ph->version != PFSYNC_VERSION) {
		V_pfsyncstats.pfsyncs_badver++;
		goto done;
	}

#if 0
	if (pfsync_input_hmac(m, offset) != 0) {
		/* XXX stats */
		goto done;
	}
#endif

	/* Cheaper to grab this now than having to mess with mbufs later */
	pkt.ip = ip;
	pkt.src = ip->ip_src;
	pkt.flags = 0;

#ifdef __FreeBSD__
	if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
#else
	if (!bcmp(&ph->pfcksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
#endif
		pkt.flags |= PFSYNC_SI_CKSUM;

	offset += sizeof(*ph);
	while (offset <= m->m_pkthdr.len - sizeof(subh)) {
		m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
		offset += sizeof(subh);

		if (subh.action >= PFSYNC_ACT_MAX) {
			V_pfsyncstats.pfsyncs_badact++;
			goto done;
		}

		rv = (*pfsync_acts[subh.action])(&pkt, m, offset,
		    ntohs(subh.count));
		if (rv == -1)
			return;

		offset += rv;
	}

done:
	m_freem(m);
}
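
/*
 * By convention each action handler returns the number of payload bytes
 * it consumed, so the loop above can step to the next subheader, or -1
 * once it has disposed of the mbuf itself (m_pulldown() failure, or the
 * EOF action), which ends parsing outright.
 */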
int
pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_clr *clr;
	struct mbuf *mp;
	int len = sizeof(*clr) * count;
	int i, offp;

	struct pf_state *st, *nexts;
	struct pf_state_key *sk, *nextsk;
	struct pf_state_item *si;
	u_int32_t creatorid;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	clr = (struct pfsync_clr *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		creatorid = clr[i].creatorid;

		if (clr[i].ifname[0] == '\0') {
#ifdef __FreeBSD__
			for (st = RB_MIN(pf_state_tree_id, &V_tree_id);
			    st; st = nexts) {
				nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, st);
#else
			for (st = RB_MIN(pf_state_tree_id, &tree_id);
			    st; st = nexts) {
				nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
#endif
				if (st->creatorid == creatorid) {
					SET(st->state_flags, PFSTATE_NOSYNC);
					pf_unlink_state(st);
				}
			}
		} else {
			if (pfi_kif_get(clr[i].ifname) == NULL)
				continue;

#ifdef __FreeBSD__
			for (sk = RB_MIN(pf_state_tree, &V_pf_statetbl);
			    sk; sk = nextsk) {
				nextsk = RB_NEXT(pf_state_tree,
				    &V_pf_statetbl, sk);
#else
			for (sk = RB_MIN(pf_state_tree, &pf_statetbl);
			    sk; sk = nextsk) {
				nextsk = RB_NEXT(pf_state_tree,
				    &pf_statetbl, sk);
#endif
				TAILQ_FOREACH(si, &sk->states, entry) {
					if (si->s->creatorid == creatorid) {
						SET(si->s->state_flags,
						    PFSTATE_NOSYNC);
						pf_unlink_state(si->s);
					}
				}
			}
		}
	}

	return (len);
}
int
pfsync_in_ins(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_state *sa, *sp;
	struct mbuf *mp;
	int len = sizeof(*sp) * count;
	int i, offp;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		sp = &sa[i];

		/* check for invalid values */
		if (sp->timeout >= PFTM_MAX ||
		    sp->src.state > PF_TCPS_PROXY_DST ||
		    sp->dst.state > PF_TCPS_PROXY_DST ||
		    sp->direction > PF_OUT ||
		    (sp->af != AF_INET && sp->af != AF_INET6)) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync_input: PFSYNC5_ACT_INS: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		if (pfsync_state_import(sp, pkt->flags) == ENOMEM) {
			/* drop out, but process the rest of the actions */
			break;
		}
	}

	return (len);
}
int
pfsync_in_iack(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_ins_ack *ia, *iaa;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	struct mbuf *mp;
	int len = count * sizeof(*ia);
	int offp, i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	iaa = (struct pfsync_ins_ack *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		ia = &iaa[i];

		bcopy(&ia->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ia->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL)
			continue;

		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 0);
	}

	/*
	 * XXX this is not yet implemented, but we know the size of the
	 * message so we can skip it.
	 */

	return (count * sizeof(struct pfsync_ins_ack));
}
int
pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src,
    struct pfsync_state_peer *dst)
{
	int sfail = 0;

	/*
	 * The state should never go backwards except
	 * for syn-proxy states.  Neither should the
	 * sequence window slide backwards.
	 */
	if (st->src.state > src->state &&
	    (st->src.state < PF_TCPS_PROXY_SRC ||
	    src->state >= PF_TCPS_PROXY_SRC))
		sfail = 1;
	else if (SEQ_GT(st->src.seqlo, ntohl(src->seqlo)))
		sfail = 3;
	else if (st->dst.state > dst->state) {
		/* There might still be useful
		 * information about the src state here,
		 * so import that part of the update,
		 * then "fail" so we send the updated
		 * state back to the peer who is missing
		 * what we know. */
		pf_state_peer_ntoh(src, &st->src);
		/* XXX do anything with timeouts? */
		sfail = 7;
	} else if (st->dst.state >= TCPS_SYN_SENT &&
	    SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo)))
		sfail = 4;

	return (sfail);
}
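
/*
 * A non-zero sfail reason from pfsync_upd_tcp() marks the incoming
 * update as stale; rather than applying it, the callers below log the
 * reason code and push our fresher copy of the state back out to the
 * peer via pfsync_update_state().
 */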
int
pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_state *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state_key *sk;
	struct pf_state *st;
	int sfail;
	struct mbuf *mp;
	int len = count * sizeof(*sp);
	int offp, i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		sp = &sa[i];

		/* check for invalid values */
		if (sp->timeout >= PFTM_MAX ||
		    sp->src.state > PF_TCPS_PROXY_DST ||
		    sp->dst.state > PF_TCPS_PROXY_DST) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync_input: PFSYNC_ACT_UPD: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		bcopy(sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			/* insert the update */
			if (pfsync_state_import(sp, 0))
				V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}

		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 1);

		sk = st->key[PF_SK_WIRE];	/* XXX right one? */
		sfail = 0;
		if (sk->proto == IPPROTO_TCP)
			sfail = pfsync_upd_tcp(st, &sp->src, &sp->dst);
		else {
			/*
			 * Non-TCP protocol state machines always go
			 * forwards.
			 */
			if (st->src.state > sp->src.state)
				sfail = 5;
			else if (st->dst.state > sp->dst.state)
				sfail = 6;
		}

		if (sfail) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync: %s stale update (%d)"
				    " id: %016llx creatorid: %08x\n",
				    (sfail < 7 ? "ignoring" : "partial"),
				    sfail, betoh64(st->id),
				    ntohl(st->creatorid));
			}
			V_pfsyncstats.pfsyncs_stale++;

			pfsync_update_state(st);
			schednetisr(NETISR_PFSYNC);
			continue;
		}

		pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
		pf_state_peer_ntoh(&sp->src, &st->src);
		pf_state_peer_ntoh(&sp->dst, &st->dst);
		st->expire = time_second;
		st->timeout = sp->timeout;
		st->pfsync_time = time_uptime;
	}

	return (len);
}
int
pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_upd_c *ua, *up;
	struct pf_state_key *sk;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*up);
	int sfail;
	struct mbuf *mp;
	int offp, i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	ua = (struct pfsync_upd_c *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		up = &ua[i];

		/* check for invalid values */
		if (up->timeout >= PFTM_MAX ||
		    up->src.state > PF_TCPS_PROXY_DST ||
		    up->dst.state > PF_TCPS_PROXY_DST) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync_input: "
				    "PFSYNC_ACT_UPD_C: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		bcopy(&up->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = up->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			/* We don't have this state. Ask for it. */
			pfsync_request_update(id_key.creatorid, id_key.id);
			continue;
		}

		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 1);

		sk = st->key[PF_SK_WIRE];	/* XXX right one? */
		sfail = 0;
		if (sk->proto == IPPROTO_TCP)
			sfail = pfsync_upd_tcp(st, &up->src, &up->dst);
		else {
			/*
			 * Non-TCP protocol state machines always go forwards.
			 */
			if (st->src.state > up->src.state)
				sfail = 5;
			else if (st->dst.state > up->dst.state)
				sfail = 6;
		}

		if (sfail) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync: ignoring stale update "
				    "(%d) id: %016llx "
				    "creatorid: %08x\n", sfail,
				    betoh64(st->id),
				    ntohl(st->creatorid));
			}
			V_pfsyncstats.pfsyncs_stale++;

			pfsync_update_state(st);
			schednetisr(NETISR_PFSYNC);
			continue;
		}

		pfsync_alloc_scrub_memory(&up->dst, &st->dst);
		pf_state_peer_ntoh(&up->src, &st->src);
		pf_state_peer_ntoh(&up->dst, &st->dst);
		st->expire = time_second;
		st->timeout = up->timeout;
		st->pfsync_time = time_uptime;
	}

	return (len);
}
int
pfsync_in_ureq(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_upd_req *ur, *ura;
	struct mbuf *mp;
	int len = count * sizeof(*ur);
	int i, offp;

	struct pf_state_cmp id_key;
	struct pf_state *st;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	ura = (struct pfsync_upd_req *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		ur = &ura[i];

		bcopy(&ur->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ur->creatorid;

		if (id_key.id == 0 && id_key.creatorid == 0)
			pfsync_bulk_start();
		else {
			st = pf_find_state_byid(&id_key);
			if (st == NULL) {
				V_pfsyncstats.pfsyncs_badstate++;
				continue;
			}
			if (ISSET(st->state_flags, PFSTATE_NOSYNC))
				continue;

			pfsync_update_state_req(st);
		}
	}

	return (len);
}
int
pfsync_in_del(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_state *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*sp);
	struct mbuf *mp;
	int offp, i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		sp = &sa[i];

		bcopy(sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}

		SET(st->state_flags, PFSTATE_NOSYNC);
		pf_unlink_state(st);
	}

	return (len);
}
int
pfsync_in_del_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_del_c *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*sp);
	struct mbuf *mp;
	int offp, i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_del_c *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		sp = &sa[i];

		bcopy(&sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}

		SET(st->state_flags, PFSTATE_NOSYNC);
		pf_unlink_state(st);
	}

	return (len);
}
int
pfsync_in_bus(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_bus *bus;
	struct mbuf *mp;
	int len = count * sizeof(*bus);
	int offp;

	/* If we're not waiting for a bulk update, who cares. */
	if (sc->sc_ureq_sent == 0)
		return (len);

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	bus = (struct pfsync_bus *)(mp->m_data + offp);

	switch (bus->status) {
	case PFSYNC_BUS_START:
#ifdef __FreeBSD__
		callout_reset(&sc->sc_bulkfail_tmo, 4 * hz +
		    V_pf_pool_limits[PF_LIMIT_STATES].limit /
		    ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) /
		    sizeof(struct pfsync_state)),
		    pfsync_bulk_fail, V_pfsyncif);
#else
		timeout_add(&sc->sc_bulkfail_tmo, 4 * hz +
		    pf_pool_limits[PF_LIMIT_STATES].limit /
		    ((sc->sc_if.if_mtu - PFSYNC_MINPKT) /
		    sizeof(struct pfsync_state)));
#endif
#ifdef __FreeBSD__
		if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
		if (pf_status.debug >= PF_DEBUG_MISC)
#endif
			printf("pfsync: received bulk update start\n");
		break;

	case PFSYNC_BUS_END:
		if (time_uptime - ntohl(bus->endtime) >=
		    sc->sc_ureq_sent) {
			/* that's it, we're happy */
			sc->sc_ureq_sent = 0;
			sc->sc_bulk_tries = 0;
			timeout_del(&sc->sc_bulkfail_tmo);
#ifdef __FreeBSD__
			if (!sc->pfsync_sync_ok && carp_demote_adj_p)
				(*carp_demote_adj_p)(-V_pfsync_carp_adj,
				    "pfsync bulk done");
			sc->pfsync_sync_ok = 1;
#else
			if (!pfsync_sync_ok)
				carp_group_demote_adj(&sc->sc_if, -1);
			pfsync_sync_ok = 1;
#endif
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
			if (pf_status.debug >= PF_DEBUG_MISC)
#endif
				printf("pfsync: received valid "
				    "bulk update end\n");
		} else {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
			if (pf_status.debug >= PF_DEBUG_MISC)
#endif
				printf("pfsync: received invalid "
				    "bulk update end: bad timestamp\n");
		}
		break;
	}

	return (len);
}
int
pfsync_in_tdb(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	int len = count * sizeof(struct pfsync_tdb);

	struct pfsync_tdb *tp;
	struct mbuf *mp;
	int offp;
	int i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	tp = (struct pfsync_tdb *)(mp->m_data + offp);

	for (i = 0; i < count; i++)
		pfsync_update_net_tdb(&tp[i]);

	return (len);
}

/* Update an in-kernel tdb. Silently fail if no tdb is found. */
void
pfsync_update_net_tdb(struct pfsync_tdb *pt)
{
	struct tdb *tdb;

	/* check for invalid values */
	if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
	    (pt->dst.sa.sa_family != AF_INET &&
	    pt->dst.sa.sa_family != AF_INET6))
		goto bad;

	tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
	if (tdb) {
		pt->rpl = ntohl(pt->rpl);
		pt->cur_bytes = betoh64(pt->cur_bytes);

		/* Neither replay nor byte counter should ever decrease. */
		if (pt->rpl < tdb->tdb_rpl ||
		    pt->cur_bytes < tdb->tdb_cur_bytes) {
			goto bad;
		}

		tdb->tdb_rpl = pt->rpl;
		tdb->tdb_cur_bytes = pt->cur_bytes;
	}
	return;

bad:
#ifdef __FreeBSD__
	if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
	if (pf_status.debug >= PF_DEBUG_MISC)
#endif
		printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
		    "invalid value\n");
	V_pfsyncstats.pfsyncs_badstate++;
	return;
}
int
pfsync_in_eof(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	/* check if we are at the right place in the packet */
	if (offset != m->m_pkthdr.len - sizeof(struct pfsync_eof))
		V_pfsyncstats.pfsyncs_badact++;

	/* we're done. free and let the caller return */
	m_freem(m);
	return (-1);
}

int
pfsync_in_error(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	V_pfsyncstats.pfsyncs_badact++;

	m_freem(m);
	return (-1);
}
int
pfsyncoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct rtentry *rt)
{
	m_freem(m);
	return (0);
}

/* ARGSUSED */
int
pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#ifndef __FreeBSD__
	struct proc *p = curproc;
#endif
	struct pfsync_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ip_moptions *imo = &sc->sc_imo;
	struct pfsyncreq pfsyncr;
	struct ifnet *sifp;
	struct ip *ip;
	int error;

	switch (cmd) {
	case SIOCSIFDSTADDR:
	case SIOCSIFFLAGS:
#ifdef __FreeBSD__
		if (ifp->if_flags & IFF_UP)
			ifp->if_drv_flags |= IFF_DRV_RUNNING;
		else
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
#else
		if (ifp->if_flags & IFF_UP)
			ifp->if_flags |= IFF_RUNNING;
		else
			ifp->if_flags &= ~IFF_RUNNING;
#endif
		break;
	case SIOCSIFMTU:
		if (!sc->sc_sync_if ||
		    ifr->ifr_mtu <= PFSYNC_MINPKT ||
		    ifr->ifr_mtu > sc->sc_sync_if->if_mtu)
			return (EINVAL);
		if (ifr->ifr_mtu < ifp->if_mtu) {
			pfsync_sendout();
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCGETPFSYNC:
		bzero(&pfsyncr, sizeof(pfsyncr));
		if (sc->sc_sync_if) {
			strlcpy(pfsyncr.pfsyncr_syncdev,
			    sc->sc_sync_if->if_xname, IFNAMSIZ);
		}
		pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
		pfsyncr.pfsyncr_defer = sc->sc_defer;
		return (copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr)));

	case SIOCSETPFSYNC:
#ifdef __FreeBSD__
		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
#else
		if ((error = suser(p, p->p_acflag)) != 0)
#endif
			return (error);
		if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))
			return (error);

		if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
#ifdef __FreeBSD__
			sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
#else
			sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
#endif
		else
			sc->sc_sync_peer.s_addr =
			    pfsyncr.pfsyncr_syncpeer.s_addr;

		if (pfsyncr.pfsyncr_maxupdates > 255)
			return (EINVAL);
		sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;
		sc->sc_defer = pfsyncr.pfsyncr_defer;

		if (pfsyncr.pfsyncr_syncdev[0] == 0) {
			sc->sc_sync_if = NULL;
#ifdef __FreeBSD__
			if (imo->imo_membership)
				pfsync_multicast_cleanup(sc);
#else
			if (imo->imo_num_memberships > 0) {
				in_delmulti(imo->imo_membership[
				    --imo->imo_num_memberships]);
				imo->imo_multicast_ifp = NULL;
			}
#endif
			break;
		}

		if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL)
			return (EINVAL);

#ifdef __FreeBSD__
		if (sifp->if_mtu < sc->sc_ifp->if_mtu ||
#else
		if (sifp->if_mtu < sc->sc_if.if_mtu ||
#endif
		    (sc->sc_sync_if != NULL &&
		    sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
		    sifp->if_mtu < MCLBYTES - sizeof(struct ip))
			pfsync_sendout();
		sc->sc_sync_if = sifp;

#ifdef __FreeBSD__
		if (imo->imo_membership) {
			pfsync_multicast_cleanup(sc);
		}
#else
		if (imo->imo_num_memberships > 0) {
			in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
			imo->imo_multicast_ifp = NULL;
		}
#endif

#ifdef __FreeBSD__
		if (sc->sc_sync_if &&
		    sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
			error = pfsync_multicast_setup(sc);
			if (error)
				return (error);
		}
#else
		if (sc->sc_sync_if &&
		    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
			struct in_addr addr;

			if (!(sc->sc_sync_if->if_flags & IFF_MULTICAST)) {
				sc->sc_sync_if = NULL;
				return (EADDRNOTAVAIL);
			}

			addr.s_addr = INADDR_PFSYNC_GROUP;

			if ((imo->imo_membership[0] =
			    in_addmulti(&addr, sc->sc_sync_if)) == NULL) {
				sc->sc_sync_if = NULL;
				return (ENOBUFS);
			}
			imo->imo_num_memberships++;
			imo->imo_multicast_ifp = sc->sc_sync_if;
			imo->imo_multicast_ttl = PFSYNC_DFLTTL;
			imo->imo_multicast_loop = 0;
		}
#endif /* !__FreeBSD__ */

		ip = &sc->sc_template;
		bzero(ip, sizeof(*ip));
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(sc->sc_template) >> 2;
		ip->ip_tos = IPTOS_LOWDELAY;
		/* len and id are set later */
		ip->ip_off = htons(IP_DF);
		ip->ip_ttl = PFSYNC_DFLTTL;
		ip->ip_p = IPPROTO_PFSYNC;
		ip->ip_src.s_addr = INADDR_ANY;
		ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr;

		if (sc->sc_sync_if) {
			/* Request a full state table update. */
			sc->sc_ureq_sent = time_uptime;
#ifdef __FreeBSD__
			if (sc->pfsync_sync_ok && carp_demote_adj_p)
				(*carp_demote_adj_p)(V_pfsync_carp_adj,
				    "pfsync bulk start");
			sc->pfsync_sync_ok = 0;
#else
			if (pfsync_sync_ok)
				carp_group_demote_adj(&sc->sc_if, 1);
			pfsync_sync_ok = 0;
#endif
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
			if (pf_status.debug >= PF_DEBUG_MISC)
#endif
				printf("pfsync: requesting bulk update\n");
#ifdef __FreeBSD__
			callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
			    pfsync_bulk_fail, V_pfsyncif);
#else
			timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
#endif
			pfsync_request_update(0, 0);
		}
		break;

	default:
		return (ENOTTY);
	}

	return (0);
}
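
/*
 * Note that SIOCSETPFSYNC rebuilds sc_template from scratch: the IP
 * header for outgoing pfsync packets is pre-filled here once, and
 * pfsync_sendout() only has to stamp the length and id fields on each
 * packet it builds.
 */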
int
pfsync_out_state(struct pf_state *st, struct mbuf *m, int offset)
{
	struct pfsync_state *sp = (struct pfsync_state *)(m->m_data + offset);

	pfsync_state_export(sp, st);

	return (sizeof(*sp));
}

int
pfsync_out_iack(struct pf_state *st, struct mbuf *m, int offset)
{
	struct pfsync_ins_ack *iack =
	    (struct pfsync_ins_ack *)(m->m_data + offset);

	iack->id = st->id;
	iack->creatorid = st->creatorid;

	return (sizeof(*iack));
}

int
pfsync_out_upd_c(struct pf_state *st, struct mbuf *m, int offset)
{
	struct pfsync_upd_c *up = (struct pfsync_upd_c *)(m->m_data + offset);

	bzero(up, sizeof(*up));
	up->id = st->id;
	pf_state_peer_hton(&st->src, &up->src);
	pf_state_peer_hton(&st->dst, &up->dst);
	up->creatorid = st->creatorid;
	up->timeout = st->timeout;

	return (sizeof(*up));
}

int
pfsync_out_del(struct pf_state *st, struct mbuf *m, int offset)
{
	struct pfsync_del_c *dp = (struct pfsync_del_c *)(m->m_data + offset);

	dp->id = st->id;
	dp->creatorid = st->creatorid;

	SET(st->state_flags, PFSTATE_NOSYNC);

	return (sizeof(*dp));
}
void
pfsync_drop(struct pfsync_softc *sc)
{
	struct pf_state *st;
	struct pfsync_upd_req_item *ur;
	struct tdb *t;
	int q;

	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))
			continue;

		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
#ifdef __FreeBSD__
			KASSERT(st->sync_state == q,
			    ("%s: st->sync_state == q",
				__FUNCTION__));
#else
			KASSERT(st->sync_state == q);
#endif
			st->sync_state = PFSYNC_S_NONE;
		}
		TAILQ_INIT(&sc->sc_qs[q]);
	}

	while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
		TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
		pool_put(&sc->sc_pool, ur);
	}

	if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
		TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry)
			CLR(t->tdb_flags, TDBF_PFSYNC);

		TAILQ_INIT(&sc->sc_tdb_q);
	}

	sc->sc_len = PFSYNC_MINPKT;
}
#ifdef __FreeBSD__
void
pfsync_sendout(void)
{
	pfsync_sendout1(1);
}

static void
pfsync_sendout1(int schedswi)
{
	struct pfsync_softc *sc = V_pfsyncif;
#else
void
pfsync_sendout(void)
{
	struct pfsync_softc *sc = pfsyncif;
#endif
#ifdef __FreeBSD__
	struct ifnet *ifp = sc->sc_ifp;
#else
	struct ifnet *ifp = &sc->sc_if;
#endif
	struct mbuf *m;
	struct ip *ip;
	struct pfsync_header *ph;
	struct pfsync_subheader *subh;
	struct pf_state *st;
	struct pfsync_upd_req_item *ur;
	struct tdb *t;
	int offset;
	int q, count = 0;

	if (sc == NULL || sc->sc_len == PFSYNC_MINPKT)
		return;

#ifdef __FreeBSD__
	if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
#else
	if (sc->sc_sync_if == NULL) {
#endif
		pfsync_drop(sc);
		return;
	}

#ifdef __FreeBSD__
	m = m_get2(M_NOWAIT, MT_DATA, M_PKTHDR, max_linkhdr + sc->sc_len);
	if (m == NULL) {
		sc->sc_ifp->if_oerrors++;
		V_pfsyncstats.pfsyncs_onomem++;
		return;
	}
#else
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		sc->sc_if.if_oerrors++;
		pfsyncstats.pfsyncs_onomem++;
		pfsync_drop(sc);
		return;
	}

	if (max_linkhdr + sc->sc_len > MHLEN) {
		MCLGETI(m, M_DONTWAIT, NULL, max_linkhdr + sc->sc_len);
		if (!ISSET(m->m_flags, M_EXT)) {
			m_free(m);
			sc->sc_if.if_oerrors++;
			pfsyncstats.pfsyncs_onomem++;
			pfsync_drop(sc);
			return;
		}
	}
#endif
	m->m_data += max_linkhdr;
	m->m_len = m->m_pkthdr.len = sc->sc_len;

	/* build the ip header */
	ip = (struct ip *)m->m_data;
	bcopy(&sc->sc_template, ip, sizeof(*ip));
	offset = sizeof(*ip);

#ifdef __FreeBSD__
	ip->ip_len = m->m_pkthdr.len;
#else
	ip->ip_len = htons(m->m_pkthdr.len);
#endif
	ip->ip_id = htons(ip_randomid());

	/* build the pfsync header */
	ph = (struct pfsync_header *)(m->m_data + offset);
	bzero(ph, sizeof(*ph));
	offset += sizeof(*ph);

	ph->version = PFSYNC_VERSION;
	ph->len = htons(sc->sc_len - sizeof(*ip));
#ifdef __FreeBSD__
	bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
#else
	bcopy(pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
#endif

	/* walk the queues */
	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))
			continue;

		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
#ifdef __FreeBSD__
			KASSERT(st->sync_state == q,
			    ("%s: st->sync_state == q",
				__FUNCTION__));
#else
			KASSERT(st->sync_state == q);
#endif
			offset += pfsync_qs[q].write(st, m, offset);
			st->sync_state = PFSYNC_S_NONE;
			count++;
		}
		TAILQ_INIT(&sc->sc_qs[q]);

		bzero(subh, sizeof(*subh));
		subh->action = pfsync_qs[q].action;
		subh->count = htons(count);
	}

	if (!TAILQ_EMPTY(&sc->sc_upd_req_list)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
			TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);

			bcopy(&ur->ur_msg, m->m_data + offset,
			    sizeof(ur->ur_msg));
			offset += sizeof(ur->ur_msg);

			pool_put(&sc->sc_pool, ur);

			count++;
		}

		bzero(subh, sizeof(*subh));
		subh->action = PFSYNC_ACT_UPD_REQ;
		subh->count = htons(count);
	}

	/* has someone built a custom region for us to add? */
	if (sc->sc_plus != NULL) {
		bcopy(sc->sc_plus, m->m_data + offset, sc->sc_pluslen);
		offset += sc->sc_pluslen;

		sc->sc_plus = NULL;
	}

	if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry) {
			offset += pfsync_out_tdb(t, m, offset);
			CLR(t->tdb_flags, TDBF_PFSYNC);
			count++;
		}
		TAILQ_INIT(&sc->sc_tdb_q);

		bzero(subh, sizeof(*subh));
		subh->action = PFSYNC_ACT_TDB;
		subh->count = htons(count);
	}

	subh = (struct pfsync_subheader *)(m->m_data + offset);
	offset += sizeof(*subh);

	bzero(subh, sizeof(*subh));
	subh->action = PFSYNC_ACT_EOF;
	subh->count = htons(1);

	/* XXX write checksum in EOF here */

	/* we're done, let's put it on the wire */
#if NBPFILTER > 0
	if (ifp->if_bpf) {
		m->m_data += sizeof(*ip);
		m->m_len = m->m_pkthdr.len = sc->sc_len - sizeof(*ip);
		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
		m->m_data -= sizeof(*ip);
		m->m_len = m->m_pkthdr.len = sc->sc_len;
	}
#endif

	if (sc->sc_sync_if == NULL) {
		sc->sc_len = PFSYNC_MINPKT;
		m_freem(m);
		return;
	}

#ifdef __FreeBSD__
	sc->sc_ifp->if_opackets++;
	sc->sc_ifp->if_obytes += m->m_pkthdr.len;
	sc->sc_len = PFSYNC_MINPKT;

	if (!_IF_QFULL(&sc->sc_ifp->if_snd))
		_IF_ENQUEUE(&sc->sc_ifp->if_snd, m);
	else {
		m_freem(m);
		sc->sc_ifp->if_snd.ifq_drops++;
	}
	if (schedswi)
		swi_sched(V_pfsync_swi_cookie, 0);
#else
	sc->sc_if.if_opackets++;
	sc->sc_if.if_obytes += m->m_pkthdr.len;

	if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL) == 0)
		pfsyncstats.pfsyncs_opackets++;
	else
		pfsyncstats.pfsyncs_oerrors++;

	sc->sc_len = PFSYNC_MINPKT;
#endif
}
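
/*
 * On FreeBSD the finished packet is queued on the interface send queue
 * and pfsyncintr() performs the actual ip_output() from a software
 * interrupt, while the OpenBSD path above transmits synchronously.
 */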
void
pfsync_insert_state(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifndef __FreeBSD__
	splassert(IPL_SOFTNET);
#endif

	if (ISSET(st->rule.ptr->rule_flag, PFRULE_NOSYNC) ||
	    st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
		SET(st->state_flags, PFSTATE_NOSYNC);
		return;
	}

	if (sc == NULL || ISSET(st->state_flags, PFSTATE_NOSYNC))
		return;

#ifdef __FreeBSD__
	KASSERT(st->sync_state == PFSYNC_S_NONE,
	    ("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
#else
	KASSERT(st->sync_state == PFSYNC_S_NONE);
#endif

	if (sc->sc_len == PFSYNC_MINPKT)
#ifdef __FreeBSD__
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_tmo, 1);
#endif

	pfsync_q_ins(st, PFSYNC_S_INS);

	st->sync_updates = 0;
}
int
pfsync_defer(struct pf_state *st, struct mbuf *m)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_deferral *pd;

#ifndef __FreeBSD__
	splassert(IPL_SOFTNET);
#endif

	if (!sc->sc_defer || m->m_flags & (M_BCAST|M_MCAST))
		return (0);

	if (sc->sc_deferred >= 128)
		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);

	pd = pool_get(&sc->sc_pool, M_NOWAIT);
	if (pd == NULL)
		return (0);
	sc->sc_deferred++;

#ifdef __FreeBSD__
	m->m_flags |= M_SKIP_FIREWALL;
#else
	m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
#endif
	SET(st->state_flags, PFSTATE_ACK);

	pd->pd_st = st;
	pd->pd_m = m;

	TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, pd_entry);

#ifdef __FreeBSD__
	callout_init(&pd->pd_tmo, CALLOUT_MPSAFE);
	callout_reset(&pd->pd_tmo, defer, pfsync_defer_tmo,
	    pd);
#else
	timeout_set(&pd->pd_tmo, pfsync_defer_tmo, pd);
	timeout_add(&pd->pd_tmo, defer);
#endif

#ifdef __FreeBSD__
	swi_sched(V_pfsync_swi_cookie, 0);
#else
	schednetisr(NETISR_PFSYNC);
#endif

	return (1);
}

void
pfsync_undefer(struct pfsync_deferral *pd, int drop)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifndef __FreeBSD__
	splassert(IPL_SOFTNET);
#endif

	TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
	sc->sc_deferred--;

	CLR(pd->pd_st->state_flags, PFSTATE_ACK);
	timeout_del(&pd->pd_tmo); /* bah */
	if (drop)
		m_freem(pd->pd_m);
	else {
		/* XXX: use pf_defered?! */
		ip_output(pd->pd_m, (void *)NULL, (void *)NULL, 0,
		    (void *)NULL, (void *)NULL);
	}

	pool_put(&sc->sc_pool, pd);
}

void
pfsync_defer_tmo(void *arg)
{
#if defined(__FreeBSD__) && defined(VIMAGE)
	struct pfsync_deferral *pd = arg;
#endif

#ifdef __FreeBSD__
	CURVNET_SET(pd->pd_m->m_pkthdr.rcvif->if_vnet); /* XXX */
#endif
	pfsync_undefer(arg, 0);
#ifdef __FreeBSD__
	CURVNET_RESTORE();
#endif
}

void
pfsync_deferred(struct pf_state *st, int drop)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_deferral *pd;

	TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) {
		if (pd->pd_st == st) {
			pfsync_undefer(pd, drop);
			return;
		}
	}

	panic("pfsync_send_deferred: unable to find deferred state");
}
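
/*
 * Deferral in a nutshell: when a new state is created, the packet that
 * created it can be held back (pd_m) until the peer acknowledges the
 * state insert (pfsync_in_iack -> pfsync_deferred) or the short pd_tmo
 * timer expires, whichever comes first.  This keeps the peer from
 * seeing reply traffic for a state it does not yet have.
 */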
u_int pfsync_upds = 0;

void
pfsync_update_state(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int sync = 0;

#ifndef __FreeBSD__
	splassert(IPL_SOFTNET);
#endif

	if (sc == NULL)
		return;

	if (ISSET(st->state_flags, PFSTATE_ACK))
		pfsync_deferred(st, 0);
	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		return;
	}

	if (sc->sc_len == PFSYNC_MINPKT)
#ifdef __FreeBSD__
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_tmo, 1);
#endif

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case PFSYNC_S_UPD:
	case PFSYNC_S_INS:
		/* we're already handling it */
		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
			st->sync_updates++;
			if (st->sync_updates >= sc->sc_maxupdates)
				sync = 1;
		}
		break;

	case PFSYNC_S_IACK:
		pfsync_q_del(st);
	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD_C);
		st->sync_updates = 0;
		break;

	default:
		panic("pfsync_update_state: unexpected sync state %d",
		    st->sync_state);
	}

	if (sync || (time_uptime - st->pfsync_time) < 2) {
		pfsync_upds++;
		schednetisr(NETISR_PFSYNC);
	}
}
void
pfsync_request_update(u_int32_t creatorid, u_int64_t id)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_upd_req_item *item;
	size_t nlen = sizeof(struct pfsync_upd_req);

	/*
	 * this code does nothing to prevent multiple update requests for the
	 * same state being generated.
	 */
	item = pool_get(&sc->sc_pool, PR_NOWAIT);
	if (item == NULL) {
		/* XXX stats */
		return;
	}

	item->ur_msg.id = id;
	item->ur_msg.creatorid = creatorid;

	if (TAILQ_EMPTY(&sc->sc_upd_req_list))
		nlen += sizeof(struct pfsync_subheader);

#ifdef __FreeBSD__
	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
#else
	if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
#endif
		pfsync_sendout();

		nlen = sizeof(struct pfsync_subheader) +
		    sizeof(struct pfsync_upd_req);
	}

	TAILQ_INSERT_TAIL(&sc->sc_upd_req_list, item, ur_entry);
	sc->sc_len += nlen;

	schednetisr(NETISR_PFSYNC);
}
void
pfsync_update_state_req(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

	if (sc == NULL)
		panic("pfsync_update_state_req: nonexistent instance");

	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		return;
	}

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case PFSYNC_S_IACK:
		pfsync_q_del(st);
	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD);
		schednetisr(NETISR_PFSYNC);
		return;

	case PFSYNC_S_INS:
	case PFSYNC_S_UPD:
	case PFSYNC_S_DEL:
		/* we're already handling it */
		return;

	default:
		panic("pfsync_update_state_req: unexpected sync state %d",
		    st->sync_state);
	}
}
void
pfsync_delete_state(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifndef __FreeBSD__
	splassert(IPL_SOFTNET);
#endif

	if (sc == NULL)
		return;

	if (ISSET(st->state_flags, PFSTATE_ACK))
		pfsync_deferred(st, 1);
	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		return;
	}

	if (sc->sc_len == PFSYNC_MINPKT)
#ifdef __FreeBSD__
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_tmo, 1);
#endif

	switch (st->sync_state) {
	case PFSYNC_S_INS:
		/* we never got to tell the world so just forget about it */
		pfsync_q_del(st);
		return;

	case PFSYNC_S_UPD_C:
	case PFSYNC_S_UPD:
	case PFSYNC_S_IACK:
		pfsync_q_del(st);
		/* FALLTHROUGH to putting it on the del list */

	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_DEL);
		return;

	default:
		panic("pfsync_delete_state: unexpected sync state %d",
		    st->sync_state);
	}
}
void
pfsync_clear_states(u_int32_t creatorid, const char *ifname)
{
	struct {
		struct pfsync_subheader subh;
		struct pfsync_clr clr;
	} __packed r;

#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifndef __FreeBSD__
	splassert(IPL_SOFTNET);
#endif

	if (sc == NULL)
		return;

	bzero(&r, sizeof(r));

	r.subh.action = PFSYNC_ACT_CLR;
	r.subh.count = htons(1);

	strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
	r.clr.creatorid = creatorid;

	pfsync_send_plus(&r, sizeof(r));
}
void
pfsync_q_ins(struct pf_state *st, int q)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	size_t nlen = pfsync_qs[q].len;

#ifdef __FreeBSD__
	KASSERT(st->sync_state == PFSYNC_S_NONE,
	    ("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
#else
	KASSERT(st->sync_state == PFSYNC_S_NONE);
#endif

#if 1 || defined(PFSYNC_DEBUG)
	if (sc->sc_len < PFSYNC_MINPKT)
#ifdef __FreeBSD__
		panic("pfsync pkt len is too low %zu", sc->sc_len);
#else
		panic("pfsync pkt len is too low %d", sc->sc_len);
#endif
#endif
	if (TAILQ_EMPTY(&sc->sc_qs[q]))
		nlen += sizeof(struct pfsync_subheader);

#ifdef __FreeBSD__
	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
#else
	if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
#endif
		pfsync_sendout();

		nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
	}

	sc->sc_len += nlen;
	TAILQ_INSERT_TAIL(&sc->sc_qs[q], st, sync_list);
	st->sync_state = q;
}

void
pfsync_q_del(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int q = st->sync_state;

#ifdef __FreeBSD__
	KASSERT(st->sync_state != PFSYNC_S_NONE,
	    ("%s: st->sync_state != PFSYNC_S_NONE", __FUNCTION__));
#else
	KASSERT(st->sync_state != PFSYNC_S_NONE);
#endif

	sc->sc_len -= pfsync_qs[q].len;
	TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list);
	st->sync_state = PFSYNC_S_NONE;

	if (TAILQ_EMPTY(&sc->sc_qs[q]))
		sc->sc_len -= sizeof(struct pfsync_subheader);
}
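
/*
 * sc_len tracks the size of the packet being accumulated, including one
 * subheader per non-empty queue.  pfsync_q_ins() flushes via
 * pfsync_sendout() whenever adding another message would exceed the
 * sync interface MTU, so the pending packet always fits on the wire.
 */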
void
pfsync_update_tdb(struct tdb *t, int output)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	size_t nlen = sizeof(struct pfsync_tdb);

	if (sc == NULL)
		return;

	if (!ISSET(t->tdb_flags, TDBF_PFSYNC)) {
		if (TAILQ_EMPTY(&sc->sc_tdb_q))
			nlen += sizeof(struct pfsync_subheader);

		if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
			pfsync_sendout();

			nlen = sizeof(struct pfsync_subheader) +
			    sizeof(struct pfsync_tdb);
		}

		sc->sc_len += nlen;
		TAILQ_INSERT_TAIL(&sc->sc_tdb_q, t, tdb_sync_entry);
		SET(t->tdb_flags, TDBF_PFSYNC);
		t->tdb_updates = 0;
	} else {
		if (++t->tdb_updates >= sc->sc_maxupdates)
			schednetisr(NETISR_PFSYNC);
	}

	if (output)
		SET(t->tdb_flags, TDBF_PFSYNC_RPL);
	else
		CLR(t->tdb_flags, TDBF_PFSYNC_RPL);
}

void
pfsync_delete_tdb(struct tdb *t)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

	if (sc == NULL || !ISSET(t->tdb_flags, TDBF_PFSYNC))
		return;

	sc->sc_len -= sizeof(struct pfsync_tdb);
	TAILQ_REMOVE(&sc->sc_tdb_q, t, tdb_sync_entry);
	CLR(t->tdb_flags, TDBF_PFSYNC);

	if (TAILQ_EMPTY(&sc->sc_tdb_q))
		sc->sc_len -= sizeof(struct pfsync_subheader);
}

int
pfsync_out_tdb(struct tdb *t, struct mbuf *m, int offset)
{
	struct pfsync_tdb *ut = (struct pfsync_tdb *)(m->m_data + offset);

	bzero(ut, sizeof(*ut));
	ut->spi = t->tdb_spi;
	bcopy(&t->tdb_dst, &ut->dst, sizeof(ut->dst));
	/*
	 * When a failover happens, the master's rpl is probably above
	 * what we see here (we may be up to a second late), so
	 * increase it a bit for outbound tdbs to manage most such
	 * situations.
	 *
	 * For now, just add an offset that is likely to be larger
	 * than the number of packets we can see in one second. The RFC
	 * just says the next packet must have a higher seq value.
	 *
	 * XXX What is a good algorithm for this? We could use
	 * a rate-determined increase, but to know it, we would have
	 * to extend struct tdb.
	 * XXX pt->rpl can wrap over MAXINT, but if so the real tdb
	 * will soon be replaced anyway. For now, just don't handle
	 * this edge case.
	 */
#define RPL_INCR 16384
	ut->rpl = htonl(t->tdb_rpl + (ISSET(t->tdb_flags, TDBF_PFSYNC_RPL) ?
	    RPL_INCR : 0));
	ut->cur_bytes = htobe64(t->tdb_cur_bytes);
	ut->sproto = t->tdb_sproto;

	return (sizeof(*ut));
}
void
pfsync_bulk_start(void)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
	if (pf_status.debug >= PF_DEBUG_MISC)
#endif
		printf("pfsync: received bulk update request\n");

#ifdef __FreeBSD__
	if (TAILQ_EMPTY(&V_state_list))
#else
	if (TAILQ_EMPTY(&state_list))
#endif
		pfsync_bulk_status(PFSYNC_BUS_END);
	else {
		sc->sc_ureq_received = time_uptime;
		if (sc->sc_bulk_next == NULL)
#ifdef __FreeBSD__
			sc->sc_bulk_next = TAILQ_FIRST(&V_state_list);
#else
			sc->sc_bulk_next = TAILQ_FIRST(&state_list);
#endif
		sc->sc_bulk_last = sc->sc_bulk_next;

		pfsync_bulk_status(PFSYNC_BUS_START);
		callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc);
	}
}
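
/*
 * Bulk update protocol: the requesting peer sends an update request
 * with id 0 and creatorid 0 (see pfsync_request_update(0, 0) in the
 * ioctl path); we answer with a BUS "start" marker, stream every
 * eligible state through pfsync_update_state_req(), and finish with a
 * BUS "end" carrying the elapsed time so the peer can validate it
 * against its own request timestamp.
 */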
void
pfsync_bulk_update(void *arg)
{
	struct pfsync_softc *sc = arg;
	struct pf_state *st = sc->sc_bulk_next;
	int i = 0;

#ifdef __FreeBSD__
	CURVNET_SET(sc->sc_ifp->if_vnet);
#endif
	for (;;) {
		if (st->sync_state == PFSYNC_S_NONE &&
		    st->timeout < PFTM_MAX &&
		    st->pfsync_time <= sc->sc_ureq_received) {
			pfsync_update_state_req(st);
			i++;
		}

		st = TAILQ_NEXT(st, entry_list);
		if (st == NULL)
#ifdef __FreeBSD__
			st = TAILQ_FIRST(&V_state_list);
#else
			st = TAILQ_FIRST(&state_list);
#endif

		if (st == sc->sc_bulk_last) {
			/* we're done */
			sc->sc_bulk_next = NULL;
			sc->sc_bulk_last = NULL;
			pfsync_bulk_status(PFSYNC_BUS_END);
			break;
		}

#ifdef __FreeBSD__
		if (i > 1 && (sc->sc_ifp->if_mtu - sc->sc_len) <
#else
		if (i > 1 && (sc->sc_if.if_mtu - sc->sc_len) <
#endif
		    sizeof(struct pfsync_state)) {
			/* we've filled a packet */
			sc->sc_bulk_next = st;
#ifdef __FreeBSD__
			callout_reset(&sc->sc_bulk_tmo, 1,
			    pfsync_bulk_update, sc);
#else
			timeout_add(&sc->sc_bulk_tmo, 1);
#endif
			break;
		}
	}
#ifdef __FreeBSD__
	CURVNET_RESTORE();
#endif
}
void
pfsync_bulk_status(u_int8_t status)
{
	struct {
		struct pfsync_subheader subh;
		struct pfsync_bus bus;
	} __packed r;

#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

	bzero(&r, sizeof(r));

	r.subh.action = PFSYNC_ACT_BUS;
	r.subh.count = htons(1);

#ifdef __FreeBSD__
	r.bus.creatorid = V_pf_status.hostid;
#else
	r.bus.creatorid = pf_status.hostid;
#endif
	r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
	r.bus.status = status;

	pfsync_send_plus(&r, sizeof(r));
}
void
pfsync_bulk_fail(void *arg)
{
	struct pfsync_softc *sc = arg;

#ifdef __FreeBSD__
	CURVNET_SET(sc->sc_ifp->if_vnet);
#endif
	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
		/* Try again */
#ifdef __FreeBSD__
		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
		    pfsync_bulk_fail, V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
#endif
		pfsync_request_update(0, 0);
	} else {
		/* Pretend like the transfer was ok */
		sc->sc_ureq_sent = 0;
		sc->sc_bulk_tries = 0;
#ifdef __FreeBSD__
		if (!sc->pfsync_sync_ok && carp_demote_adj_p)
			(*carp_demote_adj_p)(-V_pfsync_carp_adj,
			    "pfsync bulk fail");
		sc->pfsync_sync_ok = 1;
#else
		if (!pfsync_sync_ok)
			carp_group_demote_adj(&sc->sc_if, -1);
		pfsync_sync_ok = 1;
#endif
#ifdef __FreeBSD__
		if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
		if (pf_status.debug >= PF_DEBUG_MISC)
#endif
			printf("pfsync: failed to receive bulk update\n");
	}
#ifdef __FreeBSD__
	CURVNET_RESTORE();
#endif
}
void
pfsync_send_plus(void *plus, size_t pluslen)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	if (sc->sc_len + pluslen > sc->sc_ifp->if_mtu) {
#else
	if (sc->sc_len + pluslen > sc->sc_if.if_mtu) {
#endif
		pfsync_sendout();
	}

	sc->sc_plus = plus;
	sc->sc_len += (sc->sc_pluslen = pluslen);

	pfsync_sendout();
}

int
pfsync_up(void)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	if (sc == NULL || !ISSET(sc->sc_ifp->if_flags, IFF_DRV_RUNNING))
#else
	if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING))
#endif
		return (0);

	return (1);
}

int
pfsync_state_in_use(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

	if (sc == NULL)
		return (0);

	if (st->sync_state != PFSYNC_S_NONE ||
	    st == sc->sc_bulk_next ||
	    st == sc->sc_bulk_last)
		return (1);

	return (0);
}
void
pfsync_timeout(void *arg)
{
#if defined(__FreeBSD__) && defined(VIMAGE)
	struct pfsync_softc *sc = arg;
#endif

#ifdef __FreeBSD__
	CURVNET_SET(sc->sc_ifp->if_vnet);
#endif
	pfsync_sendout();
#ifdef __FreeBSD__
	CURVNET_RESTORE();
#endif
}

/* this is a softnet/netisr handler */
static void
pfsyncintr(void *arg)
{
	struct pfsync_softc *sc = arg;
	struct mbuf *m, *n;

	CURVNET_SET(sc->sc_ifp->if_vnet);

	if (sc->sc_len > PFSYNC_MINPKT)
		pfsync_sendout1(0);
	_IF_DEQUEUE_ALL(&sc->sc_ifp->if_snd, m);

	for (; m != NULL; m = n) {
		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL)
		    == 0)
			V_pfsyncstats.pfsyncs_opackets++;
		else
			V_pfsyncstats.pfsyncs_oerrors++;
	}
	CURVNET_RESTORE();
}
int
pfsync_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
	/* All sysctl names at this level are terminal. */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case PFSYNCCTL_STATS:
		if (newp != NULL)
			return (EPERM);
		return (sysctl_struct(oldp, oldlenp, newp, newlen,
		    &V_pfsyncstats, sizeof(V_pfsyncstats)));
	default:
		return (ENOPROTOOPT);
	}
}
static int
pfsync_multicast_setup(struct pfsync_softc *sc)
{
	struct ip_moptions *imo = &sc->sc_imo;
	int error;

	if (!(sc->sc_sync_if->if_flags & IFF_MULTICAST)) {
		sc->sc_sync_if = NULL;
		return (EADDRNOTAVAIL);
	}

	imo->imo_membership = (struct in_multi **)malloc(
	    (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_PFSYNC,
	    M_WAITOK | M_ZERO);
	imo->imo_max_memberships = IP_MIN_MEMBERSHIPS;
	imo->imo_multicast_vif = -1;

	if ((error = in_joingroup(sc->sc_sync_if, &sc->sc_sync_peer, NULL,
	    &imo->imo_membership[0])) != 0) {
		free(imo->imo_membership, M_PFSYNC);
		return (error);
	}
	imo->imo_num_memberships++;
	imo->imo_multicast_ifp = sc->sc_sync_if;
	imo->imo_multicast_ttl = PFSYNC_DFLTTL;
	imo->imo_multicast_loop = 0;

	return (0);
}

static void
pfsync_multicast_cleanup(struct pfsync_softc *sc)
{
	struct ip_moptions *imo = &sc->sc_imo;

	in_leavegroup(imo->imo_membership[0], NULL);
	free(imo->imo_membership, M_PFSYNC);
	imo->imo_membership = NULL;
	imo->imo_multicast_ifp = NULL;
}
extern struct domain inetdomain;
static struct protosw in_pfsync_protosw = {
	.pr_type =		SOCK_RAW,
	.pr_domain =		&inetdomain,
	.pr_protocol =		IPPROTO_PFSYNC,
	.pr_flags =		PR_ATOMIC|PR_ADDR,
	.pr_input =		pfsync_input,
	.pr_output =		(pr_output_t *)rip_output,
	.pr_ctloutput =		rip_ctloutput,
	.pr_usrreqs =		&rip_usrreqs
};
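
/*
 * pfsync has no sockets of its own; the protosw above exists so that
 * pfsync_init() can hook pfsync_input() into the IPPROTO_PFSYNC slot of
 * the inet domain, with the raw-IP handlers standing in for the unused
 * output/ctloutput/usrreq entry points.
 */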
static int
pfsync_init(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	int error = 0;

	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		V_pfsync_cloner = pfsync_cloner;
		V_pfsync_cloner_data = pfsync_cloner_data;
		V_pfsync_cloner.ifc_data = &V_pfsync_cloner_data;
		if_clone_attach(&V_pfsync_cloner);
		error = swi_add(NULL, "pfsync", pfsyncintr, V_pfsyncif,
		    SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie);
		CURVNET_RESTORE();
		if (error)
			goto fail_locked;
	}
	VNET_LIST_RUNLOCK();

	error = pf_proto_register(PF_INET, &in_pfsync_protosw);
	if (error)
		goto fail;
	error = ipproto_register(IPPROTO_PFSYNC);
	if (error) {
		pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW);
		goto fail;
	}

	pfsync_state_import_ptr = pfsync_state_import;
	pfsync_up_ptr = pfsync_up;
	pfsync_insert_state_ptr = pfsync_insert_state;
	pfsync_update_state_ptr = pfsync_update_state;
	pfsync_delete_state_ptr = pfsync_delete_state;
	pfsync_clear_states_ptr = pfsync_clear_states;
	pfsync_state_in_use_ptr = pfsync_state_in_use;
	pfsync_defer_ptr = pfsync_defer;

	return (0);

fail:
	VNET_LIST_RLOCK();
fail_locked:
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		if (V_pfsync_swi_cookie) {
			swi_remove(V_pfsync_swi_cookie);
			if_clone_detach(&V_pfsync_cloner);
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();

	return (error);
}
static void
pfsync_uninit(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	pfsync_state_import_ptr = NULL;
	pfsync_up_ptr = NULL;
	pfsync_insert_state_ptr = NULL;
	pfsync_update_state_ptr = NULL;
	pfsync_delete_state_ptr = NULL;
	pfsync_clear_states_ptr = NULL;
	pfsync_state_in_use_ptr = NULL;
	pfsync_defer_ptr = NULL;

	ipproto_unregister(IPPROTO_PFSYNC);
	pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW);

	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		swi_remove(V_pfsync_swi_cookie);
		if_clone_detach(&V_pfsync_cloner);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();
}
static int
pfsync_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pfsync_init();
		break;
	case MOD_QUIESCE:
		/*
		 * Module should not be unloaded due to race conditions.
		 */
		error = EBUSY;
		break;
	case MOD_UNLOAD:
		pfsync_uninit();
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

static moduledata_t pfsync_mod = {
	"pfsync",
	pfsync_modevent,
	0
};

#define PFSYNC_MODVER 1

DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_VERSION(pfsync, PFSYNC_MODVER);
MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);
#endif /* __FreeBSD__ */