/*	$OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $	*/
 * Copyright (c) 2002 Michael Shalayeff
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include "opt_inet6.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#define	NBPFILTER	DEV_BPF
#define	NPFSYNC		DEV_PFSYNC
#define	NCARP		DEV_CARP
#endif /* __FreeBSD__ */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <sys/mutex.h>
#include <sys/ioctl.h>
#include <sys/timeout.h>
#include <sys/sysctl.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/netisr.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/nd6.h>
#include <netinet/ip_carp.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include "bpfilter.h"
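/*
 * The smallest pfsync packet we can send: an IP header, the pfsync
 * header, a single subheader, and the EOF record.
 */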
#define PFSYNC_MINPKT ( \
	sizeof(struct ip) + \
	sizeof(struct pfsync_header) + \
	sizeof(struct pfsync_subheader) + \
	sizeof(struct pfsync_eof))
int	pfsync_input_hmac(struct mbuf *, int);
int	pfsync_upd_tcp(struct pf_state *, struct pfsync_state_peer *,
	    struct pfsync_state_peer *);
int	pfsync_in_clr(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_ins(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_iack(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_upd(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_upd_c(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_ureq(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_del(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_del_c(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_bus(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_tdb(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_eof(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_error(struct pfsync_pkt *, struct mbuf *, int, int);
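/*
 * Incoming subheaders are dispatched through this table, indexed by
 * their action code; the unimplemented PFSYNC_ACT_INS_F/DEL_F actions
 * are routed to pfsync_in_error().
 */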
int	(*pfsync_acts[])(struct pfsync_pkt *, struct mbuf *, int, int) = {
	pfsync_in_clr,			/* PFSYNC_ACT_CLR */
	pfsync_in_ins,			/* PFSYNC_ACT_INS */
	pfsync_in_iack,			/* PFSYNC_ACT_INS_ACK */
	pfsync_in_upd,			/* PFSYNC_ACT_UPD */
	pfsync_in_upd_c,		/* PFSYNC_ACT_UPD_C */
	pfsync_in_ureq,			/* PFSYNC_ACT_UPD_REQ */
	pfsync_in_del,			/* PFSYNC_ACT_DEL */
	pfsync_in_del_c,		/* PFSYNC_ACT_DEL_C */
	pfsync_in_error,		/* PFSYNC_ACT_INS_F */
	pfsync_in_error,		/* PFSYNC_ACT_DEL_F */
	pfsync_in_bus,			/* PFSYNC_ACT_BUS */
	pfsync_in_tdb,			/* PFSYNC_ACT_TDB */
	pfsync_in_eof			/* PFSYNC_ACT_EOF */
	int		(*write)(struct pf_state *, struct mbuf *, int);
/* we have one of these for every PFSYNC_S_ */
int	pfsync_out_state(struct pf_state *, struct mbuf *, int);
int	pfsync_out_iack(struct pf_state *, struct mbuf *, int);
int	pfsync_out_upd_c(struct pf_state *, struct mbuf *, int);
int	pfsync_out_del(struct pf_state *, struct mbuf *, int);
struct pfsync_q pfsync_qs[] = {
	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_INS },
	{ pfsync_out_iack,  sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_UPD },
	{ pfsync_out_upd_c, sizeof(struct pfsync_upd_c),   PFSYNC_ACT_UPD_C },
	{ pfsync_out_del,   sizeof(struct pfsync_del_c),   PFSYNC_ACT_DEL_C }
void	pfsync_q_ins(struct pf_state *, int);
void	pfsync_q_del(struct pf_state *);
struct pfsync_upd_req_item {
	TAILQ_ENTRY(pfsync_upd_req_item) ur_entry;
	struct pfsync_upd_req		ur_msg;
TAILQ_HEAD(pfsync_upd_reqs, pfsync_upd_req_item);
struct pfsync_deferral {
	TAILQ_ENTRY(pfsync_deferral) pd_entry;
	struct pf_state		*pd_st;
	struct callout		 pd_tmo;
	struct timeout		 pd_tmo;
TAILQ_HEAD(pfsync_deferrals, pfsync_deferral);
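/*
 * A single pool backs both update request items and deferrals, so the
 * item size must cover the larger of the two structures.
 */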
#define PFSYNC_PLSIZE	MAX(sizeof(struct pfsync_upd_req_item), \
			    sizeof(struct pfsync_deferral))
int	pfsync_out_tdb(struct tdb *, struct mbuf *, int);
struct pfsync_softc {
	struct ifnet		*sc_ifp;
	struct ifnet		*sc_sync_if;
	struct ip_moptions	 sc_imo;
	struct in_addr		 sc_sync_peer;
	u_int8_t		 sc_maxupdates;
	struct ip		 sc_template;
	struct pf_state_queue	 sc_qs[PFSYNC_S_COUNT];
	struct pfsync_upd_reqs	 sc_upd_req_list;
	struct pfsync_deferrals	 sc_deferrals;
	u_int32_t		 sc_ureq_sent;
	struct callout		 sc_bulkfail_tmo;
	struct timeout		 sc_bulkfail_tmo;
	u_int32_t		 sc_ureq_received;
	struct pf_state		*sc_bulk_next;
	struct pf_state		*sc_bulk_last;
	struct callout		 sc_bulk_tmo;
	struct timeout		 sc_bulk_tmo;
	TAILQ_HEAD(, tdb)	 sc_tdb_q;
	struct callout		 sc_tmo;
	struct timeout		 sc_tmo;
	eventhandler_tag	 sc_detachtag;
static VNET_DEFINE(struct pfsync_softc *, pfsyncif) = NULL;
#define	V_pfsyncif		VNET(pfsyncif)
static VNET_DEFINE(struct pfsyncstats, pfsyncstats);
#define	V_pfsyncstats		VNET(pfsyncstats)
SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW, 0, "PFSYNC");
SYSCTL_VNET_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_RW,
    &VNET_NAME(pfsyncstats), pfsyncstats,
    "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
struct pfsync_softc	*pfsyncif = NULL;
struct pfsyncstats	 pfsyncstats;
#define	V_pfsyncstats	 pfsyncstats
static void	pfsyncintr(void *);
void	*pfsync_swi_cookie;
static struct pfsync_swi pfsync_swi;
#define	schednetisr(p)	swi_sched(pfsync_swi.pfsync_swi_cookie, 0)
#define	NETISR_PFSYNC
void	pfsyncattach(int);
int	pfsync_clone_create(struct if_clone *, int, caddr_t);
void	pfsync_clone_destroy(struct ifnet *);
int	pfsync_clone_create(struct if_clone *, int);
int	pfsync_clone_destroy(struct ifnet *);
int	pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
	    struct pf_state_peer *);
void	pfsync_update_net_tdb(struct pfsync_tdb *);
int	pfsyncoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
int	pfsyncioctl(struct ifnet *, u_long, caddr_t);
void	pfsyncstart(struct ifnet *);
struct mbuf *pfsync_if_dequeue(struct ifnet *);
struct mbuf *pfsync_get_mbuf(struct pfsync_softc *);
void	pfsync_deferred(struct pf_state *, int);
void	pfsync_undefer(struct pfsync_deferral *, int);
void	pfsync_defer_tmo(void *);
void	pfsync_request_update(u_int32_t, u_int64_t);
void	pfsync_update_state_req(struct pf_state *);
void	pfsync_drop(struct pfsync_softc *);
void	pfsync_sendout(void);
void	pfsync_send_plus(void *, size_t);
int	pfsync_tdb_sendout(struct pfsync_softc *);
int	pfsync_sendout_mbuf(struct pfsync_softc *, struct mbuf *);
void	pfsync_timeout(void *);
void	pfsync_tdb_timeout(void *);
void	pfsync_send_bus(struct pfsync_softc *, u_int8_t);
void	pfsync_bulk_start(void);
void	pfsync_bulk_status(u_int8_t);
void	pfsync_bulk_update(void *);
void	pfsync_bulk_fail(void *);
void	pfsync_ifdetach(void *, struct ifnet *);
#define betoh64		(unsigned long long)be64toh
#define timeout_del	callout_stop
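/*
 * Give up on a bulk update after this many failed attempts; each retry
 * waits five seconds (see pfsync_bulk_fail()).
 */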
#define	PFSYNC_MAX_BULKTRIES	12
IFC_SIMPLE_DECLARE(pfsync, 1);
struct if_clone pfsync_cloner =
    IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create, pfsync_clone_destroy);
pfsyncattach(int npfsync)
	if_clone_attach(&pfsync_cloner);
pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
pfsync_clone_create(struct if_clone *ifc, int unit)
	struct pfsync_softc *sc;
	sc = malloc(sizeof(struct pfsync_softc), M_DEVBUF, M_NOWAIT | M_ZERO);
	for (q = 0; q < PFSYNC_S_COUNT; q++)
		TAILQ_INIT(&sc->sc_qs[q]);
	sc->pfsync_sync_ok = 1;
	sc->sc_pool = uma_zcreate("pfsync", PFSYNC_PLSIZE,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	if (sc->sc_pool == NULL) {
	pool_init(&sc->sc_pool, PFSYNC_PLSIZE, 0, 0, 0, "pfsync", NULL);
	TAILQ_INIT(&sc->sc_upd_req_list);
	TAILQ_INIT(&sc->sc_deferrals);
	TAILQ_INIT(&sc->sc_tdb_q);
	sc->sc_len = PFSYNC_MINPKT;
	sc->sc_maxupdates = 128;
	sc->sc_imo.imo_membership = (struct in_multi **)malloc(
	    (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_DEVBUF,
	sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;
	sc->sc_imo.imo_multicast_vif = -1;
	sc->sc_imo.imo_membership = (struct in_multi **)malloc(
	    (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_IPMOPTS,
	sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;
	ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);
		free(sc->sc_imo.imo_membership, M_DEVBUF);
		uma_zdestroy(sc->sc_pool);
	if_initname(ifp, ifc->ifc_name, unit);
	sc->sc_detachtag = EVENTHANDLER_REGISTER(ifnet_departure_event,
	    pfsync_ifdetach, V_pfsyncif, EVENTHANDLER_PRI_ANY);
	    pfsync_ifdetach, pfsyncif, EVENTHANDLER_PRI_ANY);
	if (sc->sc_detachtag == NULL) {
		free(sc->sc_imo.imo_membership, M_DEVBUF);
		uma_zdestroy(sc->sc_pool);
	snprintf(ifp->if_xname, sizeof ifp->if_xname, "pfsync%d", unit);
	ifp->if_ioctl = pfsyncioctl;
	ifp->if_output = pfsyncoutput;
	ifp->if_start = pfsyncstart;
	ifp->if_type = IFT_PFSYNC;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_hdrlen = sizeof(struct pfsync_header);
	ifp->if_mtu = 1500; /* XXX */
	callout_init(&sc->sc_tmo, CALLOUT_MPSAFE);
	callout_init_mtx(&sc->sc_bulk_tmo, &pf_task_mtx, 0);
	callout_init(&sc->sc_bulkfail_tmo, CALLOUT_MPSAFE);
	ifp->if_hardmtu = MCLBYTES; /* XXX */
	timeout_set(&sc->sc_tmo, pfsync_timeout, sc);
	timeout_set(&sc->sc_bulk_tmo, pfsync_bulk_update, sc);
	timeout_set(&sc->sc_bulkfail_tmo, pfsync_bulk_fail, sc);
	if_addgroup(ifp, "carp");
	bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
	bpfattach(&sc->sc_if.if_bpf, ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
pfsync_clone_destroy(struct ifnet *ifp)
	struct pfsync_softc *sc = ifp->if_softc;
	EVENTHANDLER_DEREGISTER(ifnet_departure_event, sc->sc_detachtag);
	timeout_del(&sc->sc_bulk_tmo);	/* XXX: need PF_LOCK() before */
	timeout_del(&sc->sc_tmo);
	if (!sc->pfsync_sync_ok)
		carp_group_demote_adj(&sc->sc_if, -1);
	while (sc->sc_deferred > 0)
		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);
	UMA_DESTROY(sc->sc_pool);
	pool_destroy(&sc->sc_pool);
	free(sc->sc_imo.imo_membership, M_DEVBUF);
	free(sc->sc_imo.imo_membership, M_IPMOPTS);
pfsync_if_dequeue(struct ifnet *ifp)
	IF_LOCK(&ifp->if_snd);
	_IF_DROP(&ifp->if_snd);
	_IF_DEQUEUE(&ifp->if_snd, m);
	IF_UNLOCK(&ifp->if_snd);
	IF_DEQUEUE(&ifp->if_snd, m);
 * Start output on the pfsync interface.
pfsyncstart(struct ifnet *ifp)
	while ((m = pfsync_if_dequeue(ifp)) != NULL) {
		IF_DROP(&ifp->if_snd);
pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
    struct pf_state_peer *d)
	if (s->scrub.scrub_flag && d->scrub == NULL) {
		d->scrub = pool_get(&V_pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
		d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
		if (d->scrub == NULL)
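/*
 * Export a kernel pf_state into its wire representation (network byte
 * order) for transmission to a peer.
 */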
pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
	bzero(sp, sizeof(struct pfsync_state));
	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;
	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_second - st->creation);
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_second)
		sp->expire = htonl(0);
		sp->expire = htonl(sp->expire - time_second);
	sp->direction = st->direction;
	sp->timeout = st->timeout;
	sp->state_flags = st->state_flags;
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
	bcopy(&st->id, &sp->id, sizeof(sp->id));
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);
	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);
	pf_state_counter_hton(st->packets[0], sp->packets[0]);
	pf_state_counter_hton(st->packets[1], sp->packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
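/*
 * Build a local state from a wire-format pfsync_state and insert it;
 * returns 0 on success or an errno (e.g. ENOMEM) on failure.
 */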
pfsync_state_import(struct pfsync_state *sp, u_int8_t flags)
	struct pf_state	*st = NULL;
	struct pf_state_key *skw = NULL, *sks = NULL;
	struct pf_rule *r = NULL;
	if (sp->creatorid == 0 && V_pf_status.debug >= PF_DEBUG_MISC) {
	if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) {
		printf("pfsync_state_import: invalid creator id:"
		    " %08x\n", ntohl(sp->creatorid));
	if ((kif = pfi_kif_get(sp->ifname)) == NULL) {
		if (V_pf_status.debug >= PF_DEBUG_MISC)
		if (pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync_state_import: "
			    "unknown interface: %s\n", sp->ifname);
		if (flags & PFSYNC_SI_IOCTL)
		return (0);	/* skip this state */
	/*
	 * If the ruleset checksums match or the state is coming from the ioctl,
	 * it's safe to associate the state with the rule of that number.
	 */
	if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
	    (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) <
	    pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
		r = pf_main_ruleset.rules[
		    PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
		r = &V_pf_default_rule;
		r = &pf_default_rule;
	if ((r->max_states && r->states_cur >= r->max_states))
	if (flags & PFSYNC_SI_IOCTL)
		pool_flags = PR_WAITOK | PR_ZERO;
		pool_flags = PR_NOWAIT | PR_ZERO;
	if ((st = pool_get(&V_pf_state_pl, pool_flags)) == NULL)
	if (flags & PFSYNC_SI_IOCTL)
		pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO;
		pool_flags = PR_LIMITFAIL | PR_ZERO;
	if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL)
	if ((skw = pf_alloc_state_key(pool_flags)) == NULL)
	if (PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0],
	    &sp->key[PF_SK_STACK].addr[0], sp->af) ||
	    PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1],
	    &sp->key[PF_SK_STACK].addr[1], sp->af) ||
	    sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] ||
	    sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1]) {
		if ((sks = pf_alloc_state_key(pool_flags)) == NULL)
	/* allocate memory for scrub info */
	if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
	    pfsync_alloc_scrub_memory(&sp->dst, &st->dst))
	/* copy to state key(s) */
	skw->addr[0] = sp->key[PF_SK_WIRE].addr[0];
	skw->addr[1] = sp->key[PF_SK_WIRE].addr[1];
	skw->port[0] = sp->key[PF_SK_WIRE].port[0];
	skw->port[1] = sp->key[PF_SK_WIRE].port[1];
	skw->proto = sp->proto;
		sks->addr[0] = sp->key[PF_SK_STACK].addr[0];
		sks->addr[1] = sp->key[PF_SK_STACK].addr[1];
		sks->port[0] = sp->key[PF_SK_STACK].port[0];
		sks->port[1] = sp->key[PF_SK_STACK].port[1];
		sks->proto = sp->proto;
	bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr));
	st->creation = time_second - ntohl(sp->creation);
	st->expire = time_second;
		/* XXX No adaptive scaling. */
		st->expire -= r->timeout[sp->timeout] - ntohl(sp->expire);
	st->expire = ntohl(sp->expire) + time_second;
	st->direction = sp->direction;
	st->timeout = sp->timeout;
	st->state_flags = sp->state_flags;
	bcopy(sp->id, &st->id, sizeof(st->id));
	st->creatorid = sp->creatorid;
	pf_state_peer_ntoh(&sp->src, &st->src);
	pf_state_peer_ntoh(&sp->dst, &st->dst);
	st->nat_rule.ptr = NULL;
	st->anchor.ptr = NULL;
	st->pfsync_time = time_second;
	st->sync_state = PFSYNC_S_NONE;
	/* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
	if (!ISSET(flags, PFSYNC_SI_IOCTL))
		SET(st->state_flags, PFSTATE_NOSYNC);
	if ((error = pf_state_insert(kif, skw, sks, st)) != 0) {
		/* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */
	if (!ISSET(flags, PFSYNC_SI_IOCTL)) {
		CLR(st->state_flags, PFSTATE_NOSYNC);
		if (ISSET(st->state_flags, PFSTATE_ACK)) {
			pfsync_q_ins(st, PFSYNC_S_IACK);
			schednetisr(NETISR_PFSYNC);
	CLR(st->state_flags, PFSTATE_ACK);
	pool_put(&V_pf_state_key_pl, skw);
		pool_put(&V_pf_state_key_pl, sks);
	pool_put(&pf_state_key_pl, skw);
		pool_put(&pf_state_key_pl, sks);
cleanup_state:	/* pf_state_insert frees the state keys */
		pool_put(&V_pf_state_scrub_pl, st->dst.scrub);
		pool_put(&V_pf_state_scrub_pl, st->src.scrub);
	pool_put(&V_pf_state_pl, st);
		pool_put(&pf_state_scrub_pl, st->dst.scrub);
		pool_put(&pf_state_scrub_pl, st->src.scrub);
	pool_put(&pf_state_pl, st);
pfsync_input(struct mbuf *m, __unused int off)
pfsync_input(struct mbuf *m, ...)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	struct pfsync_pkt pkt;
	struct ip *ip = mtod(m, struct ip *);
	struct pfsync_header *ph;
	struct pfsync_subheader subh;
	V_pfsyncstats.pfsyncs_ipackets++;
	/* verify that we have a sync interface configured */
	if (!sc || !sc->sc_sync_if || !V_pf_status.running)
	if (!sc || !sc->sc_sync_if || !pf_status.running)
	/* verify that the packet came in on the right interface */
	if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
		V_pfsyncstats.pfsyncs_badif++;
	sc->sc_ifp->if_ipackets++;
	sc->sc_ifp->if_ibytes += m->m_pkthdr.len;
	sc->sc_if.if_ipackets++;
	sc->sc_if.if_ibytes += m->m_pkthdr.len;
	/* verify that the IP TTL is 255. */
	if (ip->ip_ttl != PFSYNC_DFLTTL) {
		V_pfsyncstats.pfsyncs_badttl++;
	offset = ip->ip_hl << 2;
	if (m->m_pkthdr.len < offset + sizeof(*ph)) {
		V_pfsyncstats.pfsyncs_hdrops++;
	if (offset + sizeof(*ph) > m->m_len) {
		if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
			V_pfsyncstats.pfsyncs_hdrops++;
		ip = mtod(m, struct ip *);
	ph = (struct pfsync_header *)((char *)ip + offset);
	/* verify the version */
	if (ph->version != PFSYNC_VERSION) {
		V_pfsyncstats.pfsyncs_badver++;
	if (pfsync_input_hmac(m, offset) != 0) {
	/* Cheaper to grab this now than having to mess with mbufs later */
	pkt.src = ip->ip_src;
	if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
	if (!bcmp(&ph->pfcksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
		pkt.flags |= PFSYNC_SI_CKSUM;
	offset += sizeof(*ph);
		m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
		offset += sizeof(subh);
		if (subh.action >= PFSYNC_ACT_MAX) {
			V_pfsyncstats.pfsyncs_badact++;
		rv = (*pfsync_acts[subh.action])(&pkt, m, offset,
pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_clr *clr;
	int len = sizeof(*clr) * count;
	struct pf_state *st, *nexts;
	struct pf_state_key *sk, *nextsk;
	struct pf_state_item *si;
	u_int32_t creatorid;
	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	clr = (struct pfsync_clr *)(mp->m_data + offp);
	for (i = 0; i < count; i++) {
		creatorid = clr[i].creatorid;
		if (clr[i].ifname[0] == '\0') {
			for (st = RB_MIN(pf_state_tree_id, &V_tree_id);
				nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, st);
			for (st = RB_MIN(pf_state_tree_id, &tree_id);
				nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
				if (st->creatorid == creatorid) {
					SET(st->state_flags, PFSTATE_NOSYNC);
					pf_unlink_state(st);
			if (pfi_kif_get(clr[i].ifname) == NULL)
			for (sk = RB_MIN(pf_state_tree, &V_pf_statetbl);
			for (sk = RB_MIN(pf_state_tree, &pf_statetbl);
				nextsk = RB_NEXT(pf_state_tree,
				    &V_pf_statetbl, sk);
				TAILQ_FOREACH(si, &sk->states, entry) {
					if (si->s->creatorid == creatorid) {
						SET(si->s->state_flags,
						pf_unlink_state(si->s);
pfsync_in_ins(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_state *sa, *sp;
	int len = sizeof(*sp) * count;
	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	sa = (struct pfsync_state *)(mp->m_data + offp);
	for (i = 0; i < count; i++) {
		/* check for invalid values */
		if (sp->timeout >= PFTM_MAX ||
		    sp->src.state > PF_TCPS_PROXY_DST ||
		    sp->dst.state > PF_TCPS_PROXY_DST ||
		    sp->direction > PF_OUT ||
		    (sp->af != AF_INET && sp->af != AF_INET6)) {
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
			if (pf_status.debug >= PF_DEBUG_MISC) {
				printf("pfsync_input: PFSYNC5_ACT_INS: "
			V_pfsyncstats.pfsyncs_badval++;
		if (pfsync_state_import(sp, pkt->flags) == ENOMEM) {
			/* drop out, but process the rest of the actions */
pfsync_in_iack(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_ins_ack *ia, *iaa;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*ia);
	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	iaa = (struct pfsync_ins_ack *)(mp->m_data + offp);
	for (i = 0; i < count; i++) {
		bcopy(&ia->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ia->creatorid;
		st = pf_find_state_byid(&id_key);
		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 0);
	/*
	 * XXX this is not yet implemented, but we know the size of the
	 * message so we can skip it.
	 */
	return (count * sizeof(struct pfsync_ins_ack));
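/*
 * Sanity-check a TCP state update against what we already have.
 * Returns 0 when the update is acceptable, or a nonzero code (the
 * "sfail" value logged by the callers) naming the check that failed.
 */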
pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src,
    struct pfsync_state_peer *dst)
	/*
	 * The state should never go backwards except
	 * for syn-proxy states. Neither should the
	 * sequence window slide backwards.
	 */
	if (st->src.state > src->state &&
	    (st->src.state < PF_TCPS_PROXY_SRC ||
	    src->state >= PF_TCPS_PROXY_SRC))
	else if (SEQ_GT(st->src.seqlo, ntohl(src->seqlo)))
	else if (st->dst.state > dst->state) {
		/* There might still be useful
		 * information about the src state here,
		 * so import that part of the update,
		 * then "fail" so we send the updated
		 * state back to the peer who is missing
		 * what we know. */
		pf_state_peer_ntoh(src, &st->src);
		/* XXX do anything with timeouts? */
	} else if (st->dst.state >= TCPS_SYN_SENT &&
	    SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo)))
pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_state *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state_key *sk;
	struct pf_state *st;
	int len = count * sizeof(*sp);
	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	sa = (struct pfsync_state *)(mp->m_data + offp);
	for (i = 0; i < count; i++) {
		/* check for invalid values */
		if (sp->timeout >= PFTM_MAX ||
		    sp->src.state > PF_TCPS_PROXY_DST ||
		    sp->dst.state > PF_TCPS_PROXY_DST) {
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
			if (pf_status.debug >= PF_DEBUG_MISC) {
				printf("pfsync_input: PFSYNC_ACT_UPD: "
			V_pfsyncstats.pfsyncs_badval++;
		bcopy(sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;
		st = pf_find_state_byid(&id_key);
			/* insert the update */
			if (pfsync_state_import(sp, 0))
				V_pfsyncstats.pfsyncs_badstate++;
		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 1);
		sk = st->key[PF_SK_WIRE]; /* XXX right one? */
		if (sk->proto == IPPROTO_TCP)
			sfail = pfsync_upd_tcp(st, &sp->src, &sp->dst);
			/* Non-TCP protocol state machines always go forward */
			if (st->src.state > sp->src.state)
			else if (st->dst.state > sp->dst.state)
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
		if (pf_status.debug >= PF_DEBUG_MISC) {
			printf("pfsync: %s stale update (%d)"
			    " id: %016llx creatorid: %08x\n",
			    (sfail < 7 ? "ignoring" : "partial"),
			    sfail, betoh64(st->id),
			    ntohl(st->creatorid));
			V_pfsyncstats.pfsyncs_stale++;
			pfsync_update_state(st);
			schednetisr(NETISR_PFSYNC);
		pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
		pf_state_peer_ntoh(&sp->src, &st->src);
		pf_state_peer_ntoh(&sp->dst, &st->dst);
		st->expire = ntohl(sp->expire) + time_second;
		st->timeout = sp->timeout;
		st->pfsync_time = time_second;
pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_upd_c *ua, *up;
	struct pf_state_key *sk;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*up);
	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	ua = (struct pfsync_upd_c *)(mp->m_data + offp);
	for (i = 0; i < count; i++) {
		/* check for invalid values */
		if (up->timeout >= PFTM_MAX ||
		    up->src.state > PF_TCPS_PROXY_DST ||
		    up->dst.state > PF_TCPS_PROXY_DST) {
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
			if (pf_status.debug >= PF_DEBUG_MISC) {
				printf("pfsync_input: "
				    "PFSYNC_ACT_UPD_C: "
			V_pfsyncstats.pfsyncs_badval++;
		bcopy(&up->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = up->creatorid;
		st = pf_find_state_byid(&id_key);
			/* We don't have this state. Ask for it. */
			pfsync_request_update(id_key.creatorid, id_key.id);
		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 1);
		sk = st->key[PF_SK_WIRE]; /* XXX right one? */
		if (sk->proto == IPPROTO_TCP)
			sfail = pfsync_upd_tcp(st, &up->src, &up->dst);
			/* Non-TCP protocol state machines always go forward */
			if (st->src.state > up->src.state)
			else if (st->dst.state > up->dst.state)
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
		if (pf_status.debug >= PF_DEBUG_MISC) {
			printf("pfsync: ignoring stale update "
			    "creatorid: %08x\n", sfail,
			    ntohl(st->creatorid));
			V_pfsyncstats.pfsyncs_stale++;
			pfsync_update_state(st);
			schednetisr(NETISR_PFSYNC);
		pfsync_alloc_scrub_memory(&up->dst, &st->dst);
		pf_state_peer_ntoh(&up->src, &st->src);
		pf_state_peer_ntoh(&up->dst, &st->dst);
		st->expire = ntohl(up->expire) + time_second;
		st->timeout = up->timeout;
		st->pfsync_time = time_second;
pfsync_in_ureq(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_upd_req *ur, *ura;
	int len = count * sizeof(*ur);
	struct pf_state_cmp id_key;
	struct pf_state *st;
	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	ura = (struct pfsync_upd_req *)(mp->m_data + offp);
	for (i = 0; i < count; i++) {
		bcopy(&ur->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ur->creatorid;
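		/* an all-zero request asks for a full bulk update */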
		if (id_key.id == 0 && id_key.creatorid == 0)
			pfsync_bulk_start();
		st = pf_find_state_byid(&id_key);
			V_pfsyncstats.pfsyncs_badstate++;
		if (ISSET(st->state_flags, PFSTATE_NOSYNC))
		pfsync_update_state_req(st);
pfsync_in_del(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_state *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*sp);
	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	sa = (struct pfsync_state *)(mp->m_data + offp);
	for (i = 0; i < count; i++) {
		bcopy(sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;
		st = pf_find_state_byid(&id_key);
			V_pfsyncstats.pfsyncs_badstate++;
		SET(st->state_flags, PFSTATE_NOSYNC);
		pf_unlink_state(st);
pfsync_in_del_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_del_c *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*sp);
	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	sa = (struct pfsync_del_c *)(mp->m_data + offp);
	for (i = 0; i < count; i++) {
		bcopy(&sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;
		st = pf_find_state_byid(&id_key);
			V_pfsyncstats.pfsyncs_badstate++;
		SET(st->state_flags, PFSTATE_NOSYNC);
		pf_unlink_state(st);
pfsync_in_bus(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	struct pfsync_bus *bus;
	int len = count * sizeof(*bus);
	/* If we're not waiting for a bulk update, who cares. */
	if (sc->sc_ureq_sent == 0)
	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	bus = (struct pfsync_bus *)(mp->m_data + offp);
	switch (bus->status) {
	case PFSYNC_BUS_START:
		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail,
		timeout_add_sec(&sc->sc_bulkfail_tmo, 5); /* XXX magic */
		    pf_pool_limits[PF_LIMIT_STATES].limit /
		    (PFSYNC_BULKPACKETS * sc->sc_maxcount));
		if (V_pf_status.debug >= PF_DEBUG_MISC)
		if (pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync: received bulk update start\n");
	case PFSYNC_BUS_END:
		if (time_uptime - ntohl(bus->endtime) >=
			/* that's it, we're happy */
			sc->sc_ureq_sent = 0;
			sc->sc_bulk_tries = 0;
			timeout_del(&sc->sc_bulkfail_tmo);
			if (!sc->pfsync_sync_ok)
			if (!pfsync_sync_ok)
				carp_group_demote_adj(&sc->sc_if, -1);
			sc->pfsync_sync_ok = 1;
			if (V_pf_status.debug >= PF_DEBUG_MISC)
			if (pf_status.debug >= PF_DEBUG_MISC)
				printf("pfsync: received valid "
				    "bulk update end\n");
			if (V_pf_status.debug >= PF_DEBUG_MISC)
			if (pf_status.debug >= PF_DEBUG_MISC)
				printf("pfsync: received invalid "
				    "bulk update end: bad timestamp\n");
pfsync_in_tdb(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	int len = count * sizeof(struct pfsync_tdb);
	struct pfsync_tdb *tp;
	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	tp = (struct pfsync_tdb *)(mp->m_data + offp);
	for (i = 0; i < count; i++)
		pfsync_update_net_tdb(&tp[i]);
/* Update an in-kernel tdb. Silently fail if no tdb is found. */
pfsync_update_net_tdb(struct pfsync_tdb *pt)
	/* check for invalid values */
	if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
	    (pt->dst.sa.sa_family != AF_INET &&
	    pt->dst.sa.sa_family != AF_INET6))
	tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
		pt->rpl = ntohl(pt->rpl);
		pt->cur_bytes = betoh64(pt->cur_bytes);
		/* Neither replay nor byte counter should ever decrease. */
		if (pt->rpl < tdb->tdb_rpl ||
		    pt->cur_bytes < tdb->tdb_cur_bytes) {
		tdb->tdb_rpl = pt->rpl;
		tdb->tdb_cur_bytes = pt->cur_bytes;
	if (V_pf_status.debug >= PF_DEBUG_MISC)
	if (pf_status.debug >= PF_DEBUG_MISC)
		printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
	V_pfsyncstats.pfsyncs_badstate++;
pfsync_in_eof(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	/* check if we are at the right place in the packet */
	if (offset != m->m_pkthdr.len - sizeof(struct pfsync_eof))
		V_pfsyncstats.pfsyncs_badact++;
	/* we're done. free and let the caller return */
pfsync_in_error(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	V_pfsyncstats.pfsyncs_badact++;
pfsyncoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
	struct proc *p = curproc;
	struct pfsync_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ip_moptions *imo = &sc->sc_imo;
	struct pfsyncreq pfsyncr;
	case SIOCSIFDSTADDR:
		if (ifp->if_flags & IFF_UP)
			ifp->if_drv_flags |= IFF_DRV_RUNNING;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		if (ifp->if_flags & IFF_UP)
			ifp->if_flags |= IFF_RUNNING;
			ifp->if_flags &= ~IFF_RUNNING;
		if (ifr->ifr_mtu <= PFSYNC_MINPKT)
		if (ifr->ifr_mtu > MCLBYTES) /* XXX could be bigger */
			ifr->ifr_mtu = MCLBYTES;
		if (ifr->ifr_mtu < ifp->if_mtu) {
		ifp->if_mtu = ifr->ifr_mtu;
		bzero(&pfsyncr, sizeof(pfsyncr));
		if (sc->sc_sync_if) {
			strlcpy(pfsyncr.pfsyncr_syncdev,
			    sc->sc_sync_if->if_xname, IFNAMSIZ);
		pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
		return (copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr)));
		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
		if ((error = suser(p, p->p_acflag)) != 0)
		if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))
		if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
			sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
			sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
			sc->sc_sync_peer.s_addr =
			    pfsyncr.pfsyncr_syncpeer.s_addr;
		if (pfsyncr.pfsyncr_maxupdates > 255)
		sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;
		if (pfsyncr.pfsyncr_syncdev[0] == 0) {
			sc->sc_sync_if = NULL;
			if (imo->imo_num_memberships > 0) {
				in_delmulti(imo->imo_membership[
				    --imo->imo_num_memberships]);
				imo->imo_multicast_ifp = NULL;
		if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL)
		if (sifp->if_mtu < sc->sc_ifp->if_mtu ||
		if (sifp->if_mtu < sc->sc_if.if_mtu ||
		    (sc->sc_sync_if != NULL &&
		    sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
		    sifp->if_mtu < MCLBYTES - sizeof(struct ip))
		sc->sc_sync_if = sifp;
		if (imo->imo_num_memberships > 0) {
			in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
			imo->imo_multicast_ifp = NULL;
		if (sc->sc_sync_if &&
		    sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
		    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
			struct in_addr addr;
			if (!(sc->sc_sync_if->if_flags & IFF_MULTICAST)) {
				sc->sc_sync_if = NULL;
				return (EADDRNOTAVAIL);
			addr.s_addr = htonl(INADDR_PFSYNC_GROUP);
			addr.s_addr = INADDR_PFSYNC_GROUP;
			if ((imo->imo_membership[0] =
			    in_addmulti(&addr, sc->sc_sync_if)) == NULL) {
				sc->sc_sync_if = NULL;
			imo->imo_num_memberships++;
			imo->imo_multicast_ifp = sc->sc_sync_if;
			imo->imo_multicast_ttl = PFSYNC_DFLTTL;
			imo->imo_multicast_loop = 0;
		ip = &sc->sc_template;
		bzero(ip, sizeof(*ip));
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(sc->sc_template) >> 2;
		ip->ip_tos = IPTOS_LOWDELAY;
		/* len and id are set later */
		ip->ip_off = htons(IP_DF);
		ip->ip_ttl = PFSYNC_DFLTTL;
		ip->ip_p = IPPROTO_PFSYNC;
		ip->ip_src.s_addr = INADDR_ANY;
		ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr;
		if (sc->sc_sync_if) {
			/* Request a full state table update. */
			sc->sc_ureq_sent = time_uptime;
			if (sc->pfsync_sync_ok)
				carp_group_demote_adj(&sc->sc_if, 1);
			sc->pfsync_sync_ok = 0;
			if (V_pf_status.debug >= PF_DEBUG_MISC)
			if (pf_status.debug >= PF_DEBUG_MISC)
				printf("pfsync: requesting bulk update\n");
			callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
			    pfsync_bulk_fail, V_pfsyncif);
			timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
			pfsync_request_update(0, 0);
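/*
 * Message writers, dispatched through pfsync_qs[]: each one serializes
 * a single record into the packet under construction at the given
 * offset and returns the number of bytes written.
 */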
pfsync_out_state(struct pf_state *st, struct mbuf *m, int offset)
	struct pfsync_state *sp = (struct pfsync_state *)(m->m_data + offset);
	pfsync_state_export(sp, st);
	return (sizeof(*sp));
pfsync_out_iack(struct pf_state *st, struct mbuf *m, int offset)
	struct pfsync_ins_ack *iack =
	    (struct pfsync_ins_ack *)(m->m_data + offset);
	iack->creatorid = st->creatorid;
	return (sizeof(*iack));
pfsync_out_upd_c(struct pf_state *st, struct mbuf *m, int offset)
	struct pfsync_upd_c *up = (struct pfsync_upd_c *)(m->m_data + offset);
	pf_state_peer_hton(&st->src, &up->src);
	pf_state_peer_hton(&st->dst, &up->dst);
	up->creatorid = st->creatorid;
	up->expire = pf_state_expires(st);
	if (up->expire <= time_second)
		up->expire = htonl(0);
		up->expire = htonl(up->expire - time_second);
	up->timeout = st->timeout;
	bzero(up->_pad, sizeof(up->_pad)); /* XXX */
	return (sizeof(*up));
pfsync_out_del(struct pf_state *st, struct mbuf *m, int offset)
	struct pfsync_del_c *dp = (struct pfsync_del_c *)(m->m_data + offset);
	dp->creatorid = st->creatorid;
	SET(st->state_flags, PFSTATE_NOSYNC);
	return (sizeof(*dp));
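/*
 * Discard everything queued for transmission and reset the pending
 * packet length back to the empty-packet size.
 */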
pfsync_drop(struct pfsync_softc *sc)
	struct pf_state *st;
	struct pfsync_upd_req_item *ur;
	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))
		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
			KASSERT(st->sync_state == q,
			    ("%s: st->sync_state == q",
			KASSERT(st->sync_state == q);
			st->sync_state = PFSYNC_S_NONE;
		TAILQ_INIT(&sc->sc_qs[q]);
	while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
		TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
		pool_put(&sc->sc_pool, ur);
	if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
		TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry)
			CLR(t->tdb_flags, TDBF_PFSYNC);
		TAILQ_INIT(&sc->sc_tdb_q);
	sc->sc_len = PFSYNC_MINPKT;
pfsync_sendout(void)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	struct ifnet *ifp = sc->sc_ifp;
	struct ifnet *ifp = &sc->sc_if;
	struct pfsync_header *ph;
	struct pfsync_subheader *subh;
	struct pf_state *st;
	struct pfsync_upd_req_item *ur;
	if (sc == NULL || sc->sc_len == PFSYNC_MINPKT)
	if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
	if (sc->sc_sync_if == NULL) {
	MGETHDR(m, M_DONTWAIT, MT_DATA);
		sc->sc_ifp->if_oerrors++;
		sc->sc_if.if_oerrors++;
		V_pfsyncstats.pfsyncs_onomem++;
	pktlen = max_linkhdr + sc->sc_len;
	if (pktlen > MHLEN) {
		/* Find the right pool to allocate from. */
		/* XXX: This is ugly. */
		m_cljget(m, M_DONTWAIT, pktlen <= MSIZE ? MSIZE :
		    pktlen <= MCLBYTES ? MCLBYTES :
#if MJUMPAGESIZE != MCLBYTES
		    pktlen <= MJUMPAGESIZE ? MJUMPAGESIZE :
		    pktlen <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES);
	if (max_linkhdr + sc->sc_len > MHLEN) {
		MCLGETI(m, M_DONTWAIT, NULL, max_linkhdr + sc->sc_len);
	if (!ISSET(m->m_flags, M_EXT)) {
		sc->sc_ifp->if_oerrors++;
		sc->sc_if.if_oerrors++;
		V_pfsyncstats.pfsyncs_onomem++;
	m->m_data += max_linkhdr;
	m->m_len = m->m_pkthdr.len = sc->sc_len;
	/* build the ip header */
	ip = (struct ip *)m->m_data;
	bcopy(&sc->sc_template, ip, sizeof(*ip));
	offset = sizeof(*ip);
	ip->ip_len = m->m_pkthdr.len;
	ip->ip_len = htons(m->m_pkthdr.len);
	ip->ip_id = htons(ip_randomid());
	/* build the pfsync header */
	ph = (struct pfsync_header *)(m->m_data + offset);
	bzero(ph, sizeof(*ph));
	offset += sizeof(*ph);
	ph->version = PFSYNC_VERSION;
	ph->len = htons(sc->sc_len - sizeof(*ip));
	bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
	bcopy(pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
	/* walk the queues */
	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);
		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
			KASSERT(st->sync_state == q,
			    ("%s: st->sync_state == q",
			KASSERT(st->sync_state == q);
			offset += pfsync_qs[q].write(st, m, offset);
			st->sync_state = PFSYNC_S_NONE;
		TAILQ_INIT(&sc->sc_qs[q]);
		bzero(subh, sizeof(*subh));
		subh->action = pfsync_qs[q].action;
		subh->count = htons(count);
	if (!TAILQ_EMPTY(&sc->sc_upd_req_list)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);
		while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
			TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
			bcopy(&ur->ur_msg, m->m_data + offset,
			    sizeof(ur->ur_msg));
			offset += sizeof(ur->ur_msg);
			pool_put(&sc->sc_pool, ur);
		bzero(subh, sizeof(*subh));
		subh->action = PFSYNC_ACT_UPD_REQ;
		subh->count = htons(count);
	/* has someone built a custom region for us to add? */
	if (sc->sc_plus != NULL) {
		bcopy(sc->sc_plus, m->m_data + offset, sc->sc_pluslen);
		offset += sc->sc_pluslen;
	if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);
		TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry) {
			offset += pfsync_out_tdb(t, m, offset);
			CLR(t->tdb_flags, TDBF_PFSYNC);
		TAILQ_INIT(&sc->sc_tdb_q);
		bzero(subh, sizeof(*subh));
		subh->action = PFSYNC_ACT_TDB;
		subh->count = htons(count);
	subh = (struct pfsync_subheader *)(m->m_data + offset);
	offset += sizeof(*subh);
	bzero(subh, sizeof(*subh));
	subh->action = PFSYNC_ACT_EOF;
	subh->count = htons(1);
	/* XXX write checksum in EOF here */
	/* we're done, let's put it on the wire */
	m->m_data += sizeof(*ip);
	m->m_len = m->m_pkthdr.len = sc->sc_len - sizeof(*ip);
	bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
	m->m_data -= sizeof(*ip);
	m->m_len = m->m_pkthdr.len = sc->sc_len;
	if (sc->sc_sync_if == NULL) {
		sc->sc_len = PFSYNC_MINPKT;
	sc->sc_ifp->if_opackets++;
	sc->sc_ifp->if_obytes += m->m_pkthdr.len;
	sc->sc_len = PFSYNC_MINPKT;
	IFQ_ENQUEUE(&sc->sc_ifp->if_snd, m, dummy_error);
	schednetisr(NETISR_PFSYNC);
	sc->sc_if.if_opackets++;
	sc->sc_if.if_obytes += m->m_pkthdr.len;
	if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL) == 0)
		pfsyncstats.pfsyncs_opackets++;
		pfsyncstats.pfsyncs_oerrors++;
	sc->sc_len = PFSYNC_MINPKT;
pfsync_insert_state(struct pf_state *st)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	splassert(IPL_SOFTNET);
	if (ISSET(st->rule.ptr->rule_flag, PFRULE_NOSYNC) ||
	    st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
		SET(st->state_flags, PFSTATE_NOSYNC);
	if (sc == NULL || ISSET(st->state_flags, PFSTATE_NOSYNC))
	KASSERT(st->sync_state == PFSYNC_S_NONE,
	    ("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
	KASSERT(st->sync_state == PFSYNC_S_NONE);
	if (sc->sc_len == PFSYNC_MINPKT)
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		timeout_add_sec(&sc->sc_tmo, 1);
	pfsync_q_ins(st, PFSYNC_S_INS);
	if (ISSET(st->state_flags, PFSTATE_ACK))
		schednetisr(NETISR_PFSYNC);
	st->sync_updates = 0;
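/*
 * Deferral: hold the packet that created a new state until the peer
 * acknowledges the insert (or the deferral times out), so the peer
 * never sees traffic for a state it does not know about yet.
 */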
pfsync_defer(struct pf_state *st, struct mbuf *m)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	struct pfsync_deferral *pd;
	splassert(IPL_SOFTNET);
	if (sc->sc_deferred >= 128)
		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);
	pd = pool_get(&sc->sc_pool, M_NOWAIT);
	m->m_flags |= M_SKIP_FIREWALL;
	m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
	SET(st->state_flags, PFSTATE_ACK);
	TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, pd_entry);
	callout_init(&pd->pd_tmo, CALLOUT_MPSAFE);
	callout_reset(&pd->pd_tmo, defer, pfsync_defer_tmo,
	timeout_set(&pd->pd_tmo, pfsync_defer_tmo, pd);
	timeout_add(&pd->pd_tmo, defer);
pfsync_undefer(struct pfsync_deferral *pd, int drop)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	splassert(IPL_SOFTNET);
	TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
	CLR(pd->pd_st->state_flags, PFSTATE_ACK);
	timeout_del(&pd->pd_tmo); /* bah */
		/* XXX: use pf_defered?! */
		ip_output(pd->pd_m, (void *)NULL, (void *)NULL, 0,
		    (void *)NULL, (void *)NULL);
	pool_put(&sc->sc_pool, pd);
pfsync_defer_tmo(void *arg)
#if defined(__FreeBSD__) && defined(VIMAGE)
	struct pfsync_deferral *pd = arg;
	CURVNET_SET(pd->pd_m->m_pkthdr.rcvif->if_vnet); /* XXX */
	pfsync_undefer(arg, 0);
pfsync_deferred(struct pf_state *st, int drop)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	struct pfsync_deferral *pd;
	TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) {
		if (pd->pd_st == st) {
			pfsync_undefer(pd, drop);
	panic("pfsync_deferred: unable to find deferred state");
u_int pfsync_upds = 0;
pfsync_update_state(struct pf_state *st)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	splassert(IPL_SOFTNET);
	if (ISSET(st->state_flags, PFSTATE_ACK))
		pfsync_deferred(st, 0);
	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
	if (sc->sc_len == PFSYNC_MINPKT)
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		timeout_add_sec(&sc->sc_tmo, 1);
	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
		/* we're already handling it */
		if (st->sync_updates >= sc->sc_maxupdates)
		pfsync_q_ins(st, PFSYNC_S_UPD_C);
		st->sync_updates = 0;
		panic("pfsync_update_state: unexpected sync state %d",
	if (sync || (time_second - st->pfsync_time) < 2) {
		schednetisr(NETISR_PFSYNC);
pfsync_request_update(u_int32_t creatorid, u_int64_t id)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	struct pfsync_upd_req_item *item;
	size_t nlen = sizeof(struct pfsync_upd_req);
	/*
	 * this code does nothing to prevent multiple update requests for the
	 * same state being generated.
	 */
	item = pool_get(&sc->sc_pool, PR_NOWAIT);
	item->ur_msg.id = id;
	item->ur_msg.creatorid = creatorid;
	if (TAILQ_EMPTY(&sc->sc_upd_req_list))
		nlen += sizeof(struct pfsync_subheader);
	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
	if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
		nlen = sizeof(struct pfsync_subheader) +
		    sizeof(struct pfsync_upd_req);
	TAILQ_INSERT_TAIL(&sc->sc_upd_req_list, item, ur_entry);
	schednetisr(NETISR_PFSYNC);
pfsync_update_state_req(struct pf_state *st)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
		panic("pfsync_update_state_req: nonexistent instance");
	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
		pfsync_q_ins(st, PFSYNC_S_UPD);
		schednetisr(NETISR_PFSYNC);
		/* we're already handling it */
		panic("pfsync_update_state_req: unexpected sync state %d",
pfsync_delete_state(struct pf_state *st)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	splassert(IPL_SOFTNET);
	if (ISSET(st->state_flags, PFSTATE_ACK))
		pfsync_deferred(st, 1);
	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
	if (sc->sc_len == PFSYNC_MINPKT)
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		timeout_add_sec(&sc->sc_tmo, 1);
	switch (st->sync_state) {
		/* we never got to tell the world so just forget about it */
	case PFSYNC_S_UPD_C:
		/* FALLTHROUGH to putting it on the del list */
		pfsync_q_ins(st, PFSYNC_S_DEL);
		panic("pfsync_delete_state: unexpected sync state %d",
pfsync_clear_states(u_int32_t creatorid, const char *ifname)
	struct pfsync_subheader subh;
	struct pfsync_clr clr;
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	splassert(IPL_SOFTNET);
	bzero(&r, sizeof(r));
	r.subh.action = PFSYNC_ACT_CLR;
	r.subh.count = htons(1);
	strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
	r.clr.creatorid = creatorid;
	pfsync_send_plus(&r, sizeof(r));
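/*
 * Queue a state onto one of the PFSYNC_S_* transmit queues.  sc_len
 * grows by the message size, plus a subheader if the queue was empty;
 * if the pending packet would exceed the interface MTU, it is flushed
 * first.
 */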
pfsync_q_ins(struct pf_state *st, int q)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	size_t nlen = pfsync_qs[q].len;
	KASSERT(st->sync_state == PFSYNC_S_NONE,
	    ("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
	KASSERT(st->sync_state == PFSYNC_S_NONE);
#if 1 || defined(PFSYNC_DEBUG)
	if (sc->sc_len < PFSYNC_MINPKT)
		panic("pfsync pkt len is too low %zu", sc->sc_len);
		panic("pfsync pkt len is too low %d", sc->sc_len);
	if (TAILQ_EMPTY(&sc->sc_qs[q]))
		nlen += sizeof(struct pfsync_subheader);
	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
	if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
		nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
	TAILQ_INSERT_TAIL(&sc->sc_qs[q], st, sync_list);
pfsync_q_del(struct pf_state *st)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	int q = st->sync_state;
	KASSERT(st->sync_state != PFSYNC_S_NONE,
	    ("%s: st->sync_state != PFSYNC_S_NONE", __FUNCTION__));
	KASSERT(st->sync_state != PFSYNC_S_NONE);
	sc->sc_len -= pfsync_qs[q].len;
	TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list);
	st->sync_state = PFSYNC_S_NONE;
	if (TAILQ_EMPTY(&sc->sc_qs[q]))
		sc->sc_len -= sizeof(struct pfsync_subheader);
pfsync_update_tdb(struct tdb *t, int output)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	size_t nlen = sizeof(struct pfsync_tdb);
	if (!ISSET(t->tdb_flags, TDBF_PFSYNC)) {
		if (TAILQ_EMPTY(&sc->sc_tdb_q))
			nlen += sizeof(struct pfsync_subheader);
		if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
			nlen = sizeof(struct pfsync_subheader) +
			    sizeof(struct pfsync_tdb);
		TAILQ_INSERT_TAIL(&sc->sc_tdb_q, t, tdb_sync_entry);
		SET(t->tdb_flags, TDBF_PFSYNC);
		if (++t->tdb_updates >= sc->sc_maxupdates)
			schednetisr(NETISR_PFSYNC);
		SET(t->tdb_flags, TDBF_PFSYNC_RPL);
		CLR(t->tdb_flags, TDBF_PFSYNC_RPL);
pfsync_delete_tdb(struct tdb *t)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	if (sc == NULL || !ISSET(t->tdb_flags, TDBF_PFSYNC))
	sc->sc_len -= sizeof(struct pfsync_tdb);
	TAILQ_REMOVE(&sc->sc_tdb_q, t, tdb_sync_entry);
	CLR(t->tdb_flags, TDBF_PFSYNC);
	if (TAILQ_EMPTY(&sc->sc_tdb_q))
		sc->sc_len -= sizeof(struct pfsync_subheader);
pfsync_out_tdb(struct tdb *t, struct mbuf *m, int offset)
	struct pfsync_tdb *ut = (struct pfsync_tdb *)(m->m_data + offset);
	bzero(ut, sizeof(*ut));
	ut->spi = t->tdb_spi;
	bcopy(&t->tdb_dst, &ut->dst, sizeof(ut->dst));
	/*
	 * When a failover happens, the master's rpl is probably above
	 * what we see here (we may be up to a second late), so
	 * increase it a bit for outbound tdbs to manage most such
	 * situations.
	 *
	 * For now, just add an offset that is likely to be larger
	 * than the number of packets we can see in one second. The RFC
	 * just says the next packet must have a higher seq value.
	 *
	 * XXX What is a good algorithm for this? We could use
	 * a rate-determined increase, but to know it, we would have
	 * to extend struct tdb.
	 * XXX pt->rpl can wrap over MAXINT, but if so the real tdb
	 * will soon be replaced anyway. For now, just don't handle
	 * this case.
	 */
2989 ut->rpl = htonl(t->tdb_rpl + (ISSET(t->tdb_flags, TDBF_PFSYNC_RPL) ?
2991 ut->cur_bytes = htobe64(t->tdb_cur_bytes);
2992 ut->sproto = t->tdb_sproto;
2994 return (sizeof(*ut));
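/*
 * Bulk updates.  A freshly started peer sends an all-zero update
 * request; we answer with a BUS_START message, stream our state table
 * in MTU-sized chunks from pfsync_bulk_update(), and finish with
 * BUS_END.  The requesting side keeps sc_bulkfail_tmo armed and
 * retries, up to PFSYNC_MAX_BULKTRIES, if the bulk never completes.
 */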
pfsync_bulk_start(void)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	if (V_pf_status.debug >= PF_DEBUG_MISC)
	if (pf_status.debug >= PF_DEBUG_MISC)
		printf("pfsync: received bulk update request\n");
	if (TAILQ_EMPTY(&V_state_list))
	if (TAILQ_EMPTY(&state_list))
		pfsync_bulk_status(PFSYNC_BUS_END);
	sc->sc_ureq_received = time_uptime;
	if (sc->sc_bulk_next == NULL)
		sc->sc_bulk_next = TAILQ_FIRST(&V_state_list);
		sc->sc_bulk_next = TAILQ_FIRST(&state_list);
	sc->sc_bulk_last = sc->sc_bulk_next;
	pfsync_bulk_status(PFSYNC_BUS_START);
	callout_reset(&sc->sc_bulk_tmo, 1,
	    pfsync_bulk_update, sc);
pfsync_bulk_update(void *arg)
	struct pfsync_softc *sc = arg;
	struct pf_state *st = sc->sc_bulk_next;
	CURVNET_SET(sc->sc_ifp->if_vnet);
		if (st->sync_state == PFSYNC_S_NONE &&
		    st->timeout < PFTM_MAX &&
		    st->pfsync_time <= sc->sc_ureq_received) {
			pfsync_update_state_req(st);
		st = TAILQ_NEXT(st, entry_list);
			st = TAILQ_FIRST(&V_state_list);
			st = TAILQ_FIRST(&state_list);
		if (st == sc->sc_bulk_last) {
			sc->sc_bulk_next = NULL;
			sc->sc_bulk_last = NULL;
			pfsync_bulk_status(PFSYNC_BUS_END);
		if (i > 1 && (sc->sc_ifp->if_mtu - sc->sc_len) <
		if (i > 1 && (sc->sc_if.if_mtu - sc->sc_len) <
		    sizeof(struct pfsync_state)) {
			/* we've filled a packet */
			sc->sc_bulk_next = st;
			callout_reset(&sc->sc_bulk_tmo, 1,
			    pfsync_bulk_update, sc);
			timeout_add(&sc->sc_bulk_tmo, 1);
pfsync_bulk_status(u_int8_t status)
	struct pfsync_subheader subh;
	struct pfsync_bus bus;
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	bzero(&r, sizeof(r));
	r.subh.action = PFSYNC_ACT_BUS;
	r.subh.count = htons(1);
	r.bus.creatorid = V_pf_status.hostid;
	r.bus.creatorid = pf_status.hostid;
	r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
	r.bus.status = status;
	pfsync_send_plus(&r, sizeof(r));
pfsync_bulk_fail(void *arg)
	struct pfsync_softc *sc = arg;
	CURVNET_SET(sc->sc_ifp->if_vnet);
	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
		    pfsync_bulk_fail, V_pfsyncif);
		timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
		pfsync_request_update(0, 0);
		/* Pretend like the transfer was ok */
		sc->sc_ureq_sent = 0;
		sc->sc_bulk_tries = 0;
		if (!sc->pfsync_sync_ok)
		if (!pfsync_sync_ok)
			carp_group_demote_adj(&sc->sc_if, -1);
		sc->pfsync_sync_ok = 1;
		if (V_pf_status.debug >= PF_DEBUG_MISC)
		if (pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync: failed to receive bulk update\n");
pfsync_send_plus(void *plus, size_t pluslen)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	if (sc->sc_len + pluslen > sc->sc_ifp->if_mtu) {
	if (sc->sc_len + pluslen > sc->sc_if.if_mtu) {
	sc->sc_len += (sc->sc_pluslen = pluslen);
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	if (sc == NULL || !ISSET(sc->sc_ifp->if_flags, IFF_DRV_RUNNING))
	if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING))
pfsync_state_in_use(struct pf_state *st)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	if (st->sync_state != PFSYNC_S_NONE ||
	    st == sc->sc_bulk_next ||
	    st == sc->sc_bulk_last)
pfsync_timeout(void *arg)
#if defined(__FreeBSD__) && defined(VIMAGE)
	struct pfsync_softc *sc = arg;
	CURVNET_SET(sc->sc_ifp->if_vnet);
/* this is a softnet/netisr handler */
pfsyncintr(void *arg)
	struct pfsync_softc *sc = arg;
	CURVNET_SET(sc->sc_ifp->if_vnet);
		IF_DEQUEUE(&sc->sc_ifp->if_snd, m);
		if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL)
			V_pfsyncstats.pfsyncs_opackets++;
			V_pfsyncstats.pfsyncs_oerrors++;
pfsync_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
	/* All sysctl names at this level are terminal. */
	case PFSYNCCTL_STATS:
		return (sysctl_struct(oldp, oldlenp, newp, newlen,
		    &V_pfsyncstats, sizeof(V_pfsyncstats)));
	return (ENOPROTOOPT);
pfsync_ifdetach(void *arg, struct ifnet *ifp)
	struct pfsync_softc *sc = (struct pfsync_softc *)arg;
	struct ip_moptions *imo;
	if (sc == NULL || sc->sc_sync_if != ifp)
		return;		/* not for us; unlocked read */
	CURVNET_SET(sc->sc_ifp->if_vnet);
	/* Deal with a member interface going away from under us. */
	sc->sc_sync_if = NULL;
	if (imo->imo_num_memberships > 0) {
		KASSERT(imo->imo_num_memberships == 1,
		    ("%s: imo_num_memberships != 1", __func__));
		/*
		 * Our event handler is always called after protocol
		 * domains have been detached from the underlying ifnet.
		 * Do not call in_delmulti(); we held a single reference
		 * which the protocol domain has purged in in_purgemaddrs().
		 */
		imo->imo_membership[--imo->imo_num_memberships] = NULL;
		imo->imo_multicast_ifp = NULL;
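/*
 * pf reaches into this optional module through the *_ptr hooks set up
 * below; wiring them here and clearing them in vnet_pfsync_uninit()
 * lets pfsync load and unload independently of pf.
 */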
vnet_pfsync_init(const void *unused)
	error = swi_add(NULL, "pfsync", pfsyncintr, V_pfsyncif,
	    SWI_NET, INTR_MPSAFE, &pfsync_swi.pfsync_swi_cookie);
		panic("%s: swi_add %d", __func__, error);
	pfsync_state_import_ptr = pfsync_state_import;
	pfsync_up_ptr = pfsync_up;
	pfsync_insert_state_ptr = pfsync_insert_state;
	pfsync_update_state_ptr = pfsync_update_state;
	pfsync_delete_state_ptr = pfsync_delete_state;
	pfsync_clear_states_ptr = pfsync_clear_states;
	pfsync_state_in_use_ptr = pfsync_state_in_use;
	pfsync_defer_ptr = pfsync_defer;
vnet_pfsync_uninit(const void *unused)
	swi_remove(pfsync_swi.pfsync_swi_cookie);
	pfsync_state_import_ptr = NULL;
	pfsync_up_ptr = NULL;
	pfsync_insert_state_ptr = NULL;
	pfsync_update_state_ptr = NULL;
	pfsync_delete_state_ptr = NULL;
	pfsync_clear_states_ptr = NULL;
	pfsync_state_in_use_ptr = NULL;
	pfsync_defer_ptr = NULL;
	if_clone_detach(&pfsync_cloner);
/* Define startup order. */
#define PFSYNC_SYSINIT_ORDER	SI_SUB_PROTO_IF
#define PFSYNC_MODEVENT_ORDER	(SI_ORDER_FIRST) /* On boot slot in here. */
#define PFSYNC_VNET_ORDER	(PFSYNC_MODEVENT_ORDER + 2) /* Later still. */
/*
 * VNET_SYSINIT is called for each existing vnet and each new vnet.
 */
VNET_SYSINIT(vnet_pfsync_init, PFSYNC_SYSINIT_ORDER, PFSYNC_VNET_ORDER,
    vnet_pfsync_init, NULL);
/*
 * Closing up shop.  These are done in REVERSE ORDER and are not
 * called on reboot.
 * VNET_SYSUNINIT is called for each exiting vnet as it exits.
 */
VNET_SYSUNINIT(vnet_pfsync_uninit, PFSYNC_SYSINIT_ORDER, PFSYNC_VNET_ORDER,
    vnet_pfsync_uninit, NULL);
pfsync_modevent(module_t mod, int type, void *data)
		if_clone_detach(&pfsync_cloner);
static moduledata_t pfsync_mod = {
#define PFSYNC_MODVER 1
DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(pfsync, PFSYNC_MODVER);
MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);
#endif /* __FreeBSD__ */