/*	$OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $	*/

/*
 * Copyright (c) 2002 Michael Shalayeff
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Revisions picked from OpenBSD after revision 1.110 import:
 * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates
 * 1.120, 1.175 - use monotonic time_uptime
 * 1.122 - reduce number of updates for non-TCP sessions
 * 1.146 - bzero() mbuf before sparsely filling it with data
 * 1.170 - SIOCSIFMTU checks
 */
#include "opt_inet6.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#endif /* __FreeBSD__ */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/ioctl.h>
#include <sys/timeout.h>
#include <sys/sysctl.h>

#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/nd6.h>

#include <netinet/ip_carp.h>
#include <netinet/ip_carp.h>

#include <net/pfvar.h>
#include <net/if_pfsync.h>

#include "bpfilter.h"

#define PFSYNC_MINPKT ( \
	sizeof(struct ip) + \
	sizeof(struct pfsync_header) + \
	sizeof(struct pfsync_subheader) + \
	sizeof(struct pfsync_eof))
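/*
 * An "empty" pfsync packet is an IP header, the pfsync header, one
 * subheader and the EOF record; sc_len is reset to PFSYNC_MINPKT
 * whenever the pending packet has been drained.
 */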
int	pfsync_input_hmac(struct mbuf *, int);

int	pfsync_upd_tcp(struct pf_state *, struct pfsync_state_peer *,
	    struct pfsync_state_peer *);

int	pfsync_in_clr(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_ins(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_iack(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_upd(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_upd_c(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_ureq(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_del(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_del_c(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_bus(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_tdb(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_eof(struct pfsync_pkt *, struct mbuf *, int, int);

int	pfsync_in_error(struct pfsync_pkt *, struct mbuf *, int, int);
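/*
 * Incoming subheaders are dispatched through the table below, indexed
 * by action code; unimplemented actions map to pfsync_in_error(), and
 * pfsync_input() rejects codes >= PFSYNC_ACT_MAX before indexing.
 */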
int	(*pfsync_acts[])(struct pfsync_pkt *, struct mbuf *, int, int) = {
	pfsync_in_clr,			/* PFSYNC_ACT_CLR */
	pfsync_in_ins,			/* PFSYNC_ACT_INS */
	pfsync_in_iack,			/* PFSYNC_ACT_INS_ACK */
	pfsync_in_upd,			/* PFSYNC_ACT_UPD */
	pfsync_in_upd_c,		/* PFSYNC_ACT_UPD_C */
	pfsync_in_ureq,			/* PFSYNC_ACT_UPD_REQ */
	pfsync_in_del,			/* PFSYNC_ACT_DEL */
	pfsync_in_del_c,		/* PFSYNC_ACT_DEL_C */
	pfsync_in_error,		/* PFSYNC_ACT_INS_F */
	pfsync_in_error,		/* PFSYNC_ACT_DEL_F */
	pfsync_in_bus,			/* PFSYNC_ACT_BUS */
	pfsync_in_tdb,			/* PFSYNC_ACT_TDB */
	pfsync_in_eof			/* PFSYNC_ACT_EOF */
	int		(*write)(struct pf_state *, struct mbuf *, int);

/* we have one of these for every PFSYNC_S_ */
int	pfsync_out_state(struct pf_state *, struct mbuf *, int);
int	pfsync_out_iack(struct pf_state *, struct mbuf *, int);
int	pfsync_out_upd_c(struct pf_state *, struct mbuf *, int);
int	pfsync_out_del(struct pf_state *, struct mbuf *, int);
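/*
 * The rows below must stay in PFSYNC_S_* order: pfsync_sendout() walks
 * sc_qs[q] and uses pfsync_qs[q].write/len/action to serialize each
 * queued state into the outgoing packet.
 */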
struct pfsync_q pfsync_qs[] = {
	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_INS },
	{ pfsync_out_iack,  sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_UPD },
	{ pfsync_out_upd_c, sizeof(struct pfsync_upd_c),   PFSYNC_ACT_UPD_C },
	{ pfsync_out_del,   sizeof(struct pfsync_del_c),   PFSYNC_ACT_DEL_C }
void	pfsync_q_ins(struct pf_state *, int);
void	pfsync_q_del(struct pf_state *);

struct pfsync_upd_req_item {
	TAILQ_ENTRY(pfsync_upd_req_item)	ur_entry;
	struct pfsync_upd_req			ur_msg;
TAILQ_HEAD(pfsync_upd_reqs, pfsync_upd_req_item);

struct pfsync_deferral {
	TAILQ_ENTRY(pfsync_deferral)	pd_entry;
	struct pf_state			*pd_st;
	struct callout			 pd_tmo;
	struct timeout			 pd_tmo;
TAILQ_HEAD(pfsync_deferrals, pfsync_deferral);

#define PFSYNC_PLSIZE	MAX(sizeof(struct pfsync_upd_req_item), \
	    sizeof(struct pfsync_deferral))
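/*
 * Update requests and deferrals are both allocated from sc_pool, so
 * the pool item size must cover the larger of the two structures.
 */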
int	pfsync_out_tdb(struct tdb *, struct mbuf *, int);

struct pfsync_softc {
	struct ifnet		*sc_ifp;
	struct ifnet		*sc_sync_if;
	struct ip_moptions	 sc_imo;
	struct in_addr		 sc_sync_peer;
	u_int8_t		 sc_maxupdates;
	struct ip		 sc_template;
	struct pf_state_queue	 sc_qs[PFSYNC_S_COUNT];
	struct pfsync_upd_reqs	 sc_upd_req_list;
	struct pfsync_deferrals	 sc_deferrals;
	u_int32_t		 sc_ureq_sent;
	struct callout		 sc_bulkfail_tmo;
	struct timeout		 sc_bulkfail_tmo;
	u_int32_t		 sc_ureq_received;
	struct pf_state		*sc_bulk_next;
	struct pf_state		*sc_bulk_last;
	struct callout		 sc_bulk_tmo;
	struct timeout		 sc_bulk_tmo;
	TAILQ_HEAD(, tdb)	 sc_tdb_q;
	struct callout		 sc_tmo;
	struct timeout		 sc_tmo;

static MALLOC_DEFINE(M_PFSYNC, "pfsync", "pfsync data");
static VNET_DEFINE(struct pfsync_softc *, pfsyncif) = NULL;
#define	V_pfsyncif		VNET(pfsyncif)
static VNET_DEFINE(void *, pfsync_swi_cookie) = NULL;
#define	V_pfsync_swi_cookie	VNET(pfsync_swi_cookie)
static VNET_DEFINE(struct pfsyncstats, pfsyncstats);
#define	V_pfsyncstats		VNET(pfsyncstats)
static VNET_DEFINE(int, pfsync_carp_adj) = CARP_MAXSKEW;
#define	V_pfsync_carp_adj	VNET(pfsync_carp_adj)

static void	pfsyncintr(void *);
static int	pfsync_multicast_setup(struct pfsync_softc *);
static void	pfsync_multicast_cleanup(struct pfsync_softc *);
static int	pfsync_init(void);
static void	pfsync_uninit(void);
static void	pfsync_sendout1(int);

#define	schednetisr(NETISR_PFSYNC)	swi_sched(V_pfsync_swi_cookie, 0)

SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW, 0, "PFSYNC");
SYSCTL_VNET_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_RW,
    &VNET_NAME(pfsyncstats), pfsyncstats,
    "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_RW,
    &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");

struct pfsync_softc	*pfsyncif = NULL;
struct pfsyncstats	 pfsyncstats;
#define	V_pfsyncstats	 pfsyncstats
void	pfsyncattach(int);
int	pfsync_clone_create(struct if_clone *, int, caddr_t);
void	pfsync_clone_destroy(struct ifnet *);
int	pfsync_clone_create(struct if_clone *, int);
int	pfsync_clone_destroy(struct ifnet *);
int	pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
	    struct pf_state_peer *);
void	pfsync_update_net_tdb(struct pfsync_tdb *);
int	pfsyncoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
int	pfsyncioctl(struct ifnet *, u_long, caddr_t);
void	pfsyncstart(struct ifnet *);

struct mbuf *pfsync_if_dequeue(struct ifnet *);

void	pfsync_deferred(struct pf_state *, int);
void	pfsync_undefer(struct pfsync_deferral *, int);
void	pfsync_defer_tmo(void *);

void	pfsync_request_update(u_int32_t, u_int64_t);
void	pfsync_update_state_req(struct pf_state *);

void	pfsync_drop(struct pfsync_softc *);
void	pfsync_sendout(void);
void	pfsync_send_plus(void *, size_t);
void	pfsync_timeout(void *);
void	pfsync_tdb_timeout(void *);

void	pfsync_bulk_start(void);
void	pfsync_bulk_status(u_int8_t);
void	pfsync_bulk_update(void *);
void	pfsync_bulk_fail(void *);

#define betoh64		(unsigned long long)be64toh
#define timeout_del	callout_stop
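/*
 * Compatibility shims: the OpenBSD code uses timeout(9) and betoh64();
 * map them onto FreeBSD's callout(9) and be64toh() to keep the diff
 * against upstream small.
 */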
#define PFSYNC_MAX_BULKTRIES	12

VNET_DEFINE(struct ifc_simple_data, pfsync_cloner_data);
VNET_DEFINE(struct if_clone, pfsync_cloner);
#define	V_pfsync_cloner_data	VNET(pfsync_cloner_data)
#define	V_pfsync_cloner		VNET(pfsync_cloner)
IFC_SIMPLE_DECLARE(pfsync, 1);

struct if_clone pfsync_cloner =
    IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create, pfsync_clone_destroy);

pfsyncattach(int npfsync)
	if_clone_attach(&pfsync_cloner);

pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
pfsync_clone_create(struct if_clone *ifc, int unit)
	struct pfsync_softc *sc;

	sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO);
	sc->pfsync_sync_ok = 1;
	sc = malloc(sizeof(*pfsyncif), M_DEVBUF, M_NOWAIT | M_ZERO);

	for (q = 0; q < PFSYNC_S_COUNT; q++)
		TAILQ_INIT(&sc->sc_qs[q]);

	sc->sc_pool = uma_zcreate("pfsync", PFSYNC_PLSIZE, NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	pool_init(&sc->sc_pool, PFSYNC_PLSIZE, 0, 0, 0, "pfsync", NULL);

	TAILQ_INIT(&sc->sc_upd_req_list);
	TAILQ_INIT(&sc->sc_deferrals);
	TAILQ_INIT(&sc->sc_tdb_q);

	sc->sc_len = PFSYNC_MINPKT;
	sc->sc_maxupdates = 128;

	sc->sc_imo.imo_membership = (struct in_multi **)malloc(
	    (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_IPMOPTS,
	sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;

	ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);
		uma_zdestroy(sc->sc_pool);
	if_initname(ifp, ifc->ifc_name, unit);
	snprintf(ifp->if_xname, sizeof ifp->if_xname, "pfsync%d", unit);
	ifp->if_ioctl = pfsyncioctl;
	ifp->if_output = pfsyncoutput;
	ifp->if_start = pfsyncstart;
	ifp->if_type = IFT_PFSYNC;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_hdrlen = sizeof(struct pfsync_header);
	ifp->if_mtu = ETHERMTU;

	callout_init(&sc->sc_tmo, CALLOUT_MPSAFE);
	callout_init_mtx(&sc->sc_bulk_tmo, &pf_task_mtx, 0);
	callout_init(&sc->sc_bulkfail_tmo, CALLOUT_MPSAFE);
	timeout_set(&sc->sc_tmo, pfsync_timeout, sc);
	timeout_set(&sc->sc_bulk_tmo, pfsync_bulk_update, sc);
	timeout_set(&sc->sc_bulkfail_tmo, pfsync_bulk_fail, sc);

	if_addgroup(ifp, "carp");

	bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
	bpfattach(&sc->sc_if.if_bpf, ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
pfsync_clone_destroy(struct ifnet *ifp)
	struct pfsync_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_bulkfail_tmo);
	timeout_del(&sc->sc_bulk_tmo);
	timeout_del(&sc->sc_tmo);
	if (!sc->pfsync_sync_ok && carp_demote_adj_p)
		(*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy");
		carp_group_demote_adj(&sc->sc_if, -1);

	while (sc->sc_deferred > 0)
		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);

	UMA_DESTROY(sc->sc_pool);
	pool_destroy(&sc->sc_pool);
	if (sc->sc_imo.imo_membership)
		pfsync_multicast_cleanup(sc);
	free(sc->sc_imo.imo_membership, M_IPMOPTS);

pfsync_if_dequeue(struct ifnet *ifp)
	IF_LOCK(&ifp->if_snd);
	_IF_DROP(&ifp->if_snd);
	_IF_DEQUEUE(&ifp->if_snd, m);
	IF_UNLOCK(&ifp->if_snd);
	IF_DEQUEUE(&ifp->if_snd, m);

/*
 * Start output on the pfsync interface.
 */
pfsyncstart(struct ifnet *ifp)
	while ((m = pfsync_if_dequeue(ifp)) != NULL) {
		IF_DROP(&ifp->if_snd);

pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
    struct pf_state_peer *d)
	if (s->scrub.scrub_flag && d->scrub == NULL) {
		d->scrub = pool_get(&V_pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
		d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
		if (d->scrub == NULL)
pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_uptime - st->creation);
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_second)
		sp->expire = htonl(0);
		sp->expire = htonl(sp->expire - time_second);

	sp->direction = st->direction;
	sp->timeout = st->timeout;
	sp->state_flags = st->state_flags;
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	bcopy(&st->id, &sp->id, sizeof(sp->id));
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	pf_state_counter_hton(st->packets[0], sp->packets[0]);
	pf_state_counter_hton(st->packets[1], sp->packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
pfsync_state_import(struct pfsync_state *sp, u_int8_t flags)
	struct pf_state *st = NULL;
	struct pf_state_key *skw = NULL, *sks = NULL;
	struct pf_rule *r = NULL;

	if (sp->creatorid == 0 && V_pf_status.debug >= PF_DEBUG_MISC) {
	if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) {
		printf("pfsync_state_import: invalid creator id:"
		    " %08x\n", ntohl(sp->creatorid));

	if ((kif = pfi_kif_get(sp->ifname)) == NULL) {
		if (V_pf_status.debug >= PF_DEBUG_MISC)
		if (pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync_state_import: "
			    "unknown interface: %s\n", sp->ifname);
		if (flags & PFSYNC_SI_IOCTL)
		return (0);	/* skip this state */

	/*
	 * If the ruleset checksums match or the state is coming from the ioctl,
	 * it's safe to associate the state with the rule of that number.
	 */
	if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
	    (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) <
	    pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
		r = pf_main_ruleset.rules[
		    PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
		r = &V_pf_default_rule;
		r = &pf_default_rule;

	if ((r->max_states && r->states_cur >= r->max_states))

	if (flags & PFSYNC_SI_IOCTL)
		pool_flags = PR_WAITOK | PR_ZERO;
		pool_flags = PR_NOWAIT | PR_ZERO;

	if ((st = pool_get(&V_pf_state_pl, pool_flags)) == NULL)

	if (flags & PFSYNC_SI_IOCTL)
		pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO;
		pool_flags = PR_LIMITFAIL | PR_ZERO;

	if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL)

	if ((skw = pf_alloc_state_key(pool_flags)) == NULL)

	if (PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0],
	    &sp->key[PF_SK_STACK].addr[0], sp->af) ||
	    PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1],
	    &sp->key[PF_SK_STACK].addr[1], sp->af) ||
	    sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] ||
	    sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1]) {
		if ((sks = pf_alloc_state_key(pool_flags)) == NULL)
	/* allocate memory for scrub info */
	if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
	    pfsync_alloc_scrub_memory(&sp->dst, &st->dst))

	/* copy to state key(s) */
	skw->addr[0] = sp->key[PF_SK_WIRE].addr[0];
	skw->addr[1] = sp->key[PF_SK_WIRE].addr[1];
	skw->port[0] = sp->key[PF_SK_WIRE].port[0];
	skw->port[1] = sp->key[PF_SK_WIRE].port[1];
	skw->proto = sp->proto;
		sks->addr[0] = sp->key[PF_SK_STACK].addr[0];
		sks->addr[1] = sp->key[PF_SK_STACK].addr[1];
		sks->port[0] = sp->key[PF_SK_STACK].port[0];
		sks->port[1] = sp->key[PF_SK_STACK].port[1];
		sks->proto = sp->proto;

	bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr));
	st->creation = time_uptime - ntohl(sp->creation);
	st->expire = time_second;
		/* XXX No adaptive scaling. */
		st->expire -= r->timeout[sp->timeout] - ntohl(sp->expire);
	st->expire = ntohl(sp->expire) + time_second;

	st->direction = sp->direction;
	st->timeout = sp->timeout;
	st->state_flags = sp->state_flags;

	bcopy(sp->id, &st->id, sizeof(st->id));
	st->creatorid = sp->creatorid;
	pf_state_peer_ntoh(&sp->src, &st->src);
	pf_state_peer_ntoh(&sp->dst, &st->dst);

	st->nat_rule.ptr = NULL;
	st->anchor.ptr = NULL;

	st->pfsync_time = time_uptime;
	st->sync_state = PFSYNC_S_NONE;

	/* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */

	if (!ISSET(flags, PFSYNC_SI_IOCTL))
		SET(st->state_flags, PFSTATE_NOSYNC);

	if ((error = pf_state_insert(kif, skw, sks, st)) != 0) {
		/* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */

	if (!ISSET(flags, PFSYNC_SI_IOCTL)) {
		CLR(st->state_flags, PFSTATE_NOSYNC);
		if (ISSET(st->state_flags, PFSTATE_ACK)) {
			pfsync_q_ins(st, PFSYNC_S_IACK);
			schednetisr(NETISR_PFSYNC);
		CLR(st->state_flags, PFSTATE_ACK);

		pool_put(&V_pf_state_key_pl, skw);
		pool_put(&V_pf_state_key_pl, sks);
		pool_put(&pf_state_key_pl, skw);
		pool_put(&pf_state_key_pl, sks);

cleanup_state:	/* pf_state_insert frees the state keys */
		pool_put(&V_pf_state_scrub_pl, st->dst.scrub);
		pool_put(&V_pf_state_scrub_pl, st->src.scrub);
	pool_put(&V_pf_state_pl, st);
		pool_put(&pf_state_scrub_pl, st->dst.scrub);
		pool_put(&pf_state_scrub_pl, st->src.scrub);
	pool_put(&pf_state_pl, st);
pfsync_input(struct mbuf *m, __unused int off)
pfsync_input(struct mbuf *m, ...)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	struct pfsync_pkt pkt;
	struct ip *ip = mtod(m, struct ip *);
	struct pfsync_header *ph;
	struct pfsync_subheader subh;

	V_pfsyncstats.pfsyncs_ipackets++;

	/* verify that we have a sync interface configured */
	if (!sc || !sc->sc_sync_if || !V_pf_status.running)
	if (!sc || !sc->sc_sync_if || !pf_status.running)

	/* verify that the packet came in on the right interface */
	if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
		V_pfsyncstats.pfsyncs_badif++;

	sc->sc_ifp->if_ipackets++;
	sc->sc_ifp->if_ibytes += m->m_pkthdr.len;
	sc->sc_if.if_ipackets++;
	sc->sc_if.if_ibytes += m->m_pkthdr.len;

	/* verify that the IP TTL is 255. */
	if (ip->ip_ttl != PFSYNC_DFLTTL) {
		V_pfsyncstats.pfsyncs_badttl++;

	offset = ip->ip_hl << 2;
	if (m->m_pkthdr.len < offset + sizeof(*ph)) {
		V_pfsyncstats.pfsyncs_hdrops++;

	if (offset + sizeof(*ph) > m->m_len) {
		if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
			V_pfsyncstats.pfsyncs_hdrops++;
		ip = mtod(m, struct ip *);
	ph = (struct pfsync_header *)((char *)ip + offset);

	/* verify the version */
	if (ph->version != PFSYNC_VERSION) {
		V_pfsyncstats.pfsyncs_badver++;

	if (pfsync_input_hmac(m, offset) != 0) {

	/* Cheaper to grab this now than having to mess with mbufs later */
	pkt.src = ip->ip_src;

	if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
	if (!bcmp(&ph->pfcksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
		pkt.flags |= PFSYNC_SI_CKSUM;

	offset += sizeof(*ph);
	m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
	offset += sizeof(subh);

	if (subh.action >= PFSYNC_ACT_MAX) {
		V_pfsyncstats.pfsyncs_badact++;

	rv = (*pfsync_acts[subh.action])(&pkt, m, offset,
pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_clr *clr;
	int len = sizeof(*clr) * count;
	struct pf_state *st, *nexts;
	struct pf_state_key *sk, *nextsk;
	struct pf_state_item *si;

	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	clr = (struct pfsync_clr *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		creatorid = clr[i].creatorid;

		if (clr[i].ifname[0] == '\0') {
			for (st = RB_MIN(pf_state_tree_id, &V_tree_id);
				nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, st);
			for (st = RB_MIN(pf_state_tree_id, &tree_id);
				nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
				if (st->creatorid == creatorid) {
					SET(st->state_flags, PFSTATE_NOSYNC);
					pf_unlink_state(st);
			if (pfi_kif_get(clr[i].ifname) == NULL)

			for (sk = RB_MIN(pf_state_tree, &V_pf_statetbl);
			for (sk = RB_MIN(pf_state_tree, &pf_statetbl);
				nextsk = RB_NEXT(pf_state_tree,
				    &V_pf_statetbl, sk);
				TAILQ_FOREACH(si, &sk->states, entry) {
					if (si->s->creatorid == creatorid) {
						SET(si->s->state_flags,
						pf_unlink_state(si->s);
pfsync_in_ins(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_state *sa, *sp;
	int len = sizeof(*sp) * count;

	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	sa = (struct pfsync_state *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		/* check for invalid values */
		if (sp->timeout >= PFTM_MAX ||
		    sp->src.state > PF_TCPS_PROXY_DST ||
		    sp->dst.state > PF_TCPS_PROXY_DST ||
		    sp->direction > PF_OUT ||
		    (sp->af != AF_INET && sp->af != AF_INET6)) {
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
			if (pf_status.debug >= PF_DEBUG_MISC) {
				printf("pfsync_input: PFSYNC5_ACT_INS: "
			V_pfsyncstats.pfsyncs_badval++;

		if (pfsync_state_import(sp, pkt->flags) == ENOMEM) {
			/* drop out, but process the rest of the actions */

pfsync_in_iack(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_ins_ack *ia, *iaa;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*ia);

	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	iaa = (struct pfsync_ins_ack *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		bcopy(&ia->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ia->creatorid;

		st = pf_find_state_byid(&id_key);
		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 0);

	/*
	 * XXX this is not yet implemented, but we know the size of the
	 * message so we can skip it.
	 */
	return (count * sizeof(struct pfsync_ins_ack));
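/*
 * Sanity-check a peer's view of a TCP state against our own: states
 * may only move forward (except for syn-proxy) and sequence windows
 * may not slide backwards.  A zero return accepts the update; nonzero
 * flags it stale so the caller can answer with our fresher copy.
 */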
pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src,
    struct pfsync_state_peer *dst)

	/*
	 * The state should never go backwards except
	 * for syn-proxy states.  Neither should the
	 * sequence window slide backwards.
	 */
	if (st->src.state > src->state &&
	    (st->src.state < PF_TCPS_PROXY_SRC ||
	    src->state >= PF_TCPS_PROXY_SRC))
	else if (SEQ_GT(st->src.seqlo, ntohl(src->seqlo)))
	else if (st->dst.state > dst->state) {
		/*
		 * There might still be useful information about the
		 * src state here, so import that part of the update,
		 * then "fail" so we send the updated state back to
		 * the peer who is missing what we know.
		 */
		pf_state_peer_ntoh(src, &st->src);
		/* XXX do anything with timeouts? */
	} else if (st->dst.state >= TCPS_SYN_SENT &&
	    SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo)))
pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_state *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state_key *sk;
	struct pf_state *st;
	int len = count * sizeof(*sp);

	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	sa = (struct pfsync_state *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		/* check for invalid values */
		if (sp->timeout >= PFTM_MAX ||
		    sp->src.state > PF_TCPS_PROXY_DST ||
		    sp->dst.state > PF_TCPS_PROXY_DST) {
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
			if (pf_status.debug >= PF_DEBUG_MISC) {
				printf("pfsync_input: PFSYNC_ACT_UPD: "
			V_pfsyncstats.pfsyncs_badval++;

		bcopy(sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;

		st = pf_find_state_byid(&id_key);
			/* insert the update */
			if (pfsync_state_import(sp, 0))
				V_pfsyncstats.pfsyncs_badstate++;

		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 1);

		sk = st->key[PF_SK_WIRE];	/* XXX right one? */
		if (sk->proto == IPPROTO_TCP)
			sfail = pfsync_upd_tcp(st, &sp->src, &sp->dst);
			/*
			 * Non-TCP protocol state machines always go
			 * forwards
			 */
			if (st->src.state > sp->src.state)
			else if (st->dst.state > sp->dst.state)

		if (V_pf_status.debug >= PF_DEBUG_MISC) {
		if (pf_status.debug >= PF_DEBUG_MISC) {
			printf("pfsync: %s stale update (%d)"
			    " id: %016llx creatorid: %08x\n",
			    (sfail < 7 ? "ignoring" : "partial"),
			    sfail, betoh64(st->id),
			    ntohl(st->creatorid));
		V_pfsyncstats.pfsyncs_stale++;

		pfsync_update_state(st);
		schednetisr(NETISR_PFSYNC);

		pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
		pf_state_peer_ntoh(&sp->src, &st->src);
		pf_state_peer_ntoh(&sp->dst, &st->dst);
		st->expire = ntohl(sp->expire) + time_second;
		st->timeout = sp->timeout;
		st->pfsync_time = time_uptime;
pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_upd_c *ua, *up;
	struct pf_state_key *sk;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*up);

	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	ua = (struct pfsync_upd_c *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		/* check for invalid values */
		if (up->timeout >= PFTM_MAX ||
		    up->src.state > PF_TCPS_PROXY_DST ||
		    up->dst.state > PF_TCPS_PROXY_DST) {
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
			if (pf_status.debug >= PF_DEBUG_MISC) {
				printf("pfsync_input: "
				    "PFSYNC_ACT_UPD_C: "
			V_pfsyncstats.pfsyncs_badval++;

		bcopy(&up->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = up->creatorid;

		st = pf_find_state_byid(&id_key);
			/* We don't have this state. Ask for it. */
			pfsync_request_update(id_key.creatorid, id_key.id);

		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 1);

		sk = st->key[PF_SK_WIRE];	/* XXX right one? */
		if (sk->proto == IPPROTO_TCP)
			sfail = pfsync_upd_tcp(st, &up->src, &up->dst);
			/*
			 * Non-TCP protocol state machines always go forwards
			 */
			if (st->src.state > up->src.state)
			else if (st->dst.state > up->dst.state)

		if (V_pf_status.debug >= PF_DEBUG_MISC) {
		if (pf_status.debug >= PF_DEBUG_MISC) {
			printf("pfsync: ignoring stale update "
			    "creatorid: %08x\n", sfail,
			    ntohl(st->creatorid));
		V_pfsyncstats.pfsyncs_stale++;

		pfsync_update_state(st);
		schednetisr(NETISR_PFSYNC);

		pfsync_alloc_scrub_memory(&up->dst, &st->dst);
		pf_state_peer_ntoh(&up->src, &st->src);
		pf_state_peer_ntoh(&up->dst, &st->dst);
		st->expire = ntohl(up->expire) + time_second;
		st->timeout = up->timeout;
		st->pfsync_time = time_uptime;
pfsync_in_ureq(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_upd_req *ur, *ura;
	int len = count * sizeof(*ur);
	struct pf_state_cmp id_key;
	struct pf_state *st;

	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	ura = (struct pfsync_upd_req *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		bcopy(&ur->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ur->creatorid;

		if (id_key.id == 0 && id_key.creatorid == 0)
			pfsync_bulk_start();
			st = pf_find_state_byid(&id_key);
				V_pfsyncstats.pfsyncs_badstate++;
			if (ISSET(st->state_flags, PFSTATE_NOSYNC))

			pfsync_update_state_req(st);

pfsync_in_del(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_state *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*sp);

	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	sa = (struct pfsync_state *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		bcopy(sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;

		st = pf_find_state_byid(&id_key);
			V_pfsyncstats.pfsyncs_badstate++;

		SET(st->state_flags, PFSTATE_NOSYNC);
		pf_unlink_state(st);

pfsync_in_del_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_del_c *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*sp);

	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	sa = (struct pfsync_del_c *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		bcopy(&sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;

		st = pf_find_state_byid(&id_key);
			V_pfsyncstats.pfsyncs_badstate++;

		SET(st->state_flags, PFSTATE_NOSYNC);
		pf_unlink_state(st);
pfsync_in_bus(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	struct pfsync_bus *bus;
	int len = count * sizeof(*bus);

	/* If we're not waiting for a bulk update, who cares. */
	if (sc->sc_ureq_sent == 0)

	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	bus = (struct pfsync_bus *)(mp->m_data + offp);

	switch (bus->status) {
	case PFSYNC_BUS_START:
		callout_reset(&sc->sc_bulkfail_tmo, 4 * hz +
		    V_pf_pool_limits[PF_LIMIT_STATES].limit /
		    ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) /
		    sizeof(struct pfsync_state)),
		    pfsync_bulk_fail, V_pfsyncif);
		timeout_add(&sc->sc_bulkfail_tmo, 4 * hz +
		    pf_pool_limits[PF_LIMIT_STATES].limit /
		    ((sc->sc_if.if_mtu - PFSYNC_MINPKT) /
		    sizeof(struct pfsync_state)));
		if (V_pf_status.debug >= PF_DEBUG_MISC)
		if (pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync: received bulk update start\n");

	case PFSYNC_BUS_END:
		if (time_uptime - ntohl(bus->endtime) >=
			/* that's it, we're happy */
			sc->sc_ureq_sent = 0;
			sc->sc_bulk_tries = 0;
			timeout_del(&sc->sc_bulkfail_tmo);
			if (!sc->pfsync_sync_ok && carp_demote_adj_p)
				(*carp_demote_adj_p)(-V_pfsync_carp_adj,
				    "pfsync bulk done");
			sc->pfsync_sync_ok = 1;
			if (!pfsync_sync_ok)
				carp_group_demote_adj(&sc->sc_if, -1);
			if (V_pf_status.debug >= PF_DEBUG_MISC)
			if (pf_status.debug >= PF_DEBUG_MISC)
				printf("pfsync: received valid "
				    "bulk update end\n");
			if (V_pf_status.debug >= PF_DEBUG_MISC)
			if (pf_status.debug >= PF_DEBUG_MISC)
				printf("pfsync: received invalid "
				    "bulk update end: bad timestamp\n");
pfsync_in_tdb(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	int len = count * sizeof(struct pfsync_tdb);
	struct pfsync_tdb *tp;

	mp = m_pulldown(m, offset, len, &offp);
		V_pfsyncstats.pfsyncs_badlen++;
	tp = (struct pfsync_tdb *)(mp->m_data + offp);

	for (i = 0; i < count; i++)
		pfsync_update_net_tdb(&tp[i]);

/* Update an in-kernel tdb. Silently fail if no tdb is found. */
pfsync_update_net_tdb(struct pfsync_tdb *pt)
	/* check for invalid values */
	if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
	    (pt->dst.sa.sa_family != AF_INET &&
	    pt->dst.sa.sa_family != AF_INET6))

	tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
		pt->rpl = ntohl(pt->rpl);
		pt->cur_bytes = betoh64(pt->cur_bytes);

		/* Neither replay nor byte counter should ever decrease. */
		if (pt->rpl < tdb->tdb_rpl ||
		    pt->cur_bytes < tdb->tdb_cur_bytes) {

		tdb->tdb_rpl = pt->rpl;
		tdb->tdb_cur_bytes = pt->cur_bytes;

	if (V_pf_status.debug >= PF_DEBUG_MISC)
	if (pf_status.debug >= PF_DEBUG_MISC)
		printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
	V_pfsyncstats.pfsyncs_badstate++;

pfsync_in_eof(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	/* check if we are at the right place in the packet */
	if (offset != m->m_pkthdr.len - sizeof(struct pfsync_eof))
		V_pfsyncstats.pfsyncs_badact++;

	/* we're done. free and let the caller return */

pfsync_in_error(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
	V_pfsyncstats.pfsyncs_badact++;
pfsyncoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,

pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
	struct proc *p = curproc;
	struct pfsync_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ip_moptions *imo = &sc->sc_imo;
	struct pfsyncreq pfsyncr;

	case SIOCSIFDSTADDR:
		if (ifp->if_flags & IFF_UP)
			ifp->if_drv_flags |= IFF_DRV_RUNNING;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		if (ifp->if_flags & IFF_UP)
			ifp->if_flags |= IFF_RUNNING;
			ifp->if_flags &= ~IFF_RUNNING;
		if (!sc->sc_sync_if ||
		    ifr->ifr_mtu <= PFSYNC_MINPKT ||
		    ifr->ifr_mtu > sc->sc_sync_if->if_mtu)
		if (ifr->ifr_mtu < ifp->if_mtu) {
		ifp->if_mtu = ifr->ifr_mtu;
		bzero(&pfsyncr, sizeof(pfsyncr));
		if (sc->sc_sync_if) {
			strlcpy(pfsyncr.pfsyncr_syncdev,
			    sc->sc_sync_if->if_xname, IFNAMSIZ);
		pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
		return (copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr)));

		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
		if ((error = suser(p, p->p_acflag)) != 0)
		if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))

		if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
			sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
			sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
			sc->sc_sync_peer.s_addr =
			    pfsyncr.pfsyncr_syncpeer.s_addr;

		if (pfsyncr.pfsyncr_maxupdates > 255)
		sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;

		if (pfsyncr.pfsyncr_syncdev[0] == 0) {
			sc->sc_sync_if = NULL;
			if (imo->imo_membership)
				pfsync_multicast_cleanup(sc);
			if (imo->imo_num_memberships > 0) {
				in_delmulti(imo->imo_membership[
				    --imo->imo_num_memberships]);
				imo->imo_multicast_ifp = NULL;

		if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL)
		if (sifp->if_mtu < sc->sc_ifp->if_mtu ||
		if (sifp->if_mtu < sc->sc_if.if_mtu ||
		    (sc->sc_sync_if != NULL &&
		    sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
		    sifp->if_mtu < MCLBYTES - sizeof(struct ip))
		sc->sc_sync_if = sifp;

		if (imo->imo_membership) {
			pfsync_multicast_cleanup(sc);
		if (imo->imo_num_memberships > 0) {
			in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
			imo->imo_multicast_ifp = NULL;
		if (sc->sc_sync_if &&
		    sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
			error = pfsync_multicast_setup(sc);
		if (sc->sc_sync_if &&
		    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
			struct in_addr addr;

			if (!(sc->sc_sync_if->if_flags & IFF_MULTICAST)) {
				sc->sc_sync_if = NULL;
				return (EADDRNOTAVAIL);

			addr.s_addr = INADDR_PFSYNC_GROUP;

			if ((imo->imo_membership[0] =
			    in_addmulti(&addr, sc->sc_sync_if)) == NULL) {
				sc->sc_sync_if = NULL;

			imo->imo_num_memberships++;
			imo->imo_multicast_ifp = sc->sc_sync_if;
			imo->imo_multicast_ttl = PFSYNC_DFLTTL;
			imo->imo_multicast_loop = 0;
#endif /* !__FreeBSD__ */

		ip = &sc->sc_template;
		bzero(ip, sizeof(*ip));
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(sc->sc_template) >> 2;
		ip->ip_tos = IPTOS_LOWDELAY;
		/* len and id are set later */
		ip->ip_off = htons(IP_DF);
		ip->ip_ttl = PFSYNC_DFLTTL;
		ip->ip_p = IPPROTO_PFSYNC;
		ip->ip_src.s_addr = INADDR_ANY;
		ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr;

		if (sc->sc_sync_if) {
			/* Request a full state table update. */
			sc->sc_ureq_sent = time_uptime;
			if (sc->pfsync_sync_ok && carp_demote_adj_p)
				(*carp_demote_adj_p)(V_pfsync_carp_adj,
				    "pfsync bulk start");
			sc->pfsync_sync_ok = 0;
				carp_group_demote_adj(&sc->sc_if, 1);
			if (V_pf_status.debug >= PF_DEBUG_MISC)
			if (pf_status.debug >= PF_DEBUG_MISC)
				printf("pfsync: requesting bulk update\n");
			callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
			    pfsync_bulk_fail, V_pfsyncif);
			timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
			pfsync_request_update(0, 0);
pfsync_out_state(struct pf_state *st, struct mbuf *m, int offset)
	struct pfsync_state *sp = (struct pfsync_state *)(m->m_data + offset);

	pfsync_state_export(sp, st);

	return (sizeof(*sp));

pfsync_out_iack(struct pf_state *st, struct mbuf *m, int offset)
	struct pfsync_ins_ack *iack =
	    (struct pfsync_ins_ack *)(m->m_data + offset);

	iack->creatorid = st->creatorid;

	return (sizeof(*iack));

pfsync_out_upd_c(struct pf_state *st, struct mbuf *m, int offset)
	struct pfsync_upd_c *up = (struct pfsync_upd_c *)(m->m_data + offset);

	bzero(up, sizeof(*up));

	pf_state_peer_hton(&st->src, &up->src);
	pf_state_peer_hton(&st->dst, &up->dst);
	up->creatorid = st->creatorid;

	up->expire = pf_state_expires(st);
	if (up->expire <= time_second)
		up->expire = htonl(0);
		up->expire = htonl(up->expire - time_second);
	up->timeout = st->timeout;

	return (sizeof(*up));

pfsync_out_del(struct pf_state *st, struct mbuf *m, int offset)
	struct pfsync_del_c *dp = (struct pfsync_del_c *)(m->m_data + offset);

	dp->creatorid = st->creatorid;

	SET(st->state_flags, PFSTATE_NOSYNC);

	return (sizeof(*dp));
pfsync_drop(struct pfsync_softc *sc)
	struct pf_state *st;
	struct pfsync_upd_req_item *ur;

	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))

		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
			KASSERT(st->sync_state == q,
			    ("%s: st->sync_state == q",
			KASSERT(st->sync_state == q);
			st->sync_state = PFSYNC_S_NONE;
		TAILQ_INIT(&sc->sc_qs[q]);

	while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
		TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
		pool_put(&sc->sc_pool, ur);

	if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
		TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry)
			CLR(t->tdb_flags, TDBF_PFSYNC);
		TAILQ_INIT(&sc->sc_tdb_q);

	sc->sc_len = PFSYNC_MINPKT;
void pfsync_sendout()

pfsync_sendout1(int schedswi)
	struct pfsync_softc *sc = V_pfsyncif;
pfsync_sendout(void)
	struct pfsync_softc *sc = pfsyncif;
	struct ifnet *ifp = sc->sc_ifp;
	struct ifnet *ifp = &sc->sc_if;
	struct pfsync_header *ph;
	struct pfsync_subheader *subh;
	struct pf_state *st;
	struct pfsync_upd_req_item *ur;

	if (sc == NULL || sc->sc_len == PFSYNC_MINPKT)

	if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
	if (sc->sc_sync_if == NULL) {

	m = m_get2(M_NOWAIT, MT_DATA, M_PKTHDR, max_linkhdr + sc->sc_len);
		sc->sc_ifp->if_oerrors++;
		V_pfsyncstats.pfsyncs_onomem++;
	MGETHDR(m, M_DONTWAIT, MT_DATA);
		sc->sc_if.if_oerrors++;
		pfsyncstats.pfsyncs_onomem++;

	if (max_linkhdr + sc->sc_len > MHLEN) {
		MCLGETI(m, M_DONTWAIT, NULL, max_linkhdr + sc->sc_len);
		if (!ISSET(m->m_flags, M_EXT)) {
			sc->sc_if.if_oerrors++;
			pfsyncstats.pfsyncs_onomem++;

	m->m_data += max_linkhdr;
	m->m_len = m->m_pkthdr.len = sc->sc_len;

	/* build the ip header */
	ip = (struct ip *)m->m_data;
	bcopy(&sc->sc_template, ip, sizeof(*ip));
	offset = sizeof(*ip);

	ip->ip_len = m->m_pkthdr.len;
	ip->ip_len = htons(m->m_pkthdr.len);
	ip->ip_id = htons(ip_randomid());

	/* build the pfsync header */
	ph = (struct pfsync_header *)(m->m_data + offset);
	bzero(ph, sizeof(*ph));
	offset += sizeof(*ph);

	ph->version = PFSYNC_VERSION;
	ph->len = htons(sc->sc_len - sizeof(*ip));
	bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
	bcopy(pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
	/* walk the queues */
	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))

		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
			KASSERT(st->sync_state == q,
			    ("%s: st->sync_state == q",
			KASSERT(st->sync_state == q);
			offset += pfsync_qs[q].write(st, m, offset);
			st->sync_state = PFSYNC_S_NONE;
		TAILQ_INIT(&sc->sc_qs[q]);

		bzero(subh, sizeof(*subh));
		subh->action = pfsync_qs[q].action;
		subh->count = htons(count);

	if (!TAILQ_EMPTY(&sc->sc_upd_req_list)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
			TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);

			bcopy(&ur->ur_msg, m->m_data + offset,
			    sizeof(ur->ur_msg));
			offset += sizeof(ur->ur_msg);

			pool_put(&sc->sc_pool, ur);

		bzero(subh, sizeof(*subh));
		subh->action = PFSYNC_ACT_UPD_REQ;
		subh->count = htons(count);

	/* has someone built a custom region for us to add? */
	if (sc->sc_plus != NULL) {
		bcopy(sc->sc_plus, m->m_data + offset, sc->sc_pluslen);
		offset += sc->sc_pluslen;

	if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry) {
			offset += pfsync_out_tdb(t, m, offset);
			CLR(t->tdb_flags, TDBF_PFSYNC);
		TAILQ_INIT(&sc->sc_tdb_q);

		bzero(subh, sizeof(*subh));
		subh->action = PFSYNC_ACT_TDB;
		subh->count = htons(count);

	subh = (struct pfsync_subheader *)(m->m_data + offset);
	offset += sizeof(*subh);

	bzero(subh, sizeof(*subh));
	subh->action = PFSYNC_ACT_EOF;
	subh->count = htons(1);

	/* XXX write checksum in EOF here */

	/* we're done, let's put it on the wire */
	m->m_data += sizeof(*ip);
	m->m_len = m->m_pkthdr.len = sc->sc_len - sizeof(*ip);
	bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
	m->m_data -= sizeof(*ip);
	m->m_len = m->m_pkthdr.len = sc->sc_len;

	if (sc->sc_sync_if == NULL) {
		sc->sc_len = PFSYNC_MINPKT;

	sc->sc_ifp->if_opackets++;
	sc->sc_ifp->if_obytes += m->m_pkthdr.len;
	sc->sc_len = PFSYNC_MINPKT;

	if (!_IF_QFULL(&sc->sc_ifp->if_snd))
		_IF_ENQUEUE(&sc->sc_ifp->if_snd, m);
		sc->sc_ifp->if_snd.ifq_drops++;
	swi_sched(V_pfsync_swi_cookie, 0);
	sc->sc_if.if_opackets++;
	sc->sc_if.if_obytes += m->m_pkthdr.len;
	if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL) == 0)
		pfsyncstats.pfsyncs_opackets++;
		pfsyncstats.pfsyncs_oerrors++;
	sc->sc_len = PFSYNC_MINPKT;
pfsync_insert_state(struct pf_state *st)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;

	splassert(IPL_SOFTNET);

	if (ISSET(st->rule.ptr->rule_flag, PFRULE_NOSYNC) ||
	    st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
		SET(st->state_flags, PFSTATE_NOSYNC);

	if (sc == NULL || ISSET(st->state_flags, PFSTATE_NOSYNC))

	KASSERT(st->sync_state == PFSYNC_S_NONE,
	    ("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
	KASSERT(st->sync_state == PFSYNC_S_NONE);

	if (sc->sc_len == PFSYNC_MINPKT)
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		timeout_add_sec(&sc->sc_tmo, 1);

	pfsync_q_ins(st, PFSYNC_S_INS);

	if (ISSET(st->state_flags, PFSTATE_ACK))
		schednetisr(NETISR_PFSYNC);
	st->sync_updates = 0;
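/*
 * Deferral: the packet that created a new state can be held here until
 * the peer acknowledges the insertion (or the timer below fires), so
 * that a failover box never sees traffic for a state it has not
 * learned yet.  pfsync_undefer() releases the held mbuf;
 * pfsync_deferred() is invoked once the ack arrives.
 */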
pfsync_defer(struct pf_state *st, struct mbuf *m)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	struct pfsync_deferral *pd;

	splassert(IPL_SOFTNET);

	if (sc->sc_deferred >= 128)
		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);

	pd = pool_get(&sc->sc_pool, M_NOWAIT);

	m->m_flags |= M_SKIP_FIREWALL;
	m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;

	SET(st->state_flags, PFSTATE_ACK);

	TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, pd_entry);

	callout_init(&pd->pd_tmo, CALLOUT_MPSAFE);
	callout_reset(&pd->pd_tmo, defer, pfsync_defer_tmo,
	timeout_set(&pd->pd_tmo, pfsync_defer_tmo, pd);
	timeout_add(&pd->pd_tmo, defer);

pfsync_undefer(struct pfsync_deferral *pd, int drop)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;

	splassert(IPL_SOFTNET);

	TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);

	CLR(pd->pd_st->state_flags, PFSTATE_ACK);
	timeout_del(&pd->pd_tmo);		/* bah */
		/* XXX: use pf_defered?! */
		ip_output(pd->pd_m, (void *)NULL, (void *)NULL, 0,
		    (void *)NULL, (void *)NULL);

	pool_put(&sc->sc_pool, pd);

pfsync_defer_tmo(void *arg)
#if defined(__FreeBSD__) && defined(VIMAGE)
	struct pfsync_deferral *pd = arg;
	CURVNET_SET(pd->pd_m->m_pkthdr.rcvif->if_vnet);	/* XXX */
	pfsync_undefer(arg, 0);

pfsync_deferred(struct pf_state *st, int drop)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	struct pfsync_deferral *pd;

	TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) {
		if (pd->pd_st == st) {
			pfsync_undefer(pd, drop);

	panic("pfsync_send_deferred: unable to find deferred state");
u_int pfsync_upds = 0;

pfsync_update_state(struct pf_state *st)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;

	splassert(IPL_SOFTNET);

	if (ISSET(st->state_flags, PFSTATE_ACK))
		pfsync_deferred(st, 0);
	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)

	if (sc->sc_len == PFSYNC_MINPKT)
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		timeout_add_sec(&sc->sc_tmo, 1);

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
		/* we're already handling it */
		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
			if (st->sync_updates >= sc->sc_maxupdates)

		pfsync_q_ins(st, PFSYNC_S_UPD_C);
		st->sync_updates = 0;

		panic("pfsync_update_state: unexpected sync state %d",

	if (sync || (time_uptime - st->pfsync_time) < 2) {
		schednetisr(NETISR_PFSYNC);
pfsync_request_update(u_int32_t creatorid, u_int64_t id)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	struct pfsync_upd_req_item *item;
	size_t nlen = sizeof(struct pfsync_upd_req);

	/*
	 * this code does nothing to prevent multiple update requests for the
	 * same state being generated.
	 */
	item = pool_get(&sc->sc_pool, PR_NOWAIT);

	item->ur_msg.id = id;
	item->ur_msg.creatorid = creatorid;

	if (TAILQ_EMPTY(&sc->sc_upd_req_list))
		nlen += sizeof(struct pfsync_subheader);

	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
	if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
		nlen = sizeof(struct pfsync_subheader) +
		    sizeof(struct pfsync_upd_req);

	TAILQ_INSERT_TAIL(&sc->sc_upd_req_list, item, ur_entry);

	schednetisr(NETISR_PFSYNC);
pfsync_update_state_req(struct pf_state *st)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;

		panic("pfsync_update_state_req: nonexistent instance");

	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
		pfsync_q_ins(st, PFSYNC_S_UPD);
		schednetisr(NETISR_PFSYNC);

		/* we're already handling it */

		panic("pfsync_update_state_req: unexpected sync state %d",
pfsync_delete_state(struct pf_state *st)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;

	splassert(IPL_SOFTNET);

	if (ISSET(st->state_flags, PFSTATE_ACK))
		pfsync_deferred(st, 1);
	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)

	if (sc->sc_len == PFSYNC_MINPKT)
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		timeout_add_sec(&sc->sc_tmo, 1);

	switch (st->sync_state) {
		/* we never got to tell the world so just forget about it */
	case PFSYNC_S_UPD_C:
		/* FALLTHROUGH to putting it on the del list */
		pfsync_q_ins(st, PFSYNC_S_DEL);

		panic("pfsync_delete_state: unexpected sync state %d",

pfsync_clear_states(u_int32_t creatorid, const char *ifname)
		struct pfsync_subheader subh;
		struct pfsync_clr clr;
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;

	splassert(IPL_SOFTNET);

	bzero(&r, sizeof(r));

	r.subh.action = PFSYNC_ACT_CLR;
	r.subh.count = htons(1);

	strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
	r.clr.creatorid = creatorid;

	pfsync_send_plus(&r, sizeof(r));
pfsync_q_ins(struct pf_state *st, int q)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	size_t nlen = pfsync_qs[q].len;

	KASSERT(st->sync_state == PFSYNC_S_NONE,
	    ("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
	KASSERT(st->sync_state == PFSYNC_S_NONE);

#if 1 || defined(PFSYNC_DEBUG)
	if (sc->sc_len < PFSYNC_MINPKT)
		panic("pfsync pkt len is too low %zu", sc->sc_len);
		panic("pfsync pkt len is too low %d", sc->sc_len);

	if (TAILQ_EMPTY(&sc->sc_qs[q]))
		nlen += sizeof(struct pfsync_subheader);

	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
	if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
		nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;

	TAILQ_INSERT_TAIL(&sc->sc_qs[q], st, sync_list);

pfsync_q_del(struct pf_state *st)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	int q = st->sync_state;

	KASSERT(st->sync_state != PFSYNC_S_NONE,
	    ("%s: st->sync_state != PFSYNC_S_NONE", __FUNCTION__));
	KASSERT(st->sync_state != PFSYNC_S_NONE);

	sc->sc_len -= pfsync_qs[q].len;
	TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list);
	st->sync_state = PFSYNC_S_NONE;

	if (TAILQ_EMPTY(&sc->sc_qs[q]))
		sc->sc_len -= sizeof(struct pfsync_subheader);
pfsync_update_tdb(struct tdb *t, int output)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;
	size_t nlen = sizeof(struct pfsync_tdb);

	if (!ISSET(t->tdb_flags, TDBF_PFSYNC)) {
		if (TAILQ_EMPTY(&sc->sc_tdb_q))
			nlen += sizeof(struct pfsync_subheader);

		if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
			nlen = sizeof(struct pfsync_subheader) +
			    sizeof(struct pfsync_tdb);

		TAILQ_INSERT_TAIL(&sc->sc_tdb_q, t, tdb_sync_entry);
		SET(t->tdb_flags, TDBF_PFSYNC);

		if (++t->tdb_updates >= sc->sc_maxupdates)
			schednetisr(NETISR_PFSYNC);

		SET(t->tdb_flags, TDBF_PFSYNC_RPL);
		CLR(t->tdb_flags, TDBF_PFSYNC_RPL);

pfsync_delete_tdb(struct tdb *t)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;

	if (sc == NULL || !ISSET(t->tdb_flags, TDBF_PFSYNC))

	sc->sc_len -= sizeof(struct pfsync_tdb);
	TAILQ_REMOVE(&sc->sc_tdb_q, t, tdb_sync_entry);
	CLR(t->tdb_flags, TDBF_PFSYNC);

	if (TAILQ_EMPTY(&sc->sc_tdb_q))
		sc->sc_len -= sizeof(struct pfsync_subheader);

pfsync_out_tdb(struct tdb *t, struct mbuf *m, int offset)
	struct pfsync_tdb *ut = (struct pfsync_tdb *)(m->m_data + offset);

	bzero(ut, sizeof(*ut));
	ut->spi = t->tdb_spi;
	bcopy(&t->tdb_dst, &ut->dst, sizeof(ut->dst));
	/*
	 * When a failover happens, the master's rpl is probably above
	 * what we see here (we may be up to a second late), so
	 * increase it a bit for outbound tdbs to manage most such
	 * situations.
	 *
	 * For now, just add an offset that is likely to be larger
	 * than the number of packets we can see in one second. The RFC
	 * just says the next packet must have a higher seq value.
	 *
	 * XXX What is a good algorithm for this? We could use
	 * a rate-determined increase, but to know it, we would have
	 * to extend struct tdb.
	 * XXX pt->rpl can wrap over MAXINT, but if so the real tdb
	 * will soon be replaced anyway.
	 */
#define RPL_INCR 16384
	ut->rpl = htonl(t->tdb_rpl + (ISSET(t->tdb_flags, TDBF_PFSYNC_RPL) ?
	ut->cur_bytes = htobe64(t->tdb_cur_bytes);
	ut->sproto = t->tdb_sproto;

	return (sizeof(*ut));
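/*
 * Bulk updates: a peer asking for a full table (update request with
 * id 0 and creatorid 0) is answered with a BUS_START message, a stream
 * of state updates paced by sc_bulk_tmo, and a closing BUS_END; the
 * requester keeps its CARP demotion in place until a valid BUS_END
 * arrives or sc_bulkfail_tmo runs out of retries.
 */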
pfsync_bulk_start(void)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;

	if (V_pf_status.debug >= PF_DEBUG_MISC)
	if (pf_status.debug >= PF_DEBUG_MISC)
		printf("pfsync: received bulk update request\n");

	if (TAILQ_EMPTY(&V_state_list))
	if (TAILQ_EMPTY(&state_list))
		pfsync_bulk_status(PFSYNC_BUS_END);

		sc->sc_ureq_received = time_uptime;
		if (sc->sc_bulk_next == NULL)
			sc->sc_bulk_next = TAILQ_FIRST(&V_state_list);
			sc->sc_bulk_next = TAILQ_FIRST(&state_list);
		sc->sc_bulk_last = sc->sc_bulk_next;

		pfsync_bulk_status(PFSYNC_BUS_START);
		callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc);

pfsync_bulk_update(void *arg)
	struct pfsync_softc *sc = arg;
	struct pf_state *st = sc->sc_bulk_next;

	CURVNET_SET(sc->sc_ifp->if_vnet);

		if (st->sync_state == PFSYNC_S_NONE &&
		    st->timeout < PFTM_MAX &&
		    st->pfsync_time <= sc->sc_ureq_received) {
			pfsync_update_state_req(st);

		st = TAILQ_NEXT(st, entry_list);
			st = TAILQ_FIRST(&V_state_list);
			st = TAILQ_FIRST(&state_list);

		if (st == sc->sc_bulk_last) {
			sc->sc_bulk_next = NULL;
			sc->sc_bulk_last = NULL;
			pfsync_bulk_status(PFSYNC_BUS_END);

		if (i > 1 && (sc->sc_ifp->if_mtu - sc->sc_len) <
		if (i > 1 && (sc->sc_if.if_mtu - sc->sc_len) <
		    sizeof(struct pfsync_state)) {
			/* we've filled a packet */
			sc->sc_bulk_next = st;
			callout_reset(&sc->sc_bulk_tmo, 1,
			    pfsync_bulk_update, sc);
			timeout_add(&sc->sc_bulk_tmo, 1);
pfsync_bulk_status(u_int8_t status)
		struct pfsync_subheader subh;
		struct pfsync_bus bus;
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;

	bzero(&r, sizeof(r));

	r.subh.action = PFSYNC_ACT_BUS;
	r.subh.count = htons(1);

	r.bus.creatorid = V_pf_status.hostid;
	r.bus.creatorid = pf_status.hostid;
	r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
	r.bus.status = status;

	pfsync_send_plus(&r, sizeof(r));

pfsync_bulk_fail(void *arg)
	struct pfsync_softc *sc = arg;

	CURVNET_SET(sc->sc_ifp->if_vnet);

	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
		    pfsync_bulk_fail, V_pfsyncif);
		timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
		pfsync_request_update(0, 0);

		/* Pretend like the transfer was ok */
		sc->sc_ureq_sent = 0;
		sc->sc_bulk_tries = 0;
		if (!sc->pfsync_sync_ok && carp_demote_adj_p)
			(*carp_demote_adj_p)(-V_pfsync_carp_adj,
			    "pfsync bulk fail");
		sc->pfsync_sync_ok = 1;
		if (!pfsync_sync_ok)
			carp_group_demote_adj(&sc->sc_if, -1);
		if (V_pf_status.debug >= PF_DEBUG_MISC)
		if (pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync: failed to receive bulk update\n");
pfsync_send_plus(void *plus, size_t pluslen)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;

	if (sc->sc_len + pluslen > sc->sc_ifp->if_mtu) {
	if (sc->sc_len + pluslen > sc->sc_if.if_mtu) {

	sc->sc_len += (sc->sc_pluslen = pluslen);

	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;

	if (sc == NULL || !ISSET(sc->sc_ifp->if_flags, IFF_DRV_RUNNING))
	if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING))

pfsync_state_in_use(struct pf_state *st)
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_softc *sc = pfsyncif;

	if (st->sync_state != PFSYNC_S_NONE ||
	    st == sc->sc_bulk_next ||
	    st == sc->sc_bulk_last)
pfsync_timeout(void *arg)
#if defined(__FreeBSD__) && defined(VIMAGE)
	struct pfsync_softc *sc = arg;
	CURVNET_SET(sc->sc_ifp->if_vnet);

/* this is a softnet/netisr handler */
pfsyncintr(void *arg)
	struct pfsync_softc *sc = arg;

	CURVNET_SET(sc->sc_ifp->if_vnet);
	if (sc->sc_len > PFSYNC_MINPKT)
	_IF_DEQUEUE_ALL(&sc->sc_ifp->if_snd, m);

	for (; m != NULL; m = n) {
		m->m_nextpkt = NULL;
		if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL)
			V_pfsyncstats.pfsyncs_opackets++;
			V_pfsyncstats.pfsyncs_oerrors++;

pfsync_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
	/* All sysctl names at this level are terminal. */

	case PFSYNCCTL_STATS:
		return (sysctl_struct(oldp, oldlenp, newp, newlen,
		    &V_pfsyncstats, sizeof(V_pfsyncstats)));
		return (ENOPROTOOPT);
pfsync_multicast_setup(struct pfsync_softc *sc)
	struct ip_moptions *imo = &sc->sc_imo;

	if (!(sc->sc_sync_if->if_flags & IFF_MULTICAST)) {
		sc->sc_sync_if = NULL;
		return (EADDRNOTAVAIL);

	imo->imo_membership = (struct in_multi **)malloc(
	    (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_PFSYNC,
	imo->imo_max_memberships = IP_MIN_MEMBERSHIPS;
	imo->imo_multicast_vif = -1;

	if ((error = in_joingroup(sc->sc_sync_if, &sc->sc_sync_peer, NULL,
	    &imo->imo_membership[0])) != 0) {
		free(imo->imo_membership, M_PFSYNC);

	imo->imo_num_memberships++;
	imo->imo_multicast_ifp = sc->sc_sync_if;
	imo->imo_multicast_ttl = PFSYNC_DFLTTL;
	imo->imo_multicast_loop = 0;

pfsync_multicast_cleanup(struct pfsync_softc *sc)
	struct ip_moptions *imo = &sc->sc_imo;

	in_leavegroup(imo->imo_membership[0], NULL);
	free(imo->imo_membership, M_PFSYNC);
	imo->imo_membership = NULL;
	imo->imo_multicast_ifp = NULL;
extern struct domain inetdomain;
static struct protosw in_pfsync_protosw = {
	.pr_type =		SOCK_RAW,
	.pr_domain =		&inetdomain,
	.pr_protocol =		IPPROTO_PFSYNC,
	.pr_flags =		PR_ATOMIC|PR_ADDR,
	.pr_input =		pfsync_input,
	.pr_output =		(pr_output_t *)rip_output,
	.pr_ctloutput =		rip_ctloutput,
	.pr_usrreqs =		&rip_usrreqs
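/*
 * pfsync registers as a raw protocol in the inet domain so that
 * incoming IPPROTO_PFSYNC datagrams are delivered to pfsync_input();
 * pfsync_init() below wires this up via pf_proto_register() and
 * ipproto_register().
 */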
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		V_pfsync_cloner = pfsync_cloner;
		V_pfsync_cloner_data = pfsync_cloner_data;
		V_pfsync_cloner.ifc_data = &V_pfsync_cloner_data;
		if_clone_attach(&V_pfsync_cloner);
		error = swi_add(NULL, "pfsync", pfsyncintr, V_pfsyncif,
		    SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie);
	VNET_LIST_RUNLOCK();

	error = pf_proto_register(PF_INET, &in_pfsync_protosw);
	error = ipproto_register(IPPROTO_PFSYNC);
		pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW);

	pfsync_state_import_ptr = pfsync_state_import;
	pfsync_up_ptr = pfsync_up;
	pfsync_insert_state_ptr = pfsync_insert_state;
	pfsync_update_state_ptr = pfsync_update_state;
	pfsync_delete_state_ptr = pfsync_delete_state;
	pfsync_clear_states_ptr = pfsync_clear_states;
	pfsync_state_in_use_ptr = pfsync_state_in_use;
	pfsync_defer_ptr = pfsync_defer;

	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		if (V_pfsync_swi_cookie) {
			swi_remove(V_pfsync_swi_cookie);
			if_clone_detach(&V_pfsync_cloner);
	VNET_LIST_RUNLOCK();

	VNET_ITERATOR_DECL(vnet_iter);

	pfsync_state_import_ptr = NULL;
	pfsync_up_ptr = NULL;
	pfsync_insert_state_ptr = NULL;
	pfsync_update_state_ptr = NULL;
	pfsync_delete_state_ptr = NULL;
	pfsync_clear_states_ptr = NULL;
	pfsync_state_in_use_ptr = NULL;
	pfsync_defer_ptr = NULL;

	ipproto_unregister(IPPROTO_PFSYNC);
	pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW);

	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		swi_remove(V_pfsync_swi_cookie);
		if_clone_detach(&V_pfsync_cloner);
	VNET_LIST_RUNLOCK();

pfsync_modevent(module_t mod, int type, void *data)
		error = pfsync_init();
		/*
		 * Module should not be unloaded due to race conditions.
		 */

static moduledata_t pfsync_mod = {

#define PFSYNC_MODVER 1

DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_VERSION(pfsync, PFSYNC_MODVER);
MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);
#endif /* __FreeBSD__ */