 * Copyright (c) 2002 Michael Shalayeff
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $
 *
 * Revisions picked from OpenBSD after revision 1.110 import:
 * 1.119 - don't m_copydata() beyond the len of mbuf in pfsync_input()
 * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates
 * 1.120, 1.175 - use monotonic time_uptime
 * 1.122 - reduce number of updates for non-TCP sessions
 * 1.125, 1.127 - rewrite merge or stale processing
 * 1.146 - bzero() mbuf before sparsely filling it with data
 * 1.170 - SIOCSIFMTU checks
 * 1.126, 1.142 - deferred packets processing
 * 1.173 - correct expire time processing
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>

#include <netinet/if_ether.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_carp.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

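/*
 * A pfsync datagram is at minimum an IP header followed by a pfsync
 * header and one subheader; everything queued for transmission is
 * accounted against this baseline in sc_len below.
 */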
#define PFSYNC_MINPKT ( \
	sizeof(struct ip) + \
	sizeof(struct pfsync_header) + \
	sizeof(struct pfsync_subheader) )

static int	pfsync_upd_tcp(struct pf_state *, struct pfsync_state_peer *,
		    struct pfsync_state_peer *);
static int	pfsync_in_clr(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_ins(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_iack(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_upd(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_upd_c(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_ureq(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_del(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_del_c(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_bus(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_tdb(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_eof(struct pfsync_pkt *, struct mbuf *, int, int);
static int	pfsync_in_error(struct pfsync_pkt *, struct mbuf *, int, int);

static int (*pfsync_acts[])(struct pfsync_pkt *, struct mbuf *, int, int) = {
	pfsync_in_clr,			/* PFSYNC_ACT_CLR */
	pfsync_in_ins,			/* PFSYNC_ACT_INS */
	pfsync_in_iack,			/* PFSYNC_ACT_INS_ACK */
	pfsync_in_upd,			/* PFSYNC_ACT_UPD */
	pfsync_in_upd_c,		/* PFSYNC_ACT_UPD_C */
	pfsync_in_ureq,			/* PFSYNC_ACT_UPD_REQ */
	pfsync_in_del,			/* PFSYNC_ACT_DEL */
	pfsync_in_del_c,		/* PFSYNC_ACT_DEL_C */
	pfsync_in_error,		/* PFSYNC_ACT_INS_F */
	pfsync_in_error,		/* PFSYNC_ACT_DEL_F */
	pfsync_in_bus,			/* PFSYNC_ACT_BUS */
	pfsync_in_tdb,			/* PFSYNC_ACT_TDB */
	pfsync_in_eof			/* PFSYNC_ACT_EOF */
};

struct pfsync_q {
	void		(*write)(struct pf_state *, void *);
	size_t		len;
	u_int8_t	action;
};

/* we have one of these for every PFSYNC_S_ */
static void	pfsync_out_state(struct pf_state *, void *);
static void	pfsync_out_iack(struct pf_state *, void *);
static void	pfsync_out_upd_c(struct pf_state *, void *);
static void	pfsync_out_del(struct pf_state *, void *);

static struct pfsync_q pfsync_qs[] = {
	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_INS },
	{ pfsync_out_iack,  sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_UPD },
	{ pfsync_out_upd_c, sizeof(struct pfsync_upd_c),   PFSYNC_ACT_UPD_C },
	{ pfsync_out_del,   sizeof(struct pfsync_del_c),   PFSYNC_ACT_DEL_C }
};

static void	pfsync_q_ins(struct pf_state *, int);
static void	pfsync_q_del(struct pf_state *);

static void	pfsync_update_state(struct pf_state *);

struct pfsync_upd_req_item {
	TAILQ_ENTRY(pfsync_upd_req_item)	ur_entry;
	struct pfsync_upd_req			ur_msg;
};

struct pfsync_deferral {
	struct pfsync_softc		*pd_sc;
	TAILQ_ENTRY(pfsync_deferral)	pd_entry;
	u_int				pd_refs;
	struct callout			pd_tmo;

	struct pf_state			*pd_st;
	struct mbuf			*pd_m;
};

struct pfsync_softc {
	/* Configuration */
	struct ifnet		*sc_ifp;
	struct ifnet		*sc_sync_if;
	struct ip_moptions	sc_imo;
	struct in_addr		sc_sync_peer;
	uint32_t		sc_flags;
#define	PFSYNCF_OK		0x00000001
#define	PFSYNCF_DEFER		0x00000002
#define	PFSYNCF_PUSH		0x00000004
	uint8_t			sc_maxupdates;
	struct ip		sc_template;
	struct callout		sc_tmo;
	struct mtx		sc_mtx;

	/* Queued data */
	size_t			sc_len;
	TAILQ_HEAD(, pf_state)			sc_qs[PFSYNC_S_COUNT];
	TAILQ_HEAD(, pfsync_upd_req_item)	sc_upd_req_list;
	TAILQ_HEAD(, pfsync_deferral)		sc_deferrals;
	u_int			sc_deferred;
	void			*sc_plus;
	size_t			sc_pluslen;

	/* Bulk update info */
	struct mtx		sc_bulk_mtx;
	uint32_t		sc_ureq_sent;
	int			sc_bulk_tries;
	uint32_t		sc_ureq_received;
	int			sc_bulk_hashid;
	uint64_t		sc_bulk_stateid;
	uint32_t		sc_bulk_creatorid;
	struct callout		sc_bulk_tmo;
	struct callout		sc_bulkfail_tmo;
};

#define	PFSYNC_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define	PFSYNC_UNLOCK(sc)	mtx_unlock(&(sc)->sc_mtx)
#define	PFSYNC_LOCK_ASSERT(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)

#define	PFSYNC_BLOCK(sc)	mtx_lock(&(sc)->sc_bulk_mtx)
#define	PFSYNC_BUNLOCK(sc)	mtx_unlock(&(sc)->sc_bulk_mtx)
#define	PFSYNC_BLOCK_ASSERT(sc)	mtx_assert(&(sc)->sc_bulk_mtx, MA_OWNED)

static const char pfsyncname[] = "pfsync";
static MALLOC_DEFINE(M_PFSYNC, pfsyncname, "pfsync(4) data");
static VNET_DEFINE(struct pfsync_softc *, pfsyncif) = NULL;
#define	V_pfsyncif		VNET(pfsyncif)
static VNET_DEFINE(void *, pfsync_swi_cookie) = NULL;
#define	V_pfsync_swi_cookie	VNET(pfsync_swi_cookie)
static VNET_DEFINE(struct pfsyncstats, pfsyncstats);
#define	V_pfsyncstats		VNET(pfsyncstats)
static VNET_DEFINE(int, pfsync_carp_adj) = CARP_MAXSKEW;
#define	V_pfsync_carp_adj	VNET(pfsync_carp_adj)

static void	pfsync_timeout(void *);
static void	pfsync_push(struct pfsync_softc *);
static void	pfsyncintr(void *);
static int	pfsync_multicast_setup(struct pfsync_softc *, struct ifnet *,
		    void *);
static void	pfsync_multicast_cleanup(struct pfsync_softc *);
static void	pfsync_pointers_init(void);
static void	pfsync_pointers_uninit(void);
static int	pfsync_init(void);
static void	pfsync_uninit(void);

SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW, 0, "PFSYNC");
SYSCTL_VNET_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_RW,
    &VNET_NAME(pfsyncstats), pfsyncstats,
    "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_RW,
    &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");

static int	pfsync_clone_create(struct if_clone *, int, caddr_t);
static void	pfsync_clone_destroy(struct ifnet *);
static int	pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
		    struct pf_state_peer *);
static int	pfsyncoutput(struct ifnet *, struct mbuf *,
		    const struct sockaddr *, struct route *);
static int	pfsyncioctl(struct ifnet *, u_long, caddr_t);

static int	pfsync_defer(struct pf_state *, struct mbuf *);
static void	pfsync_undefer(struct pfsync_deferral *, int);
static void	pfsync_undefer_state(struct pf_state *, int);
static void	pfsync_defer_tmo(void *);

static void	pfsync_request_update(u_int32_t, u_int64_t);
static void	pfsync_update_state_req(struct pf_state *);

static void	pfsync_drop(struct pfsync_softc *);
static void	pfsync_sendout(int);
static void	pfsync_send_plus(void *, size_t);

static void	pfsync_bulk_start(void);
static void	pfsync_bulk_status(u_int8_t);
static void	pfsync_bulk_update(void *);
static void	pfsync_bulk_fail(void *);

static void	pfsync_update_net_tdb(struct pfsync_tdb *);

#define PFSYNC_MAX_BULKTRIES	12

VNET_DEFINE(struct if_clone *, pfsync_cloner);
#define	V_pfsync_cloner	VNET(pfsync_cloner)

static int
pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
{
	struct pfsync_softc *sc;
	struct ifnet *ifp;
	int q;

	sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO);
	sc->sc_flags |= PFSYNCF_OK;

	for (q = 0; q < PFSYNC_S_COUNT; q++)
		TAILQ_INIT(&sc->sc_qs[q]);

	TAILQ_INIT(&sc->sc_upd_req_list);
	TAILQ_INIT(&sc->sc_deferrals);

	sc->sc_len = PFSYNC_MINPKT;
	sc->sc_maxupdates = 128;

	ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);
	if_initname(ifp, pfsyncname, unit);
	ifp->if_softc = sc;
	ifp->if_ioctl = pfsyncioctl;
	ifp->if_output = pfsyncoutput;
	ifp->if_type = IFT_PFSYNC;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_hdrlen = sizeof(struct pfsync_header);
	ifp->if_mtu = ETHERMTU;
	mtx_init(&sc->sc_mtx, pfsyncname, NULL, MTX_DEF);
	mtx_init(&sc->sc_bulk_mtx, "pfsync bulk", NULL, MTX_DEF);
	callout_init(&sc->sc_tmo, CALLOUT_MPSAFE);
	callout_init_mtx(&sc->sc_bulk_tmo, &sc->sc_bulk_mtx, 0);
	callout_init_mtx(&sc->sc_bulkfail_tmo, &sc->sc_bulk_mtx, 0);

	if_attach(ifp);

	bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);

	V_pfsyncif = sc;

	return (0);
}

static void
pfsync_clone_destroy(struct ifnet *ifp)
{
	struct pfsync_softc *sc = ifp->if_softc;

	/*
	 * At this stage, everything should have already been
	 * cleared by pfsync_uninit(), and we have only to
	 * drain callouts.
	 */
	while (sc->sc_deferred > 0) {
		struct pfsync_deferral *pd = TAILQ_FIRST(&sc->sc_deferrals);

		TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
		sc->sc_deferred--;
		if (callout_stop(&pd->pd_tmo)) {
			pf_release_state(pd->pd_st);
			m_freem(pd->pd_m);
			free(pd, M_PFSYNC);
		} else {
			pd->pd_refs++;
			callout_drain(&pd->pd_tmo);
			free(pd, M_PFSYNC);
		}
	}

	callout_drain(&sc->sc_tmo);
	callout_drain(&sc->sc_bulkfail_tmo);
	callout_drain(&sc->sc_bulk_tmo);

	if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
		(*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy");

	if (sc->sc_imo.imo_membership)
		pfsync_multicast_cleanup(sc);
	mtx_destroy(&sc->sc_mtx);
	mtx_destroy(&sc->sc_bulk_mtx);
	free(sc, M_PFSYNC);
}

static int
pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
    struct pf_state_peer *d)
{
	if (s->scrub.scrub_flag && d->scrub == NULL) {
		d->scrub = uma_zalloc(V_pf_state_scrub_z, M_NOWAIT | M_ZERO);
		if (d->scrub == NULL)
			return (ENOMEM);
	}

	return (0);
}

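/*
 * Import a wire-format state received from a pfsync peer (or pushed in
 * via the ioctl path).  A sketch of the flow: validate the creator id
 * and interface, bind the state to a rule when the ruleset checksums
 * agree, allocate the state and its key(s), and insert it into the
 * state table.
 */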
static int
pfsync_state_import(struct pfsync_state *sp, u_int8_t flags)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pf_state	*st = NULL;
	struct pf_state_key *skw = NULL, *sks = NULL;
	struct pf_rule *r = NULL;
	struct pfi_kif	*kif;
	int error;

	if (sp->creatorid == 0) {
		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("%s: invalid creator id: %08x\n", __func__,
			    ntohl(sp->creatorid));
		return (EINVAL);
	}

	if ((kif = pfi_kif_find(sp->ifname)) == NULL) {
		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("%s: unknown interface: %s\n", __func__,
			    sp->ifname);
		if (flags & PFSYNC_SI_IOCTL)
			return (EINVAL);
		return (0);	/* skip this state */
	}

	/*
	 * If the ruleset checksums match or the state is coming from the
	 * ioctl, it's safe to associate the state with the rule of that
	 * number.
	 */
	if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
	    (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) <
	    pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
		r = pf_main_ruleset.rules[
		    PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
	else
		r = &V_pf_default_rule;

	if ((r->max_states && r->states_cur >= r->max_states))
		goto cleanup;

	/*
	 * XXXGL: consider M_WAITOK in ioctl path after.
	 */
	if ((st = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO)) == NULL)
		goto cleanup;

	if ((skw = uma_zalloc(V_pf_state_key_z, M_NOWAIT)) == NULL)
		goto cleanup;

	if (PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0],
	    &sp->key[PF_SK_STACK].addr[0], sp->af) ||
	    PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1],
	    &sp->key[PF_SK_STACK].addr[1], sp->af) ||
	    sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] ||
	    sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1]) {
		sks = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
		if (sks == NULL)
			goto cleanup;
	} else
		sks = skw;

	/* allocate memory for scrub info */
	if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
	    pfsync_alloc_scrub_memory(&sp->dst, &st->dst))
		goto cleanup;

	/* copy to state key(s) */
	skw->addr[0] = sp->key[PF_SK_WIRE].addr[0];
	skw->addr[1] = sp->key[PF_SK_WIRE].addr[1];
	skw->port[0] = sp->key[PF_SK_WIRE].port[0];
	skw->port[1] = sp->key[PF_SK_WIRE].port[1];
	skw->proto = sp->proto;
	skw->af = sp->af;
	if (sks != skw) {
		sks->addr[0] = sp->key[PF_SK_STACK].addr[0];
		sks->addr[1] = sp->key[PF_SK_STACK].addr[1];
		sks->port[0] = sp->key[PF_SK_STACK].port[0];
		sks->port[1] = sp->key[PF_SK_STACK].port[1];
		sks->proto = sp->proto;
		sks->af = sp->af;
	}

	/* copy to state */
	bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr));
	st->creation = time_uptime - ntohl(sp->creation);
	st->expire = time_uptime;
	if (sp->expire) {
		uint32_t timeout;

		timeout = r->timeout[sp->timeout];
		if (!timeout)
			timeout = V_pf_default_rule.timeout[sp->timeout];

		/* sp->expire may have been adaptively scaled by export. */
		st->expire -= timeout - ntohl(sp->expire);
	}

	st->direction = sp->direction;
	st->log = sp->log;
	st->timeout = sp->timeout;
	st->state_flags = sp->state_flags;

	st->id = sp->id;
	st->creatorid = sp->creatorid;
	pf_state_peer_ntoh(&sp->src, &st->src);
	pf_state_peer_ntoh(&sp->dst, &st->dst);

	st->rule.ptr = r;
	st->nat_rule.ptr = NULL;
	st->anchor.ptr = NULL;
	st->rt_kif = NULL;

	st->pfsync_time = time_uptime;
	st->sync_state = PFSYNC_S_NONE;

	/* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
	r->states_cur++;
	r->states_tot++;

	if (!(flags & PFSYNC_SI_IOCTL))
		st->state_flags |= PFSTATE_NOSYNC;

	if ((error = pf_state_insert(kif, skw, sks, st)) != 0) {
		/* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */
		r->states_cur--;
		goto cleanup_state;
	}

	if (!(flags & PFSYNC_SI_IOCTL)) {
		st->state_flags &= ~PFSTATE_NOSYNC;
		if (st->state_flags & PFSTATE_ACK) {
			PFSYNC_LOCK(sc);
			pfsync_q_ins(st, PFSYNC_S_IACK);
			pfsync_push(sc);
			PFSYNC_UNLOCK(sc);
		}
	}
	st->state_flags &= ~PFSTATE_ACK;
	PF_STATE_UNLOCK(st);

	return (0);

cleanup:
	error = ENOMEM;
	if (skw == sks)
		sks = NULL;
	if (skw != NULL)
		uma_zfree(V_pf_state_key_z, skw);
	if (sks != NULL)
		uma_zfree(V_pf_state_key_z, sks);

cleanup_state:	/* pf_state_insert() frees the state keys. */
	if (st) {
		if (st->dst.scrub)
			uma_zfree(V_pf_state_scrub_z, st->dst.scrub);
		if (st->src.scrub)
			uma_zfree(V_pf_state_scrub_z, st->src.scrub);
		uma_zfree(V_pf_state_z, st);
	}
	return (error);
}

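/*
 * IPPROTO_PFSYNC input routine: sanity check the IP and pfsync headers,
 * then walk the subheaders, dispatching each batch of actions through
 * pfsync_acts[].
 */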
static void
pfsync_input(struct mbuf *m, __unused int off)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_pkt pkt;
	struct ip *ip = mtod(m, struct ip *);
	struct pfsync_header *ph;
	struct pfsync_subheader subh;
	int offset, len;
	int rv;
	uint16_t count;

	V_pfsyncstats.pfsyncs_ipackets++;

	/* Verify that we have a sync interface configured. */
	if (!sc || !sc->sc_sync_if || !V_pf_status.running ||
	    (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done;

	/* verify that the packet came in on the right interface */
	if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
		V_pfsyncstats.pfsyncs_badif++;
		goto done;
	}

	sc->sc_ifp->if_ipackets++;
	sc->sc_ifp->if_ibytes += m->m_pkthdr.len;
	/* verify that the IP TTL is 255. */
	if (ip->ip_ttl != PFSYNC_DFLTTL) {
		V_pfsyncstats.pfsyncs_badttl++;
		goto done;
	}

	offset = ip->ip_hl << 2;
	if (m->m_pkthdr.len < offset + sizeof(*ph)) {
		V_pfsyncstats.pfsyncs_hdrops++;
		goto done;
	}

	if (offset + sizeof(*ph) > m->m_len) {
		if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
			V_pfsyncstats.pfsyncs_hdrops++;
			return;
		}
		ip = mtod(m, struct ip *);
	}
	ph = (struct pfsync_header *)((char *)ip + offset);

	/* verify the version */
	if (ph->version != PFSYNC_VERSION) {
		V_pfsyncstats.pfsyncs_badver++;
		goto done;
	}

	len = ntohs(ph->len) + offset;
	if (m->m_pkthdr.len < len) {
		V_pfsyncstats.pfsyncs_badlen++;
		goto done;
	}

	/* Cheaper to grab this now than having to mess with mbufs later */
	pkt.ip = ip;
	pkt.src = ip->ip_src;
	pkt.flags = 0;

	/*
	 * Trusting pf_chksum during packet processing, as well as seeking
	 * in interface name tree, require holding PF_RULES_RLOCK().
	 */
	PF_RULES_RLOCK();
	if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
		pkt.flags |= PFSYNC_SI_CKSUM;

	offset += sizeof(*ph);
	while (offset <= len - sizeof(subh)) {
		m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
		offset += sizeof(subh);

		if (subh.action >= PFSYNC_ACT_MAX) {
			V_pfsyncstats.pfsyncs_badact++;
			PF_RULES_RUNLOCK();
			goto done;
		}

		count = ntohs(subh.count);
		V_pfsyncstats.pfsyncs_iacts[subh.action] += count;
		rv = (*pfsync_acts[subh.action])(&pkt, m, offset, count);
		if (rv == -1) {
			PF_RULES_RUNLOCK();
			return;
		}

		offset += rv;
	}
	PF_RULES_RUNLOCK();

done:
	m_freem(m);
}

static int
pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_clr *clr;
	struct mbuf *mp;
	int len = sizeof(*clr) * count;
	int i, offp;
	u_int32_t creatorid;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	clr = (struct pfsync_clr *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		creatorid = clr[i].creatorid;

		if (clr[i].ifname[0] != '\0' &&
		    pfi_kif_find(clr[i].ifname) == NULL)
			continue;

		for (int i = 0; i <= V_pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];
			struct pf_state *s;
relock:
			PF_HASHROW_LOCK(ih);
			LIST_FOREACH(s, &ih->states, entry) {
				if (s->creatorid == creatorid) {
					s->state_flags |= PFSTATE_NOSYNC;
					pf_unlink_state(s, PF_ENTER_LOCKED);
					goto relock;
				}
			}
			PF_HASHROW_UNLOCK(ih);
		}
	}

	return (len);
}

static int
pfsync_in_ins(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct mbuf *mp;
	struct pfsync_state *sa, *sp;
	int len = sizeof(*sp) * count;
	int i, offp;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		sp = &sa[i];

		/* Check for invalid values. */
		if (sp->timeout >= PFTM_MAX ||
		    sp->src.state > PF_TCPS_PROXY_DST ||
		    sp->dst.state > PF_TCPS_PROXY_DST ||
		    sp->direction > PF_OUT ||
		    (sp->af != AF_INET && sp->af != AF_INET6)) {
			if (V_pf_status.debug >= PF_DEBUG_MISC)
				printf("%s: invalid value\n", __func__);
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		if (pfsync_state_import(sp, pkt->flags) == ENOMEM)
			/* Drop out, but process the rest of the actions. */
			break;
	}

	return (len);
}

static int
pfsync_in_iack(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_ins_ack *ia, *iaa;
	struct pf_state *st;

	struct mbuf *mp;
	int len = count * sizeof(*ia);
	int offp, i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	iaa = (struct pfsync_ins_ack *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		ia = &iaa[i];

		st = pf_find_state_byid(ia->id, ia->creatorid);
		if (st == NULL)
			continue;

		if (st->state_flags & PFSTATE_ACK) {
			PFSYNC_LOCK(V_pfsyncif);
			pfsync_undefer_state(st, 0);
			PFSYNC_UNLOCK(V_pfsyncif);
		}
		PF_STATE_UNLOCK(st);
	}
	/*
	 * XXX this is not yet implemented, but we know the size of the
	 * message so we can skip it.
	 */

	return (count * sizeof(struct pfsync_ins_ack));
}

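/*
 * Merge a peer's view of a TCP state into ours.  Returns zero when the
 * peers merged cleanly, nonzero when the incoming update was stale
 * (our side is more advanced), in which case the caller re-advertises
 * our newer state.
 */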
static int
pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src,
    struct pfsync_state_peer *dst)
{
	int sync = 0;

	PF_STATE_LOCK_ASSERT(st);

	/*
	 * The state should never go backwards except
	 * for syn-proxy states.  Neither should the
	 * sequence window slide backwards.
	 */
	if ((st->src.state > src->state &&
	    (st->src.state < PF_TCPS_PROXY_SRC ||
	    src->state >= PF_TCPS_PROXY_SRC)) ||

	    (st->src.state == src->state &&
	    SEQ_GT(st->src.seqlo, ntohl(src->seqlo))))
		sync++;
	else
		pf_state_peer_ntoh(src, &st->src);

	if ((st->dst.state > dst->state) ||

	    (st->dst.state >= TCPS_SYN_SENT &&
	    SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo))))
		sync++;
	else
		pf_state_peer_ntoh(dst, &st->dst);

	return (sync);
}

static int
pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_state *sa, *sp;
	struct pf_state *st;
	int sync;
	struct mbuf *mp;
	int len = count * sizeof(*sp);
	int offp, i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		sp = &sa[i];

		/* check for invalid values */
		if (sp->timeout >= PFTM_MAX ||
		    sp->src.state > PF_TCPS_PROXY_DST ||
		    sp->dst.state > PF_TCPS_PROXY_DST) {
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				printf("pfsync_input: PFSYNC_ACT_UPD: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		st = pf_find_state_byid(sp->id, sp->creatorid);
		if (st == NULL) {
			/* insert the update */
			if (pfsync_state_import(sp, 0))
				V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}

		if (st->state_flags & PFSTATE_ACK) {
			PFSYNC_LOCK(sc);
			pfsync_undefer_state(st, 1);
			PFSYNC_UNLOCK(sc);
		}

		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
			sync = pfsync_upd_tcp(st, &sp->src, &sp->dst);
		else {
			sync = 0;

			/*
			 * Non-TCP protocol state machines always go
			 * forwards w/ respect to sync states.
			 */
			if (st->src.state > sp->src.state)
				sync++;
			else
				pf_state_peer_ntoh(&sp->src, &st->src);
			if (st->dst.state > sp->dst.state)
				sync++;
			else
				pf_state_peer_ntoh(&sp->dst, &st->dst);
		}
		if (sync < 2) {
			pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
			pf_state_peer_ntoh(&sp->dst, &st->dst);
			st->expire = time_uptime;
			st->timeout = sp->timeout;
		}
		st->pfsync_time = time_uptime;

		if (sync) {
			V_pfsyncstats.pfsyncs_stale++;

			pfsync_update_state(st);
			PF_STATE_UNLOCK(st);
			PFSYNC_LOCK(sc);
			pfsync_push(sc);
			PFSYNC_UNLOCK(sc);
			continue;
		}
		PF_STATE_UNLOCK(st);
	}

	return (len);
}

static int
pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_upd_c *ua, *up;
	struct pf_state *st;
	int len = count * sizeof(*up);
	int sync;
	struct mbuf *mp;
	int offp, i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	ua = (struct pfsync_upd_c *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		up = &ua[i];

		/* check for invalid values */
		if (up->timeout >= PFTM_MAX ||
		    up->src.state > PF_TCPS_PROXY_DST ||
		    up->dst.state > PF_TCPS_PROXY_DST) {
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				printf("pfsync_input: "
				    "PFSYNC_ACT_UPD_C: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		st = pf_find_state_byid(up->id, up->creatorid);
		if (st == NULL) {
			/* We don't have this state. Ask for it. */
			PFSYNC_LOCK(sc);
			pfsync_request_update(up->creatorid, up->id);
			PFSYNC_UNLOCK(sc);
			continue;
		}

		if (st->state_flags & PFSTATE_ACK) {
			PFSYNC_LOCK(sc);
			pfsync_undefer_state(st, 1);
			PFSYNC_UNLOCK(sc);
		}

		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
			sync = pfsync_upd_tcp(st, &up->src, &up->dst);
		else {
			sync = 0;

			/*
			 * Non-TCP protocol state machines always go
			 * forwards w/ respect to sync states.
			 */
			if (st->src.state > up->src.state)
				sync++;
			else
				pf_state_peer_ntoh(&up->src, &st->src);
			if (st->dst.state > up->dst.state)
				sync++;
			else
				pf_state_peer_ntoh(&up->dst, &st->dst);
		}
		if (sync < 2) {
			pfsync_alloc_scrub_memory(&up->dst, &st->dst);
			pf_state_peer_ntoh(&up->dst, &st->dst);
			st->expire = time_uptime;
			st->timeout = up->timeout;
		}
		st->pfsync_time = time_uptime;

		if (sync) {
			V_pfsyncstats.pfsyncs_stale++;

			pfsync_update_state(st);
			PF_STATE_UNLOCK(st);
			PFSYNC_LOCK(sc);
			pfsync_push(sc);
			PFSYNC_UNLOCK(sc);
			continue;
		}
		PF_STATE_UNLOCK(st);
	}

	return (len);
}

static int
pfsync_in_ureq(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_upd_req *ur, *ura;
	struct mbuf *mp;
	int len = count * sizeof(*ur);
	int i, offp;
	struct pf_state *st;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	ura = (struct pfsync_upd_req *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		ur = &ura[i];

		if (ur->id == 0 && ur->creatorid == 0)
			pfsync_bulk_start();
		else {
			st = pf_find_state_byid(ur->id, ur->creatorid);
			if (st == NULL) {
				V_pfsyncstats.pfsyncs_badstate++;
				continue;
			}
			if (st->state_flags & PFSTATE_NOSYNC) {
				PF_STATE_UNLOCK(st);
				continue;
			}

			pfsync_update_state_req(st);
			PF_STATE_UNLOCK(st);
		}
	}

	return (len);
}

static int
pfsync_in_del(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct mbuf *mp;
	struct pfsync_state *sa, *sp;
	struct pf_state *st;
	int len = count * sizeof(*sp);
	int offp, i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		sp = &sa[i];

		st = pf_find_state_byid(sp->id, sp->creatorid);
		if (st == NULL) {
			V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}
		st->state_flags |= PFSTATE_NOSYNC;
		pf_unlink_state(st, PF_ENTER_LOCKED);
	}

	return (len);
}

static int
pfsync_in_del_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct mbuf *mp;
	struct pfsync_del_c *sa, *sp;
	struct pf_state *st;
	int len = count * sizeof(*sp);
	int offp, i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_del_c *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		sp = &sa[i];

		st = pf_find_state_byid(sp->id, sp->creatorid);
		if (st == NULL) {
			V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}

		st->state_flags |= PFSTATE_NOSYNC;
		pf_unlink_state(st, PF_ENTER_LOCKED);
	}

	return (len);
}

static int
pfsync_in_bus(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_bus *bus;
	struct mbuf *mp;
	int len = count * sizeof(*bus);
	int offp;

	PFSYNC_BLOCK(sc);

	/* If we're not waiting for a bulk update, who cares. */
	if (sc->sc_ureq_sent == 0) {
		PFSYNC_BUNLOCK(sc);
		return (len);
	}

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		PFSYNC_BUNLOCK(sc);
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	bus = (struct pfsync_bus *)(mp->m_data + offp);

	switch (bus->status) {
	case PFSYNC_BUS_START:
		callout_reset(&sc->sc_bulkfail_tmo, 4 * hz +
		    V_pf_limits[PF_LIMIT_STATES].limit /
		    ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) /
		    sizeof(struct pfsync_state)),
		    pfsync_bulk_fail, sc);
		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync: received bulk update start\n");
		break;

	case PFSYNC_BUS_END:
		if (time_uptime - ntohl(bus->endtime) >=
		    sc->sc_ureq_sent) {
			/* that's it, we're happy */
			sc->sc_ureq_sent = 0;
			sc->sc_bulk_tries = 0;
			callout_stop(&sc->sc_bulkfail_tmo);
			if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
				(*carp_demote_adj_p)(-V_pfsync_carp_adj,
				    "pfsync bulk done");
			sc->sc_flags |= PFSYNCF_OK;
			if (V_pf_status.debug >= PF_DEBUG_MISC)
				printf("pfsync: received valid "
				    "bulk update end\n");
		} else {
			if (V_pf_status.debug >= PF_DEBUG_MISC)
				printf("pfsync: received invalid "
				    "bulk update end: bad timestamp\n");
		}
		break;
	}
	PFSYNC_BUNLOCK(sc);

	return (len);
}

static int
pfsync_in_tdb(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	int len = count * sizeof(struct pfsync_tdb);

#if defined(IPSEC)
	struct pfsync_tdb *tp;
	struct mbuf *mp;
	int offp;
	int i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	tp = (struct pfsync_tdb *)(mp->m_data + offp);

	for (i = 0; i < count; i++)
		pfsync_update_net_tdb(&tp[i]);
#endif

	return (len);
}

#if defined(IPSEC)
/* Update an in-kernel tdb. Silently fail if no tdb is found. */
static void
pfsync_update_net_tdb(struct pfsync_tdb *pt)
{
	struct tdb *tdb;

	/* check for invalid values */
	if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
	    (pt->dst.sa.sa_family != AF_INET &&
	    pt->dst.sa.sa_family != AF_INET6))
		goto bad;

	tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
	if (tdb) {
		pt->rpl = ntohl(pt->rpl);
		pt->cur_bytes = (unsigned long long)be64toh(pt->cur_bytes);

		/* Neither replay nor byte counter should ever decrease. */
		if (pt->rpl < tdb->tdb_rpl ||
		    pt->cur_bytes < tdb->tdb_cur_bytes)
			goto bad;

		tdb->tdb_rpl = pt->rpl;
		tdb->tdb_cur_bytes = pt->cur_bytes;
	}
	return;

bad:
	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
		    "invalid value\n");
	V_pfsyncstats.pfsyncs_badstate++;
	return;
}
#endif

static int
pfsync_in_eof(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	/* check if we are at the right place in the packet */
	if (offset != m->m_pkthdr.len)
		V_pfsyncstats.pfsyncs_badlen++;

	/* we're done. free and let the caller return */
	m_freem(m);
	return (-1);
}

static int
pfsync_in_error(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	V_pfsyncstats.pfsyncs_badact++;

	m_freem(m);
	return (-1);
}

static int
pfsyncoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct route *rt)
{
	m_freem(m);
	return (0);
}

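/*
 * Interface ioctl handler.  Configuration normally arrives via
 * SIOCSETPFSYNC from ifconfig(8)/pfsync(4); a typical setup (the
 * device name is illustrative) is:
 *
 *	ifconfig pfsync0 syncdev em0 maxupd 128 defer up
 *
 * which selects the synchronisation interface, caps the number of
 * compressed updates per state and enables deferral of initial packets.
 */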
static int
pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct pfsync_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct pfsyncreq pfsyncr;
	int error;

	switch (cmd) {
	case SIOCSIFFLAGS:
		PFSYNC_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			ifp->if_drv_flags |= IFF_DRV_RUNNING;
			PFSYNC_UNLOCK(sc);
			pfsync_pointers_init();
		} else {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			PFSYNC_UNLOCK(sc);
			pfsync_pointers_uninit();
		}
		break;
	case SIOCSIFMTU:
		if (!sc->sc_sync_if ||
		    ifr->ifr_mtu <= PFSYNC_MINPKT ||
		    ifr->ifr_mtu > sc->sc_sync_if->if_mtu)
			return (EINVAL);
		if (ifr->ifr_mtu < ifp->if_mtu) {
			PFSYNC_LOCK(sc);
			if (sc->sc_len > PFSYNC_MINPKT)
				pfsync_sendout(1);
			PFSYNC_UNLOCK(sc);
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCGETPFSYNC:
		bzero(&pfsyncr, sizeof(pfsyncr));
		PFSYNC_LOCK(sc);
		if (sc->sc_sync_if) {
			strlcpy(pfsyncr.pfsyncr_syncdev,
			    sc->sc_sync_if->if_xname, IFNAMSIZ);
		}
		pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
		pfsyncr.pfsyncr_defer = (PFSYNCF_DEFER ==
		    (sc->sc_flags & PFSYNCF_DEFER));
		PFSYNC_UNLOCK(sc);
		return (copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr)));

	case SIOCSETPFSYNC:
	    {
		struct ip_moptions *imo = &sc->sc_imo;
		struct ifnet *sifp;
		struct ip *ip;
		void *mship = NULL;

		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
			return (error);
		if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))
			return (error);

		if (pfsyncr.pfsyncr_maxupdates > 255)
			return (EINVAL);

		if (pfsyncr.pfsyncr_syncdev[0] == 0)
			sifp = NULL;
		else if ((sifp = ifunit_ref(pfsyncr.pfsyncr_syncdev)) == NULL)
			return (EINVAL);

		if (sifp != NULL && (
		    pfsyncr.pfsyncr_syncpeer.s_addr == 0 ||
		    pfsyncr.pfsyncr_syncpeer.s_addr ==
		    htonl(INADDR_PFSYNC_GROUP)))
			mship = malloc((sizeof(struct in_multi *) *
			    IP_MIN_MEMBERSHIPS), M_PFSYNC, M_WAITOK | M_ZERO);

		PFSYNC_LOCK(sc);
		if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
			sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
		else
			sc->sc_sync_peer.s_addr =
			    pfsyncr.pfsyncr_syncpeer.s_addr;

		sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;
		if (pfsyncr.pfsyncr_defer) {
			sc->sc_flags |= PFSYNCF_DEFER;
			pfsync_defer_ptr = pfsync_defer;
		} else {
			sc->sc_flags &= ~PFSYNCF_DEFER;
			pfsync_defer_ptr = NULL;
		}

		if (sifp == NULL) {
			if (sc->sc_sync_if)
				if_rele(sc->sc_sync_if);
			sc->sc_sync_if = NULL;
			if (imo->imo_membership)
				pfsync_multicast_cleanup(sc);
			PFSYNC_UNLOCK(sc);
			break;
		}

		if (sc->sc_len > PFSYNC_MINPKT &&
		    (sifp->if_mtu < sc->sc_ifp->if_mtu ||
		    (sc->sc_sync_if != NULL &&
		    sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
		    sifp->if_mtu < MCLBYTES - sizeof(struct ip)))
			pfsync_sendout(1);

		if (imo->imo_membership)
			pfsync_multicast_cleanup(sc);

		if (sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
			error = pfsync_multicast_setup(sc, sifp, mship);
			if (error) {
				if_rele(sifp);
				free(mship, M_PFSYNC);
				PFSYNC_UNLOCK(sc);
				return (error);
			}
		}
		if (sc->sc_sync_if)
			if_rele(sc->sc_sync_if);
		sc->sc_sync_if = sifp;

		ip = &sc->sc_template;
		bzero(ip, sizeof(*ip));
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(sc->sc_template) >> 2;
		ip->ip_tos = IPTOS_LOWDELAY;
		/* len and id are set later. */
		ip->ip_off = htons(IP_DF);
		ip->ip_ttl = PFSYNC_DFLTTL;
		ip->ip_p = IPPROTO_PFSYNC;
		ip->ip_src.s_addr = INADDR_ANY;
		ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr;

		/* Request a full state table update. */
		if ((sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
			(*carp_demote_adj_p)(V_pfsync_carp_adj,
			    "pfsync bulk start");
		sc->sc_flags &= ~PFSYNCF_OK;
		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync: requesting bulk update\n");
		pfsync_request_update(0, 0);
		PFSYNC_UNLOCK(sc);
		PFSYNC_BLOCK(sc);
		sc->sc_ureq_sent = time_uptime;
		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail,
		    sc);
		PFSYNC_BUNLOCK(sc);

		break;
	    }
	default:
		return (ENOTTY);
	}

	return (0);
}

static void
pfsync_out_state(struct pf_state *st, void *buf)
{
	struct pfsync_state *sp = buf;

	pfsync_state_export(sp, st);
}

static void
pfsync_out_iack(struct pf_state *st, void *buf)
{
	struct pfsync_ins_ack *iack = buf;

	iack->id = st->id;
	iack->creatorid = st->creatorid;
}

static void
pfsync_out_upd_c(struct pf_state *st, void *buf)
{
	struct pfsync_upd_c *up = buf;

	bzero(up, sizeof(*up));
	up->id = st->id;
	pf_state_peer_hton(&st->src, &up->src);
	pf_state_peer_hton(&st->dst, &up->dst);
	up->creatorid = st->creatorid;
	up->timeout = st->timeout;
}

static void
pfsync_out_del(struct pf_state *st, void *buf)
{
	struct pfsync_del_c *dp = buf;

	dp->id = st->id;
	dp->creatorid = st->creatorid;
	st->state_flags |= PFSTATE_NOSYNC;
}

static void
pfsync_drop(struct pfsync_softc *sc)
{
	struct pf_state *st, *next;
	struct pfsync_upd_req_item *ur;
	int q;

	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))
			continue;

		TAILQ_FOREACH_SAFE(st, &sc->sc_qs[q], sync_list, next) {
			KASSERT(st->sync_state == q,
				("%s: st->sync_state == q",
					__func__));
			st->sync_state = PFSYNC_S_NONE;
			pf_release_state(st);
		}
		TAILQ_INIT(&sc->sc_qs[q]);
	}

	while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
		TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
		free(ur, M_PFSYNC);
	}

	sc->sc_plus = NULL;
	sc->sc_len = PFSYNC_MINPKT;
}

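/*
 * Assemble everything queued in the softc (state queues, update
 * requests, an optional "plus" region and a trailing EOF subheader)
 * into a single mbuf behind an IP + pfsync header, then hand it to
 * the software interrupt for transmission.
 */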
static void
pfsync_sendout(int schedswi)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;
	struct ip *ip;
	struct pfsync_header *ph;
	struct pfsync_subheader *subh;
	struct pf_state *st;
	struct pfsync_upd_req_item *ur;
	int offset;
	int q, count = 0;

	KASSERT(sc != NULL, ("%s: null sc", __func__));
	KASSERT(sc->sc_len > PFSYNC_MINPKT,
	    ("%s: sc_len %zu", __func__, sc->sc_len));
	PFSYNC_LOCK_ASSERT(sc);

	if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
		pfsync_drop(sc);
		return;
	}

	m = m_get2(max_linkhdr + sc->sc_len, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		sc->sc_ifp->if_oerrors++;
		V_pfsyncstats.pfsyncs_onomem++;
		return;
	}
	m->m_data += max_linkhdr;
	m->m_len = m->m_pkthdr.len = sc->sc_len;

	/* build the ip header */
	ip = (struct ip *)m->m_data;
	bcopy(&sc->sc_template, ip, sizeof(*ip));
	offset = sizeof(*ip);

	ip->ip_len = htons(m->m_pkthdr.len);
	ip->ip_id = htons(ip_randomid());

	/* build the pfsync header */
	ph = (struct pfsync_header *)(m->m_data + offset);
	bzero(ph, sizeof(*ph));
	offset += sizeof(*ph);

	ph->version = PFSYNC_VERSION;
	ph->len = htons(sc->sc_len - sizeof(*ip));
	bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);

	/* walk the queues */
	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))
			continue;

		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
			KASSERT(st->sync_state == q,
				("%s: st->sync_state == q",
					__func__));
			/*
			 * XXXGL: some of write methods do unlocked reads
			 * of state data :(
			 */
			pfsync_qs[q].write(st, m->m_data + offset);
			offset += pfsync_qs[q].len;
			st->sync_state = PFSYNC_S_NONE;
			pf_release_state(st);
			count++;
		}
		TAILQ_INIT(&sc->sc_qs[q]);

		bzero(subh, sizeof(*subh));
		subh->action = pfsync_qs[q].action;
		subh->count = htons(count);
		V_pfsyncstats.pfsyncs_oacts[pfsync_qs[q].action] += count;
	}

	if (!TAILQ_EMPTY(&sc->sc_upd_req_list)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
			TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);

			bcopy(&ur->ur_msg, m->m_data + offset,
			    sizeof(ur->ur_msg));
			offset += sizeof(ur->ur_msg);
			free(ur, M_PFSYNC);
			count++;
		}

		bzero(subh, sizeof(*subh));
		subh->action = PFSYNC_ACT_UPD_REQ;
		subh->count = htons(count);
		V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_UPD_REQ] += count;
	}

	/* has someone built a custom region for us to add? */
	if (sc->sc_plus != NULL) {
		bcopy(sc->sc_plus, m->m_data + offset, sc->sc_pluslen);
		offset += sc->sc_pluslen;

		sc->sc_plus = NULL;
	}

	subh = (struct pfsync_subheader *)(m->m_data + offset);
	offset += sizeof(*subh);

	bzero(subh, sizeof(*subh));
	subh->action = PFSYNC_ACT_EOF;
	subh->count = htons(1);
	V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_EOF]++;

	/* we're done, let's put it on the wire */
	if (ifp->if_bpf) {
		m->m_data += sizeof(*ip);
		m->m_len = m->m_pkthdr.len = sc->sc_len - sizeof(*ip);
		BPF_MTAP(ifp, m);
		m->m_data -= sizeof(*ip);
		m->m_len = m->m_pkthdr.len = sc->sc_len;
	}

	if (sc->sc_sync_if == NULL) {
		sc->sc_len = PFSYNC_MINPKT;
		m_freem(m);
		return;
	}

	sc->sc_ifp->if_opackets++;
	sc->sc_ifp->if_obytes += m->m_pkthdr.len;
	sc->sc_len = PFSYNC_MINPKT;

	if (!_IF_QFULL(&sc->sc_ifp->if_snd))
		_IF_ENQUEUE(&sc->sc_ifp->if_snd, m);
	else {
		m_freem(m);
		sc->sc_ifp->if_snd.ifq_drops++;
	}
	if (schedswi)
		swi_sched(V_pfsync_swi_cookie, 0);
}

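/*
 * Called by pf(4) when a state is created; queues an initial full-state
 * insert for the peer unless the state (or its rule) is marked NOSYNC.
 */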
static void
pfsync_insert_state(struct pf_state *st)
{
	struct pfsync_softc *sc = V_pfsyncif;

	if (st->state_flags & PFSTATE_NOSYNC)
		return;

	if ((st->rule.ptr->rule_flag & PFRULE_NOSYNC) ||
	    st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
		st->state_flags |= PFSTATE_NOSYNC;
		return;
	}

	KASSERT(st->sync_state == PFSYNC_S_NONE,
		("%s: st->sync_state %u", __func__, st->sync_state));

	PFSYNC_LOCK(sc);
	if (sc->sc_len == PFSYNC_MINPKT)
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout, V_pfsyncif);

	pfsync_q_ins(st, PFSYNC_S_INS);
	PFSYNC_UNLOCK(sc);

	st->sync_updates = 0;
}

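/*
 * Deferral: hold the packet that created a state until the peer
 * acknowledges the insert (PFSYNC_ACT_INS_ACK) or a short timeout
 * fires, so the peer learns about the state before the first reply
 * packet can arrive.
 */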
static int
pfsync_defer(struct pf_state *st, struct mbuf *m)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_deferral *pd;

	if (m->m_flags & (M_BCAST|M_MCAST))
		return (0);

	PFSYNC_LOCK(sc);

	if (sc == NULL || !(sc->sc_ifp->if_flags & IFF_DRV_RUNNING) ||
	    !(sc->sc_flags & PFSYNCF_DEFER)) {
		PFSYNC_UNLOCK(sc);
		return (0);
	}

	if (sc->sc_deferred >= 128)
		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);

	pd = malloc(sizeof(*pd), M_PFSYNC, M_NOWAIT);
	if (pd == NULL) {
		PFSYNC_UNLOCK(sc);
		return (0);
	}
	sc->sc_deferred++;

	m->m_flags |= M_SKIP_FIREWALL;
	st->state_flags |= PFSTATE_ACK;

	pd->pd_sc = sc;
	pd->pd_refs = 0;
	pd->pd_st = st;
	pf_ref_state(st);
	pd->pd_m = m;

	TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, pd_entry);
	callout_init_mtx(&pd->pd_tmo, &sc->sc_mtx, CALLOUT_RETURNUNLOCKED);
	callout_reset(&pd->pd_tmo, 10, pfsync_defer_tmo, pd);

	return (1);
}

static void
pfsync_undefer(struct pfsync_deferral *pd, int drop)
{
	struct pfsync_softc *sc = pd->pd_sc;
	struct mbuf *m = pd->pd_m;
	struct pf_state *st = pd->pd_st;

	PFSYNC_LOCK_ASSERT(sc);

	TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
	sc->sc_deferred--;
	pd->pd_st->state_flags &= ~PFSTATE_ACK;	/* XXX: locking! */
	free(pd, M_PFSYNC);
	pf_release_state(st);

	if (drop)
		m_freem(m);
	else {
		_IF_ENQUEUE(&sc->sc_ifp->if_snd, m);
		pfsync_push(sc);
	}
}

static void
pfsync_defer_tmo(void *arg)
{
	struct pfsync_deferral *pd = arg;
	struct pfsync_softc *sc = pd->pd_sc;
	struct mbuf *m = pd->pd_m;
	struct pf_state *st = pd->pd_st;

	PFSYNC_LOCK_ASSERT(sc);

	CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);

	TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
	sc->sc_deferred--;
	pd->pd_st->state_flags &= ~PFSTATE_ACK;	/* XXX: locking! */
	if (pd->pd_refs == 0)
		free(pd, M_PFSYNC);
	PFSYNC_UNLOCK(sc);

	ip_output(m, NULL, NULL, 0, NULL, NULL);

	pf_release_state(st);

	CURVNET_RESTORE();
}

static void
pfsync_undefer_state(struct pf_state *st, int drop)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_deferral *pd;

	PFSYNC_LOCK_ASSERT(sc);

	TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) {
		if (pd->pd_st == st) {
			if (callout_stop(&pd->pd_tmo))
				pfsync_undefer(pd, drop);
			return;
		}
	}

	panic("%s: unable to find deferred state", __func__);
}

static void
pfsync_update_state(struct pf_state *st)
{
	struct pfsync_softc *sc = V_pfsyncif;
	int sync = 0;

	PF_STATE_LOCK_ASSERT(st);
	PFSYNC_LOCK(sc);

	if (st->state_flags & PFSTATE_ACK)
		pfsync_undefer_state(st, 0);
	if (st->state_flags & PFSTATE_NOSYNC) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		PFSYNC_UNLOCK(sc);
		return;
	}

	if (sc->sc_len == PFSYNC_MINPKT)
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout, V_pfsyncif);

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case PFSYNC_S_UPD:
	case PFSYNC_S_INS:
		/* we're already handling it */
		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
			st->sync_updates++;
			if (st->sync_updates >= sc->sc_maxupdates)
				sync = 1;
		}
		break;

	case PFSYNC_S_IACK:
		pfsync_q_del(st);
	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD_C);
		st->sync_updates = 0;
		break;

	default:
		panic("%s: unexpected sync state %d", __func__, st->sync_state);
	}

	if (sync || (time_uptime - st->pfsync_time) < 2)
		pfsync_push(sc);

	PFSYNC_UNLOCK(sc);
}

static void
pfsync_request_update(u_int32_t creatorid, u_int64_t id)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_upd_req_item *item;
	size_t nlen = sizeof(struct pfsync_upd_req);

	PFSYNC_LOCK_ASSERT(sc);

	/*
	 * This code does a bit to prevent multiple update requests for the
	 * same state being generated.  It searches the current subheader
	 * queue, but it doesn't look into the queue of already-packed
	 * datagrams.
	 */
	TAILQ_FOREACH(item, &sc->sc_upd_req_list, ur_entry)
		if (item->ur_msg.id == id &&
		    item->ur_msg.creatorid == creatorid)
			return;

	item = malloc(sizeof(*item), M_PFSYNC, M_NOWAIT);
	if (item == NULL)
		return; /* XXX stats */

	item->ur_msg.id = id;
	item->ur_msg.creatorid = creatorid;

	if (TAILQ_EMPTY(&sc->sc_upd_req_list))
		nlen += sizeof(struct pfsync_subheader);

	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
		pfsync_sendout(1);

		nlen = sizeof(struct pfsync_subheader) +
		    sizeof(struct pfsync_upd_req);
	}

	TAILQ_INSERT_TAIL(&sc->sc_upd_req_list, item, ur_entry);
	sc->sc_len += nlen;
}

static void
pfsync_update_state_req(struct pf_state *st)
{
	struct pfsync_softc *sc = V_pfsyncif;

	PF_STATE_LOCK_ASSERT(st);
	PFSYNC_LOCK(sc);

	if (st->state_flags & PFSTATE_NOSYNC) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		PFSYNC_UNLOCK(sc);
		return;
	}

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case PFSYNC_S_IACK:
		pfsync_q_del(st);
	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD);
		pfsync_push(sc);
		break;

	case PFSYNC_S_INS:
	case PFSYNC_S_UPD:
	case PFSYNC_S_DEL:
		/* we're already handling it */
		break;

	default:
		panic("%s: unexpected sync state %d", __func__, st->sync_state);
	}

	PFSYNC_UNLOCK(sc);
}

static void
pfsync_delete_state(struct pf_state *st)
{
	struct pfsync_softc *sc = V_pfsyncif;

	PFSYNC_LOCK(sc);
	if (st->state_flags & PFSTATE_ACK)
		pfsync_undefer_state(st, 1);
	if (st->state_flags & PFSTATE_NOSYNC) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		PFSYNC_UNLOCK(sc);
		return;
	}

	if (sc->sc_len == PFSYNC_MINPKT)
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout, V_pfsyncif);

	switch (st->sync_state) {
	case PFSYNC_S_INS:
		/* We never got to tell the world so just forget about it. */
		pfsync_q_del(st);
		break;

	case PFSYNC_S_UPD_C:
	case PFSYNC_S_UPD:
	case PFSYNC_S_IACK:
		pfsync_q_del(st);
		/* FALLTHROUGH to putting it on the del list */

	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_DEL);
		break;

	default:
		panic("%s: unexpected sync state %d", __func__, st->sync_state);
	}
	PFSYNC_UNLOCK(sc);
}

static void
pfsync_clear_states(u_int32_t creatorid, const char *ifname)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct {
		struct pfsync_subheader subh;
		struct pfsync_clr clr;
	} __packed r;

	bzero(&r, sizeof(r));

	r.subh.action = PFSYNC_ACT_CLR;
	r.subh.count = htons(1);
	V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_CLR]++;

	strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
	r.clr.creatorid = creatorid;

	PFSYNC_LOCK(sc);
	pfsync_send_plus(&r, sizeof(r));
	PFSYNC_UNLOCK(sc);
}

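/*
 * Queue a state onto one of the PFSYNC_S_* transmit queues, growing
 * sc_len accordingly; if the addition would overflow the interface MTU
 * the pending packet is flushed first.
 */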
static void
pfsync_q_ins(struct pf_state *st, int q)
{
	struct pfsync_softc *sc = V_pfsyncif;
	size_t nlen = pfsync_qs[q].len;

	PFSYNC_LOCK_ASSERT(sc);

	KASSERT(st->sync_state == PFSYNC_S_NONE,
		("%s: st->sync_state %u", __func__, st->sync_state));
	KASSERT(sc->sc_len >= PFSYNC_MINPKT, ("pfsync pkt len is too low %zu",
	    sc->sc_len));

	if (TAILQ_EMPTY(&sc->sc_qs[q]))
		nlen += sizeof(struct pfsync_subheader);

	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
		pfsync_sendout(1);

		nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
	}

	sc->sc_len += nlen;
	TAILQ_INSERT_TAIL(&sc->sc_qs[q], st, sync_list);
	st->sync_state = q;
	pf_ref_state(st);
}

static void
pfsync_q_del(struct pf_state *st)
{
	struct pfsync_softc *sc = V_pfsyncif;
	int q = st->sync_state;

	PFSYNC_LOCK_ASSERT(sc);
	KASSERT(st->sync_state != PFSYNC_S_NONE,
		("%s: st->sync_state != PFSYNC_S_NONE", __func__));

	sc->sc_len -= pfsync_qs[q].len;
	TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list);
	st->sync_state = PFSYNC_S_NONE;
	pf_release_state(st);

	if (TAILQ_EMPTY(&sc->sc_qs[q]))
		sc->sc_len -= sizeof(struct pfsync_subheader);
}

static void
pfsync_bulk_start(void)
{
	struct pfsync_softc *sc = V_pfsyncif;

	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("pfsync: received bulk update request\n");

	PFSYNC_BLOCK(sc);

	sc->sc_ureq_received = time_uptime;
	sc->sc_bulk_hashid = 0;
	sc->sc_bulk_stateid = 0;
	pfsync_bulk_status(PFSYNC_BUS_START);
	callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc);
	PFSYNC_BUNLOCK(sc);
}

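/*
 * Bulk update callout: walk the state hash table and queue full updates
 * for states older than the request, rescheduling itself whenever a
 * packet fills up so the table is streamed out in MTU-sized chunks.
 */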
static void
pfsync_bulk_update(void *arg)
{
	struct pfsync_softc *sc = arg;
	struct pf_state *s;
	int i, sent = 0;

	PFSYNC_BLOCK_ASSERT(sc);
	CURVNET_SET(sc->sc_ifp->if_vnet);

	/*
	 * Start with last state from previous invocation.
	 * It may have gone; in that case start from the
	 * hash slot.
	 */
	s = pf_find_state_byid(sc->sc_bulk_stateid, sc->sc_bulk_creatorid);

	if (s != NULL)
		i = PF_IDHASH(s);
	else
		i = sc->sc_bulk_hashid;

	for (; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

		if (s != NULL)
			PF_HASHROW_ASSERT(ih);
		else {
			PF_HASHROW_LOCK(ih);
			s = LIST_FIRST(&ih->states);
		}

		for (; s; s = LIST_NEXT(s, entry)) {
			if (sent > 1 && (sc->sc_ifp->if_mtu - sc->sc_len) <
			    sizeof(struct pfsync_state)) {
				/* We've filled a packet. */
				sc->sc_bulk_hashid = i;
				sc->sc_bulk_stateid = s->id;
				sc->sc_bulk_creatorid = s->creatorid;
				PF_HASHROW_UNLOCK(ih);
				callout_reset(&sc->sc_bulk_tmo, 1,
				    pfsync_bulk_update, sc);
				goto full;
			}

			if (s->sync_state == PFSYNC_S_NONE &&
			    s->timeout < PFTM_MAX &&
			    s->pfsync_time <= sc->sc_ureq_received) {
				pfsync_update_state_req(s);
				sent++;
			}
		}
		PF_HASHROW_UNLOCK(ih);
	}

	/* We're done. */
	pfsync_bulk_status(PFSYNC_BUS_END);

full:
	CURVNET_RESTORE();
}

static void
pfsync_bulk_status(u_int8_t status)
{
	struct {
		struct pfsync_subheader subh;
		struct pfsync_bus bus;
	} __packed r;

	struct pfsync_softc *sc = V_pfsyncif;

	bzero(&r, sizeof(r));

	r.subh.action = PFSYNC_ACT_BUS;
	r.subh.count = htons(1);
	V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_BUS]++;

	r.bus.creatorid = V_pf_status.hostid;
	r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
	r.bus.status = status;

	PFSYNC_LOCK(sc);
	pfsync_send_plus(&r, sizeof(r));
	PFSYNC_UNLOCK(sc);
}

static void
pfsync_bulk_fail(void *arg)
{
	struct pfsync_softc *sc = arg;

	CURVNET_SET(sc->sc_ifp->if_vnet);

	PFSYNC_BLOCK_ASSERT(sc);

	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
		/* Try again */
		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
		    pfsync_bulk_fail, V_pfsyncif);
		PFSYNC_LOCK(sc);
		pfsync_request_update(0, 0);
		PFSYNC_UNLOCK(sc);
	} else {
		/* Pretend like the transfer was ok. */
		sc->sc_ureq_sent = 0;
		sc->sc_bulk_tries = 0;
		PFSYNC_LOCK(sc);
		if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
			(*carp_demote_adj_p)(-V_pfsync_carp_adj,
			    "pfsync bulk fail");
		sc->sc_flags |= PFSYNCF_OK;
		PFSYNC_UNLOCK(sc);
		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync: failed to receive bulk update\n");
	}

	CURVNET_RESTORE();
}

static void
pfsync_send_plus(void *plus, size_t pluslen)
{
	struct pfsync_softc *sc = V_pfsyncif;

	PFSYNC_LOCK_ASSERT(sc);

	if (sc->sc_len + pluslen > sc->sc_ifp->if_mtu)
		pfsync_sendout(1);

	sc->sc_plus = plus;
	sc->sc_len += (sc->sc_pluslen = pluslen);

	pfsync_sendout(1);
}

static void
pfsync_timeout(void *arg)
{
	struct pfsync_softc *sc = arg;

	CURVNET_SET(sc->sc_ifp->if_vnet);
	PFSYNC_LOCK(sc);
	pfsync_push(sc);
	PFSYNC_UNLOCK(sc);
	CURVNET_RESTORE();
}

static void
pfsync_push(struct pfsync_softc *sc)
{

	PFSYNC_LOCK_ASSERT(sc);

	sc->sc_flags |= PFSYNCF_PUSH;
	swi_sched(V_pfsync_swi_cookie, 0);
}

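/*
 * Software interrupt handler: flush any pending queued data, then
 * dequeue and transmit everything on the interface send queue via
 * ip_output().
 */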
static void
pfsyncintr(void *arg)
{
	struct pfsync_softc *sc = arg;
	struct mbuf *m, *n;

	CURVNET_SET(sc->sc_ifp->if_vnet);

	PFSYNC_LOCK(sc);
	if ((sc->sc_flags & PFSYNCF_PUSH) && sc->sc_len > PFSYNC_MINPKT) {
		pfsync_sendout(0);
		sc->sc_flags &= ~PFSYNCF_PUSH;
	}
	_IF_DEQUEUE_ALL(&sc->sc_ifp->if_snd, m);
	PFSYNC_UNLOCK(sc);

	for (; m != NULL; m = n) {

		n = m->m_nextpkt;
		m->m_nextpkt = NULL;

		/*
		 * We distinguish between a deferral packet and our
		 * own pfsync packet based on M_SKIP_FIREWALL
		 * flag. This is XXX.
		 */
		if (m->m_flags & M_SKIP_FIREWALL)
			ip_output(m, NULL, NULL, 0, NULL, NULL);
		else if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo,
		    NULL) == 0)
			V_pfsyncstats.pfsyncs_opackets++;
		else
			V_pfsyncstats.pfsyncs_oerrors++;
	}
	CURVNET_RESTORE();
}

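/*
 * Join the pfsync multicast group (224.0.0.240) on the sync interface
 * and prime the ip_moptions used for outgoing packets: TTL 255, no
 * loopback.
 */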
static int
pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp, void *mship)
{
	struct ip_moptions *imo = &sc->sc_imo;
	int error;

	if (!(ifp->if_flags & IFF_MULTICAST))
		return (EADDRNOTAVAIL);

	imo->imo_membership = (struct in_multi **)mship;
	imo->imo_max_memberships = IP_MIN_MEMBERSHIPS;
	imo->imo_multicast_vif = -1;

	if ((error = in_joingroup(ifp, &sc->sc_sync_peer, NULL,
	    &imo->imo_membership[0])) != 0) {
		imo->imo_membership = NULL;
		return (error);
	}
	imo->imo_num_memberships++;
	imo->imo_multicast_ifp = ifp;
	imo->imo_multicast_ttl = PFSYNC_DFLTTL;
	imo->imo_multicast_loop = 0;

	return (0);
}

static void
pfsync_multicast_cleanup(struct pfsync_softc *sc)
{
	struct ip_moptions *imo = &sc->sc_imo;

	in_leavegroup(imo->imo_membership[0], NULL);
	free(imo->imo_membership, M_PFSYNC);
	imo->imo_membership = NULL;
	imo->imo_multicast_ifp = NULL;
}

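/* Raw-socket protocol switch entry registered for IPPROTO_PFSYNC. */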
extern struct domain inetdomain;
static struct protosw in_pfsync_protosw = {
	.pr_type =		SOCK_RAW,
	.pr_domain =		&inetdomain,
	.pr_protocol =		IPPROTO_PFSYNC,
	.pr_flags =		PR_ATOMIC|PR_ADDR,
	.pr_input =		pfsync_input,
	.pr_output =		(pr_output_t *)rip_output,
	.pr_ctloutput =		rip_ctloutput,
	.pr_usrreqs =		&rip_usrreqs
};

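/*
 * pf(4) calls into pfsync through these function pointers; installing
 * them activates state synchronization, clearing them detaches pfsync
 * from pf.
 */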
static void
pfsync_pointers_init()
{

	PF_RULES_WLOCK();
	pfsync_state_import_ptr = pfsync_state_import;
	pfsync_insert_state_ptr = pfsync_insert_state;
	pfsync_update_state_ptr = pfsync_update_state;
	pfsync_delete_state_ptr = pfsync_delete_state;
	pfsync_clear_states_ptr = pfsync_clear_states;
	pfsync_defer_ptr = pfsync_defer;
	PF_RULES_WUNLOCK();
}

static void
pfsync_pointers_uninit()
{

	PF_RULES_WLOCK();
	pfsync_state_import_ptr = NULL;
	pfsync_insert_state_ptr = NULL;
	pfsync_update_state_ptr = NULL;
	pfsync_delete_state_ptr = NULL;
	pfsync_clear_states_ptr = NULL;
	pfsync_defer_ptr = NULL;
	PF_RULES_WUNLOCK();
}

static int
pfsync_init(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	int error = 0;

	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		V_pfsync_cloner = if_clone_simple(pfsyncname,
		    pfsync_clone_create, pfsync_clone_destroy, 1);
		error = swi_add(NULL, pfsyncname, pfsyncintr, V_pfsyncif,
		    SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie);
		CURVNET_RESTORE();
		if (error)
			goto fail_locked;
	}
	VNET_LIST_RUNLOCK();

	error = pf_proto_register(PF_INET, &in_pfsync_protosw);
	if (error)
		goto fail;
	error = ipproto_register(IPPROTO_PFSYNC);
	if (error) {
		pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW);
		goto fail;
	}

	pfsync_pointers_init();

	return (0);

fail:
	VNET_LIST_RLOCK();
fail_locked:
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		if (V_pfsync_swi_cookie) {
			swi_remove(V_pfsync_swi_cookie);
			if_clone_detach(V_pfsync_cloner);
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();

	return (error);
}

static void
pfsync_uninit(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	pfsync_pointers_uninit();

	ipproto_unregister(IPPROTO_PFSYNC);
	pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW);
	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		if_clone_detach(V_pfsync_cloner);
		swi_remove(V_pfsync_swi_cookie);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();
}

static int
pfsync_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pfsync_init();
		break;
	case MOD_QUIESCE:
		/*
		 * Module should not be unloaded due to race conditions.
		 */
		error = EBUSY;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

static moduledata_t pfsync_mod = {
	pfsyncname,
	pfsync_modevent,
	0
};

#define PFSYNC_MODVER 1

DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_VERSION(pfsync, PFSYNC_MODVER);
MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);