/*-
 * SPDX-License-Identifier: (BSD-2-Clause-FreeBSD AND ISC)
 *
 * Copyright (c) 2002 Michael Shalayeff
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $
 *
 * Revisions picked from OpenBSD after revision 1.110 import:
 * 1.119 - don't m_copydata() beyond the len of mbuf in pfsync_input()
 * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates
 * 1.120, 1.175 - use monotonic time_uptime
 * 1.122 - reduce number of updates for non-TCP sessions
 * 1.125, 1.127 - rewrite merge or stale processing
 * 1.128 - cleanups
 * 1.146 - bzero() mbuf before sparsely filling it with data
 * 1.170 - SIOCSIFMTU checks
 * 1.126, 1.142 - deferred packets processing
 * 1.173 - correct expire time processing
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>

#include <netinet/if_ether.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_carp.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

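/*
 * The smallest possible pfsync packet: an IP header, the pfsync header,
 * and one subheader.
 */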
#define PFSYNC_MINPKT ( \
        sizeof(struct ip) + \
        sizeof(struct pfsync_header) + \
        sizeof(struct pfsync_subheader) )

struct pfsync_bucket;

struct pfsync_pkt {
        struct ip *ip;
        struct in_addr src;
        u_int8_t flags;
};

static int      pfsync_upd_tcp(struct pf_state *, struct pfsync_state_peer *,
                    struct pfsync_state_peer *);
static int      pfsync_in_clr(struct pfsync_pkt *, struct mbuf *, int, int);
static int      pfsync_in_ins(struct pfsync_pkt *, struct mbuf *, int, int);
static int      pfsync_in_iack(struct pfsync_pkt *, struct mbuf *, int, int);
static int      pfsync_in_upd(struct pfsync_pkt *, struct mbuf *, int, int);
static int      pfsync_in_upd_c(struct pfsync_pkt *, struct mbuf *, int, int);
static int      pfsync_in_ureq(struct pfsync_pkt *, struct mbuf *, int, int);
static int      pfsync_in_del(struct pfsync_pkt *, struct mbuf *, int, int);
static int      pfsync_in_del_c(struct pfsync_pkt *, struct mbuf *, int, int);
static int      pfsync_in_bus(struct pfsync_pkt *, struct mbuf *, int, int);
static int      pfsync_in_tdb(struct pfsync_pkt *, struct mbuf *, int, int);
static int      pfsync_in_eof(struct pfsync_pkt *, struct mbuf *, int, int);
static int      pfsync_in_error(struct pfsync_pkt *, struct mbuf *, int, int);

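/*
 * Input handlers, indexed by PFSYNC_ACT_* subheader action.  Each returns
 * the number of bytes it consumed, or -1 if it has freed (or taken over)
 * the mbuf.
 */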
static int (*pfsync_acts[])(struct pfsync_pkt *, struct mbuf *, int, int) = {
        pfsync_in_clr,                  /* PFSYNC_ACT_CLR */
        pfsync_in_ins,                  /* PFSYNC_ACT_INS */
        pfsync_in_iack,                 /* PFSYNC_ACT_INS_ACK */
        pfsync_in_upd,                  /* PFSYNC_ACT_UPD */
        pfsync_in_upd_c,                /* PFSYNC_ACT_UPD_C */
        pfsync_in_ureq,                 /* PFSYNC_ACT_UPD_REQ */
        pfsync_in_del,                  /* PFSYNC_ACT_DEL */
        pfsync_in_del_c,                /* PFSYNC_ACT_DEL_C */
        pfsync_in_error,                /* PFSYNC_ACT_INS_F */
        pfsync_in_error,                /* PFSYNC_ACT_DEL_F */
        pfsync_in_bus,                  /* PFSYNC_ACT_BUS */
        pfsync_in_tdb,                  /* PFSYNC_ACT_TDB */
        pfsync_in_eof                   /* PFSYNC_ACT_EOF */
};

struct pfsync_q {
        void            (*write)(struct pf_state *, void *);
        size_t          len;
        u_int8_t        action;
};

/* We have one of these for every PFSYNC_S_ queue. */
static void     pfsync_out_state(struct pf_state *, void *);
static void     pfsync_out_iack(struct pf_state *, void *);
static void     pfsync_out_upd_c(struct pf_state *, void *);
static void     pfsync_out_del(struct pf_state *, void *);

static struct pfsync_q pfsync_qs[] = {
        { pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_INS },
        { pfsync_out_iack,  sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
        { pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_UPD },
        { pfsync_out_upd_c, sizeof(struct pfsync_upd_c),   PFSYNC_ACT_UPD_C },
        { pfsync_out_del,   sizeof(struct pfsync_del_c),   PFSYNC_ACT_DEL_C }
};

static void     pfsync_q_ins(struct pf_state *, int, bool);
static void     pfsync_q_del(struct pf_state *, bool, struct pfsync_bucket *);

static void     pfsync_update_state(struct pf_state *);

struct pfsync_upd_req_item {
        TAILQ_ENTRY(pfsync_upd_req_item)        ur_entry;
        struct pfsync_upd_req                   ur_msg;
};

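/*
 * A deferred packet: the initial packet of a new state is held here
 * (pd_m) until the peer acknowledges the state insertion or pd_tmo
 * expires, at which point the packet is either transmitted or dropped.
 */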
struct pfsync_deferral {
        struct pfsync_softc             *pd_sc;
        TAILQ_ENTRY(pfsync_deferral)    pd_entry;
        u_int                           pd_refs;
        struct callout                  pd_tmo;

        struct pf_state                 *pd_st;
        struct mbuf                     *pd_m;
};

struct pfsync_softc;

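/*
 * Queued pfsync work is sharded across pfsync_buckets buckets so that
 * concurrent state updates do not all contend on a single lock; each
 * bucket is protected by its own b_mtx.
 */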
struct pfsync_bucket
{
        int                     b_id;
        struct pfsync_softc     *b_sc;
        struct mtx              b_mtx;
        struct callout          b_tmo;
        int                     b_flags;
#define PFSYNCF_BUCKET_PUSH     0x00000001

        size_t                  b_len;
        TAILQ_HEAD(, pf_state)                  b_qs[PFSYNC_S_COUNT];
        TAILQ_HEAD(, pfsync_upd_req_item)       b_upd_req_list;
        TAILQ_HEAD(, pfsync_deferral)           b_deferrals;
        u_int                   b_deferred;
        void                    *b_plus;
        size_t                  b_pluslen;

        struct ifaltq b_snd;
};

struct pfsync_softc {
        /* Configuration */
        struct ifnet            *sc_ifp;
        struct ifnet            *sc_sync_if;
        struct ip_moptions      sc_imo;
        struct in_addr          sc_sync_peer;
        uint32_t                sc_flags;
#define PFSYNCF_OK              0x00000001
#define PFSYNCF_DEFER           0x00000002
        uint8_t                 sc_maxupdates;
        struct ip               sc_template;
        struct mtx              sc_mtx;

        /* Queued data */
        struct pfsync_bucket    *sc_buckets;

        /* Bulk update info */
        struct mtx              sc_bulk_mtx;
        uint32_t                sc_ureq_sent;
        int                     sc_bulk_tries;
        uint32_t                sc_ureq_received;
        int                     sc_bulk_hashid;
        uint64_t                sc_bulk_stateid;
        uint32_t                sc_bulk_creatorid;
        struct callout          sc_bulk_tmo;
        struct callout          sc_bulkfail_tmo;
};

#define PFSYNC_LOCK(sc)         mtx_lock(&(sc)->sc_mtx)
#define PFSYNC_UNLOCK(sc)       mtx_unlock(&(sc)->sc_mtx)
#define PFSYNC_LOCK_ASSERT(sc)  mtx_assert(&(sc)->sc_mtx, MA_OWNED)

#define PFSYNC_BUCKET_LOCK(b)           mtx_lock(&(b)->b_mtx)
#define PFSYNC_BUCKET_UNLOCK(b)         mtx_unlock(&(b)->b_mtx)
#define PFSYNC_BUCKET_LOCK_ASSERT(b)    mtx_assert(&(b)->b_mtx, MA_OWNED)

#define PFSYNC_BLOCK(sc)        mtx_lock(&(sc)->sc_bulk_mtx)
#define PFSYNC_BUNLOCK(sc)      mtx_unlock(&(sc)->sc_bulk_mtx)
#define PFSYNC_BLOCK_ASSERT(sc) mtx_assert(&(sc)->sc_bulk_mtx, MA_OWNED)

static const char pfsyncname[] = "pfsync";
static MALLOC_DEFINE(M_PFSYNC, pfsyncname, "pfsync(4) data");
VNET_DEFINE_STATIC(struct pfsync_softc *, pfsyncif) = NULL;
#define V_pfsyncif              VNET(pfsyncif)
VNET_DEFINE_STATIC(void *, pfsync_swi_cookie) = NULL;
#define V_pfsync_swi_cookie     VNET(pfsync_swi_cookie)
VNET_DEFINE_STATIC(struct pfsyncstats, pfsyncstats);
#define V_pfsyncstats           VNET(pfsyncstats)
VNET_DEFINE_STATIC(int, pfsync_carp_adj) = CARP_MAXSKEW;
#define V_pfsync_carp_adj       VNET(pfsync_carp_adj)

static void     pfsync_timeout(void *);
static void     pfsync_push(struct pfsync_bucket *);
static void     pfsync_push_all(struct pfsync_softc *);
static void     pfsyncintr(void *);
static int      pfsync_multicast_setup(struct pfsync_softc *, struct ifnet *,
                    struct in_mfilter *imf);
static void     pfsync_multicast_cleanup(struct pfsync_softc *);
static void     pfsync_pointers_init(void);
static void     pfsync_pointers_uninit(void);
static int      pfsync_init(void);
static void     pfsync_uninit(void);

static unsigned long pfsync_buckets;

SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "PFSYNC");
SYSCTL_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pfsyncstats), pfsyncstats,
    "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_RW,
    &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");
SYSCTL_ULONG(_net_pfsync, OID_AUTO, pfsync_buckets, CTLFLAG_RDTUN,
    &pfsync_buckets, 0, "Number of pfsync hash buckets");

static int      pfsync_clone_create(struct if_clone *, int, caddr_t);
static void     pfsync_clone_destroy(struct ifnet *);
static int      pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
                    struct pf_state_peer *);
static int      pfsyncoutput(struct ifnet *, struct mbuf *,
                    const struct sockaddr *, struct route *);
static int      pfsyncioctl(struct ifnet *, u_long, caddr_t);

static int      pfsync_defer(struct pf_state *, struct mbuf *);
static void     pfsync_undefer(struct pfsync_deferral *, int);
static void     pfsync_undefer_state(struct pf_state *, int);
static void     pfsync_defer_tmo(void *);

static void     pfsync_request_update(u_int32_t, u_int64_t);
static bool     pfsync_update_state_req(struct pf_state *);

static void     pfsync_drop(struct pfsync_softc *);
static void     pfsync_sendout(int, int);
static void     pfsync_send_plus(void *, size_t);

static void     pfsync_bulk_start(void);
static void     pfsync_bulk_status(u_int8_t);
static void     pfsync_bulk_update(void *);
static void     pfsync_bulk_fail(void *);

static void     pfsync_detach_ifnet(struct ifnet *);
#ifdef IPSEC
static void     pfsync_update_net_tdb(struct pfsync_tdb *);
#endif
static struct pfsync_bucket     *pfsync_get_bucket(struct pfsync_softc *,
                    struct pf_state *);

#define PFSYNC_MAX_BULKTRIES    12

VNET_DEFINE(struct if_clone *, pfsync_cloner);
#define V_pfsync_cloner VNET(pfsync_cloner)

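/*
 * Create the single pfsync(4) interface for this vnet: set up the ifnet,
 * the bulk update callouts, and one queue bucket per pfsync_buckets.
 */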
static int
pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
{
        struct pfsync_softc *sc;
        struct ifnet *ifp;
        struct pfsync_bucket *b;
        int c, q;

        if (unit != 0)
                return (EINVAL);

        if (!pfsync_buckets)
                pfsync_buckets = mp_ncpus * 2;

        sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO);
        sc->sc_flags |= PFSYNCF_OK;
        sc->sc_maxupdates = 128;

        ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);
        if (ifp == NULL) {
                free(sc, M_PFSYNC);
                return (ENOSPC);
        }
        if_initname(ifp, pfsyncname, unit);
        ifp->if_softc = sc;
        ifp->if_ioctl = pfsyncioctl;
        ifp->if_output = pfsyncoutput;
        ifp->if_type = IFT_PFSYNC;
        ifp->if_hdrlen = sizeof(struct pfsync_header);
        ifp->if_mtu = ETHERMTU;
        mtx_init(&sc->sc_mtx, pfsyncname, NULL, MTX_DEF);
        mtx_init(&sc->sc_bulk_mtx, "pfsync bulk", NULL, MTX_DEF);
        callout_init_mtx(&sc->sc_bulk_tmo, &sc->sc_bulk_mtx, 0);
        callout_init_mtx(&sc->sc_bulkfail_tmo, &sc->sc_bulk_mtx, 0);

        if_attach(ifp);

        bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);

        sc->sc_buckets = mallocarray(pfsync_buckets, sizeof(*sc->sc_buckets),
            M_PFSYNC, M_ZERO | M_WAITOK);
        for (c = 0; c < pfsync_buckets; c++) {
                b = &sc->sc_buckets[c];
                mtx_init(&b->b_mtx, "pfsync bucket", NULL, MTX_DEF);

                b->b_id = c;
                b->b_sc = sc;
                b->b_len = PFSYNC_MINPKT;

                for (q = 0; q < PFSYNC_S_COUNT; q++)
                        TAILQ_INIT(&b->b_qs[q]);

                TAILQ_INIT(&b->b_upd_req_list);
                TAILQ_INIT(&b->b_deferrals);

                callout_init(&b->b_tmo, 1);

                b->b_snd.ifq_maxlen = ifqmaxlen;
        }

        V_pfsyncif = sc;

        return (0);
}

static void
pfsync_clone_destroy(struct ifnet *ifp)
{
        struct pfsync_softc *sc = ifp->if_softc;
        struct pfsync_bucket *b;
        int c;

        for (c = 0; c < pfsync_buckets; c++) {
                b = &sc->sc_buckets[c];
                /*
                 * At this stage, everything should have already been
                 * cleared by pfsync_uninit(), and we have only to
                 * drain callouts.
                 */
                while (b->b_deferred > 0) {
                        struct pfsync_deferral *pd =
                            TAILQ_FIRST(&b->b_deferrals);

                        TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
                        b->b_deferred--;
                        if (callout_stop(&pd->pd_tmo) > 0) {
                                pf_release_state(pd->pd_st);
                                m_freem(pd->pd_m);
                                free(pd, M_PFSYNC);
                        } else {
                                pd->pd_refs++;
                                callout_drain(&pd->pd_tmo);
                                free(pd, M_PFSYNC);
                        }
                }

                callout_drain(&b->b_tmo);
        }

        callout_drain(&sc->sc_bulkfail_tmo);
        callout_drain(&sc->sc_bulk_tmo);

        if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
                (*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy");
        bpfdetach(ifp);
        if_detach(ifp);

        pfsync_drop(sc);

        if_free(ifp);
        pfsync_multicast_cleanup(sc);
        mtx_destroy(&sc->sc_mtx);
        mtx_destroy(&sc->sc_bulk_mtx);

        free(sc->sc_buckets, M_PFSYNC);
        free(sc, M_PFSYNC);

        V_pfsyncif = NULL;
}

static int
pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
    struct pf_state_peer *d)
{
        if (s->scrub.scrub_flag && d->scrub == NULL) {
                d->scrub = uma_zalloc(V_pf_state_scrub_z, M_NOWAIT | M_ZERO);
                if (d->scrub == NULL)
                        return (ENOMEM);
        }

        return (0);
}

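/*
 * Import a state received from a peer (or from an ioctl, when
 * PFSYNC_SI_IOCTL is set) into the local state table.  Returns 0 on
 * success or if the state was deliberately skipped, else an errno.
 */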
static int
pfsync_state_import(struct pfsync_state *sp, u_int8_t flags)
{
        struct pfsync_softc *sc = V_pfsyncif;
#ifndef __NO_STRICT_ALIGNMENT
        struct pfsync_state_key key[2];
#endif
        struct pfsync_state_key *kw, *ks;
        struct pf_state *st = NULL;
        struct pf_state_key *skw = NULL, *sks = NULL;
        struct pf_rule *r = NULL;
        struct pfi_kif *kif;
        int error;

        PF_RULES_RASSERT();

        if (sp->creatorid == 0) {
                if (V_pf_status.debug >= PF_DEBUG_MISC)
                        printf("%s: invalid creator id: %08x\n", __func__,
                            ntohl(sp->creatorid));
                return (EINVAL);
        }

        if ((kif = pfi_kif_find(sp->ifname)) == NULL) {
                if (V_pf_status.debug >= PF_DEBUG_MISC)
                        printf("%s: unknown interface: %s\n", __func__,
                            sp->ifname);
                if (flags & PFSYNC_SI_IOCTL)
                        return (EINVAL);
                return (0);     /* skip this state */
        }

        /*
         * If the ruleset checksums match or the state is coming from the
         * ioctl, it's safe to associate the state with the rule of that
         * number.
         */
        if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
            (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) <
            pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
                r = pf_main_ruleset.rules[
                    PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
        else
                r = &V_pf_default_rule;

        if (r->max_states &&
            counter_u64_fetch(r->states_cur) >= r->max_states)
                goto cleanup;

        /*
         * XXXGL: consider M_WAITOK in ioctl path after.
         */
        if ((st = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO)) == NULL)
                goto cleanup;

        if ((skw = uma_zalloc(V_pf_state_key_z, M_NOWAIT)) == NULL)
                goto cleanup;

#ifndef __NO_STRICT_ALIGNMENT
        bcopy(&sp->key, key, sizeof(struct pfsync_state_key) * 2);
        kw = &key[PF_SK_WIRE];
        ks = &key[PF_SK_STACK];
#else
        kw = &sp->key[PF_SK_WIRE];
        ks = &sp->key[PF_SK_STACK];
#endif

        if (PF_ANEQ(&kw->addr[0], &ks->addr[0], sp->af) ||
            PF_ANEQ(&kw->addr[1], &ks->addr[1], sp->af) ||
            kw->port[0] != ks->port[0] ||
            kw->port[1] != ks->port[1]) {
                sks = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
                if (sks == NULL)
                        goto cleanup;
        } else
                sks = skw;

        /* allocate memory for scrub info */
        if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
            pfsync_alloc_scrub_memory(&sp->dst, &st->dst))
                goto cleanup;

        /* Copy to state key(s). */
        skw->addr[0] = kw->addr[0];
        skw->addr[1] = kw->addr[1];
        skw->port[0] = kw->port[0];
        skw->port[1] = kw->port[1];
        skw->proto = sp->proto;
        skw->af = sp->af;
        if (sks != skw) {
                sks->addr[0] = ks->addr[0];
                sks->addr[1] = ks->addr[1];
                sks->port[0] = ks->port[0];
                sks->port[1] = ks->port[1];
                sks->proto = sp->proto;
                sks->af = sp->af;
        }

        /* copy to state */
        bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr));
        st->creation = time_uptime - ntohl(sp->creation);
        st->expire = time_uptime;
        if (sp->expire) {
                uint32_t timeout;

                timeout = r->timeout[sp->timeout];
                if (!timeout)
                        timeout = V_pf_default_rule.timeout[sp->timeout];

                /* sp->expire may have been adaptively scaled by export. */
                st->expire -= timeout - ntohl(sp->expire);
        }

        st->direction = sp->direction;
        st->log = sp->log;
        st->timeout = sp->timeout;
        st->state_flags = sp->state_flags;

        st->id = sp->id;
        st->creatorid = sp->creatorid;
        pf_state_peer_ntoh(&sp->src, &st->src);
        pf_state_peer_ntoh(&sp->dst, &st->dst);

        st->rule.ptr = r;
        st->nat_rule.ptr = NULL;
        st->anchor.ptr = NULL;
        st->rt_kif = NULL;

        st->pfsync_time = time_uptime;
        st->sync_state = PFSYNC_S_NONE;

        if (!(flags & PFSYNC_SI_IOCTL))
                st->state_flags |= PFSTATE_NOSYNC;

        if ((error = pf_state_insert(kif, skw, sks, st)) != 0)
                goto cleanup_state;

        /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
        counter_u64_add(r->states_cur, 1);
        counter_u64_add(r->states_tot, 1);

        if (!(flags & PFSYNC_SI_IOCTL)) {
                st->state_flags &= ~PFSTATE_NOSYNC;
                if (st->state_flags & PFSTATE_ACK) {
                        pfsync_q_ins(st, PFSYNC_S_IACK, true);
                        pfsync_push_all(sc);
                }
        }
        st->state_flags &= ~PFSTATE_ACK;
        PF_STATE_UNLOCK(st);

        return (0);

cleanup:
        error = ENOMEM;
        if (skw == sks)
                sks = NULL;
        if (skw != NULL)
                uma_zfree(V_pf_state_key_z, skw);
        if (sks != NULL)
                uma_zfree(V_pf_state_key_z, sks);

cleanup_state:  /* pf_state_insert() frees the state keys. */
        if (st) {
                if (st->dst.scrub)
                        uma_zfree(V_pf_state_scrub_z, st->dst.scrub);
                if (st->src.scrub)
                        uma_zfree(V_pf_state_scrub_z, st->src.scrub);
                uma_zfree(V_pf_state_z, st);
        }
        return (error);
}

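/*
 * Protocol input handler for IPPROTO_PFSYNC: validate the packet
 * (interface, TTL, version, length) and dispatch each subheader to its
 * action handler.  Always returns IPPROTO_DONE; the mbuf is consumed
 * here or by the handlers.
 */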
static int
pfsync_input(struct mbuf **mp, int *offp __unused, int proto __unused)
{
        struct pfsync_softc *sc = V_pfsyncif;
        struct pfsync_pkt pkt;
        struct mbuf *m = *mp;
        struct ip *ip = mtod(m, struct ip *);
        struct pfsync_header *ph;
        struct pfsync_subheader subh;

        int offset, len;
        int rv;
        uint16_t count;

        PF_RULES_RLOCK_TRACKER;

        *mp = NULL;
        V_pfsyncstats.pfsyncs_ipackets++;

        /* Verify that we have a sync interface configured. */
        if (!sc || !sc->sc_sync_if || !V_pf_status.running ||
            (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                goto done;

        /* Verify that the packet came in on the right interface. */
        if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
                V_pfsyncstats.pfsyncs_badif++;
                goto done;
        }

        if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1);
        if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
        /* Verify that the IP TTL is 255. */
        if (ip->ip_ttl != PFSYNC_DFLTTL) {
                V_pfsyncstats.pfsyncs_badttl++;
                goto done;
        }

        offset = ip->ip_hl << 2;
        if (m->m_pkthdr.len < offset + sizeof(*ph)) {
                V_pfsyncstats.pfsyncs_hdrops++;
                goto done;
        }

        if (offset + sizeof(*ph) > m->m_len) {
                if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
                        V_pfsyncstats.pfsyncs_hdrops++;
                        return (IPPROTO_DONE);
                }
                ip = mtod(m, struct ip *);
        }
        ph = (struct pfsync_header *)((char *)ip + offset);

        /* Verify the version. */
        if (ph->version != PFSYNC_VERSION) {
                V_pfsyncstats.pfsyncs_badver++;
                goto done;
        }

        len = ntohs(ph->len) + offset;
        if (m->m_pkthdr.len < len) {
                V_pfsyncstats.pfsyncs_badlen++;
                goto done;
        }

        /* Cheaper to grab this now than having to mess with mbufs later. */
        pkt.ip = ip;
        pkt.src = ip->ip_src;
        pkt.flags = 0;

        /*
         * Trusting pf_chksum during packet processing, as well as seeking
         * in interface name tree, require holding PF_RULES_RLOCK().
         */
        PF_RULES_RLOCK();
        if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
                pkt.flags |= PFSYNC_SI_CKSUM;

        offset += sizeof(*ph);
        while (offset <= len - sizeof(subh)) {
                m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
                offset += sizeof(subh);

                if (subh.action >= PFSYNC_ACT_MAX) {
                        V_pfsyncstats.pfsyncs_badact++;
                        PF_RULES_RUNLOCK();
                        goto done;
                }

                count = ntohs(subh.count);
                V_pfsyncstats.pfsyncs_iacts[subh.action] += count;
                rv = (*pfsync_acts[subh.action])(&pkt, m, offset, count);
                if (rv == -1) {
                        PF_RULES_RUNLOCK();
                        return (IPPROTO_DONE);
                }

                offset += rv;
        }
        PF_RULES_RUNLOCK();

done:
        m_freem(m);
        return (IPPROTO_DONE);
}

static int
pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
        struct pfsync_clr *clr;
        struct mbuf *mp;
        int len = sizeof(*clr) * count;
        int i, offp;
        u_int32_t creatorid;

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        clr = (struct pfsync_clr *)(mp->m_data + offp);

        for (i = 0; i < count; i++) {
                creatorid = clr[i].creatorid;

                if (clr[i].ifname[0] != '\0' &&
                    pfi_kif_find(clr[i].ifname) == NULL)
                        continue;

                for (int j = 0; j <= pf_hashmask; j++) {
                        struct pf_idhash *ih = &V_pf_idhash[j];
                        struct pf_state *s;
relock:
                        PF_HASHROW_LOCK(ih);
                        LIST_FOREACH(s, &ih->states, entry) {
                                if (s->creatorid == creatorid) {
                                        s->state_flags |= PFSTATE_NOSYNC;
                                        pf_unlink_state(s, PF_ENTER_LOCKED);
                                        goto relock;
                                }
                        }
                        PF_HASHROW_UNLOCK(ih);
                }
        }

        return (len);
}

static int
pfsync_in_ins(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
        struct mbuf *mp;
        struct pfsync_state *sa, *sp;
        int len = sizeof(*sp) * count;
        int i, offp;

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        sa = (struct pfsync_state *)(mp->m_data + offp);

        for (i = 0; i < count; i++) {
                sp = &sa[i];

                /* Check for invalid values. */
                if (sp->timeout >= PFTM_MAX ||
                    sp->src.state > PF_TCPS_PROXY_DST ||
                    sp->dst.state > PF_TCPS_PROXY_DST ||
                    sp->direction > PF_OUT ||
                    (sp->af != AF_INET && sp->af != AF_INET6)) {
                        if (V_pf_status.debug >= PF_DEBUG_MISC)
                                printf("%s: invalid value\n", __func__);
                        V_pfsyncstats.pfsyncs_badval++;
                        continue;
                }

                if (pfsync_state_import(sp, pkt->flags) == ENOMEM)
                        /* Drop out, but process the rest of the actions. */
                        break;
        }

        return (len);
}

static int
pfsync_in_iack(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
        struct pfsync_ins_ack *ia, *iaa;
        struct pf_state *st;

        struct mbuf *mp;
        int len = count * sizeof(*ia);
        int offp, i;

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        iaa = (struct pfsync_ins_ack *)(mp->m_data + offp);

        for (i = 0; i < count; i++) {
                ia = &iaa[i];

                st = pf_find_state_byid(ia->id, ia->creatorid);
                if (st == NULL)
                        continue;

                if (st->state_flags & PFSTATE_ACK)
                        pfsync_undefer_state(st, 0);
                PF_STATE_UNLOCK(st);
        }

        return (len);
}

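/*
 * Merge a peer's view of a TCP state into the local state.  The return
 * value counts the directions on which the local state is fresher than
 * the peer's, i.e. nonzero means the peer is stale and we should send
 * it an update.
 */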
static int
pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src,
    struct pfsync_state_peer *dst)
{
        int sync = 0;

        PF_STATE_LOCK_ASSERT(st);

        /*
         * The state should never go backwards except
         * for syn-proxy states.  Neither should the
         * sequence window slide backwards.
         */
        if ((st->src.state > src->state &&
            (st->src.state < PF_TCPS_PROXY_SRC ||
            src->state >= PF_TCPS_PROXY_SRC)) ||

            (st->src.state == src->state &&
            SEQ_GT(st->src.seqlo, ntohl(src->seqlo))))
                sync++;
        else
                pf_state_peer_ntoh(src, &st->src);

        if ((st->dst.state > dst->state) ||

            (st->dst.state >= TCPS_SYN_SENT &&
            SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo))))
                sync++;
        else
                pf_state_peer_ntoh(dst, &st->dst);

        return (sync);
}

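/*
 * Handle full state updates (PFSYNC_ACT_UPD): import states we do not
 * know about, merge the ones we do, and queue an update of our own if
 * the peer's copy turns out to be stale.
 */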
static int
pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
        struct pfsync_softc *sc = V_pfsyncif;
        struct pfsync_state *sa, *sp;
        struct pf_state *st;
        int sync;

        struct mbuf *mp;
        int len = count * sizeof(*sp);
        int offp, i;

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        sa = (struct pfsync_state *)(mp->m_data + offp);

        for (i = 0; i < count; i++) {
                sp = &sa[i];

                /* Check for invalid values. */
                if (sp->timeout >= PFTM_MAX ||
                    sp->src.state > PF_TCPS_PROXY_DST ||
                    sp->dst.state > PF_TCPS_PROXY_DST) {
                        if (V_pf_status.debug >= PF_DEBUG_MISC) {
                                printf("pfsync_input: PFSYNC_ACT_UPD: "
                                    "invalid value\n");
                        }
                        V_pfsyncstats.pfsyncs_badval++;
                        continue;
                }

                st = pf_find_state_byid(sp->id, sp->creatorid);
                if (st == NULL) {
                        /* Insert the update. */
                        if (pfsync_state_import(sp, pkt->flags))
                                V_pfsyncstats.pfsyncs_badstate++;
                        continue;
                }

                if (st->state_flags & PFSTATE_ACK)
                        pfsync_undefer_state(st, 1);

                if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
                        sync = pfsync_upd_tcp(st, &sp->src, &sp->dst);
                else {
                        sync = 0;

                        /*
                         * Non-TCP protocol state machines always go
                         * forward.
                         */
                        if (st->src.state > sp->src.state)
                                sync++;
                        else
                                pf_state_peer_ntoh(&sp->src, &st->src);
                        if (st->dst.state > sp->dst.state)
                                sync++;
                        else
                                pf_state_peer_ntoh(&sp->dst, &st->dst);
                }
                if (sync < 2) {
                        pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
                        pf_state_peer_ntoh(&sp->dst, &st->dst);
                        st->expire = time_uptime;
                        st->timeout = sp->timeout;
                }
                st->pfsync_time = time_uptime;

                if (sync) {
                        V_pfsyncstats.pfsyncs_stale++;

                        pfsync_update_state(st);
                        PF_STATE_UNLOCK(st);
                        pfsync_push_all(sc);
                        continue;
                }
                PF_STATE_UNLOCK(st);
        }

        return (len);
}

static int
pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
        struct pfsync_softc *sc = V_pfsyncif;
        struct pfsync_upd_c *ua, *up;
        struct pf_state *st;
        int len = count * sizeof(*up);
        int sync;
        struct mbuf *mp;
        int offp, i;

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        ua = (struct pfsync_upd_c *)(mp->m_data + offp);

        for (i = 0; i < count; i++) {
                up = &ua[i];

                /* Check for invalid values. */
                if (up->timeout >= PFTM_MAX ||
                    up->src.state > PF_TCPS_PROXY_DST ||
                    up->dst.state > PF_TCPS_PROXY_DST) {
                        if (V_pf_status.debug >= PF_DEBUG_MISC) {
                                printf("pfsync_input: "
                                    "PFSYNC_ACT_UPD_C: "
                                    "invalid value\n");
                        }
                        V_pfsyncstats.pfsyncs_badval++;
                        continue;
                }

                st = pf_find_state_byid(up->id, up->creatorid);
                if (st == NULL) {
                        /* We don't have this state. Ask for it. */
                        PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]);
                        pfsync_request_update(up->creatorid, up->id);
                        PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]);
                        continue;
                }

                if (st->state_flags & PFSTATE_ACK)
                        pfsync_undefer_state(st, 1);

                if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
                        sync = pfsync_upd_tcp(st, &up->src, &up->dst);
                else {
                        sync = 0;

                        /*
                         * Non-TCP protocol state machines always go
                         * forward.
                         */
                        if (st->src.state > up->src.state)
                                sync++;
                        else
                                pf_state_peer_ntoh(&up->src, &st->src);
                        if (st->dst.state > up->dst.state)
                                sync++;
                        else
                                pf_state_peer_ntoh(&up->dst, &st->dst);
                }
                if (sync < 2) {
                        pfsync_alloc_scrub_memory(&up->dst, &st->dst);
                        pf_state_peer_ntoh(&up->dst, &st->dst);
                        st->expire = time_uptime;
                        st->timeout = up->timeout;
                }
                st->pfsync_time = time_uptime;

                if (sync) {
                        V_pfsyncstats.pfsyncs_stale++;

                        pfsync_update_state(st);
                        PF_STATE_UNLOCK(st);
                        pfsync_push_all(sc);
                        continue;
                }
                PF_STATE_UNLOCK(st);
        }

        return (len);
}

static int
pfsync_in_ureq(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
        struct pfsync_upd_req *ur, *ura;
        struct mbuf *mp;
        int len = count * sizeof(*ur);
        int i, offp;

        struct pf_state *st;

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        ura = (struct pfsync_upd_req *)(mp->m_data + offp);

        for (i = 0; i < count; i++) {
                ur = &ura[i];

                if (ur->id == 0 && ur->creatorid == 0)
                        pfsync_bulk_start();
                else {
                        st = pf_find_state_byid(ur->id, ur->creatorid);
                        if (st == NULL) {
                                V_pfsyncstats.pfsyncs_badstate++;
                                continue;
                        }
                        if (st->state_flags & PFSTATE_NOSYNC) {
                                PF_STATE_UNLOCK(st);
                                continue;
                        }

                        pfsync_update_state_req(st);
                        PF_STATE_UNLOCK(st);
                }
        }

        return (len);
}

static int
pfsync_in_del(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
        struct mbuf *mp;
        struct pfsync_state *sa, *sp;
        struct pf_state *st;
        int len = count * sizeof(*sp);
        int offp, i;

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        sa = (struct pfsync_state *)(mp->m_data + offp);

        for (i = 0; i < count; i++) {
                sp = &sa[i];

                st = pf_find_state_byid(sp->id, sp->creatorid);
                if (st == NULL) {
                        V_pfsyncstats.pfsyncs_badstate++;
                        continue;
                }
                st->state_flags |= PFSTATE_NOSYNC;
                pf_unlink_state(st, PF_ENTER_LOCKED);
        }

        return (len);
}

static int
pfsync_in_del_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
        struct mbuf *mp;
        struct pfsync_del_c *sa, *sp;
        struct pf_state *st;
        int len = count * sizeof(*sp);
        int offp, i;

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        sa = (struct pfsync_del_c *)(mp->m_data + offp);

        for (i = 0; i < count; i++) {
                sp = &sa[i];

                st = pf_find_state_byid(sp->id, sp->creatorid);
                if (st == NULL) {
                        V_pfsyncstats.pfsyncs_badstate++;
                        continue;
                }

                st->state_flags |= PFSTATE_NOSYNC;
                pf_unlink_state(st, PF_ENTER_LOCKED);
        }

        return (len);
}

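/*
 * Handle bulk update status messages: PFSYNC_BUS_START re-arms the
 * failure timeout, PFSYNC_BUS_END (with a sane timestamp) finishes the
 * bulk update and lifts the CARP demotion.
 */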
static int
pfsync_in_bus(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
        struct pfsync_softc *sc = V_pfsyncif;
        struct pfsync_bus *bus;
        struct mbuf *mp;
        int len = count * sizeof(*bus);
        int offp;

        PFSYNC_BLOCK(sc);

        /* If we're not waiting for a bulk update, who cares. */
        if (sc->sc_ureq_sent == 0) {
                PFSYNC_BUNLOCK(sc);
                return (len);
        }

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                PFSYNC_BUNLOCK(sc);
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        bus = (struct pfsync_bus *)(mp->m_data + offp);

        switch (bus->status) {
        case PFSYNC_BUS_START:
                callout_reset(&sc->sc_bulkfail_tmo, 4 * hz +
                    V_pf_limits[PF_LIMIT_STATES].limit /
                    ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) /
                    sizeof(struct pfsync_state)),
                    pfsync_bulk_fail, sc);
                if (V_pf_status.debug >= PF_DEBUG_MISC)
                        printf("pfsync: received bulk update start\n");
                break;

        case PFSYNC_BUS_END:
                if (time_uptime - ntohl(bus->endtime) >=
                    sc->sc_ureq_sent) {
                        /* That's it, we're happy. */
                        sc->sc_ureq_sent = 0;
                        sc->sc_bulk_tries = 0;
                        callout_stop(&sc->sc_bulkfail_tmo);
                        if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
                                (*carp_demote_adj_p)(-V_pfsync_carp_adj,
                                    "pfsync bulk done");
                        sc->sc_flags |= PFSYNCF_OK;
                        if (V_pf_status.debug >= PF_DEBUG_MISC)
                                printf("pfsync: received valid "
                                    "bulk update end\n");
                } else {
                        if (V_pf_status.debug >= PF_DEBUG_MISC)
                                printf("pfsync: received invalid "
                                    "bulk update end: bad timestamp\n");
                }
                break;
        }
        PFSYNC_BUNLOCK(sc);

        return (len);
}

static int
pfsync_in_tdb(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
        int len = count * sizeof(struct pfsync_tdb);

#if defined(IPSEC)
        struct pfsync_tdb *tp;
        struct mbuf *mp;
        int offp;
        int i;

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        tp = (struct pfsync_tdb *)(mp->m_data + offp);

        for (i = 0; i < count; i++)
                pfsync_update_net_tdb(&tp[i]);
#endif

        return (len);
}

#if defined(IPSEC)
/* Update an in-kernel tdb. Silently fail if no tdb is found. */
static void
pfsync_update_net_tdb(struct pfsync_tdb *pt)
{
        struct tdb              *tdb;

        /* Check for invalid values. */
        if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
            (pt->dst.sa.sa_family != AF_INET &&
            pt->dst.sa.sa_family != AF_INET6))
                goto bad;

        tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
        if (tdb) {
                pt->rpl = ntohl(pt->rpl);
                pt->cur_bytes = (unsigned long long)be64toh(pt->cur_bytes);

                /* Neither replay nor byte counter should ever decrease. */
                if (pt->rpl < tdb->tdb_rpl ||
                    pt->cur_bytes < tdb->tdb_cur_bytes) {
                        goto bad;
                }

                tdb->tdb_rpl = pt->rpl;
                tdb->tdb_cur_bytes = pt->cur_bytes;
        }
        return;

bad:
        if (V_pf_status.debug >= PF_DEBUG_MISC)
                printf("%s: PFSYNC_ACT_TDB_UPD: invalid value\n", __func__);
        V_pfsyncstats.pfsyncs_badstate++;
        return;
}
#endif

1292 static int
1293 pfsync_in_eof(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1294 {
1295         /* check if we are at the right place in the packet */
1296         if (offset != m->m_pkthdr.len)
1297                 V_pfsyncstats.pfsyncs_badlen++;
1298
1299         /* we're done. free and let the caller return */
1300         m_freem(m);
1301         return (-1);
1302 }
1303
1304 static int
1305 pfsync_in_error(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1306 {
1307         V_pfsyncstats.pfsyncs_badact++;
1308
1309         m_freem(m);
1310         return (-1);
1311 }
1312
1313 static int
1314 pfsyncoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
1315         struct route *rt)
1316 {
1317         m_freem(m);
1318         return (0);
1319 }
1320
1321 /* ARGSUSED */
1322 static int
1323 pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1324 {
1325         struct pfsync_softc *sc = ifp->if_softc;
1326         struct ifreq *ifr = (struct ifreq *)data;
1327         struct pfsyncreq pfsyncr;
1328         int error;
1329         int c;
1330
1331         switch (cmd) {
1332         case SIOCSIFFLAGS:
1333                 PFSYNC_LOCK(sc);
1334                 if (ifp->if_flags & IFF_UP) {
1335                         ifp->if_drv_flags |= IFF_DRV_RUNNING;
1336                         PFSYNC_UNLOCK(sc);
1337                         pfsync_pointers_init();
1338                 } else {
1339                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1340                         PFSYNC_UNLOCK(sc);
1341                         pfsync_pointers_uninit();
1342                 }
1343                 break;
1344         case SIOCSIFMTU:
1345                 if (!sc->sc_sync_if ||
1346                     ifr->ifr_mtu <= PFSYNC_MINPKT ||
1347                     ifr->ifr_mtu > sc->sc_sync_if->if_mtu)
1348                         return (EINVAL);
1349                 if (ifr->ifr_mtu < ifp->if_mtu) {
1350                         for (c = 0; c < pfsync_buckets; c++) {
1351                                 PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]);
1352                                 if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT)
1353                                         pfsync_sendout(1, c);
1354                                 PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]);
1355                         }
1356                 }
1357                 ifp->if_mtu = ifr->ifr_mtu;
1358                 break;
1359         case SIOCGETPFSYNC:
1360                 bzero(&pfsyncr, sizeof(pfsyncr));
1361                 PFSYNC_LOCK(sc);
1362                 if (sc->sc_sync_if) {
1363                         strlcpy(pfsyncr.pfsyncr_syncdev,
1364                             sc->sc_sync_if->if_xname, IFNAMSIZ);
1365                 }
1366                 pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
1367                 pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
1368                 pfsyncr.pfsyncr_defer = (PFSYNCF_DEFER ==
1369                     (sc->sc_flags & PFSYNCF_DEFER));
1370                 PFSYNC_UNLOCK(sc);
1371                 return (copyout(&pfsyncr, ifr_data_get_ptr(ifr),
1372                     sizeof(pfsyncr)));
1373
1374         case SIOCSETPFSYNC:
1375             {
1376                 struct in_mfilter *imf = NULL;
1377                 struct ifnet *sifp;
1378                 struct ip *ip;
1379
1380                 if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
1381                         return (error);
1382                 if ((error = copyin(ifr_data_get_ptr(ifr), &pfsyncr,
1383                     sizeof(pfsyncr))))
1384                         return (error);
1385
1386                 if (pfsyncr.pfsyncr_maxupdates > 255)
1387                         return (EINVAL);
1388
1389                 if (pfsyncr.pfsyncr_syncdev[0] == 0)
1390                         sifp = NULL;
1391                 else if ((sifp = ifunit_ref(pfsyncr.pfsyncr_syncdev)) == NULL)
1392                         return (EINVAL);
1393
1394                 if (sifp != NULL && (
1395                     pfsyncr.pfsyncr_syncpeer.s_addr == 0 ||
1396                     pfsyncr.pfsyncr_syncpeer.s_addr ==
1397                     htonl(INADDR_PFSYNC_GROUP)))
1398                         imf = ip_mfilter_alloc(M_WAITOK, 0, 0);
1399
1400                 PFSYNC_LOCK(sc);
1401                 if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
1402                         sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
1403                 else
1404                         sc->sc_sync_peer.s_addr =
1405                             pfsyncr.pfsyncr_syncpeer.s_addr;
1406
1407                 sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;
1408                 if (pfsyncr.pfsyncr_defer) {
1409                         sc->sc_flags |= PFSYNCF_DEFER;
1410                         V_pfsync_defer_ptr = pfsync_defer;
1411                 } else {
1412                         sc->sc_flags &= ~PFSYNCF_DEFER;
1413                         V_pfsync_defer_ptr = NULL;
1414                 }
1415
1416                 if (sifp == NULL) {
1417                         if (sc->sc_sync_if)
1418                                 if_rele(sc->sc_sync_if);
1419                         sc->sc_sync_if = NULL;
1420                         pfsync_multicast_cleanup(sc);
1421                         PFSYNC_UNLOCK(sc);
1422                         break;
1423                 }
1424
1425                 for (c = 0; c < pfsync_buckets; c++) {
1426                         PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]);
1427                         if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT &&
1428                             (sifp->if_mtu < sc->sc_ifp->if_mtu ||
1429                             (sc->sc_sync_if != NULL &&
1430                             sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
1431                             sifp->if_mtu < MCLBYTES - sizeof(struct ip)))
1432                                 pfsync_sendout(1, c);
1433                         PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]);
1434                 }
1435
1436                 pfsync_multicast_cleanup(sc);
1437
1438                 if (sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
1439                         error = pfsync_multicast_setup(sc, sifp, imf);
1440                         if (error) {
1441                                 if_rele(sifp);
1442                                 ip_mfilter_free(imf);
1443                                 PFSYNC_UNLOCK(sc);
1444                                 return (error);
1445                         }
1446                 }
1447                 if (sc->sc_sync_if)
1448                         if_rele(sc->sc_sync_if);
1449                 sc->sc_sync_if = sifp;
1450
1451                 ip = &sc->sc_template;
1452                 bzero(ip, sizeof(*ip));
1453                 ip->ip_v = IPVERSION;
1454                 ip->ip_hl = sizeof(sc->sc_template) >> 2;
1455                 ip->ip_tos = IPTOS_LOWDELAY;
1456                 /* len and id are set later. */
1457                 ip->ip_off = htons(IP_DF);
1458                 ip->ip_ttl = PFSYNC_DFLTTL;
1459                 ip->ip_p = IPPROTO_PFSYNC;
1460                 ip->ip_src.s_addr = INADDR_ANY;
1461                 ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr;
1462
1463                 /* Request a full state table update. */
1464                 if ((sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
1465                         (*carp_demote_adj_p)(V_pfsync_carp_adj,
1466                             "pfsync bulk start");
1467                 sc->sc_flags &= ~PFSYNCF_OK;
1468                 if (V_pf_status.debug >= PF_DEBUG_MISC)
1469                         printf("pfsync: requesting bulk update\n");
1470                 PFSYNC_UNLOCK(sc);
1471                 PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]);
1472                 pfsync_request_update(0, 0);
1473                 PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]);
1474                 PFSYNC_BLOCK(sc);
1475                 sc->sc_ureq_sent = time_uptime;
1476                 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail,
1477                     sc);
1478                 PFSYNC_BUNLOCK(sc);
1479
1480                 break;
1481             }
1482         default:
1483                 return (ENOTTY);
1484         }
1485
1486         return (0);
1487 }
1488
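/*
 * Writer methods for the transmit queues: each one serializes a single
 * state into the buffer space reserved for its action by pfsync_q_ins().
 */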
1489 static void
1490 pfsync_out_state(struct pf_state *st, void *buf)
1491 {
1492         struct pfsync_state *sp = buf;
1493
1494         pfsync_state_export(sp, st);
1495 }
1496
1497 static void
1498 pfsync_out_iack(struct pf_state *st, void *buf)
1499 {
1500         struct pfsync_ins_ack *iack = buf;
1501
1502         iack->id = st->id;
1503         iack->creatorid = st->creatorid;
1504 }
1505
1506 static void
1507 pfsync_out_upd_c(struct pf_state *st, void *buf)
1508 {
1509         struct pfsync_upd_c *up = buf;
1510
1511         bzero(up, sizeof(*up));
1512         up->id = st->id;
1513         pf_state_peer_hton(&st->src, &up->src);
1514         pf_state_peer_hton(&st->dst, &up->dst);
1515         up->creatorid = st->creatorid;
1516         up->timeout = st->timeout;
1517 }
1518
1519 static void
1520 pfsync_out_del(struct pf_state *st, void *buf)
1521 {
1522         struct pfsync_del_c *dp = buf;
1523
1524         dp->id = st->id;
1525         dp->creatorid = st->creatorid;
1526         st->state_flags |= PFSTATE_NOSYNC;
1527 }
1528
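/*
 * Throw away everything queued for transmission: drop the states held
 * on the per-bucket queues, free any pending update requests and reset
 * every bucket to an empty packet.
 */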
1529 static void
1530 pfsync_drop(struct pfsync_softc *sc)
1531 {
1532         struct pf_state *st, *next;
1533         struct pfsync_upd_req_item *ur;
1534         struct pfsync_bucket *b;
1535         int c, q;
1536
1537         for (c = 0; c < pfsync_buckets; c++) {
1538                 b = &sc->sc_buckets[c];
1539                 for (q = 0; q < PFSYNC_S_COUNT; q++) {
1540                         if (TAILQ_EMPTY(&b->b_qs[q]))
1541                                 continue;
1542
1543                         TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, next) {
1544                                 KASSERT(st->sync_state == q,
1545                                         ("%s: st->sync_state %d != q %d",
1546                                                 __func__, st->sync_state, q));
1547                                 st->sync_state = PFSYNC_S_NONE;
1548                                 pf_release_state(st);
1549                         }
1550                         TAILQ_INIT(&b->b_qs[q]);
1551                 }
1552
1553                 while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) {
1554                         TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry);
1555                         free(ur, M_PFSYNC);
1556                 }
1557
1558                 b->b_len = PFSYNC_MINPKT;
1559                 b->b_plus = NULL;
1560         }
1561 }
1562
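/*
 * Build a pfsync datagram from the queues of bucket 'c': the IP header
 * comes from the template, followed by the pfsync header, one subheader
 * per non-empty queue, an optional "plus" region and a closing EOF
 * subheader.  The packet is placed on the bucket's send queue; with
 * 'schedswi' set, the software interrupt is scheduled to transmit it.
 */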
1563 static void
1564 pfsync_sendout(int schedswi, int c)
1565 {
1566         struct pfsync_softc *sc = V_pfsyncif;
1567         struct ifnet *ifp = sc->sc_ifp;
1568         struct mbuf *m;
1569         struct ip *ip;
1570         struct pfsync_header *ph;
1571         struct pfsync_subheader *subh;
1572         struct pf_state *st, *st_next;
1573         struct pfsync_upd_req_item *ur;
1574         struct pfsync_bucket *b = &sc->sc_buckets[c];
1575         int offset;
1576         int q, count = 0;
1577
1578         KASSERT(sc != NULL, ("%s: null sc", __func__));
1579         KASSERT(b->b_len > PFSYNC_MINPKT,
1580             ("%s: b_len %zu", __func__, b->b_len));
1581         PFSYNC_BUCKET_LOCK_ASSERT(b);
1582
1583         if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
1584                 pfsync_drop(sc);
1585                 return;
1586         }
1587
1588         m = m_get2(max_linkhdr + b->b_len, M_NOWAIT, MT_DATA, M_PKTHDR);
1589         if (m == NULL) {
1590                 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
1591                 V_pfsyncstats.pfsyncs_onomem++;
1592                 return;
1593         }
1594         m->m_data += max_linkhdr;
1595         m->m_len = m->m_pkthdr.len = b->b_len;
1596
1597         /* build the ip header */
1598         ip = (struct ip *)m->m_data;
1599         bcopy(&sc->sc_template, ip, sizeof(*ip));
1600         offset = sizeof(*ip);
1601
1602         ip->ip_len = htons(m->m_pkthdr.len);
1603         ip_fillid(ip);
1604
1605         /* build the pfsync header */
1606         ph = (struct pfsync_header *)(m->m_data + offset);
1607         bzero(ph, sizeof(*ph));
1608         offset += sizeof(*ph);
1609
1610         ph->version = PFSYNC_VERSION;
1611         ph->len = htons(b->b_len - sizeof(*ip));
1612         bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
1613
1614         /* walk the queues */
1615         for (q = 0; q < PFSYNC_S_COUNT; q++) {
1616                 if (TAILQ_EMPTY(&b->b_qs[q]))
1617                         continue;
1618
1619                 subh = (struct pfsync_subheader *)(m->m_data + offset);
1620                 offset += sizeof(*subh);
1621
1622                 count = 0;
1623                 TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, st_next) {
1624                         KASSERT(st->sync_state == q,
1625                                 ("%s: st->sync_state %d != q %d",
1626                                         __func__, st->sync_state, q));
1627                         /*
1628                          * XXXGL: some of the write methods do unlocked
1629                          * reads of state data :(
1630                          */
1631                         pfsync_qs[q].write(st, m->m_data + offset);
1632                         offset += pfsync_qs[q].len;
1633                         st->sync_state = PFSYNC_S_NONE;
1634                         pf_release_state(st);
1635                         count++;
1636                 }
1637                 TAILQ_INIT(&b->b_qs[q]);
1638
1639                 bzero(subh, sizeof(*subh));
1640                 subh->action = pfsync_qs[q].action;
1641                 subh->count = htons(count);
1642                 V_pfsyncstats.pfsyncs_oacts[pfsync_qs[q].action] += count;
1643         }
1644
1645         if (!TAILQ_EMPTY(&b->b_upd_req_list)) {
1646                 subh = (struct pfsync_subheader *)(m->m_data + offset);
1647                 offset += sizeof(*subh);
1648
1649                 count = 0;
1650                 while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) {
1651                         TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry);
1652
1653                         bcopy(&ur->ur_msg, m->m_data + offset,
1654                             sizeof(ur->ur_msg));
1655                         offset += sizeof(ur->ur_msg);
1656                         free(ur, M_PFSYNC);
1657                         count++;
1658                 }
1659
1660                 bzero(subh, sizeof(*subh));
1661                 subh->action = PFSYNC_ACT_UPD_REQ;
1662                 subh->count = htons(count);
1663                 V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_UPD_REQ] += count;
1664         }
1665
1666         /* has someone built a custom region for us to add? */
1667         if (b->b_plus != NULL) {
1668                 bcopy(b->b_plus, m->m_data + offset, b->b_pluslen);
1669                 offset += b->b_pluslen;
1670
1671                 b->b_plus = NULL;
1672         }
1673
1674         subh = (struct pfsync_subheader *)(m->m_data + offset);
1675         offset += sizeof(*subh);
1676
1677         bzero(subh, sizeof(*subh));
1678         subh->action = PFSYNC_ACT_EOF;
1679         subh->count = htons(1);
1680         V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_EOF]++;
1681
1682         /* we're done, let's put it on the wire */
1683         if (ifp->if_bpf) {
1684                 m->m_data += sizeof(*ip);
1685                 m->m_len = m->m_pkthdr.len = b->b_len - sizeof(*ip);
1686                 BPF_MTAP(ifp, m);
1687                 m->m_data -= sizeof(*ip);
1688                 m->m_len = m->m_pkthdr.len = b->b_len;
1689         }
1690
1691         if (sc->sc_sync_if == NULL) {
1692                 b->b_len = PFSYNC_MINPKT;
1693                 m_freem(m);
1694                 return;
1695         }
1696
1697         if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
1698         if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
1699         b->b_len = PFSYNC_MINPKT;
1700
1701         if (!_IF_QFULL(&b->b_snd))
1702                 _IF_ENQUEUE(&b->b_snd, m);
1703         else {
1704                 m_freem(m);
1705                 if_inc_counter(sc->sc_ifp, IFCOUNTER_OQDROPS, 1);
1706         }
1707         if (schedswi)
1708                 swi_sched(V_pfsync_swi_cookie, 0);
1709 }
1710
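/*
 * A new state was inserted into the local table; queue it for the peer
 * unless the state or its rule is flagged as not to be synced.
 */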
1711 static void
1712 pfsync_insert_state(struct pf_state *st)
1713 {
1714         struct pfsync_softc *sc = V_pfsyncif;
1715         struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
1716
1717         if (st->state_flags & PFSTATE_NOSYNC)
1718                 return;
1719
1720         if ((st->rule.ptr->rule_flag & PFRULE_NOSYNC) ||
1721             st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
1722                 st->state_flags |= PFSTATE_NOSYNC;
1723                 return;
1724         }
1725
1726         KASSERT(st->sync_state == PFSYNC_S_NONE,
1727                 ("%s: st->sync_state %u", __func__, st->sync_state));
1728
1729         PFSYNC_BUCKET_LOCK(b);
1730         if (b->b_len == PFSYNC_MINPKT)
1731                 callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);
1732
1733         pfsync_q_ins(st, PFSYNC_S_INS, true);
1734         PFSYNC_BUCKET_UNLOCK(b);
1735
1736         st->sync_updates = 0;
1737 }
1738
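/*
 * Hold back the packet that created this state until the peer has
 * acknowledged the insert, or until the timeout below fires.  Returns
 * 1 if the mbuf was taken over, 0 if the caller should send it itself.
 */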
1739 static int
1740 pfsync_defer(struct pf_state *st, struct mbuf *m)
1741 {
1742         struct pfsync_softc *sc = V_pfsyncif;
1743         struct pfsync_deferral *pd;
1744         struct pfsync_bucket *b;
1745
1746         if (m->m_flags & (M_BCAST|M_MCAST))
1747                 return (0);
1748
        /*
         * sc is dereferenced by PFSYNC_LOCK() and pfsync_get_bucket(),
         * so it must be checked for NULL before either.
         */
        if (sc == NULL)
                return (0);

        PFSYNC_LOCK(sc);
        if (!(sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) ||
            !(sc->sc_flags & PFSYNCF_DEFER)) {
                PFSYNC_UNLOCK(sc);
                return (0);
        }
        PFSYNC_UNLOCK(sc);

        /*
         * The deferral list, its callout and the send queue are bucket
         * state: pfsync_undefer() and pfsync_push() assert the bucket
         * mutex, so take it before touching them.
         */
        b = pfsync_get_bucket(sc, st);
        PFSYNC_BUCKET_LOCK(b);
1756
1757         if (b->b_deferred >= 128)
1758                 pfsync_undefer(TAILQ_FIRST(&b->b_deferrals), 0);
1759
        pd = malloc(sizeof(*pd), M_PFSYNC, M_NOWAIT);
        if (pd == NULL) {
                /* Don't return with the bucket mutex held. */
                PFSYNC_BUCKET_UNLOCK(b);
                return (0);
        }
1763         b->b_deferred++;
1764
1765         m->m_flags |= M_SKIP_FIREWALL;
1766         st->state_flags |= PFSTATE_ACK;
1767
1768         pd->pd_sc = sc;
1769         pd->pd_refs = 0;
1770         pd->pd_st = st;
1771         pf_ref_state(st);
1772         pd->pd_m = m;
1773
1774         TAILQ_INSERT_TAIL(&b->b_deferrals, pd, pd_entry);
1775         callout_init_mtx(&pd->pd_tmo, &b->b_mtx, CALLOUT_RETURNUNLOCKED);
1776         callout_reset(&pd->pd_tmo, 10, pfsync_defer_tmo, pd);
1777
1778         pfsync_push(b);
        PFSYNC_BUCKET_UNLOCK(b);
1779
1780         return (1);
1781 }
1782
1783 static void
1784 pfsync_undefer(struct pfsync_deferral *pd, int drop)
1785 {
1786         struct pfsync_softc *sc = pd->pd_sc;
1787         struct mbuf *m = pd->pd_m;
1788         struct pf_state *st = pd->pd_st;
1789         struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
1790
1791         PFSYNC_BUCKET_LOCK_ASSERT(b);
1792
1793         TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
1794         b->b_deferred--;
1795         pd->pd_st->state_flags &= ~PFSTATE_ACK; /* XXX: locking! */
1796         free(pd, M_PFSYNC);
1797         pf_release_state(st);
1798
1799         if (drop)
1800                 m_freem(m);
1801         else {
1802                 _IF_ENQUEUE(&b->b_snd, m);
1803                 pfsync_push(b);
1804         }
1805 }
1806
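/*
 * Deferral timeout: the peer did not acknowledge the insert in time,
 * so transmit the held packet now.  The callout was initialized with
 * CALLOUT_RETURNUNLOCKED on the bucket mutex, so this function runs
 * with that mutex held and must drop it itself.
 */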
1807 static void
1808 pfsync_defer_tmo(void *arg)
1809 {
1810         struct epoch_tracker et;
1811         struct pfsync_deferral *pd = arg;
1812         struct pfsync_softc *sc = pd->pd_sc;
1813         struct mbuf *m = pd->pd_m;
1814         struct pf_state *st = pd->pd_st;
1815         struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
1816
1817         PFSYNC_BUCKET_LOCK_ASSERT(b);
1818
1819         NET_EPOCH_ENTER(et);
1820         CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);
1821
1822         TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
1823         b->b_deferred--;
1824         pd->pd_st->state_flags &= ~PFSTATE_ACK; /* XXX: locking! */
1825         if (pd->pd_refs == 0)
1826                 free(pd, M_PFSYNC);
        /* CALLOUT_RETURNUNLOCKED: drop the bucket mutex, not the softc lock. */
        PFSYNC_BUCKET_UNLOCK(b);
1828
1829         ip_output(m, NULL, NULL, 0, NULL, NULL);
1830
1831         pf_release_state(st);
1832
1833         CURVNET_RESTORE();
1834         NET_EPOCH_EXIT(et);
1835 }
1836
1837 static void
1838 pfsync_undefer_state(struct pf_state *st, int drop)
1839 {
1840         struct pfsync_softc *sc = V_pfsyncif;
1841         struct pfsync_deferral *pd;
1842         struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
1843
1844         PFSYNC_BUCKET_LOCK(b);
1845
1846         TAILQ_FOREACH(pd, &b->b_deferrals, pd_entry) {
1847                 if (pd->pd_st == st) {
1848                         if (callout_stop(&pd->pd_tmo) > 0)
1849                                 pfsync_undefer(pd, drop);
1850
1851                         PFSYNC_BUCKET_UNLOCK(b);
1852                         return;
1853                 }
1854         }
1855         PFSYNC_BUCKET_UNLOCK(b);
1856
1857         panic("%s: unable to find deferred state", __func__);
1858 }
1859
1860 static struct pfsync_bucket *
1861 pfsync_get_bucket(struct pfsync_softc *sc, struct pf_state *st)
1862 {
1863         int c = PF_IDHASH(st) % pfsync_buckets;
1864         return (&sc->sc_buckets[c]);
1865 }
1866
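/*
 * A state changed: fold the change into an update already queued for
 * it, or queue a new compressed update.  For TCP states, the packet is
 * pushed once sc_maxupdates successive updates have accumulated.
 */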
1867 static void
1868 pfsync_update_state(struct pf_state *st)
1869 {
1870         struct pfsync_softc *sc = V_pfsyncif;
1871         bool sync = false, ref = true;
1872         struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
1873
1874         PF_STATE_LOCK_ASSERT(st);
1875         PFSYNC_BUCKET_LOCK(b);
1876
1877         if (st->state_flags & PFSTATE_ACK)
1878                 pfsync_undefer_state(st, 0);
1879         if (st->state_flags & PFSTATE_NOSYNC) {
1880                 if (st->sync_state != PFSYNC_S_NONE)
1881                         pfsync_q_del(st, true, b);
1882                 PFSYNC_BUCKET_UNLOCK(b);
1883                 return;
1884         }
1885
1886         if (b->b_len == PFSYNC_MINPKT)
1887                 callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);
1888
1889         switch (st->sync_state) {
1890         case PFSYNC_S_UPD_C:
1891         case PFSYNC_S_UPD:
1892         case PFSYNC_S_INS:
1893                 /* we're already handling it */
1894
1895                 if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
1896                         st->sync_updates++;
1897                         if (st->sync_updates >= sc->sc_maxupdates)
1898                                 sync = true;
1899                 }
1900                 break;
1901
1902         case PFSYNC_S_IACK:
1903                 pfsync_q_del(st, false, b);
1904                 ref = false;
1905                 /* FALLTHROUGH */
1906
1907         case PFSYNC_S_NONE:
1908                 pfsync_q_ins(st, PFSYNC_S_UPD_C, ref);
1909                 st->sync_updates = 0;
1910                 break;
1911
1912         default:
1913                 panic("%s: unexpected sync state %d", __func__, st->sync_state);
1914         }
1915
1916         if (sync || (time_uptime - st->pfsync_time) < 2)
1917                 pfsync_push(b);
1918
1919         PFSYNC_BUCKET_UNLOCK(b);
1920 }
1921
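/*
 * Queue a request asking the peer for a fresh copy of the given state;
 * with both arguments zero this requests a full bulk update.
 */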
1922 static void
1923 pfsync_request_update(u_int32_t creatorid, u_int64_t id)
1924 {
1925         struct pfsync_softc *sc = V_pfsyncif;
1926         struct pfsync_bucket *b = &sc->sc_buckets[0];
1927         struct pfsync_upd_req_item *item;
1928         size_t nlen = sizeof(struct pfsync_upd_req);
1929
1930         PFSYNC_BUCKET_LOCK_ASSERT(b);
1931
1932         /*
1933          * This code tries to prevent multiple update requests for the
1934          * same state from being generated.  It searches the current
1935          * subheader queue, but not the already packed datagrams.
1936          */
1937         TAILQ_FOREACH(item, &b->b_upd_req_list, ur_entry)
1938                 if (item->ur_msg.id == id &&
1939                     item->ur_msg.creatorid == creatorid)
1940                         return;
1941
1942         item = malloc(sizeof(*item), M_PFSYNC, M_NOWAIT);
1943         if (item == NULL)
1944                 return; /* XXX stats */
1945
1946         item->ur_msg.id = id;
1947         item->ur_msg.creatorid = creatorid;
1948
1949         if (TAILQ_EMPTY(&b->b_upd_req_list))
1950                 nlen += sizeof(struct pfsync_subheader);
1951
1952         if (b->b_len + nlen > sc->sc_ifp->if_mtu) {
1953                 pfsync_sendout(1, 0);
1954
1955                 nlen = sizeof(struct pfsync_subheader) +
1956                     sizeof(struct pfsync_upd_req);
1957         }
1958
1959         TAILQ_INSERT_TAIL(&b->b_upd_req_list, item, ur_entry);
1960         b->b_len += nlen;
1961 }
1962
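/*
 * Queue a full state update in answer to an update request.  Returns
 * true when the pending packet has no room left for another full
 * state, which tells the bulk sender to reschedule itself.
 */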
1963 static bool
1964 pfsync_update_state_req(struct pf_state *st)
1965 {
1966         struct pfsync_softc *sc = V_pfsyncif;
1967         bool ref = true, full = false;
1968         struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
1969
1970         PF_STATE_LOCK_ASSERT(st);
1971         PFSYNC_BUCKET_LOCK(b);
1972
1973         if (st->state_flags & PFSTATE_NOSYNC) {
1974                 if (st->sync_state != PFSYNC_S_NONE)
1975                         pfsync_q_del(st, true, b);
1976                 PFSYNC_BUCKET_UNLOCK(b);
1977                 return (full);
1978         }
1979
1980         switch (st->sync_state) {
1981         case PFSYNC_S_UPD_C:
1982         case PFSYNC_S_IACK:
1983                 pfsync_q_del(st, false, b);
1984                 ref = false;
1985                 /* FALLTHROUGH */
1986
1987         case PFSYNC_S_NONE:
1988                 pfsync_q_ins(st, PFSYNC_S_UPD, ref);
1989                 pfsync_push(b);
1990                 break;
1991
1992         case PFSYNC_S_INS:
1993         case PFSYNC_S_UPD:
1994         case PFSYNC_S_DEL:
1995                 /* we're already handling it */
1996                 break;
1997
1998         default:
1999                 panic("%s: unexpected sync state %d", __func__, st->sync_state);
2000         }
2001
2002         if ((sc->sc_ifp->if_mtu - b->b_len) < sizeof(struct pfsync_state))
2003                 full = true;
2004
2005         PFSYNC_BUCKET_UNLOCK(b);
2006
2007         return (full);
2008 }
2009
2010 static void
2011 pfsync_delete_state(struct pf_state *st)
2012 {
2013         struct pfsync_softc *sc = V_pfsyncif;
2014         struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2015         bool ref = true;
2016
2017         PFSYNC_BUCKET_LOCK(b);
2018         if (st->state_flags & PFSTATE_ACK)
2019                 pfsync_undefer_state(st, 1);
2020         if (st->state_flags & PFSTATE_NOSYNC) {
2021                 if (st->sync_state != PFSYNC_S_NONE)
2022                         pfsync_q_del(st, true, b);
2023                 PFSYNC_BUCKET_UNLOCK(b);
2024                 return;
2025         }
2026
2027         if (b->b_len == PFSYNC_MINPKT)
2028                 callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);
2029
2030         switch (st->sync_state) {
2031         case PFSYNC_S_INS:
2032                 /* We never got to tell the world so just forget about it. */
2033                 pfsync_q_del(st, true, b);
2034                 break;
2035
2036         case PFSYNC_S_UPD_C:
2037         case PFSYNC_S_UPD:
2038         case PFSYNC_S_IACK:
2039                 pfsync_q_del(st, false, b);
2040                 ref = false;
2041                 /* FALLTHROUGH */
2042
2043         case PFSYNC_S_NONE:
2044                 pfsync_q_ins(st, PFSYNC_S_DEL, ref);
2045                 break;
2046
2047         default:
2048                 panic("%s: unexpected sync state %d", __func__, st->sync_state);
2049         }
2050
2051         PFSYNC_BUCKET_UNLOCK(b);
2052 }
2053
2054 static void
2055 pfsync_clear_states(u_int32_t creatorid, const char *ifname)
2056 {
2057         struct {
2058                 struct pfsync_subheader subh;
2059                 struct pfsync_clr clr;
2060         } __packed r;
2061
2062         bzero(&r, sizeof(r));
2063
2064         r.subh.action = PFSYNC_ACT_CLR;
2065         r.subh.count = htons(1);
2066         V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_CLR]++;
2067
2068         strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
2069         r.clr.creatorid = creatorid;
2070
2071         pfsync_send_plus(&r, sizeof(r));
2072 }
2073
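/*
 * Put a state on transmit queue 'q' of its bucket and reserve buffer
 * space for it, flushing the pending packet first if it would outgrow
 * the interface MTU.
 */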
2074 static void
2075 pfsync_q_ins(struct pf_state *st, int q, bool ref)
2076 {
2077         struct pfsync_softc *sc = V_pfsyncif;
2078         size_t nlen = pfsync_qs[q].len;
2079         struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2080
2081         PFSYNC_BUCKET_LOCK_ASSERT(b);
2082
2083         KASSERT(st->sync_state == PFSYNC_S_NONE,
2084                 ("%s: st->sync_state %u", __func__, st->sync_state));
2085         KASSERT(b->b_len >= PFSYNC_MINPKT, ("pfsync pkt len is too low %zu",
2086             b->b_len));
2087
2088         if (TAILQ_EMPTY(&b->b_qs[q]))
2089                 nlen += sizeof(struct pfsync_subheader);
2090
2091         if (b->b_len + nlen > sc->sc_ifp->if_mtu) {
2092                 pfsync_sendout(1, b->b_id);
2093
2094                 nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
2095         }
2096
2097         b->b_len += nlen;
2098         TAILQ_INSERT_TAIL(&b->b_qs[q], st, sync_list);
2099         st->sync_state = q;
2100         if (ref)
2101                 pf_ref_state(st);
2102 }
2103
2104 static void
2105 pfsync_q_del(struct pf_state *st, bool unref, struct pfsync_bucket *b)
2106 {
2107         int q = st->sync_state;
2108
2109         PFSYNC_BUCKET_LOCK_ASSERT(b);
2110         KASSERT(st->sync_state != PFSYNC_S_NONE,
2111                 ("%s: st->sync_state == PFSYNC_S_NONE", __func__));
2112
2113         b->b_len -= pfsync_qs[q].len;
2114         TAILQ_REMOVE(&b->b_qs[q], st, sync_list);
2115         st->sync_state = PFSYNC_S_NONE;
2116         if (unref)
2117                 pf_release_state(st);
2118
2119         if (TAILQ_EMPTY(&b->b_qs[q]))
2120                 b->b_len -= sizeof(struct pfsync_subheader);
2121 }
2122
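/*
 * The peer requested a bulk update: start a resumable walk over the
 * state table, carried out by pfsync_bulk_update().
 */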
2123 static void
2124 pfsync_bulk_start(void)
2125 {
2126         struct pfsync_softc *sc = V_pfsyncif;
2127
2128         if (V_pf_status.debug >= PF_DEBUG_MISC)
2129                 printf("pfsync: received bulk update request\n");
2130
2131         PFSYNC_BLOCK(sc);
2132
2133         sc->sc_ureq_received = time_uptime;
2134         sc->sc_bulk_hashid = 0;
2135         sc->sc_bulk_stateid = 0;
2136         pfsync_bulk_status(PFSYNC_BUS_START);
2137         callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc);
2138         PFSYNC_BUNLOCK(sc);
2139 }
2140
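/*
 * One slice of a bulk update: queue full updates for eligible states
 * until a packet fills up, then reschedule; once the whole table has
 * been walked, signal the end of the bulk update.
 */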
2141 static void
2142 pfsync_bulk_update(void *arg)
2143 {
2144         struct pfsync_softc *sc = arg;
2145         struct pf_state *s;
2146         int i, sent = 0;
2147
2148         PFSYNC_BLOCK_ASSERT(sc);
2149         CURVNET_SET(sc->sc_ifp->if_vnet);
2150
2151         /*
2152          * Start with the last state from the previous invocation.
2153          * It may have gone away, in which case we start from its
2154          * hash slot.
2155          */
2156         s = pf_find_state_byid(sc->sc_bulk_stateid, sc->sc_bulk_creatorid);
2157
2158         if (s != NULL)
2159                 i = PF_IDHASH(s);
2160         else
2161                 i = sc->sc_bulk_hashid;
2162
2163         for (; i <= pf_hashmask; i++) {
2164                 struct pf_idhash *ih = &V_pf_idhash[i];
2165
2166                 if (s != NULL)
2167                         PF_HASHROW_ASSERT(ih);
2168                 else {
2169                         PF_HASHROW_LOCK(ih);
2170                         s = LIST_FIRST(&ih->states);
2171                 }
2172
2173                 for (; s; s = LIST_NEXT(s, entry)) {
2174                         if (s->sync_state == PFSYNC_S_NONE &&
2175                             s->timeout < PFTM_MAX &&
2176                             s->pfsync_time <= sc->sc_ureq_received) {
2177                                 if (pfsync_update_state_req(s)) {
2178                                         /* We've filled a packet. */
2179                                         sc->sc_bulk_hashid = i;
2180                                         sc->sc_bulk_stateid = s->id;
2181                                         sc->sc_bulk_creatorid = s->creatorid;
2182                                         PF_HASHROW_UNLOCK(ih);
2183                                         callout_reset(&sc->sc_bulk_tmo, 1,
2184                                             pfsync_bulk_update, sc);
2185                                         goto full;
2186                                 }
2187                                 sent++;
2188                         }
2189                 }
2190                 PF_HASHROW_UNLOCK(ih);
2191         }
2192
2193         /* We're done. */
2194         pfsync_bulk_status(PFSYNC_BUS_END);
2195 full:
2196         CURVNET_RESTORE();
2197 }
2198
2199 static void
2200 pfsync_bulk_status(u_int8_t status)
2201 {
2202         struct {
2203                 struct pfsync_subheader subh;
2204                 struct pfsync_bus bus;
2205         } __packed r;
2206
2207         struct pfsync_softc *sc = V_pfsyncif;
2208
2209         bzero(&r, sizeof(r));
2210
2211         r.subh.action = PFSYNC_ACT_BUS;
2212         r.subh.count = htons(1);
2213         V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_BUS]++;
2214
2215         r.bus.creatorid = V_pf_status.hostid;
2216         r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
2217         r.bus.status = status;
2218
2219         pfsync_send_plus(&r, sizeof(r));
2220 }
2221
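/*
 * Our bulk update request was not answered in time.  Retry up to
 * PFSYNC_MAX_BULKTRIES times, then give up and carry on as if the
 * transfer had succeeded.
 */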
2222 static void
2223 pfsync_bulk_fail(void *arg)
2224 {
2225         struct pfsync_softc *sc = arg;
2226         struct pfsync_bucket *b = &sc->sc_buckets[0];
2227
2228         CURVNET_SET(sc->sc_ifp->if_vnet);
2229
2230         PFSYNC_BLOCK_ASSERT(sc);
2231
2232         if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
2233                 /* Try again */
2234                 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
2235                     pfsync_bulk_fail, sc);
2236                 PFSYNC_BUCKET_LOCK(b);
2237                 pfsync_request_update(0, 0);
2238                 PFSYNC_BUCKET_UNLOCK(b);
2239         } else {
2240                 /* Pretend the transfer was ok. */
2241                 sc->sc_ureq_sent = 0;
2242                 sc->sc_bulk_tries = 0;
2243                 PFSYNC_LOCK(sc);
2244                 if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
2245                         (*carp_demote_adj_p)(-V_pfsync_carp_adj,
2246                             "pfsync bulk fail");
2247                 sc->sc_flags |= PFSYNCF_OK;
2248                 PFSYNC_UNLOCK(sc);
2249                 if (V_pf_status.debug >= PF_DEBUG_MISC)
2250                         printf("pfsync: failed to receive bulk update\n");
2251         }
2252
2253         CURVNET_RESTORE();
2254 }
2255
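/*
 * Attach a preformatted subheader-plus-payload region to the pending
 * packet of bucket 0 and send it out immediately.
 */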
2256 static void
2257 pfsync_send_plus(void *plus, size_t pluslen)
2258 {
2259         struct pfsync_softc *sc = V_pfsyncif;
2260         struct pfsync_bucket *b = &sc->sc_buckets[0];
2261
2262         PFSYNC_BUCKET_LOCK(b);
2263
2264         if (b->b_len + pluslen > sc->sc_ifp->if_mtu)
2265                 pfsync_sendout(1, b->b_id);
2266
2267         b->b_plus = plus;
2268         b->b_len += (b->b_pluslen = pluslen);
2269
2270         pfsync_sendout(1, b->b_id);
2271         PFSYNC_BUCKET_UNLOCK(b);
2272 }
2273
2274 static void
2275 pfsync_timeout(void *arg)
2276 {
2277         struct pfsync_bucket *b = arg;
2278
2279         CURVNET_SET(b->b_sc->sc_ifp->if_vnet);
2280         PFSYNC_BUCKET_LOCK(b);
2281         pfsync_push(b);
2282         PFSYNC_BUCKET_UNLOCK(b);
2283         CURVNET_RESTORE();
2284 }
2285
2286 static void
2287 pfsync_push(struct pfsync_bucket *b)
2288 {
2289
2290         PFSYNC_BUCKET_LOCK_ASSERT(b);
2291
2292         b->b_flags |= PFSYNCF_BUCKET_PUSH;
2293         swi_sched(V_pfsync_swi_cookie, 0);
2294 }
2295
2296 static void
2297 pfsync_push_all(struct pfsync_softc *sc)
2298 {
2299         int c;
2300         struct pfsync_bucket *b;
2301
2302         for (c = 0; c < pfsync_buckets; c++) {
2303                 b = &sc->sc_buckets[c];
2304
2305                 PFSYNC_BUCKET_LOCK(b);
2306                 pfsync_push(b);
2307                 PFSYNC_BUCKET_UNLOCK(b);
2308         }
2309 }
2310
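/*
 * Software interrupt handler: flush the buckets whose push flag is set
 * and transmit every packet queued on the per-bucket send queues.
 */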
2311 static void
2312 pfsyncintr(void *arg)
2313 {
2314         struct epoch_tracker et;
2315         struct pfsync_softc *sc = arg;
2316         struct pfsync_bucket *b;
2317         struct mbuf *m, *n;
2318         int c;
2319
2320         NET_EPOCH_ENTER(et);
2321         CURVNET_SET(sc->sc_ifp->if_vnet);
2322
2323         for (c = 0; c < pfsync_buckets; c++) {
2324                 b = &sc->sc_buckets[c];
2325
2326                 PFSYNC_BUCKET_LOCK(b);
2327                 if ((b->b_flags & PFSYNCF_BUCKET_PUSH) && b->b_len > PFSYNC_MINPKT) {
2328                         pfsync_sendout(0, b->b_id);
2329                         b->b_flags &= ~PFSYNCF_BUCKET_PUSH;
2330                 }
2331                 _IF_DEQUEUE_ALL(&b->b_snd, m);
2332                 PFSYNC_BUCKET_UNLOCK(b);
2333
2334                 for (; m != NULL; m = n) {
2336                         n = m->m_nextpkt;
2337                         m->m_nextpkt = NULL;
2338
2339                         /*
2340                          * We distinguish between a deferral packet and our
2341                          * own pfsync packet based on the M_SKIP_FIREWALL
2342                          * flag.  This is XXX.
2343                          */
2344                         if (m->m_flags & M_SKIP_FIREWALL)
2345                                 ip_output(m, NULL, NULL, 0, NULL, NULL);
2346                         else if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo,
2347                             NULL) == 0)
2348                                 V_pfsyncstats.pfsyncs_opackets++;
2349                         else
2350                                 V_pfsyncstats.pfsyncs_oerrors++;
2351                 }
2352         }
2353         CURVNET_RESTORE();
2354         NET_EPOCH_EXIT(et);
2355 }
2356
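/*
 * Join the pfsync multicast group on the sync interface and initialize
 * the multicast options used for output.
 */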
2357 static int
2358 pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp,
2359     struct in_mfilter *imf)
2360 {
2361         struct ip_moptions *imo = &sc->sc_imo;
2362         int error;
2363
2364         if (!(ifp->if_flags & IFF_MULTICAST))
2365                 return (EADDRNOTAVAIL);
2366
2367         imo->imo_multicast_vif = -1;
2368
2369         if ((error = in_joingroup(ifp, &sc->sc_sync_peer, NULL,
2370             &imf->imf_inm)) != 0)
2371                 return (error);
2372
2373         ip_mfilter_init(&imo->imo_head);
2374         ip_mfilter_insert(&imo->imo_head, imf);
2375         imo->imo_multicast_ifp = ifp;
2376         imo->imo_multicast_ttl = PFSYNC_DFLTTL;
2377         imo->imo_multicast_loop = 0;
2378
2379         return (0);
2380 }
2381
2382 static void
2383 pfsync_multicast_cleanup(struct pfsync_softc *sc)
2384 {
2385         struct ip_moptions *imo = &sc->sc_imo;
2386         struct in_mfilter *imf;
2387
2388         while ((imf = ip_mfilter_first(&imo->imo_head)) != NULL) {
2389                 ip_mfilter_remove(&imo->imo_head, imf);
2390                 in_leavegroup(imf->imf_inm, NULL);
2391                 ip_mfilter_free(imf);
2392         }
2393         imo->imo_multicast_ifp = NULL;
2394 }
2395
2396 void
2397 pfsync_detach_ifnet(struct ifnet *ifp)
2398 {
2399         struct pfsync_softc *sc = V_pfsyncif;
2400
2401         if (sc == NULL)
2402                 return;
2403
2404         PFSYNC_LOCK(sc);
2405
2406         if (sc->sc_sync_if == ifp) {
2407                 /* We don't need mutlicast cleanup here, because the interface
2408                 /* We don't need multicast cleanup here, because the interface
2409                  * cleanup later.
2410                  */
2411                 ip_mfilter_init(&sc->sc_imo.imo_head);
2412                 sc->sc_imo.imo_multicast_ifp = NULL;
2413                 sc->sc_sync_if = NULL;
2414         }
2415
2416         PFSYNC_UNLOCK(sc);
2417 }
2418
2419 #ifdef INET
2420 extern  struct domain inetdomain;
2421 static struct protosw in_pfsync_protosw = {
2422         .pr_type =              SOCK_RAW,
2423         .pr_domain =            &inetdomain,
2424         .pr_protocol =          IPPROTO_PFSYNC,
2425         .pr_flags =             PR_ATOMIC|PR_ADDR,
2426         .pr_input =             pfsync_input,
2427         .pr_output =            rip_output,
2428         .pr_ctloutput =         rip_ctloutput,
2429         .pr_usrreqs =           &rip_usrreqs
2430 };
2431 #endif
2432
2433 static void
2434 pfsync_pointers_init(void)
2435 {
2436
2437         PF_RULES_WLOCK();
2438         V_pfsync_state_import_ptr = pfsync_state_import;
2439         V_pfsync_insert_state_ptr = pfsync_insert_state;
2440         V_pfsync_update_state_ptr = pfsync_update_state;
2441         V_pfsync_delete_state_ptr = pfsync_delete_state;
2442         V_pfsync_clear_states_ptr = pfsync_clear_states;
2443         V_pfsync_defer_ptr = pfsync_defer;
2444         PF_RULES_WUNLOCK();
2445 }
2446
2447 static void
2448 pfsync_pointers_uninit(void)
2449 {
2450
2451         PF_RULES_WLOCK();
2452         V_pfsync_state_import_ptr = NULL;
2453         V_pfsync_insert_state_ptr = NULL;
2454         V_pfsync_update_state_ptr = NULL;
2455         V_pfsync_delete_state_ptr = NULL;
2456         V_pfsync_clear_states_ptr = NULL;
2457         V_pfsync_defer_ptr = NULL;
2458         PF_RULES_WUNLOCK();
2459 }
2460
2461 static void
2462 vnet_pfsync_init(const void *unused __unused)
2463 {
2464         int error;
2465
2466         V_pfsync_cloner = if_clone_simple(pfsyncname,
2467             pfsync_clone_create, pfsync_clone_destroy, 1);
2468         error = swi_add(NULL, pfsyncname, pfsyncintr, V_pfsyncif,
2469             SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie);
2470         if (error) {
2471                 if_clone_detach(V_pfsync_cloner);
2472                 log(LOG_INFO, "swi_add() failed in %s\n", __func__);
2473         }
2474
2475         pfsync_pointers_init();
2476 }
2477 VNET_SYSINIT(vnet_pfsync_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY,
2478     vnet_pfsync_init, NULL);
2479
2480 static void
2481 vnet_pfsync_uninit(const void *unused __unused)
2482 {
2483
2484         pfsync_pointers_uninit();
2485
2486         if_clone_detach(V_pfsync_cloner);
2487         swi_remove(V_pfsync_swi_cookie);
2488 }
2489
2490 VNET_SYSUNINIT(vnet_pfsync_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_FOURTH,
2491     vnet_pfsync_uninit, NULL);
2492
2493 static int
2494 pfsync_init(void)
2495 {
2496 #ifdef INET
2497         int error;
2498
2499         pfsync_detach_ifnet_ptr = pfsync_detach_ifnet;
2500
2501         error = pf_proto_register(PF_INET, &in_pfsync_protosw);
2502         if (error)
2503                 return (error);
2504         error = ipproto_register(IPPROTO_PFSYNC);
2505         if (error) {
2506                 pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW);
2507                 return (error);
2508         }
2509 #endif
2510
2511         return (0);
2512 }
2513
2514 static void
2515 pfsync_uninit(void)
2516 {
2517         pfsync_detach_ifnet_ptr = NULL;
2518
2519 #ifdef INET
2520         ipproto_unregister(IPPROTO_PFSYNC);
2521         pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW);
2522 #endif
2523 }
2524
2525 static int
2526 pfsync_modevent(module_t mod, int type, void *data)
2527 {
2528         int error = 0;
2529
2530         switch (type) {
2531         case MOD_LOAD:
2532                 error = pfsync_init();
2533                 break;
2534         case MOD_UNLOAD:
2535                 pfsync_uninit();
2536                 break;
2537         default:
2538                 error = EINVAL;
2539                 break;
2540         }
2541
2542         return (error);
2543 }
2544
2545 static moduledata_t pfsync_mod = {
2546         pfsyncname,
2547         pfsync_modevent,
2548         0
2549 };
2550
2551 #define PFSYNC_MODVER 1
2552
2553 /* Stay on FIREWALL as we depend on pf being initialized and on inetdomain. */
2554 DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY);
2555 MODULE_VERSION(pfsync, PFSYNC_MODVER);
2556 MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);