]> CyberLeo.Net >> Repos - FreeBSD/releng/10.2.git/blob - sys/netpfil/pf/pf.c
- Copy stable/10@285827 to releng/10.2 in preparation for 10.2-RC1
[FreeBSD/releng/10.2.git] / sys / netpfil / pf / pf.c
1 /*-
2  * Copyright (c) 2001 Daniel Hartmeier
3  * Copyright (c) 2002 - 2008 Henning Brauer
4  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  *    - Redistributions of source code must retain the above copyright
12  *      notice, this list of conditions and the following disclaimer.
13  *    - Redistributions in binary form must reproduce the above
14  *      copyright notice, this list of conditions and the following
15  *      disclaimer in the documentation and/or other materials provided
16  *      with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * Effort sponsored in part by the Defense Advanced Research Projects
32  * Agency (DARPA) and Air Force Research Laboratory, Air Force
33  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
34  *
35  *      $OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $
36  */
37
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
40
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_bpf.h"
44 #include "opt_pf.h"
45
46 #include <sys/param.h>
47 #include <sys/bus.h>
48 #include <sys/endian.h>
49 #include <sys/hash.h>
50 #include <sys/interrupt.h>
51 #include <sys/kernel.h>
52 #include <sys/kthread.h>
53 #include <sys/limits.h>
54 #include <sys/mbuf.h>
55 #include <sys/md5.h>
56 #include <sys/random.h>
57 #include <sys/refcount.h>
58 #include <sys/socket.h>
59 #include <sys/sysctl.h>
60 #include <sys/taskqueue.h>
61 #include <sys/ucred.h>
62
63 #include <net/if.h>
64 #include <net/if_types.h>
65 #include <net/route.h>
66 #include <net/radix_mpath.h>
67 #include <net/vnet.h>
68
69 #include <net/pfvar.h>
70 #include <net/if_pflog.h>
71 #include <net/if_pfsync.h>
72
73 #include <netinet/in_pcb.h>
74 #include <netinet/in_var.h>
75 #include <netinet/ip.h>
76 #include <netinet/ip_fw.h>
77 #include <netinet/ip_icmp.h>
78 #include <netinet/icmp_var.h>
79 #include <netinet/ip_var.h>
80 #include <netinet/tcp.h>
81 #include <netinet/tcp_fsm.h>
82 #include <netinet/tcp_seq.h>
83 #include <netinet/tcp_timer.h>
84 #include <netinet/tcp_var.h>
85 #include <netinet/udp.h>
86 #include <netinet/udp_var.h>
87
88 #include <netpfil/ipfw/ip_fw_private.h> /* XXX: only for DIR_IN/DIR_OUT */
89
90 #ifdef INET6
91 #include <netinet/ip6.h>
92 #include <netinet/icmp6.h>
93 #include <netinet6/nd6.h>
94 #include <netinet6/ip6_var.h>
95 #include <netinet6/in6_pcb.h>
96 #endif /* INET6 */
97
98 #include <machine/in_cksum.h>
99 #include <security/mac/mac_framework.h>
100
101 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
102
103 /*
104  * Global variables
105  */
106
107 /* state tables */
108 VNET_DEFINE(struct pf_altqqueue,         pf_altqs[2]);
109 VNET_DEFINE(struct pf_palist,            pf_pabuf);
110 VNET_DEFINE(struct pf_altqqueue *,       pf_altqs_active);
111 VNET_DEFINE(struct pf_altqqueue *,       pf_altqs_inactive);
112 VNET_DEFINE(struct pf_kstatus,           pf_status);
113
114 VNET_DEFINE(u_int32_t,                   ticket_altqs_active);
115 VNET_DEFINE(u_int32_t,                   ticket_altqs_inactive);
116 VNET_DEFINE(int,                         altqs_inactive_open);
117 VNET_DEFINE(u_int32_t,                   ticket_pabuf);
118
119 VNET_DEFINE(MD5_CTX,                     pf_tcp_secret_ctx);
120 #define V_pf_tcp_secret_ctx              VNET(pf_tcp_secret_ctx)
121 VNET_DEFINE(u_char,                      pf_tcp_secret[16]);
122 #define V_pf_tcp_secret                  VNET(pf_tcp_secret)
123 VNET_DEFINE(int,                         pf_tcp_secret_init);
124 #define V_pf_tcp_secret_init             VNET(pf_tcp_secret_init)
125 VNET_DEFINE(int,                         pf_tcp_iss_off);
126 #define V_pf_tcp_iss_off                 VNET(pf_tcp_iss_off)
127
128 /*
129  * Queue for pf_intr() sends.
130  */
131 static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
132 struct pf_send_entry {
133         STAILQ_ENTRY(pf_send_entry)     pfse_next;
134         struct mbuf                     *pfse_m;
135         enum {
136                 PFSE_IP,
137                 PFSE_IP6,
138                 PFSE_ICMP,
139                 PFSE_ICMP6,
140         }                               pfse_type;
141         union {
142                 struct route            ro;
143                 struct {
144                         int             type;
145                         int             code;
146                         int             mtu;
147                 } icmpopts;
148         } u;
149 #define pfse_ro         u.ro
150 #define pfse_icmp_type  u.icmpopts.type
151 #define pfse_icmp_code  u.icmpopts.code
152 #define pfse_icmp_mtu   u.icmpopts.mtu
153 };
154
155 STAILQ_HEAD(pf_send_head, pf_send_entry);
156 static VNET_DEFINE(struct pf_send_head, pf_sendqueue);
157 #define V_pf_sendqueue  VNET(pf_sendqueue)
158
159 static struct mtx pf_sendqueue_mtx;
160 #define PF_SENDQ_LOCK()         mtx_lock(&pf_sendqueue_mtx)
161 #define PF_SENDQ_UNLOCK()       mtx_unlock(&pf_sendqueue_mtx)
162
163 /*
164  * Queue for pf_overload_task() tasks.
165  */
166 struct pf_overload_entry {
167         SLIST_ENTRY(pf_overload_entry)  next;
168         struct pf_addr                  addr;
169         sa_family_t                     af;
170         uint8_t                         dir;
171         struct pf_rule                  *rule;
172 };
173
174 SLIST_HEAD(pf_overload_head, pf_overload_entry);
175 static VNET_DEFINE(struct pf_overload_head, pf_overloadqueue);
176 #define V_pf_overloadqueue      VNET(pf_overloadqueue)
177 static VNET_DEFINE(struct task, pf_overloadtask);
178 #define V_pf_overloadtask       VNET(pf_overloadtask)
179
180 static struct mtx pf_overloadqueue_mtx;
181 #define PF_OVERLOADQ_LOCK()     mtx_lock(&pf_overloadqueue_mtx)
182 #define PF_OVERLOADQ_UNLOCK()   mtx_unlock(&pf_overloadqueue_mtx)
183
184 VNET_DEFINE(struct pf_rulequeue, pf_unlinked_rules);
185 struct mtx pf_unlnkdrules_mtx;
186
187 static VNET_DEFINE(uma_zone_t,  pf_sources_z);
188 #define V_pf_sources_z  VNET(pf_sources_z)
189 uma_zone_t              pf_mtag_z;
190 VNET_DEFINE(uma_zone_t,  pf_state_z);
191 VNET_DEFINE(uma_zone_t,  pf_state_key_z);
192
193 VNET_DEFINE(uint64_t, pf_stateid[MAXCPU]);
194 #define PFID_CPUBITS    8
195 #define PFID_CPUSHIFT   (sizeof(uint64_t) * NBBY - PFID_CPUBITS)
196 #define PFID_CPUMASK    ((uint64_t)((1 << PFID_CPUBITS) - 1) << PFID_CPUSHIFT)
197 #define PFID_MAXID      (~PFID_CPUMASK)
198 CTASSERT((1 << PFID_CPUBITS) >= MAXCPU);
199
200 static void              pf_src_tree_remove_state(struct pf_state *);
201 static void              pf_init_threshold(struct pf_threshold *, u_int32_t,
202                             u_int32_t);
203 static void              pf_add_threshold(struct pf_threshold *);
204 static int               pf_check_threshold(struct pf_threshold *);
205
206 static void              pf_change_ap(struct pf_addr *, u_int16_t *,
207                             u_int16_t *, u_int16_t *, struct pf_addr *,
208                             u_int16_t, u_int8_t, sa_family_t);
209 static int               pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
210                             struct tcphdr *, struct pf_state_peer *);
211 static void              pf_change_icmp(struct pf_addr *, u_int16_t *,
212                             struct pf_addr *, struct pf_addr *, u_int16_t,
213                             u_int16_t *, u_int16_t *, u_int16_t *,
214                             u_int16_t *, u_int8_t, sa_family_t);
215 static void              pf_send_tcp(struct mbuf *,
216                             const struct pf_rule *, sa_family_t,
217                             const struct pf_addr *, const struct pf_addr *,
218                             u_int16_t, u_int16_t, u_int32_t, u_int32_t,
219                             u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
220                             u_int16_t, struct ifnet *);
221 static void              pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
222                             sa_family_t, struct pf_rule *);
223 static void              pf_detach_state(struct pf_state *);
224 static int               pf_state_key_attach(struct pf_state_key *,
225                             struct pf_state_key *, struct pf_state *);
226 static void              pf_state_key_detach(struct pf_state *, int);
227 static int               pf_state_key_ctor(void *, int, void *, int);
228 static u_int32_t         pf_tcp_iss(struct pf_pdesc *);
229 static int               pf_test_rule(struct pf_rule **, struct pf_state **,
230                             int, struct pfi_kif *, struct mbuf *, int,
231                             struct pf_pdesc *, struct pf_rule **,
232                             struct pf_ruleset **, struct inpcb *);
233 static int               pf_create_state(struct pf_rule *, struct pf_rule *,
234                             struct pf_rule *, struct pf_pdesc *,
235                             struct pf_src_node *, struct pf_state_key *,
236                             struct pf_state_key *, struct mbuf *, int,
237                             u_int16_t, u_int16_t, int *, struct pfi_kif *,
238                             struct pf_state **, int, u_int16_t, u_int16_t,
239                             int);
240 static int               pf_test_fragment(struct pf_rule **, int,
241                             struct pfi_kif *, struct mbuf *, void *,
242                             struct pf_pdesc *, struct pf_rule **,
243                             struct pf_ruleset **);
244 static int               pf_tcp_track_full(struct pf_state_peer *,
245                             struct pf_state_peer *, struct pf_state **,
246                             struct pfi_kif *, struct mbuf *, int,
247                             struct pf_pdesc *, u_short *, int *);
248 static int               pf_tcp_track_sloppy(struct pf_state_peer *,
249                             struct pf_state_peer *, struct pf_state **,
250                             struct pf_pdesc *, u_short *);
251 static int               pf_test_state_tcp(struct pf_state **, int,
252                             struct pfi_kif *, struct mbuf *, int,
253                             void *, struct pf_pdesc *, u_short *);
254 static int               pf_test_state_udp(struct pf_state **, int,
255                             struct pfi_kif *, struct mbuf *, int,
256                             void *, struct pf_pdesc *);
257 static int               pf_test_state_icmp(struct pf_state **, int,
258                             struct pfi_kif *, struct mbuf *, int,
259                             void *, struct pf_pdesc *, u_short *);
260 static int               pf_test_state_other(struct pf_state **, int,
261                             struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
262 static u_int8_t          pf_get_wscale(struct mbuf *, int, u_int16_t,
263                             sa_family_t);
264 static u_int16_t         pf_get_mss(struct mbuf *, int, u_int16_t,
265                             sa_family_t);
266 static u_int16_t         pf_calc_mss(struct pf_addr *, sa_family_t,
267                                 int, u_int16_t);
268 static int               pf_check_proto_cksum(struct mbuf *, int, int,
269                             u_int8_t, sa_family_t);
270 static void              pf_print_state_parts(struct pf_state *,
271                             struct pf_state_key *, struct pf_state_key *);
272 static int               pf_addr_wrap_neq(struct pf_addr_wrap *,
273                             struct pf_addr_wrap *);
274 static struct pf_state  *pf_find_state(struct pfi_kif *,
275                             struct pf_state_key_cmp *, u_int);
276 static int               pf_src_connlimit(struct pf_state **);
277 static void              pf_overload_task(void *v, int pending);
278 static int               pf_insert_src_node(struct pf_src_node **,
279                             struct pf_rule *, struct pf_addr *, sa_family_t);
280 static u_int             pf_purge_expired_states(u_int, int);
281 static void              pf_purge_unlinked_rules(void);
282 static int               pf_mtag_uminit(void *, int, int);
283 static void              pf_mtag_free(struct m_tag *);
284 #ifdef INET
285 static void              pf_route(struct mbuf **, struct pf_rule *, int,
286                             struct ifnet *, struct pf_state *,
287                             struct pf_pdesc *);
288 #endif /* INET */
289 #ifdef INET6
290 static void              pf_change_a6(struct pf_addr *, u_int16_t *,
291                             struct pf_addr *, u_int8_t);
292 static void              pf_route6(struct mbuf **, struct pf_rule *, int,
293                             struct ifnet *, struct pf_state *,
294                             struct pf_pdesc *);
295 #endif /* INET6 */
296
297 int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);
298
299 VNET_DECLARE(int, pf_end_threads);
300
301 VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);
302
303 #define PACKET_LOOPED(pd)       ((pd)->pf_mtag &&                       \
304                                  (pd)->pf_mtag->flags & PF_PACKET_LOOPED)
305
306 #define STATE_LOOKUP(i, k, d, s, pd)                                    \
307         do {                                                            \
308                 (s) = pf_find_state((i), (k), (d));                     \
309                 if ((s) == NULL)                                        \
310                         return (PF_DROP);                               \
311                 if (PACKET_LOOPED(pd))                                  \
312                         return (PF_PASS);                               \
313                 if ((d) == PF_OUT &&                                    \
314                     (((s)->rule.ptr->rt == PF_ROUTETO &&                \
315                     (s)->rule.ptr->direction == PF_OUT) ||              \
316                     ((s)->rule.ptr->rt == PF_REPLYTO &&                 \
317                     (s)->rule.ptr->direction == PF_IN)) &&              \
318                     (s)->rt_kif != NULL &&                              \
319                     (s)->rt_kif != (i))                                 \
320                         return (PF_PASS);                               \
321         } while (0)
322
323 #define BOUND_IFACE(r, k) \
324         ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all
325
326 #define STATE_INC_COUNTERS(s)                                           \
327         do {                                                            \
328                 counter_u64_add(s->rule.ptr->states_cur, 1);            \
329                 counter_u64_add(s->rule.ptr->states_tot, 1);            \
330                 if (s->anchor.ptr != NULL) {                            \
331                         counter_u64_add(s->anchor.ptr->states_cur, 1);  \
332                         counter_u64_add(s->anchor.ptr->states_tot, 1);  \
333                 }                                                       \
334                 if (s->nat_rule.ptr != NULL) {                          \
335                         counter_u64_add(s->nat_rule.ptr->states_cur, 1);\
336                         counter_u64_add(s->nat_rule.ptr->states_tot, 1);\
337                 }                                                       \
338         } while (0)
339
340 #define STATE_DEC_COUNTERS(s)                                           \
341         do {                                                            \
342                 if (s->nat_rule.ptr != NULL)                            \
343                         counter_u64_add(s->nat_rule.ptr->states_cur, -1);\
344                 if (s->anchor.ptr != NULL)                              \
345                         counter_u64_add(s->anchor.ptr->states_cur, -1); \
346                 counter_u64_add(s->rule.ptr->states_cur, -1);           \
347         } while (0)
348
349 static MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
350 VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
351 VNET_DEFINE(struct pf_idhash *, pf_idhash);
352 VNET_DEFINE(struct pf_srchash *, pf_srchash);
353
354 SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW, 0, "pf(4)");
355
356 u_long  pf_hashmask;
357 u_long  pf_srchashmask;
358 static u_long   pf_hashsize;
359 static u_long   pf_srchashsize;
360
361 SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
362     &pf_hashsize, 0, "Size of pf(4) states hashtable");
363 SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
364     &pf_srchashsize, 0, "Size of pf(4) source nodes hashtable");
365
366 VNET_DEFINE(void *, pf_swi_cookie);
367
368 VNET_DEFINE(uint32_t, pf_hashseed);
369 #define V_pf_hashseed   VNET(pf_hashseed)
370
371 int
372 pf_addr_cmp(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
373 {
374
375         switch (af) {
376 #ifdef INET
377         case AF_INET:
378                 if (a->addr32[0] > b->addr32[0])
379                         return (1);
380                 if (a->addr32[0] < b->addr32[0])
381                         return (-1);
382                 break;
383 #endif /* INET */
384 #ifdef INET6
385         case AF_INET6:
386                 if (a->addr32[3] > b->addr32[3])
387                         return (1);
388                 if (a->addr32[3] < b->addr32[3])
389                         return (-1);
390                 if (a->addr32[2] > b->addr32[2])
391                         return (1);
392                 if (a->addr32[2] < b->addr32[2])
393                         return (-1);
394                 if (a->addr32[1] > b->addr32[1])
395                         return (1);
396                 if (a->addr32[1] < b->addr32[1])
397                         return (-1);
398                 if (a->addr32[0] > b->addr32[0])
399                         return (1);
400                 if (a->addr32[0] < b->addr32[0])
401                         return (-1);
402                 break;
403 #endif /* INET6 */
404         default:
405                 panic("%s: unknown address family %u", __func__, af);
406         }
407         return (0);
408 }
409
/*
 * Hash a state key into the state hash tables.  Only the leading,
 * comparable part of the key (struct pf_state_key_cmp) takes part
 * in the hash; the per-vnet seed randomizes bucket placement.
 */
static __inline uint32_t
pf_hashkey(struct pf_state_key *sk)
{
	uint32_t h;

	h = murmur3_aligned_32((uint32_t *)sk,
			       sizeof(struct pf_state_key_cmp),
			       V_pf_hashseed);

	return (h & pf_hashmask);
}
421
/*
 * Hash a source address into the source-node hash table, hashing
 * only the bytes meaningful for the given address family (4 for
 * IPv4, 16 for IPv6).
 */
static __inline uint32_t
pf_hashsrc(struct pf_addr *addr, sa_family_t af)
{
	uint32_t h;

	switch (af) {
	case AF_INET:
		h = murmur3_aligned_32((uint32_t *)&addr->v4,
				       sizeof(addr->v4), V_pf_hashseed);
		break;
	case AF_INET6:
		h = murmur3_aligned_32((uint32_t *)&addr->v6,
				       sizeof(addr->v6), V_pf_hashseed);
		break;
	default:
		panic("%s: unknown address family %u", __func__, af);
	}

	return (h & pf_srchashmask);
}
442
#ifdef INET6
/*
 * Copy a pf address; the number of 32-bit words transferred
 * depends on the address family (1 for IPv4, 4 for IPv6).
 * Unknown families are silently ignored.
 */
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
	int i;

	switch (af) {
#ifdef INET
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
#endif /* INET */
	case AF_INET6:
		for (i = 0; i < 4; i++)
			dst->addr32[i] = src->addr32[i];
		break;
	}
}
#endif /* INET6 */
462
463 static void
464 pf_init_threshold(struct pf_threshold *threshold,
465     u_int32_t limit, u_int32_t seconds)
466 {
467         threshold->limit = limit * PF_THRESHOLD_MULT;
468         threshold->seconds = seconds;
469         threshold->count = 0;
470         threshold->last = time_uptime;
471 }
472
473 static void
474 pf_add_threshold(struct pf_threshold *threshold)
475 {
476         u_int32_t t = time_uptime, diff = t - threshold->last;
477
478         if (diff >= threshold->seconds)
479                 threshold->count = 0;
480         else
481                 threshold->count -= threshold->count * diff /
482                     threshold->seconds;
483         threshold->count += PF_THRESHOLD_MULT;
484         threshold->last = t;
485 }
486
487 static int
488 pf_check_threshold(struct pf_threshold *threshold)
489 {
490         return (threshold->count > threshold->limit);
491 }
492
/*
 * Account a newly established connection against the state's source
 * node and enforce the rule's max-src-conn and max-src-conn-rate
 * limits.  Returns 0 when the connection is within limits.  Otherwise
 * the state is marked for purge, the offending address is queued for
 * the overload/flush task (when the rule has an overload table) and
 * 1 is returned.  Called with the state lock held.
 */
static int
pf_src_connlimit(struct pf_state **state)
{
	struct pf_overload_entry *pfoe;
	int bad = 0;

	PF_STATE_LOCK_ASSERT(*state);

	(*state)->src_node->conn++;
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	/* Too many simultaneous connections from this source node? */
	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONN], 1);
		bad++;
	}

	/* Connection rate above the configured threshold? */
	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONNRATE], 1);
		bad++;
	}

	if (!bad)
		return (0);

	/* Kill this state. */
	(*state)->timeout = PFTM_PURGE;
	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;

	/* Without an overload table there is nothing more to do. */
	if ((*state)->rule.ptr->overload_tbl == NULL)
		return (1);

	/* Schedule overloading and flushing task. */
	pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT);
	if (pfoe == NULL)
		return (1);	/* too bad :( */

	bcopy(&(*state)->src_node->addr, &pfoe->addr, sizeof(pfoe->addr));
	pfoe->af = (*state)->key[PF_SK_WIRE]->af;
	pfoe->rule = (*state)->rule.ptr;
	pfoe->dir = (*state)->direction;
	PF_OVERLOADQ_LOCK();
	SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next);
	PF_OVERLOADQ_UNLOCK();
	taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask);

	return (1);
}
544
545 static void
546 pf_overload_task(void *v, int pending)
547 {
548         struct pf_overload_head queue;
549         struct pfr_addr p;
550         struct pf_overload_entry *pfoe, *pfoe1;
551         uint32_t killed = 0;
552
553         CURVNET_SET((struct vnet *)v);
554
555         PF_OVERLOADQ_LOCK();
556         queue = V_pf_overloadqueue;
557         SLIST_INIT(&V_pf_overloadqueue);
558         PF_OVERLOADQ_UNLOCK();
559
560         bzero(&p, sizeof(p));
561         SLIST_FOREACH(pfoe, &queue, next) {
562                 counter_u64_add(V_pf_status.lcounters[LCNT_OVERLOAD_TABLE], 1);
563                 if (V_pf_status.debug >= PF_DEBUG_MISC) {
564                         printf("%s: blocking address ", __func__);
565                         pf_print_host(&pfoe->addr, 0, pfoe->af);
566                         printf("\n");
567                 }
568
569                 p.pfra_af = pfoe->af;
570                 switch (pfoe->af) {
571 #ifdef INET
572                 case AF_INET:
573                         p.pfra_net = 32;
574                         p.pfra_ip4addr = pfoe->addr.v4;
575                         break;
576 #endif
577 #ifdef INET6
578                 case AF_INET6:
579                         p.pfra_net = 128;
580                         p.pfra_ip6addr = pfoe->addr.v6;
581                         break;
582 #endif
583                 }
584
585                 PF_RULES_WLOCK();
586                 pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second);
587                 PF_RULES_WUNLOCK();
588         }
589
590         /*
591          * Remove those entries, that don't need flushing.
592          */
593         SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
594                 if (pfoe->rule->flush == 0) {
595                         SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
596                         free(pfoe, M_PFTEMP);
597                 } else
598                         counter_u64_add(
599                             V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH], 1);
600
601         /* If nothing to flush, return. */
602         if (SLIST_EMPTY(&queue)) {
603                 CURVNET_RESTORE();
604                 return;
605         }
606
607         for (int i = 0; i <= pf_hashmask; i++) {
608                 struct pf_idhash *ih = &V_pf_idhash[i];
609                 struct pf_state_key *sk;
610                 struct pf_state *s;
611
612                 PF_HASHROW_LOCK(ih);
613                 LIST_FOREACH(s, &ih->states, entry) {
614                     sk = s->key[PF_SK_WIRE];
615                     SLIST_FOREACH(pfoe, &queue, next)
616                         if (sk->af == pfoe->af &&
617                             ((pfoe->rule->flush & PF_FLUSH_GLOBAL) ||
618                             pfoe->rule == s->rule.ptr) &&
619                             ((pfoe->dir == PF_OUT &&
620                             PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) ||
621                             (pfoe->dir == PF_IN &&
622                             PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) {
623                                 s->timeout = PFTM_PURGE;
624                                 s->src.state = s->dst.state = TCPS_CLOSED;
625                                 killed++;
626                         }
627                 }
628                 PF_HASHROW_UNLOCK(ih);
629         }
630         SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
631                 free(pfoe, M_PFTEMP);
632         if (V_pf_status.debug >= PF_DEBUG_MISC)
633                 printf("%s: %u states killed", __func__, killed);
634
635         CURVNET_RESTORE();
636 }
637
/*
 * Look up a source tracking node by address, rule and address family.
 *
 * When returnlocked is nonzero and the lookup misses, the hash row
 * lock is intentionally left held so that the caller can consistently
 * allocate and insert a new node.  On a hit (or when returnlocked is
 * zero) the row lock is always released before returning.
 */
struct pf_src_node *
pf_find_src_node(struct pf_addr *src, struct pf_rule *rule, sa_family_t af,
	int returnlocked)
{
	struct pf_srchash *sh;
	struct pf_src_node *n;

	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1);

	sh = &V_pf_srchash[pf_hashsrc(src, af)];
	PF_HASHROW_LOCK(sh);
	LIST_FOREACH(n, &sh->nodes, entry)
		if (n->rule.ptr == rule && n->af == af &&
		    ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) ||
		    (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0)))
			break;
	/* Keep the row locked on a miss when the caller asked for it. */
	if (n != NULL || returnlocked == 0)
		PF_HASHROW_UNLOCK(sh);

	return (n);
}
663
/*
 * Find or create a source tracking node for the given rule/address
 * pair.  Returns 0 on success with *sn pointing at the node; returns
 * -1 when allocation fails or a max-src-nodes / max-src-states limit
 * is hit.  Must only be called for rules with source tracking or
 * sticky-address configured (asserted below).
 */
static int
pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
    struct pf_addr *src, sa_family_t af)
{

	KASSERT((rule->rule_flag & PFRULE_RULESRCTRACK ||
	    rule->rpool.opts & PF_POOL_STICKYADDR),
	    ("%s for non-tracking rule %p", __func__, rule));

	if (*sn == NULL)
		*sn = pf_find_src_node(src, rule, af, 1);

	if (*sn == NULL) {
		/*
		 * Lookup missed: pf_find_src_node(..., 1) left the hash
		 * row locked for us, so the insert below is race-free.
		 */
		struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)];

		PF_HASHROW_ASSERT(sh);

		/* Respect the rule's max-src-nodes limit, if set. */
		if (!rule->max_src_nodes ||
		    counter_u64_fetch(rule->src_nodes) < rule->max_src_nodes)
			(*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
		else
			counter_u64_add(V_pf_status.lcounters[LCNT_SRCNODES],
			    1);
		if ((*sn) == NULL) {
			PF_HASHROW_UNLOCK(sh);
			return (-1);
		}

		pf_init_threshold(&(*sn)->conn_rate,
		    rule->max_src_conn_rate.limit,
		    rule->max_src_conn_rate.seconds);

		(*sn)->af = af;
		(*sn)->rule.ptr = rule;
		PF_ACPY(&(*sn)->addr, src, af);
		LIST_INSERT_HEAD(&sh->nodes, *sn, entry);
		(*sn)->creation = time_uptime;
		(*sn)->ruletype = rule->action;
		if ((*sn)->rule.ptr != NULL)
			counter_u64_add((*sn)->rule.ptr->src_nodes, 1);
		PF_HASHROW_UNLOCK(sh);
		counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_INSERT], 1);
	} else {
		/* Existing node: enforce the max-src-states limit. */
		if (rule->max_src_states &&
		    (*sn)->states >= rule->max_src_states) {
			counter_u64_add(V_pf_status.lcounters[LCNT_SRCSTATES],
			    1);
			return (-1);
		}
	}
	return (0);
}
716
/*
 * Remove a source node from its hash row and drop the owning rule's
 * src_nodes counter.  The caller must hold the row lock (verified
 * under INVARIANTS).  The node itself is not freed here; see
 * pf_free_src_node().
 */
void
pf_unlink_src_node_locked(struct pf_src_node *src)
{
#ifdef INVARIANTS
	struct pf_srchash *sh;

	sh = &V_pf_srchash[pf_hashsrc(&src->addr, src->af)];
	PF_HASHROW_ASSERT(sh);
#endif
	LIST_REMOVE(src, entry);
	if (src->rule.ptr)
		counter_u64_add(src->rule.ptr->src_nodes, -1);
	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
}
731
732 void
733 pf_unlink_src_node(struct pf_src_node *src)
734 {
735         struct pf_srchash *sh;
736
737         sh = &V_pf_srchash[pf_hashsrc(&src->addr, src->af)];
738         PF_HASHROW_LOCK(sh);
739         pf_unlink_src_node_locked(src);
740         PF_HASHROW_UNLOCK(sh);
741 }
742
743 static void
744 pf_free_src_node(struct pf_src_node *sn)
745 {
746
747         KASSERT(sn->states == 0, ("%s: %p has refs", __func__, sn));
748         uma_zfree(V_pf_sources_z, sn);
749 }
750
751 u_int
752 pf_free_src_nodes(struct pf_src_node_list *head)
753 {
754         struct pf_src_node *sn, *tmp;
755         u_int count = 0;
756
757         LIST_FOREACH_SAFE(sn, head, entry, tmp) {
758                 pf_free_src_node(sn);
759                 count++;
760         }
761
762         return (count);
763 }
764
/*
 * Create the UMA zone backing pf mbuf tags.  Each allocation holds
 * an m_tag header immediately followed by the struct pf_mtag
 * payload; pf_mtag_uminit initializes new items.  This zone is
 * global, not per-vnet (pf_mtag_z has no VNET_DEFINE).
 */
void
pf_mtag_initialize()
{

	pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
	    sizeof(struct pf_mtag), NULL, NULL, pf_mtag_uminit, NULL,
	    UMA_ALIGN_PTR, 0);
}
773
774 /* Per-vnet data storage structures initialization. */
775 void
776 pf_initialize()
777 {
778         struct pf_keyhash       *kh;
779         struct pf_idhash        *ih;
780         struct pf_srchash       *sh;
781         u_int i;
782
783         /* Hash sizes are tunable but must be powers of two. */
784         TUNABLE_ULONG_FETCH("net.pf.states_hashsize", &pf_hashsize);
785         if (pf_hashsize == 0 || !powerof2(pf_hashsize))
786                 pf_hashsize = PF_HASHSIZ;
787         TUNABLE_ULONG_FETCH("net.pf.source_nodes_hashsize", &pf_srchashsize);
788         if (pf_srchashsize == 0 || !powerof2(pf_srchashsize))
789                 pf_srchashsize = PF_HASHSIZ / 4;
790
791         V_pf_hashseed = arc4random();
792
793         /* States and state keys storage. */
794         V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_state),
795             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
796         V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
797         uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);
798         uma_zone_set_warning(V_pf_state_z, "PF states limit reached");
799
800         V_pf_state_key_z = uma_zcreate("pf state keys",
801             sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
802             UMA_ALIGN_PTR, 0);
803         V_pf_keyhash = malloc(pf_hashsize * sizeof(struct pf_keyhash),
804             M_PFHASH, M_WAITOK | M_ZERO);
805         V_pf_idhash = malloc(pf_hashsize * sizeof(struct pf_idhash),
806             M_PFHASH, M_WAITOK | M_ZERO);
807         pf_hashmask = pf_hashsize - 1;
808         for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
809             i++, kh++, ih++) {
810                 /*
811                  * MTX_DUPOK: pf_state_key_attach() may hold two key
812                  * hash row locks at once.
813                  */
814                 mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK);
815                 mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
816         }
817
818         /* Source nodes. */
819         V_pf_sources_z = uma_zcreate("pf source nodes",
820             sizeof(struct pf_src_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
821             0);
822         V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
823         uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
824         uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached");
825         V_pf_srchash = malloc(pf_srchashsize * sizeof(struct pf_srchash),
826           M_PFHASH, M_WAITOK|M_ZERO);
827         pf_srchashmask = pf_srchashsize - 1;
828         for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++)
829                 mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);
830
831         /* ALTQ */
832         TAILQ_INIT(&V_pf_altqs[0]);
833         TAILQ_INIT(&V_pf_altqs[1]);
834         TAILQ_INIT(&V_pf_pabuf);
835         V_pf_altqs_active = &V_pf_altqs[0];
836         V_pf_altqs_inactive = &V_pf_altqs[1];
837
838
839         /* Send & overload+flush queues. */
840         STAILQ_INIT(&V_pf_sendqueue);
841         SLIST_INIT(&V_pf_overloadqueue);
842         TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet);
843         mtx_init(&pf_sendqueue_mtx, "pf send queue", NULL, MTX_DEF);
844         mtx_init(&pf_overloadqueue_mtx, "pf overload/flush queue", NULL,
845             MTX_DEF);
846
847         /* Unlinked, but may be referenced rules. */
848         TAILQ_INIT(&V_pf_unlinked_rules);
849         mtx_init(&pf_unlnkdrules_mtx, "pf unlinked rules", NULL, MTX_DEF);
850 }
846
/* Destroy the pf mbuf tag zone; counterpart of pf_mtag_initialize(). */
847 void
848 pf_mtag_cleanup()
849 {
850
851         uma_zdestroy(pf_mtag_z);
852 }
853
/*
 * Per-vnet teardown; counterpart of pf_initialize().  All hash rows
 * must already be empty (asserted), so only locks, hash tables, queued
 * send entries and UMA zones are released here.
 */
854 void
855 pf_cleanup()
856 {
857         struct pf_keyhash       *kh;
858         struct pf_idhash        *ih;
859         struct pf_srchash       *sh;
860         struct pf_send_entry    *pfse, *next;
861         u_int i;
862
863         for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
864             i++, kh++, ih++) {
865                 KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
866                     __func__));
867                 KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
868                     __func__));
869                 mtx_destroy(&kh->lock);
870                 mtx_destroy(&ih->lock);
871         }
872         free(V_pf_keyhash, M_PFHASH);
873         free(V_pf_idhash, M_PFHASH);
874
875         for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
876                 KASSERT(LIST_EMPTY(&sh->nodes),
877                     ("%s: source node hash not empty", __func__));
878                 mtx_destroy(&sh->lock);
879         }
880         free(V_pf_srchash, M_PFHASH);
881
882         /* Drop any packets still queued for transmission. */
883         STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
884                 m_freem(pfse->pfse_m);
885                 free(pfse, M_PFTEMP);
886         }
887
888         mtx_destroy(&pf_sendqueue_mtx);
889         mtx_destroy(&pf_overloadqueue_mtx);
890         mtx_destroy(&pf_unlnkdrules_mtx);
891
892         uma_zdestroy(V_pf_sources_z);
893         uma_zdestroy(V_pf_state_z);
894         uma_zdestroy(V_pf_state_key_z);
895 }
895
896 static int
897 pf_mtag_uminit(void *mem, int size, int how)
898 {
899         struct m_tag *t;
900
901         t = (struct m_tag *)mem;
902         t->m_tag_cookie = MTAG_ABI_COMPAT;
903         t->m_tag_id = PACKET_TAG_PF;
904         t->m_tag_len = sizeof(struct pf_mtag);
905         t->m_tag_free = pf_mtag_free;
906
907         return (0);
908 }
909
/* m_tag free callback: return a pf mtag allocation to its zone. */
910 static void
911 pf_mtag_free(struct m_tag *t)
912 {
913
914         uma_zfree(pf_mtag_z, t)
915 }
916
917 struct pf_mtag *
918 pf_get_mtag(struct mbuf *m)
919 {
920         struct m_tag *mtag;
921
922         if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
923                 return ((struct pf_mtag *)(mtag + 1));
924
925         mtag = uma_zalloc(pf_mtag_z, M_NOWAIT);
926         if (mtag == NULL)
927                 return (NULL);
928         bzero(mtag + 1, sizeof(struct pf_mtag));
929         m_tag_prepend(m, mtag);
930
931         return ((struct pf_mtag *)(mtag + 1));
932 }
933
/*
 * Attach wire and stack keys to a new state and insert it into the
 * key hash.  Returns 0 with the state's ID hash row locked, or EEXIST
 * (with all locks dropped and the keys freed) on a collision with an
 * existing active state on the same kif/direction.
 */
934 static int
935 pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks,
936     struct pf_state *s)
937 {
938         struct pf_keyhash       *khs, *khw, *kh;
939         struct pf_state_key     *sk, *cur;
940         struct pf_state         *si, *olds = NULL;
941         int idx;
942
943         KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
944         KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__));
945         KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__));
946
947         /*
948          * We need to lock hash slots of both keys. To avoid deadlock
949          * we always lock the slot with lower address first. Unlock order
950          * isn't important.
951          *
952          * We also need to lock ID hash slot before dropping key
953          * locks. On success we return with ID hash slot locked.
954          */
955
956         if (skw == sks) {
957                 khs = khw = &V_pf_keyhash[pf_hashkey(skw)];
958                 PF_HASHROW_LOCK(khs);
959         } else {
960                 khs = &V_pf_keyhash[pf_hashkey(sks)];
961                 khw = &V_pf_keyhash[pf_hashkey(skw)];
962                 if (khs == khw) {
963                         /* Distinct keys hashed to the same row. */
964                         PF_HASHROW_LOCK(khs);
965                 } else if (khs < khw) {
966                         PF_HASHROW_LOCK(khs);
967                         PF_HASHROW_LOCK(khw);
968                 } else {
969                         PF_HASHROW_LOCK(khw);
970                         PF_HASHROW_LOCK(khs);
971                 }
972         }
973
974 #define KEYS_UNLOCK()   do {                    \
975         if (khs != khw) {                       \
976                 PF_HASHROW_UNLOCK(khs);         \
977                 PF_HASHROW_UNLOCK(khw);         \
978         } else                                  \
979                 PF_HASHROW_UNLOCK(khs);         \
980 } while (0)
981
982         /*
983          * First run: start with wire key.
984          */
985         sk = skw;
986         kh = khw;
987         idx = PF_SK_WIRE;
988
989 keyattach:
990         LIST_FOREACH(cur, &kh->keys, entry)
991                 if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0)
992                         break;
993
994         if (cur != NULL) {
995                 /* Key exists. Check for same kif, if none, add to key. */
996                 TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) {
997                         struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)];
998
999                         PF_HASHROW_LOCK(ih);
1000                         if (si->kif == s->kif &&
1001                             si->direction == s->direction) {
1002                                 if (sk->proto == IPPROTO_TCP &&
1003                                     si->src.state >= TCPS_FIN_WAIT_2 &&
1004                                     si->dst.state >= TCPS_FIN_WAIT_2) {
1005                                         /*
1006                                          * New state matches an old >FIN_WAIT_2
1007                                          * state. We can't drop key hash locks,
1008                                          * thus we can't unlink it properly.
1009                                          *
1010                                          * As a workaround we drop it into
1011                                          * TCPS_CLOSED state, schedule purge
1012                                          * ASAP and push it into the very end
1013                                          * of the slot TAILQ, so that it won't
1014                                          * conflict with our new state.
1015                                          */
1016                                         si->src.state = si->dst.state =
1017                                             TCPS_CLOSED;
1018                                         si->timeout = PFTM_PURGE;
1019                                         olds = si;
1020                                 } else {
1021                                         if (V_pf_status.debug >= PF_DEBUG_MISC) {
1022                                                 printf("pf: %s key attach "
1023                                                     "failed on %s: ",
1024                                                     (idx == PF_SK_WIRE) ?
1025                                                     "wire" : "stack",
1026                                                     s->kif->pfik_name);
1027                                                 pf_print_state_parts(s,
1028                                                     (idx == PF_SK_WIRE) ?
1029                                                     sk : NULL,
1030                                                     (idx == PF_SK_STACK) ?
1031                                                     sk : NULL);
1032                                                 printf(", existing: ");
1033                                                 pf_print_state_parts(si,
1034                                                     (idx == PF_SK_WIRE) ?
1035                                                     sk : NULL,
1036                                                     (idx == PF_SK_STACK) ?
1037                                                     sk : NULL);
1038                                                 printf("\n");
1039                                         }
1040                                         PF_HASHROW_UNLOCK(ih);
1041                                         KEYS_UNLOCK();
1042                                         uma_zfree(V_pf_state_key_z, sk);
1043                                         /*
1044                                          * On the second run the wire key is
1045                                          * attached already; detach it again.
1046                                          */
1047                                         if (idx == PF_SK_STACK)
1048                                                 pf_detach_state(s);
1049                                         return (EEXIST); /* collision! */
1050                                 }
1051                         }
1052                         PF_HASHROW_UNLOCK(ih);
1053                 }
1054                 /* Reuse the existing identical key; ours is redundant. */
1055                 uma_zfree(V_pf_state_key_z, sk);
1056                 s->key[idx] = cur;
1057         } else {
1058                 LIST_INSERT_HEAD(&kh->keys, sk, entry);
1059                 s->key[idx] = sk;
1060         }
1061
1062 stateattach:
1063         /* List is sorted, if-bound states before floating. */
1064         if (s->kif == V_pfi_all)
1065                 TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]);
1066         else
1067                 TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]);
1068
1069         /* Move the superseded >FIN_WAIT_2 state out of the way. */
1070         if (olds) {
1071                 TAILQ_REMOVE(&s->key[idx]->states[idx], olds, key_list[idx]);
1071                 TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], olds,
1072                     key_list[idx]);
1073                 olds = NULL;
1074         }
1075
1076         /*
1077          * Attach done. See how should we (or should not?)
1078          * attach a second key.
1079          */
1080         if (sks == skw) {
1081                 s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
1082                 idx = PF_SK_STACK;
1083                 sks = NULL;
1084                 goto stateattach;
1085         } else if (sks != NULL) {
1086                 /*
1087                  * Continue attaching with stack key.
1088                  */
1089                 sk = sks;
1090                 kh = khs;
1091                 idx = PF_SK_STACK;
1092                 sks = NULL;
1093                 goto keyattach;
1094         }
1095
1096         /* Take the ID row lock before the key rows are released. */
1097         PF_STATE_LOCK(s);
1098         KEYS_UNLOCK();
1099
1100         KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL,
1101             ("%s failure", __func__));
1102
1103         return (0);
1104 #undef  KEYS_UNLOCK
1105 }
1099
/*
 * Detach both keys from a state, taking the appropriate key hash row
 * lock for each.  Handles the shared-key case (wire == stack) under a
 * single row lock.
 */
1100 static void
1101 pf_detach_state(struct pf_state *s)
1102 {
1103         struct pf_state_key *sks = s->key[PF_SK_STACK];
1104         struct pf_keyhash *kh;
1105
1106         if (sks != NULL) {
1107                 kh = &V_pf_keyhash[pf_hashkey(sks)];
1108                 PF_HASHROW_LOCK(kh);
1109                 if (s->key[PF_SK_STACK] != NULL)
1110                         pf_state_key_detach(s, PF_SK_STACK);
1111                 /*
1112                  * If both point to same key, then we are done.
1113                  */
1114                 if (sks == s->key[PF_SK_WIRE]) {
1115                         pf_state_key_detach(s, PF_SK_WIRE);
1116                         PF_HASHROW_UNLOCK(kh);
1117                         return;
1118                 }
1119                 PF_HASHROW_UNLOCK(kh);
1120         }
1121
1122         if (s->key[PF_SK_WIRE] != NULL) {
1123                 kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])];
1124                 PF_HASHROW_LOCK(kh);
1125                 if (s->key[PF_SK_WIRE] != NULL)
1126                         pf_state_key_detach(s, PF_SK_WIRE);
1127                 PF_HASHROW_UNLOCK(kh);
1128         }
1129 }
1130
/*
 * Remove the state from its key's state list for the given index
 * (PF_SK_WIRE or PF_SK_STACK).  The key's hash row must be locked
 * (asserted under INVARIANTS).  Frees the key once no state on either
 * list references it any longer.
 */
1131 static void
1132 pf_state_key_detach(struct pf_state *s, int idx)
1133 {
1134         struct pf_state_key *sk = s->key[idx];
1135 #ifdef INVARIANTS
1136         struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)];
1137
1138         PF_HASHROW_ASSERT(kh);
1139 #endif
1140         TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]);
1141         s->key[idx] = NULL;
1142
1143         /* Last user gone: unlink the key from the row and free it. */
1144         if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) {
1145                 LIST_REMOVE(sk, entry);
1146                 uma_zfree(V_pf_state_key_z, sk);
1147         }
1148 }
1148
/*
 * UMA constructor for state keys: zero the comparable prefix
 * (struct pf_state_key_cmp) and initialize both state lists.
 */
1149 static int
1150 pf_state_key_ctor(void *mem, int size, void *arg, int flags)
1151 {
1152         struct pf_state_key *sk = mem;
1153
1154         bzero(sk, sizeof(struct pf_state_key_cmp));
1155         TAILQ_INIT(&sk->states[PF_SK_WIRE]);
1156         TAILQ_INIT(&sk->states[PF_SK_STACK]);
1157
1158         return (0);
1159 }
1160
1161 struct pf_state_key *
1162 pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr,
1163         struct pf_addr *daddr, u_int16_t sport, u_int16_t dport)
1164 {
1165         struct pf_state_key *sk;
1166
1167         sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
1168         if (sk == NULL)
1169                 return (NULL);
1170
1171         PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af);
1172         PF_ACPY(&sk->addr[pd->didx], daddr, pd->af);
1173         sk->port[pd->sidx] = sport;
1174         sk->port[pd->didx] = dport;
1175         sk->proto = pd->proto;
1176         sk->af = pd->af;
1177
1178         return (sk);
1179 }
1180
1181 struct pf_state_key *
1182 pf_state_key_clone(struct pf_state_key *orig)
1183 {
1184         struct pf_state_key *sk;
1185
1186         sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
1187         if (sk == NULL)
1188                 return (NULL);
1189
1190         bcopy(orig, sk, sizeof(struct pf_state_key_cmp));
1191
1192         return (sk);
1193 }
1194
/*
 * Insert a freshly created state into the state table: attach its keys,
 * assign an ID/creatorid if not set (e.g. by pfsync), and link it into
 * the ID hash.  Returns 0 with the ID hash row locked, or EEXIST on a
 * key or ID collision (state detached, locks dropped).
 */
1195 int
1196 pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
1197     struct pf_state_key *sks, struct pf_state *s)
1198 {
1199         struct pf_idhash *ih;
1200         struct pf_state *cur;
1201         int error;
1202
1203         KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]),
1204             ("%s: sks not pristine", __func__));
1205         KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]),
1206             ("%s: skw not pristine", __func__));
1207         KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
1208
1209         s->kif = kif;
1210
1211         if (s->id == 0 && s->creatorid == 0) {
1212                 /* XXX: should be atomic, but probability of collision low */
1213                 if ((s->id = V_pf_stateid[curcpu]++) == PFID_MAXID)
1214                         V_pf_stateid[curcpu] = 1;
1215                 /* Embed the CPU number so per-CPU counters don't clash. */
1216                 s->id |= (uint64_t )curcpu << PFID_CPUSHIFT;
1217                 s->id = htobe64(s->id);
1218                 s->creatorid = V_pf_status.hostid;
1219         }
1220
1221         /* Returns with ID locked on success. */
1222         if ((error = pf_state_key_attach(skw, sks, s)) != 0)
1223                 return (error);
1224
1225         ih = &V_pf_idhash[PF_IDHASH(s)];
1226         PF_HASHROW_ASSERT(ih);
1227         LIST_FOREACH(cur, &ih->states, entry)
1228                 if (cur->id == s->id && cur->creatorid == s->creatorid)
1229                         break;
1230
1231         if (cur != NULL) {
1232                 PF_HASHROW_UNLOCK(ih);
1233                 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1234                         printf("pf: state ID collision: "
1235                             "id: %016llx creatorid: %08x\n",
1236                             (unsigned long long)be64toh(s->id),
1237                             ntohl(s->creatorid));
1238                 }
1239                 pf_detach_state(s);
1240                 return (EEXIST);
1241         }
1242         LIST_INSERT_HEAD(&ih->states, s, entry);
1243         /* One for keys, one for ID hash. */
1244         refcount_init(&s->refs, 2);
1245
1246         counter_u64_add(V_pf_status.fcounters[FCNT_STATE_INSERT], 1);
1247         if (pfsync_insert_state_ptr != NULL)
1248                 pfsync_insert_state_ptr(s);
1249
1250         /* Returns locked. */
1251         return (0);
1252 }
1252
1253 /*
1254  * Find state by ID: returns with locked row on success.
1255  */
1256 struct pf_state *
1257 pf_find_state_byid(uint64_t id, uint32_t creatorid)
1258 {
1259         struct pf_idhash *ih;
1260         struct pf_state *s;
1261
1262         counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1263
1264         /* Same row selection as PF_IDHASH: host-order ID mod table size. */
1265         ih = &V_pf_idhash[(be64toh(id) % (pf_hashmask + 1))];
1266
1267         PF_HASHROW_LOCK(ih);
1268         LIST_FOREACH(s, &ih->states, entry)
1269                 if (s->id == id && s->creatorid == creatorid)
1270                         break;
1271
1272         /* Keep the row locked on success; caller must unlock. */
1273         if (s == NULL)
1274                 PF_HASHROW_UNLOCK(ih);
1275
1276         return (s);
1277 }
1276
1277 /*
1278  * Find state by key.
1279  * Returns with ID hash slot locked on success.
1280  */
1281 static struct pf_state *
1282 pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
1283 {
1284         struct pf_keyhash       *kh;
1285         struct pf_state_key     *sk;
1286         struct pf_state         *s;
1287         int idx;
1288
1289         counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1290
1291         kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];
1292
1293         PF_HASHROW_LOCK(kh);
1294         LIST_FOREACH(sk, &kh->keys, entry)
1295                 if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
1296                         break;
1297         if (sk == NULL) {
1298                 PF_HASHROW_UNLOCK(kh);
1299                 return (NULL);
1300         }
1301
1302         /* Inbound packets match wire keys, outbound match stack keys. */
1303         idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);
1304
1305         /* List is sorted, if-bound states before floating ones. */
1306         TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
1307                 if (s->kif == V_pfi_all || s->kif == kif) {
1308                         /* Hand over: take the state's ID row lock, then
1309                          * drop the key row lock. */
1310                         PF_STATE_LOCK(s);
1311                         PF_HASHROW_UNLOCK(kh);
1312                         if (s->timeout >= PFTM_MAX) {
1313                                 /*
1314                                  * State is either being processed by
1315                                  * pf_unlink_state() in another thread, or
1316                                  * is scheduled for immediate expiry.
1317                                  */
1318                                 PF_STATE_UNLOCK(s);
1319                                 return (NULL);
1320                         }
1321                         return (s);
1322                 }
1323         PF_HASHROW_UNLOCK(kh);
1324
1325         return (NULL);
1326 }
1324
/*
 * Find state(s) matching a key without regard to interface.  With
 * "more" non-NULL, returns the first match and counts the extras in
 * *more; PF_INOUT scans the wire list first, then the stack list.
 * NOTE(review): with "more" == NULL the key hash row lock is dropped
 * before return and the state is returned unlocked.
 */
1325 struct pf_state *
1326 pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
1327 {
1328         struct pf_keyhash       *kh;
1329         struct pf_state_key     *sk;
1330         struct pf_state         *s, *ret = NULL;
1331         int                      idx, inout = 0;
1332
1333         counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1334
1335         kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];
1336
1337         PF_HASHROW_LOCK(kh);
1338         LIST_FOREACH(sk, &kh->keys, entry)
1339                 if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
1340                         break;
1341         if (sk == NULL) {
1342                 PF_HASHROW_UNLOCK(kh);
1343                 return (NULL);
1344         }
1345         switch (dir) {
1346         case PF_IN:
1347                 idx = PF_SK_WIRE;
1348                 break;
1349         case PF_OUT:
1350                 idx = PF_SK_STACK;
1351                 break;
1352         case PF_INOUT:
1353                 idx = PF_SK_WIRE;
1354                 inout = 1;
1355                 break;
1356         default:
1357                 panic("%s: dir %u", __func__, dir);
1358         }
1359 second_run:
1360         TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
1361                 if (more == NULL) {
1362                         PF_HASHROW_UNLOCK(kh);
1363                         return (s);
1364                 }
1365
1366                 if (ret)
1367                         (*more)++;
1368                 else
1369                         ret = s;
1370         }
1371         if (inout == 1) {
1372                 inout = 0;
1373                 idx = PF_SK_STACK;
1374                 goto second_run;
1375         }
1376         PF_HASHROW_UNLOCK(kh);
1377
1378         return (ret);
1379 }
1380
1381 /* END state table stuff */
1382
/*
 * Queue a packet for deferred transmission and schedule the pf
 * software interrupt (handled by pf_intr()) to send it.
 */
1383 static void
1384 pf_send(struct pf_send_entry *pfse)
1385 {
1386
1387         PF_SENDQ_LOCK();
1388         STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next);
1389         PF_SENDQ_UNLOCK();
1390         swi_sched(V_pf_swi_cookie, 0);
1391 }
1392
/*
 * Software interrupt handler: grab the whole send queue under the lock,
 * then transmit each queued entry (raw IP/IPv6 packets or ICMP/ICMPv6
 * errors) outside the lock and free the entries.
 */
1393 void
1394 pf_intr(void *v)
1395 {
1396         struct pf_send_head queue;
1397         struct pf_send_entry *pfse, *next;
1398
1399         CURVNET_SET((struct vnet *)v);
1400
1401         /* Detach the queue so transmission happens unlocked. */
1402         PF_SENDQ_LOCK();
1403         queue = V_pf_sendqueue;
1404         STAILQ_INIT(&V_pf_sendqueue);
1405         PF_SENDQ_UNLOCK();
1406
1407         STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
1408                 switch (pfse->pfse_type) {
1409 #ifdef INET
1410                 case PFSE_IP:
1411                         ip_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL);
1412                         break;
1413                 case PFSE_ICMP:
1414                         icmp_error(pfse->pfse_m, pfse->pfse_icmp_type,
1415                             pfse->pfse_icmp_code, 0, pfse->pfse_icmp_mtu);
1416                         break;
1417 #endif /* INET */
1418 #ifdef INET6
1419                 case PFSE_IP6:
1420                         ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL,
1421                             NULL);
1422                         break;
1423                 case PFSE_ICMP6:
1424                         icmp6_error(pfse->pfse_m, pfse->pfse_icmp_type,
1425                             pfse->pfse_icmp_code, pfse->pfse_icmp_mtu);
1426                         break;
1427 #endif /* INET6 */
1428                 default:
1429                         panic("%s: unknown type", __func__);
1430                 }
1431                 free(pfse, M_PFTEMP);
1432         }
1433         CURVNET_RESTORE();
1434 }
1434
/*
 * Kernel thread that incrementally expires states and periodically
 * purges fragments, source nodes, unlinked rules and kifs.  Wakes up
 * ten times per second; on V_pf_end_threads it performs a full final
 * purge, signals completion and exits.
 */
1435 void
1436 pf_purge_thread(void *v)
1437 {
1438         u_int idx = 0;
1439
1440         CURVNET_SET((struct vnet *)v);
1441
1442         for (;;) {
1443                 PF_RULES_RLOCK();
1444                 rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftm", hz / 10);
1445
1446                 if (V_pf_end_threads) {
1447                         /*
1448                          * To cleanse up all kifs and rules we need
1449                          * two runs: first one clears reference flags,
1450                          * then pf_purge_expired_states() doesn't
1451                          * raise them, and then second run frees.
1452                          */
1453                         PF_RULES_RUNLOCK();
1454                         pf_purge_unlinked_rules();
1455                         pfi_kif_purge();
1456
1457                         /*
1458                          * Now purge everything.
1459                          */
1460                         pf_purge_expired_states(0, pf_hashmask);
1461                         pf_purge_expired_fragments();
1462                         pf_purge_expired_src_nodes();
1463
1464                         /*
1465                          * Now all kifs & rules should be unreferenced,
1466                          * thus should be successfully freed.
1467                          */
1468                         pf_purge_unlinked_rules();
1469                         pfi_kif_purge();
1470
1471                         /*
1472                          * Announce success and exit.
1473                          */
1474                         PF_RULES_RLOCK();
1475                         V_pf_end_threads++;
1476                         PF_RULES_RUNLOCK();
1477                         wakeup(pf_purge_thread);
1478                         kproc_exit(0);
1479                 }
1480                 PF_RULES_RUNLOCK();
1481
1482                 /* Process 1/interval fraction of the state table every run. */
1483                 idx = pf_purge_expired_states(idx, pf_hashmask /
1484                             (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));
1485
1486                 /* Purge other expired types every PFTM_INTERVAL seconds. */
1487                 if (idx == 0) {
1488                         /*
1489                          * Order is important:
1490                          * - states and src nodes reference rules
1491                          * - states and rules reference kifs
1492                          */
1493                         pf_purge_expired_fragments();
1494                         pf_purge_expired_src_nodes();
1495                         pf_purge_unlinked_rules();
1496                         pfi_kif_purge();
1497                 }
1498         }
1499         /* not reached */
1500         CURVNET_RESTORE();
1501 }
1502
/*
 * Compute the absolute time (time_uptime based) at which a state
 * expires.  Rule timeouts take precedence over the default rule's;
 * between the adaptive start/end thresholds the timeout shrinks
 * linearly with the number of states, reaching zero at "end".
 */
1503 u_int32_t
1504 pf_state_expires(const struct pf_state *state)
1505 {
1506         u_int32_t       timeout;
1507         u_int32_t       start;
1508         u_int32_t       end;
1509         u_int32_t       states;
1510
1511         /* handle all PFTM_* > PFTM_MAX here */
1512         if (state->timeout == PFTM_PURGE)
1513                 return (time_uptime);   /* already expired: purge now */
1514         KASSERT(state->timeout != PFTM_UNLINKED,
1515             ("pf_state_expires: timeout == PFTM_UNLINKED"));
1516         KASSERT((state->timeout < PFTM_MAX),
1517             ("pf_state_expires: timeout > PFTM_MAX"));
1518         timeout = state->rule.ptr->timeout[state->timeout];
1519         if (!timeout)
1520                 timeout = V_pf_default_rule.timeout[state->timeout];
1521         start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
1522         if (start) {
1523                 /* Rule-local adaptive scaling: count this rule's states. */
1524                 end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
1525                 states = counter_u64_fetch(state->rule.ptr->states_cur);
1526         } else {
1527                 /* Global adaptive scaling against the total state count. */
1528                 start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
1529                 end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
1530                 states = V_pf_status.states;
1531         }
1532         if (end && states > start && start < end) {
1533                 if (states < end)
1534                         return (state->expire + timeout * (end - states) /
1535                             (end - start));
1536                 else
1537                         return (time_uptime);
1538         }
1539         return (state->expire + timeout);
1540 }
1539
/*
 * Walk all source node hash rows, unlink nodes with no states whose
 * expiry has passed onto a local free list, and free them after all
 * row locks are dropped.  Live nodes mark their rule referenced so
 * pf_purge_unlinked_rules() keeps it.
 */
1540 void
1541 pf_purge_expired_src_nodes()
1542 {
1543         struct pf_src_node_list  freelist;
1544         struct pf_srchash       *sh;
1545         struct pf_src_node      *cur, *next;
1546         int i;
1547
1548         LIST_INIT(&freelist);
1549         for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
1550             PF_HASHROW_LOCK(sh);
1551             LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
1552                 if (cur->states == 0 && cur->expire <= time_uptime) {
1553                         pf_unlink_src_node_locked(cur);
1554                         LIST_INSERT_HEAD(&freelist, cur, entry);
1555                 } else if (cur->rule.ptr != NULL)
1556                         cur->rule.ptr->rule_flag |= PFRULE_REFS;
1557             PF_HASHROW_UNLOCK(sh);
1558         }
1559
1560         /* Free outside the row locks. */
1561         pf_free_src_nodes(&freelist);
1562
1563         V_pf_status.src_nodes = uma_zone_get_cur(V_pf_sources_z);
1564 }
1564
/*
 * Drop a state's references on its source node(s).  When a node's
 * state count reaches zero it is given an expiry (rule timeout, or
 * the default rule's) so pf_purge_expired_src_nodes() can reap it.
 */
1565 static void
1566 pf_src_tree_remove_state(struct pf_state *s)
1567 {
1568         u_int32_t timeout;
1569
1570         if (s->src_node != NULL) {
1571                 if (s->src.tcp_est)     /* also drop established-conn count */
1572                         --s->src_node->conn;
1573                 if (--s->src_node->states == 0) {
1574                         timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
1575                         if (!timeout)
1576                                 timeout =
1577                                     V_pf_default_rule.timeout[PFTM_SRC_NODE];
1578                         s->src_node->expire = time_uptime + timeout;
1579                 }
1580         }
1581         /* NAT node may be distinct from the source node. */
1582         if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
1583                 if (--s->nat_src_node->states == 0) {
1584                         timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
1585                         if (!timeout)
1586                                 timeout =
1587                                     V_pf_default_rule.timeout[PFTM_SRC_NODE];
1588                         s->nat_src_node->expire = time_uptime + timeout;
1589                 }
1590         }
1591         s->src_node = s->nat_src_node = NULL;
1592 }
1592
1593 /*
1594  * Unlink and potentilly free a state. Function may be
1595  * called with ID hash row locked, but always returns
1596  * unlocked, since it needs to go through key hash locking.
1597  */
int
pf_unlink_state(struct pf_state *s, u_int flags)
{
        struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];

        /* Caller may already hold the ID hash row lock (PF_ENTER_LOCKED). */
        if ((flags & PF_ENTER_LOCKED) == 0)
                PF_HASHROW_LOCK(ih);
        else
                PF_HASHROW_ASSERT(ih);

        if (s->timeout == PFTM_UNLINKED) {
                /*
                 * State is being processed
                 * by pf_unlink_state() in
                 * another thread.
                 */
                PF_HASHROW_UNLOCK(ih);
                return (0);     /* XXXGL: undefined actually */
        }

        /*
         * A deferred TCP proxy handshake is torn down by sending the
         * peer a RST built from the wire-side state key.
         */
        if (s->src.state == PF_TCPS_PROXY_DST) {
                /* XXX wire key the right one? */
                pf_send_tcp(NULL, s->rule.ptr, s->key[PF_SK_WIRE]->af,
                    &s->key[PF_SK_WIRE]->addr[1],
                    &s->key[PF_SK_WIRE]->addr[0],
                    s->key[PF_SK_WIRE]->port[1],
                    s->key[PF_SK_WIRE]->port[0],
                    s->src.seqhi, s->src.seqlo + 1,
                    TH_RST|TH_ACK, 0, 0, 0, 1, s->tag, NULL);
        }

        /* Remove from the ID hash row and drop source-node references. */
        LIST_REMOVE(s, entry);
        pf_src_tree_remove_state(s);

        /* Let pfsync(4), if loaded, propagate the deletion. */
        if (pfsync_delete_state_ptr != NULL)
                pfsync_delete_state_ptr(s);

        STATE_DEC_COUNTERS(s);

        /* Marks the state unlinked for concurrent callers (see above). */
        s->timeout = PFTM_UNLINKED;

        /* Always return unlocked: pf_detach_state() takes key hash locks. */
        PF_HASHROW_UNLOCK(ih);

        pf_detach_state(s);
        refcount_release(&s->refs);

        return (pf_release_state(s));
}
1646
/*
 * Return a state to the state zone.  The state must already be fully
 * unlinked (timeout == PFTM_UNLINKED) and unreferenced (refs == 0).
 */
void
pf_free_state(struct pf_state *cur)
{

        KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
        KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
            cur->timeout));

        /* Release any TCP normalization scrub memory held by this state. */
        pf_normalize_tcp_cleanup(cur);
        uma_zfree(V_pf_state_z, cur);
        counter_u64_add(V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1);
}
1659
1660 /*
1661  * Called only from pf_purge_thread(), thus serialized.
1662  */
/*
 * Walk up to 'maxcheck' ID hash rows starting at row 'i': unlink states
 * whose expiry time has passed, and raise reference flags on the rules
 * and interfaces of surviving states so the mark-and-sweep collectors
 * (pf_purge_unlinked_rules() et al.) do not reclaim them.  Returns the
 * row index to resume from on the next call, or 0 after wrapping.
 */
static u_int
pf_purge_expired_states(u_int i, int maxcheck)
{
        struct pf_idhash *ih;
        struct pf_state *s;

        V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

        /*
         * Go through hash and unlink states that expire now.
         */
        while (maxcheck > 0) {

                ih = &V_pf_idhash[i];
relock:
                PF_HASHROW_LOCK(ih);
                LIST_FOREACH(s, &ih->states, entry) {
                        if (pf_state_expires(s) <= time_uptime) {
                                V_pf_status.states -=
                                    pf_unlink_state(s, PF_ENTER_LOCKED);
                                /*
                                 * pf_unlink_state() always returns with
                                 * the row lock dropped, so restart the
                                 * walk of this row from scratch.
                                 */
                                goto relock;
                        }
                        /* Mark everything the state references as live. */
                        s->rule.ptr->rule_flag |= PFRULE_REFS;
                        if (s->nat_rule.ptr != NULL)
                                s->nat_rule.ptr->rule_flag |= PFRULE_REFS;
                        if (s->anchor.ptr != NULL)
                                s->anchor.ptr->rule_flag |= PFRULE_REFS;
                        s->kif->pfik_flags |= PFI_IFLAG_REFS;
                        if (s->rt_kif)
                                s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
                }
                PF_HASHROW_UNLOCK(ih);

                /* Return when we hit end of hash. */
                if (++i > pf_hashmask) {
                        V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
                        return (0);
                }

                maxcheck--;
        }

        V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

        return (i);
}
1709
1710 static void
1711 pf_purge_unlinked_rules()
1712 {
1713         struct pf_rulequeue tmpq;
1714         struct pf_rule *r, *r1;
1715
1716         /*
1717          * If we have overloading task pending, then we'd
1718          * better skip purging this time. There is a tiny
1719          * probability that overloading task references
1720          * an already unlinked rule.
1721          */
1722         PF_OVERLOADQ_LOCK();
1723         if (!SLIST_EMPTY(&V_pf_overloadqueue)) {
1724                 PF_OVERLOADQ_UNLOCK();
1725                 return;
1726         }
1727         PF_OVERLOADQ_UNLOCK();
1728
1729         /*
1730          * Do naive mark-and-sweep garbage collecting of old rules.
1731          * Reference flag is raised by pf_purge_expired_states()
1732          * and pf_purge_expired_src_nodes().
1733          *
1734          * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
1735          * use a temporary queue.
1736          */
1737         TAILQ_INIT(&tmpq);
1738         PF_UNLNKDRULES_LOCK();
1739         TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
1740                 if (!(r->rule_flag & PFRULE_REFS)) {
1741                         TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
1742                         TAILQ_INSERT_TAIL(&tmpq, r, entries);
1743                 } else
1744                         r->rule_flag &= ~PFRULE_REFS;
1745         }
1746         PF_UNLNKDRULES_UNLOCK();
1747
1748         if (!TAILQ_EMPTY(&tmpq)) {
1749                 PF_RULES_WLOCK();
1750                 TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
1751                         TAILQ_REMOVE(&tmpq, r, entries);
1752                         pf_free_rule(r);
1753                 }
1754                 PF_RULES_WUNLOCK();
1755         }
1756 }
1757
/*
 * Print an address (and optional network-order port 'p', if non-zero)
 * to the console in human-readable form.  IPv4 uses dotted quad with a
 * ":port" suffix; IPv6 compresses the longest run of zero groups with
 * "::" and appends "[port]".
 */
void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
        switch (af) {
#ifdef INET
        case AF_INET: {
                u_int32_t a = ntohl(addr->addr32[0]);
                printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
                    (a>>8)&255, a&255);
                if (p) {
                        p = ntohs(p);
                        printf(":%u", p);
                }
                break;
        }
#endif /* INET */
#ifdef INET6
        case AF_INET6: {
                u_int16_t b;
                u_int8_t i, curstart, curend, maxstart, maxend;
                /* 255 is the "no run seen" sentinel for the indices below. */
                curstart = curend = maxstart = maxend = 255;
                /* Find the longest run of all-zero 16-bit groups. */
                for (i = 0; i < 8; i++) {
                        if (!addr->addr16[i]) {
                                if (curstart == 255)
                                        curstart = i;
                                curend = i;
                        } else {
                                if ((curend - curstart) >
                                    (maxend - maxstart)) {
                                        maxstart = curstart;
                                        maxend = curend;
                                }
                                curstart = curend = 255;
                        }
                }
                /* A zero run ending at group 7 is only recorded here. */
                if ((curend - curstart) >
                    (maxend - maxstart)) {
                        maxstart = curstart;
                        maxend = curend;
                }
                /* Print groups, collapsing [maxstart, maxend] to "::". */
                for (i = 0; i < 8; i++) {
                        if (i >= maxstart && i <= maxend) {
                                if (i == 0)
                                        printf(":");
                                if (i == maxend)
                                        printf(":");
                        } else {
                                b = ntohs(addr->addr16[i]);
                                printf("%x", b);
                                if (i < 7)
                                        printf(":");
                        }
                }
                if (p) {
                        p = ntohs(p);
                        printf("[%u]", p);
                }
                break;
        }
#endif /* INET6 */
        }
}
1820
/*
 * Print a state to the console; convenience wrapper around
 * pf_print_state_parts() with no explicit key overrides.
 */
void
pf_print_state(struct pf_state *s)
{
        pf_print_state_parts(s, NULL, NULL);
}
1826
1827 static void
1828 pf_print_state_parts(struct pf_state *s,
1829     struct pf_state_key *skwp, struct pf_state_key *sksp)
1830 {
1831         struct pf_state_key *skw, *sks;
1832         u_int8_t proto, dir;
1833
1834         /* Do our best to fill these, but they're skipped if NULL */
1835         skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
1836         sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
1837         proto = skw ? skw->proto : (sks ? sks->proto : 0);
1838         dir = s ? s->direction : 0;
1839
1840         switch (proto) {
1841         case IPPROTO_IPV4:
1842                 printf("IPv4");
1843                 break;
1844         case IPPROTO_IPV6:
1845                 printf("IPv6");
1846                 break;
1847         case IPPROTO_TCP:
1848                 printf("TCP");
1849                 break;
1850         case IPPROTO_UDP:
1851                 printf("UDP");
1852                 break;
1853         case IPPROTO_ICMP:
1854                 printf("ICMP");
1855                 break;
1856         case IPPROTO_ICMPV6:
1857                 printf("ICMPv6");
1858                 break;
1859         default:
1860                 printf("%u", skw->proto);
1861                 break;
1862         }
1863         switch (dir) {
1864         case PF_IN:
1865                 printf(" in");
1866                 break;
1867         case PF_OUT:
1868                 printf(" out");
1869                 break;
1870         }
1871         if (skw) {
1872                 printf(" wire: ");
1873                 pf_print_host(&skw->addr[0], skw->port[0], skw->af);
1874                 printf(" ");
1875                 pf_print_host(&skw->addr[1], skw->port[1], skw->af);
1876         }
1877         if (sks) {
1878                 printf(" stack: ");
1879                 if (sks != skw) {
1880                         pf_print_host(&sks->addr[0], sks->port[0], sks->af);
1881                         printf(" ");
1882                         pf_print_host(&sks->addr[1], sks->port[1], sks->af);
1883                 } else
1884                         printf("-");
1885         }
1886         if (s) {
1887                 if (proto == IPPROTO_TCP) {
1888                         printf(" [lo=%u high=%u win=%u modulator=%u",
1889                             s->src.seqlo, s->src.seqhi,
1890                             s->src.max_win, s->src.seqdiff);
1891                         if (s->src.wscale && s->dst.wscale)
1892                                 printf(" wscale=%u",
1893                                     s->src.wscale & PF_WSCALE_MASK);
1894                         printf("]");
1895                         printf(" [lo=%u high=%u win=%u modulator=%u",
1896                             s->dst.seqlo, s->dst.seqhi,
1897                             s->dst.max_win, s->dst.seqdiff);
1898                         if (s->src.wscale && s->dst.wscale)
1899                                 printf(" wscale=%u",
1900                                 s->dst.wscale & PF_WSCALE_MASK);
1901                         printf("]");
1902                 }
1903                 printf(" %u:%u", s->src.state, s->dst.state);
1904         }
1905 }
1906
1907 void
1908 pf_print_flags(u_int8_t f)
1909 {
1910         if (f)
1911                 printf(" ");
1912         if (f & TH_FIN)
1913                 printf("F");
1914         if (f & TH_SYN)
1915                 printf("S");
1916         if (f & TH_RST)
1917                 printf("R");
1918         if (f & TH_PUSH)
1919                 printf("P");
1920         if (f & TH_ACK)
1921                 printf("A");
1922         if (f & TH_URG)
1923                 printf("U");
1924         if (f & TH_ECE)
1925                 printf("E");
1926         if (f & TH_CWR)
1927                 printf("W");
1928 }
1929
/*
 * Advance head[i] up to 'cur', pointing each passed rule's skip[i] at
 * 'cur'.  Relies on 'head' and 'cur' from the enclosing scope.
 */
#define PF_SET_SKIP_STEPS(i)                                    \
        do {                                                    \
                while (head[i] != cur) {                        \
                        head[i]->skip[i].ptr = cur;             \
                        head[i] = TAILQ_NEXT(head[i], entries); \
                }                                               \
        } while (0)

/*
 * Compute the skip steps for a ruleset: for each of the PF_SKIP_COUNT
 * criteria, every rule's skip pointer is set to the first later rule
 * that differs in that criterion, letting evaluation jump over runs of
 * rules that would fail the same test.
 */
void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
        struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
        int i;

        cur = TAILQ_FIRST(rules);
        prev = cur;
        for (i = 0; i < PF_SKIP_COUNT; ++i)
                head[i] = cur;
        while (cur != NULL) {

                if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
                        PF_SET_SKIP_STEPS(PF_SKIP_IFP);
                if (cur->direction != prev->direction)
                        PF_SET_SKIP_STEPS(PF_SKIP_DIR);
                if (cur->af != prev->af)
                        PF_SET_SKIP_STEPS(PF_SKIP_AF);
                if (cur->proto != prev->proto)
                        PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
                if (cur->src.neg != prev->src.neg ||
                    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
                        PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
                if (cur->src.port[0] != prev->src.port[0] ||
                    cur->src.port[1] != prev->src.port[1] ||
                    cur->src.port_op != prev->src.port_op)
                        PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
                if (cur->dst.neg != prev->dst.neg ||
                    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
                        PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
                if (cur->dst.port[0] != prev->dst.port[0] ||
                    cur->dst.port[1] != prev->dst.port[1] ||
                    cur->dst.port_op != prev->dst.port_op)
                        PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

                prev = cur;
                cur = TAILQ_NEXT(cur, entries);
        }
        /* Terminate all chains at the end of the list (cur == NULL). */
        for (i = 0; i < PF_SKIP_COUNT; ++i)
                PF_SET_SKIP_STEPS(i);
}
1979
1980 static int
1981 pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
1982 {
1983         if (aw1->type != aw2->type)
1984                 return (1);
1985         switch (aw1->type) {
1986         case PF_ADDR_ADDRMASK:
1987         case PF_ADDR_RANGE:
1988                 if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
1989                         return (1);
1990                 if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
1991                         return (1);
1992                 return (0);
1993         case PF_ADDR_DYNIFTL:
1994                 return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
1995         case PF_ADDR_NOROUTE:
1996         case PF_ADDR_URPFFAILED:
1997                 return (0);
1998         case PF_ADDR_TABLE:
1999                 return (aw1->p.tbl != aw2->p.tbl);
2000         default:
2001                 printf("invalid address type: %d\n", aw1->type);
2002                 return (1);
2003         }
2004 }
2005
/*
 * Incrementally patch an Internet checksum after replacing the 16-bit
 * word 'old' with 'new' (RFC 1624 style).  For UDP ('udp' non-zero) a
 * zero checksum means "none" and is passed through, and a computed
 * zero is mapped to 0xFFFF as the wire representation of zero.
 */
u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
        u_int32_t       sum;

        if (udp && cksum == 0)
                return (0x0000);
        sum = (u_int32_t)cksum + old - new;
        sum = (sum >> 16) + (sum & 0xffff);     /* fold the carry */
        sum &= 0xffff;
        if (udp && sum == 0)
                return (0xFFFF);
        return (sum);
}
2020
2021 static void
2022 pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc,
2023     struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af)
2024 {
2025         struct pf_addr  ao;
2026         u_int16_t       po = *p;
2027
2028         PF_ACPY(&ao, a, af);
2029         PF_ACPY(a, an, af);
2030
2031         *p = pn;
2032
2033         switch (af) {
2034 #ifdef INET
2035         case AF_INET:
2036                 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2037                     ao.addr16[0], an->addr16[0], 0),
2038                     ao.addr16[1], an->addr16[1], 0);
2039                 *p = pn;
2040                 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
2041                     ao.addr16[0], an->addr16[0], u),
2042                     ao.addr16[1], an->addr16[1], u),
2043                     po, pn, u);
2044                 break;
2045 #endif /* INET */
2046 #ifdef INET6
2047         case AF_INET6:
2048                 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2049                     pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2050                     pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
2051                     ao.addr16[0], an->addr16[0], u),
2052                     ao.addr16[1], an->addr16[1], u),
2053                     ao.addr16[2], an->addr16[2], u),
2054                     ao.addr16[3], an->addr16[3], u),
2055                     ao.addr16[4], an->addr16[4], u),
2056                     ao.addr16[5], an->addr16[5], u),
2057                     ao.addr16[6], an->addr16[6], u),
2058                     ao.addr16[7], an->addr16[7], u),
2059                     po, pn, u);
2060                 break;
2061 #endif /* INET6 */
2062         }
2063 }
2064
2065
/*
 * Replace a 32-bit field with 'an' and patch the checksum at 'c' for
 * both halves.  Takes a void * so there are no alignment restrictions
 * on the target; the copies go through memcpy.
 */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
        u_int32_t       ao;

        memcpy(&ao, a, sizeof(ao));
        memcpy(a, &an, sizeof(an));
        *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao >> 16, an >> 16, u),
            ao & 0xffff, an & 0xffff, u);
}
2077
#ifdef INET6
/*
 * Replace IPv6 address *a with *an and patch the checksum at 'c' for
 * all eight 16-bit address words.  'u' is the UDP flag forwarded to
 * pf_cksum_fixup().
 */
static void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
        struct pf_addr  ao;

        PF_ACPY(&ao, a, AF_INET6);
        PF_ACPY(a, an, AF_INET6);

        *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
            pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
            pf_cksum_fixup(pf_cksum_fixup(*c,
            ao.addr16[0], an->addr16[0], u),
            ao.addr16[1], an->addr16[1], u),
            ao.addr16[2], an->addr16[2], u),
            ao.addr16[3], an->addr16[3], u),
            ao.addr16[4], an->addr16[4], u),
            ao.addr16[5], an->addr16[5], u),
            ao.addr16[6], an->addr16[6], u),
            ao.addr16[7], an->addr16[7], u);
}
#endif /* INET6 */
2100
/*
 * Rewrite the addresses/port embedded in an ICMP error's quoted packet
 * (inner address 'ia', optional inner port 'ip', optional outer address
 * 'oa') to 'na'/'np', patching all affected checksums: inner protocol
 * (*pc), inner IP header (*h2c), ICMP (*ic) and outer IP header (*hc).
 * 'u' is the UDP flag forwarded to pf_cksum_fixup().
 */
static void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
        struct pf_addr  oia, ooa;

        PF_ACPY(&oia, ia, af);
        if (oa)
                PF_ACPY(&ooa, oa, af);

        /* Change inner protocol port, fix inner protocol checksum. */
        if (ip != NULL) {
                u_int16_t       oip = *ip;
                u_int32_t       opc;

                if (pc != NULL)
                        opc = *pc;
                *ip = np;
                if (pc != NULL)
                        *pc = pf_cksum_fixup(*pc, oip, *ip, u);
                /* The ICMP checksum covers the quoted port... */
                *ic = pf_cksum_fixup(*ic, oip, *ip, 0);
                /* ...and, when present, the quoted protocol checksum. */
                if (pc != NULL)
                        *ic = pf_cksum_fixup(*ic, opc, *pc, 0);
        }
        /* Change inner ip address, fix inner ip and icmp checksums. */
        PF_ACPY(ia, na, af);
        switch (af) {
#ifdef INET
        case AF_INET: {
                u_int32_t        oh2c = *h2c;

                *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
                    oia.addr16[0], ia->addr16[0], 0),
                    oia.addr16[1], ia->addr16[1], 0);
                *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
                    oia.addr16[0], ia->addr16[0], 0),
                    oia.addr16[1], ia->addr16[1], 0);
                /* The quoted IP header checksum changed too. */
                *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
                break;
        }
#endif /* INET */
#ifdef INET6
        case AF_INET6:
                *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                    pf_cksum_fixup(pf_cksum_fixup(*ic,
                    oia.addr16[0], ia->addr16[0], u),
                    oia.addr16[1], ia->addr16[1], u),
                    oia.addr16[2], ia->addr16[2], u),
                    oia.addr16[3], ia->addr16[3], u),
                    oia.addr16[4], ia->addr16[4], u),
                    oia.addr16[5], ia->addr16[5], u),
                    oia.addr16[6], ia->addr16[6], u),
                    oia.addr16[7], ia->addr16[7], u);
                break;
#endif /* INET6 */
        }
        /* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
        if (oa) {
                PF_ACPY(oa, na, af);
                switch (af) {
#ifdef INET
                case AF_INET:
                        *hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
                            ooa.addr16[0], oa->addr16[0], 0),
                            ooa.addr16[1], oa->addr16[1], 0);
                        break;
#endif /* INET */
#ifdef INET6
                case AF_INET6:
                        *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                            pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                            pf_cksum_fixup(pf_cksum_fixup(*ic,
                            ooa.addr16[0], oa->addr16[0], u),
                            ooa.addr16[1], oa->addr16[1], u),
                            ooa.addr16[2], oa->addr16[2], u),
                            ooa.addr16[3], oa->addr16[3], u),
                            ooa.addr16[4], oa->addr16[4], u),
                            ooa.addr16[5], oa->addr16[5], u),
                            ooa.addr16[6], oa->addr16[6], u),
                            ooa.addr16[7], oa->addr16[7], u);
                        break;
#endif /* INET6 */
                }
        }
}
2188
2189
2190 /*
2191  * Need to modulate the sequence numbers in the TCP SACK option
2192  * (credits to Krzysztof Pfaff for report and patch)
2193  */
/*
 * Walk the TCP options of the segment at 'off' and rewrite the sequence
 * numbers inside any SACK option by -dst->seqdiff, patching th_sum via
 * pf_change_a().  Returns 1 if the (modified) options were copied back
 * into the mbuf, 0 otherwise.
 */
static int
pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
{
        int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
        u_int8_t opts[TCP_MAXOLEN], *opt = opts;
        int copyback = 0, i, olen;
        struct sackblk sack;

#define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2)
        if (hlen < TCPOLEN_SACKLEN ||
            !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
                return 0;

        while (hlen >= TCPOLEN_SACKLEN) {
                olen = opt[1];
                switch (*opt) {
                /* EOL is handled like NOP: advance one byte at a time. */
                case TCPOPT_EOL:        /* FALLTHROUGH */
                case TCPOPT_NOP:
                        opt++;
                        hlen--;
                        break;
                case TCPOPT_SACK:
                        /* Clamp a length that claims more than remains. */
                        if (olen > hlen)
                                olen = hlen;
                        if (olen >= TCPOLEN_SACKLEN) {
                                /* Shift every SACK block by -seqdiff. */
                                for (i = 2; i + TCPOLEN_SACK <= olen;
                                    i += TCPOLEN_SACK) {
                                        memcpy(&sack, &opt[i], sizeof(sack));
                                        pf_change_a(&sack.start, &th->th_sum,
                                            htonl(ntohl(sack.start) -
                                            dst->seqdiff), 0);
                                        pf_change_a(&sack.end, &th->th_sum,
                                            htonl(ntohl(sack.end) -
                                            dst->seqdiff), 0);
                                        memcpy(&opt[i], &sack, sizeof(sack));
                                }
                                copyback = 1;
                        }
                        /* FALLTHROUGH */
                default:
                        /* Guard against a malformed length < 2. */
                        if (olen < 2)
                                olen = 2;
                        hlen -= olen;
                        opt += olen;
                }
        }

        if (copyback)
                m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
        return (copyback);
}
2246
/*
 * Build a bare TCP segment (optionally carrying an MSS option) in a
 * freshly allocated mbuf and enqueue it for transmission via pf_send().
 * Used e.g. for RSTs and synproxy handshakes.  Fails silently on
 * allocation failure.  'tag' sets M_SKIP_FIREWALL; 'rtag' becomes the
 * packet's pf mtag.  'replyto' and 'ifp' are currently unused here.
 */
static void
pf_send_tcp(struct mbuf *replyto, const struct pf_rule *r, sa_family_t af,
    const struct pf_addr *saddr, const struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
    u_int16_t rtag, struct ifnet *ifp)
{
        struct pf_send_entry *pfse;
        struct mbuf     *m;
        int              len, tlen;
#ifdef INET
        struct ip       *h = NULL;
#endif /* INET */
#ifdef INET6
        struct ip6_hdr  *h6 = NULL;
#endif /* INET6 */
        struct tcphdr   *th;
        char            *opt;
        struct pf_mtag  *pf_mtag;

        len = 0;
        th = NULL;

        /* maximum segment size tcp option */
        tlen = sizeof(struct tcphdr);
        if (mss)
                tlen += 4;

        switch (af) {
#ifdef INET
        case AF_INET:
                len = sizeof(struct ip) + tlen;
                break;
#endif /* INET */
#ifdef INET6
        case AF_INET6:
                len = sizeof(struct ip6_hdr) + tlen;
                break;
#endif /* INET6 */
        default:
                panic("%s: unsupported af %d", __func__, af);
        }

        /* Allocate outgoing queue entry, mbuf and mbuf tag. */
        pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
        if (pfse == NULL)
                return;
        m = m_gethdr(M_NOWAIT, MT_DATA);
        if (m == NULL) {
                free(pfse, M_PFTEMP);
                return;
        }
#ifdef MAC
        mac_netinet_firewall_send(m);
#endif
        if ((pf_mtag = pf_get_mtag(m)) == NULL) {
                free(pfse, M_PFTEMP);
                m_freem(m);
                return;
        }
        if (tag)
                m->m_flags |= M_SKIP_FIREWALL;
        pf_mtag->tag = rtag;

        if (r != NULL && r->rtableid >= 0)
                M_SETFIB(m, r->rtableid);

#ifdef ALTQ
        if (r != NULL && r->qid) {
                pf_mtag->qid = r->qid;

                /* add hints for ecn */
                pf_mtag->hdr = mtod(m, struct ip *);
        }
#endif /* ALTQ */
        /* Leave room for link headers, then zero the whole packet. */
        m->m_data += max_linkhdr;
        m->m_pkthdr.len = m->m_len = len;
        m->m_pkthdr.rcvif = NULL;
        bzero(m->m_data, len);
        switch (af) {
#ifdef INET
        case AF_INET:
                h = mtod(m, struct ip *);

                /* IP header fields included in the TCP checksum */
                h->ip_p = IPPROTO_TCP;
                h->ip_len = htons(tlen);        /* pseudo-header length */
                h->ip_src.s_addr = saddr->v4.s_addr;
                h->ip_dst.s_addr = daddr->v4.s_addr;

                th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
                break;
#endif /* INET */
#ifdef INET6
        case AF_INET6:
                h6 = mtod(m, struct ip6_hdr *);

                /* IP header fields included in the TCP checksum */
                h6->ip6_nxt = IPPROTO_TCP;
                h6->ip6_plen = htons(tlen);
                memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
                memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));

                th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
                break;
#endif /* INET6 */
        }

        /* TCP header */
        th->th_sport = sport;
        th->th_dport = dport;
        th->th_seq = htonl(seq);
        th->th_ack = htonl(ack);
        th->th_off = tlen >> 2;
        th->th_flags = flags;
        th->th_win = htons(win);

        if (mss) {
                opt = (char *)(th + 1);
                opt[0] = TCPOPT_MAXSEG;
                opt[1] = 4;
                /* Note: HTONS() byte-swaps the local 'mss' in place. */
                HTONS(mss);
                bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
        }

        switch (af) {
#ifdef INET
        case AF_INET:
                /* TCP checksum */
                th->th_sum = in_cksum(m, len);

                /* Finish the IP header (after checksumming over it). */
                h->ip_v = 4;
                h->ip_hl = sizeof(*h) >> 2;
                h->ip_tos = IPTOS_LOWDELAY;
                h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0);
                h->ip_len = htons(len);         /* real total length */
                h->ip_ttl = ttl ? ttl : V_ip_defttl;
                h->ip_sum = 0;

                pfse->pfse_type = PFSE_IP;
                break;
#endif /* INET */
#ifdef INET6
        case AF_INET6:
                /* TCP checksum */
                th->th_sum = in6_cksum(m, IPPROTO_TCP,
                    sizeof(struct ip6_hdr), tlen);

                h6->ip6_vfc |= IPV6_VERSION;
                h6->ip6_hlim = IPV6_DEFHLIM;

                pfse->pfse_type = PFSE_IP6;
                break;
#endif /* INET6 */
        }
        pfse->pfse_m = m;
        pf_send(pfse);
}
2406
2407 static void
2408 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
2409     struct pf_rule *r)
2410 {
2411         struct pf_send_entry *pfse;
2412         struct mbuf *m0;
2413         struct pf_mtag *pf_mtag;
2414
2415         /* Allocate outgoing queue entry, mbuf and mbuf tag. */
2416         pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
2417         if (pfse == NULL)
2418                 return;
2419
2420         if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) {
2421                 free(pfse, M_PFTEMP);
2422                 return;
2423         }
2424
2425         if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
2426                 free(pfse, M_PFTEMP);
2427                 return;
2428         }
2429         /* XXX: revisit */
2430         m0->m_flags |= M_SKIP_FIREWALL;
2431
2432         if (r->rtableid >= 0)
2433                 M_SETFIB(m0, r->rtableid);
2434
2435 #ifdef ALTQ
2436         if (r->qid) {
2437                 pf_mtag->qid = r->qid;
2438                 /* add hints for ecn */
2439                 pf_mtag->hdr = mtod(m0, struct ip *);
2440         }
2441 #endif /* ALTQ */
2442
2443         switch (af) {
2444 #ifdef INET
2445         case AF_INET:
2446                 pfse->pfse_type = PFSE_ICMP;
2447                 break;
2448 #endif /* INET */
2449 #ifdef INET6
2450         case AF_INET6:
2451                 pfse->pfse_type = PFSE_ICMP6;
2452                 break;
2453 #endif /* INET6 */
2454         }
2455         pfse->pfse_m = m0;
2456         pfse->pfse_icmp_type = type;
2457         pfse->pfse_icmp_code = code;
2458         pf_send(pfse);
2459 }
2460
2461 /*
2462  * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
2463  * If n is 0, they match if they are equal. If n is != 0, they match if they
2464  * are different.
2465  */
2466 int
2467 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
2468     struct pf_addr *b, sa_family_t af)
2469 {
2470         int     match = 0;
2471
2472         switch (af) {
2473 #ifdef INET
2474         case AF_INET:
2475                 if ((a->addr32[0] & m->addr32[0]) ==
2476                     (b->addr32[0] & m->addr32[0]))
2477                         match++;
2478                 break;
2479 #endif /* INET */
2480 #ifdef INET6
2481         case AF_INET6:
2482                 if (((a->addr32[0] & m->addr32[0]) ==
2483                      (b->addr32[0] & m->addr32[0])) &&
2484                     ((a->addr32[1] & m->addr32[1]) ==
2485                      (b->addr32[1] & m->addr32[1])) &&
2486                     ((a->addr32[2] & m->addr32[2]) ==
2487                      (b->addr32[2] & m->addr32[2])) &&
2488                     ((a->addr32[3] & m->addr32[3]) ==
2489                      (b->addr32[3] & m->addr32[3])))
2490                         match++;
2491                 break;
2492 #endif /* INET6 */
2493         }
2494         if (match) {
2495                 if (n)
2496                         return (0);
2497                 else
2498                         return (1);
2499         } else {
2500                 if (n)
2501                         return (1);
2502                 else
2503                         return (0);
2504         }
2505 }
2506
2507 /*
2508  * Return 1 if b <= a <= e, otherwise return 0.
2509  */
2510 int
2511 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
2512     struct pf_addr *a, sa_family_t af)
2513 {
2514         switch (af) {
2515 #ifdef INET
2516         case AF_INET:
2517                 if ((a->addr32[0] < b->addr32[0]) ||
2518                     (a->addr32[0] > e->addr32[0]))
2519                         return (0);
2520                 break;
2521 #endif /* INET */
2522 #ifdef INET6
2523         case AF_INET6: {
2524                 int     i;
2525
2526                 /* check a >= b */
2527                 for (i = 0; i < 4; ++i)
2528                         if (a->addr32[i] > b->addr32[i])
2529                                 break;
2530                         else if (a->addr32[i] < b->addr32[i])
2531                                 return (0);
2532                 /* check a <= e */
2533                 for (i = 0; i < 4; ++i)
2534                         if (a->addr32[i] < e->addr32[i])
2535                                 break;
2536                         else if (a->addr32[i] > e->addr32[i])
2537                                 return (0);
2538                 break;
2539         }
2540 #endif /* INET6 */
2541         }
2542         return (1);
2543 }
2544
2545 static int
2546 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2547 {
2548         switch (op) {
2549         case PF_OP_IRG:
2550                 return ((p > a1) && (p < a2));
2551         case PF_OP_XRG:
2552                 return ((p < a1) || (p > a2));
2553         case PF_OP_RRG:
2554                 return ((p >= a1) && (p <= a2));
2555         case PF_OP_EQ:
2556                 return (p == a1);
2557         case PF_OP_NE:
2558                 return (p != a1);
2559         case PF_OP_LT:
2560                 return (p < a1);
2561         case PF_OP_LE:
2562                 return (p <= a1);
2563         case PF_OP_GT:
2564                 return (p > a1);
2565         case PF_OP_GE:
2566                 return (p >= a1);
2567         }
2568         return (0); /* never reached */
2569 }
2570
2571 int
2572 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
2573 {
2574         NTOHS(a1);
2575         NTOHS(a2);
2576         NTOHS(p);
2577         return (pf_match(op, a1, a2, p));
2578 }
2579
2580 static int
2581 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2582 {
2583         if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2584                 return (0);
2585         return (pf_match(op, a1, a2, u));
2586 }
2587
2588 static int
2589 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2590 {
2591         if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2592                 return (0);
2593         return (pf_match(op, a1, a2, g));
2594 }
2595
2596 int
2597 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag, int mtag)
2598 {
2599         if (*tag == -1)
2600                 *tag = mtag;
2601
2602         return ((!r->match_tag_not && r->match_tag == *tag) ||
2603             (r->match_tag_not && r->match_tag != *tag));
2604 }
2605
2606 int
2607 pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag)
2608 {
2609
2610         KASSERT(tag > 0, ("%s: tag %d", __func__, tag));
2611
2612         if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL))
2613                 return (ENOMEM);
2614
2615         pd->pf_mtag->tag = tag;
2616
2617         return (0);
2618 }
2619
#define PF_ANCHOR_STACKSIZE     32
/*
 * One frame of the stack used while walking nested anchors in
 * pf_step_into_anchor() / pf_step_out_of_anchor().
 */
struct pf_anchor_stackframe {
        struct pf_ruleset       *rs;    /* ruleset active before descending */
        struct pf_rule          *r;     /* XXX: + match bit */
        struct pf_anchor        *child; /* current child of a wildcard anchor */
};

/*
 * XXX: We rely on malloc(9) returning pointer aligned addresses.
 */
#define PF_ANCHORSTACK_MATCH    0x00000001
#define PF_ANCHORSTACK_MASK     (PF_ANCHORSTACK_MATCH)

/* The "matched" flag is stored in the low bit of the frame's rule pointer. */
#define PF_ANCHOR_MATCH(f)      ((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
#define PF_ANCHOR_RULE(f)       (struct pf_rule *)                      \
                                ((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
#define PF_ANCHOR_SET_MATCH(f)  do { (f)->r = (void *)                  \
                                ((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH);  \
} while (0)
2639
/*
 * Descend from anchor rule *r into the ruleset(s) it refers to,
 * pushing a frame onto "stack" and incrementing *depth.  For a
 * wildcard anchor ("foo/ *") traversal starts at the anchor's first
 * child; otherwise the anchor's own ruleset is entered directly.  On
 * return *rs/*r point at the first rule of the new ruleset (*r is
 * NULL for an empty wildcard anchor).  On stack overflow the anchor
 * rule is skipped instead.  *a records the outermost anchor rule;
 * *match is reset for the sub-ruleset.
 */
void
pf_step_into_anchor(struct pf_anchor_stackframe *stack, int *depth,
    struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
    int *match)
{
        struct pf_anchor_stackframe     *f;

        PF_RULES_RASSERT();

        if (match)
                *match = 0;
        if (*depth >= PF_ANCHOR_STACKSIZE) {
                /* Nested too deeply; skip this anchor rule entirely. */
                printf("%s: anchor stack overflow on %s\n",
                    __func__, (*r)->anchor->name);
                *r = TAILQ_NEXT(*r, entries);
                return;
        } else if (*depth == 0 && a != NULL)
                *a = *r;        /* remember the top-level anchor rule */
        f = stack + (*depth)++;
        f->rs = *rs;
        f->r = *r;
        if ((*r)->anchor_wildcard) {
                struct pf_anchor_node *parent = &(*r)->anchor->children;

                /* Begin with the first child anchor in tree order. */
                if ((f->child = RB_MIN(pf_anchor_node, parent)) == NULL) {
                        *r = NULL;
                        return;
                }
                *rs = &f->child->ruleset;
        } else {
                f->child = NULL;
                *rs = &(*r)->anchor->ruleset;
        }
        *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
}
2675
/*
 * Pop frames pushed by pf_step_into_anchor().  For a wildcard anchor
 * the next child is visited first; the frame is popped only after all
 * children have been evaluated.  Restores *rs and advances *r past
 * the anchor rule, and returns that rule's "quick" flag if anything
 * inside the anchor matched.  Loops until a rule to continue with is
 * found or the stack is empty.
 */
int
pf_step_out_of_anchor(struct pf_anchor_stackframe *stack, int *depth,
    struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
    int *match)
{
        struct pf_anchor_stackframe     *f;
        struct pf_rule *fr;
        int quick = 0;

        PF_RULES_RASSERT();

        do {
                if (*depth <= 0)
                        break;
                f = stack + *depth - 1;
                fr = PF_ANCHOR_RULE(f); /* strip the match bit */
                if (f->child != NULL) {
                        struct pf_anchor_node *parent;

                        /*
                         * This block traverses through
                         * a wildcard anchor.
                         */
                        parent = &fr->anchor->children;
                        if (match != NULL && *match) {
                                /*
                                 * If any of "*" matched, then
                                 * "foo/ *" matched, mark frame
                                 * appropriately.
                                 */
                                PF_ANCHOR_SET_MATCH(f);
                                *match = 0;
                        }
                        f->child = RB_NEXT(pf_anchor_node, parent, f->child);
                        if (f->child != NULL) {
                                /* Descend into the next child anchor. */
                                *rs = &f->child->ruleset;
                                *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
                                if (*r == NULL)
                                        continue;       /* empty child, pop on */
                                else
                                        break;
                        }
                }
                (*depth)--;
                if (*depth == 0 && a != NULL)
                        *a = NULL;
                *rs = f->rs;    /* restore the parent ruleset */
                if (PF_ANCHOR_MATCH(f) || (match != NULL && *match))
                        quick = fr->quick;
                *r = TAILQ_NEXT(fr, entries);
        } while (*r == NULL);

        return (quick);
}
2730
2731 #ifdef INET6
2732 void
2733 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
2734     struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
2735 {
2736         switch (af) {
2737 #ifdef INET
2738         case AF_INET:
2739                 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2740                 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2741                 break;
2742 #endif /* INET */
2743         case AF_INET6:
2744                 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2745                 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2746                 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
2747                 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
2748                 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
2749                 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
2750                 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
2751                 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
2752                 break;
2753         }
2754 }
2755
2756 void
2757 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
2758 {
2759         switch (af) {
2760 #ifdef INET
2761         case AF_INET:
2762                 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
2763                 break;
2764 #endif /* INET */
2765         case AF_INET6:
2766                 if (addr->addr32[3] == 0xffffffff) {
2767                         addr->addr32[3] = 0;
2768                         if (addr->addr32[2] == 0xffffffff) {
2769                                 addr->addr32[2] = 0;
2770                                 if (addr->addr32[1] == 0xffffffff) {
2771                                         addr->addr32[1] = 0;
2772                                         addr->addr32[0] =
2773                                             htonl(ntohl(addr->addr32[0]) + 1);
2774                                 } else
2775                                         addr->addr32[1] =
2776                                             htonl(ntohl(addr->addr32[1]) + 1);
2777                         } else
2778                                 addr->addr32[2] =
2779                                     htonl(ntohl(addr->addr32[2]) + 1);
2780                 } else
2781                         addr->addr32[3] =
2782                             htonl(ntohl(addr->addr32[3]) + 1);
2783                 break;
2784         }
2785 }
2786 #endif /* INET6 */
2787
/*
 * Find the local socket (inpcb) a TCP or UDP packet belongs to and
 * record its owner's uid/gid in pd->lookup for user/group rule
 * matching.  For outbound packets the address/port pairs are swapped
 * so the lookup key is always (foreign, local).  Returns 1 on
 * success, -1 when the protocol is unsupported or no pcb matches.
 */
int
pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m)
{
        struct pf_addr          *saddr, *daddr;
        u_int16_t                sport, dport;
        struct inpcbinfo        *pi;
        struct inpcb            *inp;

        pd->lookup.uid = UID_MAX;       /* "unknown" until proven otherwise */
        pd->lookup.gid = GID_MAX;

        switch (pd->proto) {
        case IPPROTO_TCP:
                if (pd->hdr.tcp == NULL)
                        return (-1);
                sport = pd->hdr.tcp->th_sport;
                dport = pd->hdr.tcp->th_dport;
                pi = &V_tcbinfo;
                break;
        case IPPROTO_UDP:
                if (pd->hdr.udp == NULL)
                        return (-1);
                sport = pd->hdr.udp->uh_sport;
                dport = pd->hdr.udp->uh_dport;
                pi = &V_udbinfo;
                break;
        default:
                return (-1);
        }
        if (direction == PF_IN) {
                saddr = pd->src;
                daddr = pd->dst;
        } else {
                u_int16_t       p;

                /* Outbound: swap so that daddr/dport are the local end. */
                p = sport;
                sport = dport;
                dport = p;
                saddr = pd->dst;
                daddr = pd->src;
        }
        switch (pd->af) {
#ifdef INET
        case AF_INET:
                /* Exact pcb match first, then wildcard (listening socket). */
                inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4,
                    dport, INPLOOKUP_RLOCKPCB, NULL, m);
                if (inp == NULL) {
                        inp = in_pcblookup_mbuf(pi, saddr->v4, sport,
                           daddr->v4, dport, INPLOOKUP_WILDCARD |
                           INPLOOKUP_RLOCKPCB, NULL, m);
                        if (inp == NULL)
                                return (-1);
                }
                break;
#endif /* INET */
#ifdef INET6
        case AF_INET6:
                inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6,
                    dport, INPLOOKUP_RLOCKPCB, NULL, m);
                if (inp == NULL) {
                        inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport,
                            &daddr->v6, dport, INPLOOKUP_WILDCARD |
                            INPLOOKUP_RLOCKPCB, NULL, m);
                        if (inp == NULL)
                                return (-1);
                }
                break;
#endif /* INET6 */

        default:
                return (-1);
        }
        INP_RLOCK_ASSERT(inp);
        pd->lookup.uid = inp->inp_cred->cr_uid;
        pd->lookup.gid = inp->inp_cred->cr_groups[0];
        INP_RUNLOCK(inp);

        return (1);
}
2867
/*
 * Parse the TCP options of the segment at offset "off" and return the
 * advertised window-scale shift with PF_WSCALE_FLAG or'ed in when the
 * option is present.  Returns 0 when there are no options or the
 * header cannot be pulled out of the mbuf.
 */
static u_int8_t
pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
{
        int              hlen;
        u_int8_t         hdr[60];       /* max TCP header incl. options */
        u_int8_t        *opt, optlen;
        u_int8_t         wscale = 0;

        hlen = th_off << 2;             /* hlen <= sizeof(hdr) */
        if (hlen <= sizeof(struct tcphdr))
                return (0);             /* no options present */
        if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
                return (0);
        opt = hdr + sizeof(struct tcphdr);
        hlen -= sizeof(struct tcphdr);
        while (hlen >= 3) {
                switch (*opt) {
                case TCPOPT_EOL:        /* treated like a one-byte NOP here */
                case TCPOPT_NOP:
                        ++opt;
                        --hlen;
                        break;
                case TCPOPT_WINDOW:
                        wscale = opt[2];
                        if (wscale > TCP_MAX_WINSHIFT)
                                wscale = TCP_MAX_WINSHIFT;
                        wscale |= PF_WSCALE_FLAG;
                        /* FALLTHROUGH */
                default:
                        /* Advance by the option's length byte. */
                        optlen = opt[1];
                        if (optlen < 2)
                                optlen = 2;     /* guard against bogus length */
                        hlen -= optlen;
                        opt += optlen;
                        break;
                }
        }
        return (wscale);
}
2907
/*
 * Parse the TCP options of the segment at offset "off" and return the
 * peer's advertised maximum segment size, or V_tcp_mssdflt when the
 * MSS option is absent.  Returns 0 when there are no options or the
 * header cannot be pulled out of the mbuf.
 */
static u_int16_t
pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
{
        int              hlen;
        u_int8_t         hdr[60];       /* max TCP header incl. options */
        u_int8_t        *opt, optlen;
        u_int16_t        mss = V_tcp_mssdflt;

        hlen = th_off << 2;     /* hlen <= sizeof(hdr) */
        if (hlen <= sizeof(struct tcphdr))
                return (0);     /* no options present */
        if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
                return (0);
        opt = hdr + sizeof(struct tcphdr);
        hlen -= sizeof(struct tcphdr);
        while (hlen >= TCPOLEN_MAXSEG) {
                switch (*opt) {
                case TCPOPT_EOL:        /* treated like a one-byte NOP here */
                case TCPOPT_NOP:
                        ++opt;
                        --hlen;
                        break;
                case TCPOPT_MAXSEG:
                        bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
                        NTOHS(mss);
                        /* FALLTHROUGH */
                default:
                        /* Advance by the option's length byte. */
                        optlen = opt[1];
                        if (optlen < 2)
                                optlen = 2;     /* guard against bogus length */
                        hlen -= optlen;
                        opt += optlen;
                        break;
                }
        }
        return (mss);
}
2945
/*
 * Compute the MSS to announce towards "addr": the MTU of the route's
 * outgoing interface minus IP and TCP header overhead, clamped above
 * by the peer's "offer" and below by 64.  Falls back to V_tcp_mssdflt
 * when no route is found.  The route lookup honors "rtableid".
 */
static u_int16_t
pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
{
#ifdef INET
        struct sockaddr_in      *dst;
        struct route             ro;
#endif /* INET */
#ifdef INET6
        struct sockaddr_in6     *dst6;
        struct route_in6         ro6;
#endif /* INET6 */
        struct rtentry          *rt = NULL;
        int                      hlen = 0;
        u_int16_t                mss = V_tcp_mssdflt;

        switch (af) {
#ifdef INET
        case AF_INET:
                hlen = sizeof(struct ip);
                bzero(&ro, sizeof(ro));
                dst = (struct sockaddr_in *)&ro.ro_dst;
                dst->sin_family = AF_INET;
                dst->sin_len = sizeof(*dst);
                dst->sin_addr = addr->v4;
                in_rtalloc_ign(&ro, 0, rtableid);
                rt = ro.ro_rt;
                break;
#endif /* INET */
#ifdef INET6
        case AF_INET6:
                hlen = sizeof(struct ip6_hdr);
                bzero(&ro6, sizeof(ro6));
                dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
                dst6->sin6_family = AF_INET6;
                dst6->sin6_len = sizeof(*dst6);
                dst6->sin6_addr = addr->v6;
                in6_rtalloc_ign(&ro6, 0, rtableid);
                rt = ro6.ro_rt;
                break;
#endif /* INET6 */
        }

        if (rt && rt->rt_ifp) {
                /* Derive MSS from the interface MTU; drop the route ref. */
                mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr);
                mss = max(V_tcp_mssdflt, mss);
                RTFREE(rt);
        }
        mss = min(mss, offer);
        mss = max(mss, 64);             /* sanity - at least max opt space */
        return (mss);
}
2997
/*
 * Generate an initial sequence number for a pf-synthesized TCP
 * connection (e.g. synproxy).  The ISN is an MD5 hash of a boot-time
 * random secret and the connection 4-tuple, perturbed by a random
 * increment and a monotonically advancing offset, RFC 1948 style.
 */
static u_int32_t
pf_tcp_iss(struct pf_pdesc *pd)
{
        MD5_CTX ctx;
        u_int32_t digest[4];

        if (V_pf_tcp_secret_init == 0) {
                /* Lazily initialize the per-VNET secret on first use. */
                read_random(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
                MD5Init(&V_pf_tcp_secret_ctx);
                MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
                    sizeof(V_pf_tcp_secret));
                V_pf_tcp_secret_init = 1;
        }

        /* Work on a copy; the secret-seeded context is reused. */
        ctx = V_pf_tcp_secret_ctx;

        MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
        MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
        if (pd->af == AF_INET6) {
                MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
                MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
        } else {
                MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
                MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
        }
        MD5Final((u_char *)digest, &ctx);
        V_pf_tcp_iss_off += 4096;
#define ISN_RANDOM_INCREMENT (4096 - 1)
        return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
            V_pf_tcp_iss_off);
#undef  ISN_RANDOM_INCREMENT
}
3030
3031 static int
3032 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
3033     struct pfi_kif *kif, struct mbuf *m, int off, struct pf_pdesc *pd,
3034     struct pf_rule **am, struct pf_ruleset **rsm, struct inpcb *inp)
3035 {
3036         struct pf_rule          *nr = NULL;
3037         struct pf_addr          * const saddr = pd->src;
3038         struct pf_addr          * const daddr = pd->dst;
3039         sa_family_t              af = pd->af;
3040         struct pf_rule          *r, *a = NULL;
3041         struct pf_ruleset       *ruleset = NULL;
3042         struct pf_src_node      *nsn = NULL;
3043         struct tcphdr           *th = pd->hdr.tcp;
3044         struct pf_state_key     *sk = NULL, *nk = NULL;
3045         u_short                  reason;
3046         int                      rewrite = 0, hdrlen = 0;
3047         int                      tag = -1, rtableid = -1;
3048         int                      asd = 0;
3049         int                      match = 0;
3050         int                      state_icmp = 0;
3051         u_int16_t                sport = 0, dport = 0;
3052         u_int16_t                bproto_sum = 0, bip_sum = 0;
3053         u_int8_t                 icmptype = 0, icmpcode = 0;
3054         struct pf_anchor_stackframe     anchor_stack[PF_ANCHOR_STACKSIZE];
3055
3056         PF_RULES_RASSERT();
3057
3058         if (inp != NULL) {
3059                 INP_LOCK_ASSERT(inp);
3060                 pd->lookup.uid = inp->inp_cred->cr_uid;
3061                 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3062                 pd->lookup.done = 1;
3063         }
3064
3065         switch (pd->proto) {
3066         case IPPROTO_TCP:
3067                 sport = th->th_sport;
3068                 dport = th->th_dport;
3069                 hdrlen = sizeof(*th);
3070                 break;
3071         case IPPROTO_UDP:
3072                 sport = pd->hdr.udp->uh_sport;
3073                 dport = pd->hdr.udp->uh_dport;
3074                 hdrlen = sizeof(*pd->hdr.udp);
3075                 break;
3076 #ifdef INET
3077         case IPPROTO_ICMP:
3078                 if (pd->af != AF_INET)
3079                         break;
3080                 sport = dport = pd->hdr.icmp->icmp_id;
3081                 hdrlen = sizeof(*pd->hdr.icmp);
3082                 icmptype = pd->hdr.icmp->icmp_type;
3083                 icmpcode = pd->hdr.icmp->icmp_code;
3084
3085                 if (icmptype == ICMP_UNREACH ||
3086                     icmptype == ICMP_SOURCEQUENCH ||
3087                     icmptype == ICMP_REDIRECT ||
3088                     icmptype == ICMP_TIMXCEED ||
3089                     icmptype == ICMP_PARAMPROB)
3090                         state_icmp++;
3091                 break;
3092 #endif /* INET */
3093 #ifdef INET6
3094         case IPPROTO_ICMPV6:
3095                 if (af != AF_INET6)
3096                         break;
3097                 sport = dport = pd->hdr.icmp6->icmp6_id;
3098                 hdrlen = sizeof(*pd->hdr.icmp6);
3099                 icmptype = pd->hdr.icmp6->icmp6_type;
3100                 icmpcode = pd->hdr.icmp6->icmp6_code;
3101
3102                 if (icmptype == ICMP6_DST_UNREACH ||
3103                     icmptype == ICMP6_PACKET_TOO_BIG ||
3104                     icmptype == ICMP6_TIME_EXCEEDED ||
3105                     icmptype == ICMP6_PARAM_PROB)
3106                         state_icmp++;
3107                 break;
3108 #endif /* INET6 */
3109         default:
3110                 sport = dport = hdrlen = 0;
3111                 break;
3112         }
3113
3114         r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3115
3116         /* check packet for BINAT/NAT/RDR */
3117         if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, &sk,
3118             &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) {
3119                 KASSERT(sk != NULL, ("%s: null sk", __func__));
3120                 KASSERT(nk != NULL, ("%s: null nk", __func__));
3121
3122                 if (pd->ip_sum)
3123                         bip_sum = *pd->ip_sum;
3124
3125                 switch (pd->proto) {
3126                 case IPPROTO_TCP:
3127                         bproto_sum = th->th_sum;
3128                         pd->proto_sum = &th->th_sum;
3129
3130                         if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3131                             nk->port[pd->sidx] != sport) {
3132                                 pf_change_ap(saddr, &th->th_sport, pd->ip_sum,
3133                                     &th->th_sum, &nk->addr[pd->sidx],
3134                                     nk->port[pd->sidx], 0, af);
3135                                 pd->sport = &th->th_sport;
3136                                 sport = th->th_sport;
3137                         }
3138
3139                         if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3140                             nk->port[pd->didx] != dport) {
3141                                 pf_change_ap(daddr, &th->th_dport, pd->ip_sum,
3142                                     &th->th_sum, &nk->addr[pd->didx],
3143                                     nk->port[pd->didx], 0, af);
3144                                 dport = th->th_dport;
3145                                 pd->dport = &th->th_dport;
3146                         }
3147                         rewrite++;
3148                         break;
3149                 case IPPROTO_UDP:
3150                         bproto_sum = pd->hdr.udp->uh_sum;
3151                         pd->proto_sum = &pd->hdr.udp->uh_sum;
3152
3153                         if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3154                             nk->port[pd->sidx] != sport) {
3155                                 pf_change_ap(saddr, &pd->hdr.udp->uh_sport,
3156                                     pd->ip_sum, &pd->hdr.udp->uh_sum,
3157                                     &nk->addr[pd->sidx],
3158                                     nk->port[pd->sidx], 1, af);
3159                                 sport = pd->hdr.udp->uh_sport;
3160                                 pd->sport = &pd->hdr.udp->uh_sport;
3161                         }
3162
3163                         if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3164                             nk->port[pd->didx] != dport) {
3165                                 pf_change_ap(daddr, &pd->hdr.udp->uh_dport,
3166                                     pd->ip_sum, &pd->hdr.udp->uh_sum,
3167                                     &nk->addr[pd->didx],
3168                                     nk->port[pd->didx], 1, af);
3169                                 dport = pd->hdr.udp->uh_dport;
3170                                 pd->dport = &pd->hdr.udp->uh_dport;
3171                         }
3172                         rewrite++;
3173                         break;
3174 #ifdef INET
3175                 case IPPROTO_ICMP:
3176                         nk->port[0] = nk->port[1];
3177                         if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3178                                 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3179                                     nk->addr[pd->sidx].v4.s_addr, 0);
3180
3181                         if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3182                                 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3183                                     nk->addr[pd->didx].v4.s_addr, 0);
3184
3185                         if (nk->port[1] != pd->hdr.icmp->icmp_id) {
3186                                 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3187                                     pd->hdr.icmp->icmp_cksum, sport,
3188                                     nk->port[1], 0);
3189                                 pd->hdr.icmp->icmp_id = nk->port[1];
3190                                 pd->sport = &pd->hdr.icmp->icmp_id;
3191                         }
3192                         m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
3193                         break;
3194 #endif /* INET */
3195 #ifdef INET6
3196                 case IPPROTO_ICMPV6:
3197                         nk->port[0] = nk->port[1];
3198                         if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3199                                 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3200                                     &nk->addr[pd->sidx], 0);
3201
3202                         if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3203                                 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3204                                     &nk->addr[pd->didx], 0);
3205                         rewrite++;
3206                         break;
3207 #endif /* INET */
3208                 default:
3209                         switch (af) {
3210 #ifdef INET
3211                         case AF_INET:
3212                                 if (PF_ANEQ(saddr,
3213                                     &nk->addr[pd->sidx], AF_INET))
3214                                         pf_change_a(&saddr->v4.s_addr,
3215                                             pd->ip_sum,
3216                                             nk->addr[pd->sidx].v4.s_addr, 0);
3217
3218                                 if (PF_ANEQ(daddr,
3219                                     &nk->addr[pd->didx], AF_INET))
3220                                         pf_change_a(&daddr->v4.s_addr,
3221                                             pd->ip_sum,
3222                                             nk->addr[pd->didx].v4.s_addr, 0);
3223                                 break;
3224 #endif /* INET */
3225 #ifdef INET6
3226                         case AF_INET6:
3227                                 if (PF_ANEQ(saddr,
3228                                     &nk->addr[pd->sidx], AF_INET6))
3229                                         PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3230
3231                                 if (PF_ANEQ(daddr,
3232                                     &nk->addr[pd->didx], AF_INET6))
3233                                         PF_ACPY(saddr, &nk->addr[pd->didx], af);
3234                                 break;
3235 #endif /* INET */
3236                         }
3237                         break;
3238                 }
3239                 if (nr->natpass)
3240                         r = NULL;
3241                 pd->nat_rule = nr;
3242         }
3243
3244         while (r != NULL) {
3245                 r->evaluations++;
3246                 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3247                         r = r->skip[PF_SKIP_IFP].ptr;
3248                 else if (r->direction && r->direction != direction)
3249                         r = r->skip[PF_SKIP_DIR].ptr;
3250                 else if (r->af && r->af != af)
3251                         r = r->skip[PF_SKIP_AF].ptr;
3252                 else if (r->proto && r->proto != pd->proto)
3253                         r = r->skip[PF_SKIP_PROTO].ptr;
3254                 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3255                     r->src.neg, kif, M_GETFIB(m)))
3256                         r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3257                 /* tcp/udp only. port_op always 0 in other cases */
3258                 else if (r->src.port_op && !pf_match_port(r->src.port_op,
3259                     r->src.port[0], r->src.port[1], sport))
3260                         r = r->skip[PF_SKIP_SRC_PORT].ptr;
3261                 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3262                     r->dst.neg, NULL, M_GETFIB(m)))
3263                         r = r->skip[PF_SKIP_DST_ADDR].ptr;
3264                 /* tcp/udp only. port_op always 0 in other cases */
3265                 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3266                     r->dst.port[0], r->dst.port[1], dport))
3267                         r = r->skip[PF_SKIP_DST_PORT].ptr;
3268                 /* icmp only. type always 0 in other cases */
3269                 else if (r->type && r->type != icmptype + 1)
3270                         r = TAILQ_NEXT(r, entries);
3271                 /* icmp only. type always 0 in other cases */
3272                 else if (r->code && r->code != icmpcode + 1)
3273                         r = TAILQ_NEXT(r, entries);
3274                 else if (r->tos && !(r->tos == pd->tos))
3275                         r = TAILQ_NEXT(r, entries);
3276                 else if (r->rule_flag & PFRULE_FRAGMENT)
3277                         r = TAILQ_NEXT(r, entries);
3278                 else if (pd->proto == IPPROTO_TCP &&
3279                     (r->flagset & th->th_flags) != r->flags)
3280                         r = TAILQ_NEXT(r, entries);
3281                 /* tcp/udp only. uid.op always 0 in other cases */
3282                 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3283                     pf_socket_lookup(direction, pd, m), 1)) &&
3284                     !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3285                     pd->lookup.uid))
3286                         r = TAILQ_NEXT(r, entries);
3287                 /* tcp/udp only. gid.op always 0 in other cases */
3288                 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3289                     pf_socket_lookup(direction, pd, m), 1)) &&
3290                     !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3291                     pd->lookup.gid))
3292                         r = TAILQ_NEXT(r, entries);
3293                 else if (r->prob &&
3294                     r->prob <= arc4random())
3295                         r = TAILQ_NEXT(r, entries);
3296                 else if (r->match_tag && !pf_match_tag(m, r, &tag,
3297                     pd->pf_mtag ? pd->pf_mtag->tag : 0))
3298                         r = TAILQ_NEXT(r, entries);
3299                 else if (r->os_fingerprint != PF_OSFP_ANY &&
3300                     (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3301                     pf_osfp_fingerprint(pd, m, off, th),
3302                     r->os_fingerprint)))
3303                         r = TAILQ_NEXT(r, entries);
3304                 else {
3305                         if (r->tag)
3306                                 tag = r->tag;
3307                         if (r->rtableid >= 0)
3308                                 rtableid = r->rtableid;
3309                         if (r->anchor == NULL) {
3310                                 match = 1;
3311                                 *rm = r;
3312                                 *am = a;
3313                                 *rsm = ruleset;
3314                                 if ((*rm)->quick)
3315                                         break;
3316                                 r = TAILQ_NEXT(r, entries);
3317                         } else
3318                                 pf_step_into_anchor(anchor_stack, &asd,
3319                                     &ruleset, PF_RULESET_FILTER, &r, &a,
3320                                     &match);
3321                 }
3322                 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3323                     &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3324                         break;
3325         }
3326         r = *rm;
3327         a = *am;
3328         ruleset = *rsm;
3329
3330         REASON_SET(&reason, PFRES_MATCH);
3331
3332         if (r->log || (nr != NULL && nr->log)) {
3333                 if (rewrite)
3334                         m_copyback(m, off, hdrlen, pd->hdr.any);
3335                 PFLOG_PACKET(kif, m, af, direction, reason, r->log ? r : nr, a,
3336                     ruleset, pd, 1);
3337         }
3338
3339         if ((r->action == PF_DROP) &&
3340             ((r->rule_flag & PFRULE_RETURNRST) ||
3341             (r->rule_flag & PFRULE_RETURNICMP) ||
3342             (r->rule_flag & PFRULE_RETURN))) {
3343                 /* undo NAT changes, if they have taken place */
3344                 if (nr != NULL) {
3345                         PF_ACPY(saddr, &sk->addr[pd->sidx], af);
3346                         PF_ACPY(daddr, &sk->addr[pd->didx], af);
3347                         if (pd->sport)
3348                                 *pd->sport = sk->port[pd->sidx];
3349                         if (pd->dport)
3350                                 *pd->dport = sk->port[pd->didx];
3351                         if (pd->proto_sum)
3352                                 *pd->proto_sum = bproto_sum;
3353                         if (pd->ip_sum)
3354                                 *pd->ip_sum = bip_sum;
3355                         m_copyback(m, off, hdrlen, pd->hdr.any);
3356                 }
3357                 if (pd->proto == IPPROTO_TCP &&
3358                     ((r->rule_flag & PFRULE_RETURNRST) ||
3359                     (r->rule_flag & PFRULE_RETURN)) &&
3360                     !(th->th_flags & TH_RST)) {
3361                         u_int32_t        ack = ntohl(th->th_seq) + pd->p_len;
3362                         int              len = 0;
3363 #ifdef INET
3364                         struct ip       *h4;
3365 #endif
3366 #ifdef INET6
3367                         struct ip6_hdr  *h6;
3368 #endif
3369
3370                         switch (af) {
3371 #ifdef INET
3372                         case AF_INET:
3373                                 h4 = mtod(m, struct ip *);
3374                                 len = ntohs(h4->ip_len) - off;
3375                                 break;
3376 #endif
3377 #ifdef INET6
3378                         case AF_INET6:
3379                                 h6 = mtod(m, struct ip6_hdr *);
3380                                 len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
3381                                 break;
3382 #endif
3383                         }
3384
3385                         if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
3386                                 REASON_SET(&reason, PFRES_PROTCKSUM);
3387                         else {
3388                                 if (th->th_flags & TH_SYN)
3389                                         ack++;
3390                                 if (th->th_flags & TH_FIN)
3391                                         ack++;
3392                                 pf_send_tcp(m, r, af, pd->dst,
3393                                     pd->src, th->th_dport, th->th_sport,
3394                                     ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
3395                                     r->return_ttl, 1, 0, kif->pfik_ifp);
3396                         }
3397                 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
3398                     r->return_icmp)
3399                         pf_send_icmp(m, r->return_icmp >> 8,
3400                             r->return_icmp & 255, af, r);
3401                 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
3402                     r->return_icmp6)
3403                         pf_send_icmp(m, r->return_icmp6 >> 8,
3404                             r->return_icmp6 & 255, af, r);
3405         }
3406
3407         if (r->action == PF_DROP)
3408                 goto cleanup;
3409
3410         if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3411                 REASON_SET(&reason, PFRES_MEMORY);
3412                 goto cleanup;
3413         }
3414         if (rtableid >= 0)
3415                 M_SETFIB(m, rtableid);
3416
3417         if (!state_icmp && (r->keep_state || nr != NULL ||
3418             (pd->flags & PFDESC_TCP_NORM))) {
3419                 int action;
3420                 action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off,
3421                     sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum,
3422                     hdrlen);
3423                 if (action != PF_PASS)
3424                         return (action);
3425         } else {
3426                 if (sk != NULL)
3427                         uma_zfree(V_pf_state_key_z, sk);
3428                 if (nk != NULL)
3429                         uma_zfree(V_pf_state_key_z, nk);
3430         }
3431
3432         /* copy back packet headers if we performed NAT operations */
3433         if (rewrite)
3434                 m_copyback(m, off, hdrlen, pd->hdr.any);
3435
3436         if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
3437             direction == PF_OUT &&
3438             pfsync_defer_ptr != NULL && pfsync_defer_ptr(*sm, m))
3439                 /*
3440                  * We want the state created, but we dont
3441                  * want to send this in case a partner
3442                  * firewall has to know about it to allow
3443                  * replies through it.
3444                  */
3445                 return (PF_DEFER);
3446
3447         return (PF_PASS);
3448
3449 cleanup:
3450         if (sk != NULL)
3451                 uma_zfree(V_pf_state_key_z, sk);
3452         if (nk != NULL)
3453                 uma_zfree(V_pf_state_key_z, nk);
3454         return (PF_DROP);
3455 }
3456
/*
 * pf_create_state() -- build and install a new state-table entry for a
 * packet that matched a stateful filter rule.
 *
 * r/nr/a   matching filter rule, NAT rule (may be NULL), anchor rule.
 * pd       parsed packet descriptor for the current packet.
 * nsn      source node found during NAT rule lookup (may be NULL).
 * sk/nk    state keys prepared by pf_get_translation() when nr != NULL,
 *          both NULL otherwise; ownership transfers to this function
 *          (they are consumed by pf_state_insert() or freed on failure).
 * sm       on success, *sm points at the inserted state.
 * bproto_sum/bip_sum  pre-NAT transport/IP checksums, used to undo NAT
 *          before answering a SYN-proxied connection.
 *
 * Returns PF_PASS on success, PF_DROP on failure, or PF_SYNPROXY_DROP
 * when the packet was consumed by the SYN-proxy handshake.
 */
3457 static int
3458 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
3459     struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *nk,
3460     struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
3461     u_int16_t dport, int *rewrite, struct pfi_kif *kif, struct pf_state **sm,
3462     int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen)
3463 {
3464         struct pf_state         *s = NULL;
3465         struct pf_src_node      *sn = NULL;
3466         struct tcphdr           *th = pd->hdr.tcp;
3467         u_int16_t                mss = V_tcp_mssdflt;
3468         u_short                  reason;
3469
3470         /* check maximums */
3471         if (r->max_states &&
3472             (counter_u64_fetch(r->states_cur) >= r->max_states)) {
3473                 counter_u64_add(V_pf_status.lcounters[LCNT_STATES], 1);
3474                 REASON_SET(&reason, PFRES_MAXSTATES);
3475                 return (PF_DROP);
3476         }
3477         /* src node for filter rule */
3478         if ((r->rule_flag & PFRULE_SRCTRACK ||
3479             r->rpool.opts & PF_POOL_STICKYADDR) &&
3480             pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3481                 REASON_SET(&reason, PFRES_SRCLIMIT);
3482                 goto csfailed;
3483         }
3484         /* src node for translation rule */
3485         if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3486             pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3487                 REASON_SET(&reason, PFRES_SRCLIMIT);
3488                 goto csfailed;
3489         }
         /* Zeroed, non-sleeping allocation: may fail under memory pressure. */
3490         s = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO);
3491         if (s == NULL) {
3492                 REASON_SET(&reason, PFRES_MEMORY);
3493                 goto csfailed;
3494         }
3495         s->rule.ptr = r;
3496         s->nat_rule.ptr = nr;
3497         s->anchor.ptr = a;
3498         STATE_INC_COUNTERS(s);
3499         if (r->allow_opts)
3500                 s->state_flags |= PFSTATE_ALLOWOPTS;
3501         if (r->rule_flag & PFRULE_STATESLOPPY)
3502                 s->state_flags |= PFSTATE_SLOPPY;
3503         s->log = r->log & PF_LOG_ALL;
3504         s->sync_state = PFSYNC_S_NONE;
3505         if (nr != NULL)
3506                 s->log |= nr->log & PF_LOG_ALL;
         /* Per-protocol initial peer states and timeouts. */
3507         switch (pd->proto) {
3508         case IPPROTO_TCP:
3509                 s->src.seqlo = ntohl(th->th_seq);
3510                 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3511                 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3512                     r->keep_state == PF_STATE_MODULATE) {
3513                         /* Generate sequence number modulator */
3514                         if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3515                             0)
3516                                 s->src.seqdiff = 1;
3517                         pf_change_a(&th->th_seq, &th->th_sum,
3518                             htonl(s->src.seqlo + s->src.seqdiff), 0);
                         /* Tell the caller to copy the mangled header back. */
3519                         *rewrite = 1;
3520                 } else
3521                         s->src.seqdiff = 0;
3522                 if (th->th_flags & TH_SYN) {
3523                         s->src.seqhi++;
3524                         s->src.wscale = pf_get_wscale(m, off,
3525                             th->th_off, pd->af);
3526                 }
3527                 s->src.max_win = MAX(ntohs(th->th_win), 1);
3528                 if (s->src.wscale & PF_WSCALE_MASK) {
3529                         /* Remove scale factor from initial window */
3530                         int win = s->src.max_win;
3531                         win += 1 << (s->src.wscale & PF_WSCALE_MASK);
3532                         s->src.max_win = (win - 1) >>
3533                             (s->src.wscale & PF_WSCALE_MASK);
3534                 }
3535                 if (th->th_flags & TH_FIN)
3536                         s->src.seqhi++;
3537                 s->dst.seqhi = 1;
3538                 s->dst.max_win = 1;
3539                 s->src.state = TCPS_SYN_SENT;
3540                 s->dst.state = TCPS_CLOSED;
3541                 s->timeout = PFTM_TCP_FIRST_PACKET;
3542                 break;
3543         case IPPROTO_UDP:
3544                 s->src.state = PFUDPS_SINGLE;
3545                 s->dst.state = PFUDPS_NO_TRAFFIC;
3546                 s->timeout = PFTM_UDP_FIRST_PACKET;
3547                 break;
3548         case IPPROTO_ICMP:
3549 #ifdef INET6
3550         case IPPROTO_ICMPV6:
3551 #endif
3552                 s->timeout = PFTM_ICMP_FIRST_PACKET;
3553                 break;
3554         default:
3555                 s->src.state = PFOTHERS_SINGLE;
3556                 s->dst.state = PFOTHERS_NO_TRAFFIC;
3557                 s->timeout = PFTM_OTHER_FIRST_PACKET;
3558         }
3559
         /* route-to/reply-to: resolve the routing address/interface now. */
3560         if (r->rt && r->rt != PF_FASTROUTE) {
3561                 if (pf_map_addr(pd->af, r, pd->src, &s->rt_addr, NULL, &sn)) {
3562                         REASON_SET(&reason, PFRES_BADSTATE);
3563                         pf_src_tree_remove_state(s);
3564                         STATE_DEC_COUNTERS(s);
3565                         uma_zfree(V_pf_state_z, s);
3566                         goto csfailed;
3567                 }
3568                 s->rt_kif = r->rpool.cur->kif;
3569         }
3570
3571         s->creation = time_uptime;
3572         s->expire = time_uptime;
3573
         /* Link the state to its tracking source node(s). */
3574         if (sn != NULL) {
3575                 s->src_node = sn;
3576                 s->src_node->states++;
3577         }
3578         if (nsn != NULL) {
3579                 /* XXX We only modify one side for now. */
3580                 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
3581                 s->nat_src_node = nsn;
3582                 s->nat_src_node->states++;
3583         }
3584         if (pd->proto == IPPROTO_TCP) {
3585                 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
3586                     off, pd, th, &s->src, &s->dst)) {
3587                         REASON_SET(&reason, PFRES_MEMORY);
3588                         pf_src_tree_remove_state(s);
3589                         STATE_DEC_COUNTERS(s);
3590                         uma_zfree(V_pf_state_z, s);
3591                         return (PF_DROP);
3592                 }
3593                 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
3594                     pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
3595                     &s->src, &s->dst, rewrite)) {
3596                         /* This really shouldn't happen!!! */
3597                         DPFPRINTF(PF_DEBUG_URGENT,
3598                             ("pf_normalize_tcp_stateful failed on first pkt"));
3599                         pf_normalize_tcp_cleanup(s);
3600                         pf_src_tree_remove_state(s);
3601                         STATE_DEC_COUNTERS(s);
3602                         uma_zfree(V_pf_state_z, s);
3603                         return (PF_DROP);
3604                 }
3605         }
3606         s->direction = pd->dir;
3607
3608         /*
3609          * sk/nk could already been setup by pf_get_translation().
         * Without a NAT rule both sides share one key (nk = sk).
3610          */
3611         if (nr == NULL) {
3612                 KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
3613                     __func__, nr, sk, nk));
3614                 sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport);
3615                 if (sk == NULL)
3616                         goto csfailed;
3617                 nk = sk;
3618         } else
3619                 KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p",
3620                     __func__, nr, sk, nk));
3621
3622         /* Swap sk/nk for PF_OUT. */
         /* pf_state_insert() consumes the keys regardless of outcome. */
3623         if (pf_state_insert(BOUND_IFACE(r, kif),
3624             (pd->dir == PF_IN) ? sk : nk,
3625             (pd->dir == PF_IN) ? nk : sk, s)) {
3626                 if (pd->proto == IPPROTO_TCP)
3627                         pf_normalize_tcp_cleanup(s);
3628                 REASON_SET(&reason, PFRES_STATEINS);
3629                 pf_src_tree_remove_state(s);
3630                 STATE_DEC_COUNTERS(s);
3631                 uma_zfree(V_pf_state_z, s);
3632                 return (PF_DROP);
3633         } else
3634                 *sm = s;
3635
3636         if (tag > 0)
3637                 s->tag = tag;
         /*
         * SYN proxy: answer the client's SYN ourselves and drop the
         * packet; the real connection is completed later by the state
         * machine once the client's handshake finishes.
         */
3638         if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
3639             TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
3640                 s->src.state = PF_TCPS_PROXY_SRC;
3641                 /* undo NAT changes, if they have taken place */
3642                 if (nr != NULL) {
3643                         struct pf_state_key *skt = s->key[PF_SK_WIRE];
3644                         if (pd->dir == PF_OUT)
3645                                 skt = s->key[PF_SK_STACK];
3646                         PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
3647                         PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
3648                         if (pd->sport)
3649                                 *pd->sport = skt->port[pd->sidx];
3650                         if (pd->dport)
3651                                 *pd->dport = skt->port[pd->didx];
3652                         if (pd->proto_sum)
3653                                 *pd->proto_sum = bproto_sum;
3654                         if (pd->ip_sum)
3655                                 *pd->ip_sum = bip_sum;
3656                         m_copyback(m, off, hdrlen, pd->hdr.any);
3657                 }
3658                 s->src.seqhi = htonl(arc4random());
3659                 /* Find mss option */
3660                 int rtid = M_GETFIB(m);
3661                 mss = pf_get_mss(m, off, th->th_off, pd->af);
                 /* Clamp MSS against the route MTU toward both peers. */
3662                 mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
3663                 mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
3664                 s->src.mss = mss;
3665                 pf_send_tcp(NULL, r, pd->af, pd->dst, pd->src, th->th_dport,
3666                     th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
3667                     TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL);
3668                 REASON_SET(&reason, PFRES_SYNPROXY);
3669                 return (PF_SYNPROXY_DROP);
3670         }
3671
3672         return (PF_PASS);
3673
         /*
         * Error path: free the state keys (not yet consumed by
         * pf_state_insert()) and drop any source nodes that ended up
         * unused by this or any other state.
         */
3674 csfailed:
3675         if (sk != NULL)
3676                 uma_zfree(V_pf_state_key_z, sk);
3677         if (nk != NULL)
3678                 uma_zfree(V_pf_state_key_z, nk);
3679
3680         if (sn != NULL && sn->states == 0 && sn->expire == 0) {
3681                 pf_unlink_src_node(sn);
3682                 pf_free_src_node(sn);
3683         }
3684
3685         if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) {
3686                 pf_unlink_src_node(nsn);
3687                 pf_free_src_node(nsn);
3688         }
3689
3690         return (PF_DROP);
3691 }
3692
/*
 * pf_test_fragment() -- evaluate the filter ruleset against a non-first
 * IP fragment, for which no transport header is available.  Rules that
 * require port numbers, TCP flags, ICMP type/code or OS fingerprints can
 * never match a fragment and are stepped over.
 *
 * On return *rm, *am and *rsm hold the matching rule, its anchor rule
 * and its ruleset for the caller's logging/accounting.  Returns PF_PASS
 * or PF_DROP.  Caller must hold the rules read lock (PF_RULES_RASSERT).
 */
3693 static int
3694 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
3695     struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
3696     struct pf_ruleset **rsm)
3697 {
3698         struct pf_rule          *r, *a = NULL;
3699         struct pf_ruleset       *ruleset = NULL;
3700         sa_family_t              af = pd->af;
3701         u_short                  reason;
3702         int                      tag = -1;
3703         int                      asd = 0;
3704         int                      match = 0;
3705         struct pf_anchor_stackframe     anchor_stack[PF_ANCHOR_STACKSIZE];
3706
3707         PF_RULES_RASSERT();
3708
         /*
         * Walk the active filter ruleset.  The skip[] pointers jump over
         * consecutive rules that are known to fail the same criterion,
         * so the chain below must test criteria in skip-step order.
         */
3709         r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3710         while (r != NULL) {
3711                 r->evaluations++;
3712                 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3713                         r = r->skip[PF_SKIP_IFP].ptr;
3714                 else if (r->direction && r->direction != direction)
3715                         r = r->skip[PF_SKIP_DIR].ptr;
3716                 else if (r->af && r->af != af)
3717                         r = r->skip[PF_SKIP_AF].ptr;
3718                 else if (r->proto && r->proto != pd->proto)
3719                         r = r->skip[PF_SKIP_PROTO].ptr;
3720                 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
3721                     r->src.neg, kif, M_GETFIB(m)))
3722                         r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3723                 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
3724                     r->dst.neg, NULL, M_GETFIB(m)))
3725                         r = r->skip[PF_SKIP_DST_ADDR].ptr;
3726                 else if (r->tos && !(r->tos == pd->tos))
3727                         r = TAILQ_NEXT(r, entries);
                 /* OS fingerprinting needs the TCP header: never matches. */
3728                 else if (r->os_fingerprint != PF_OSFP_ANY)
3729                         r = TAILQ_NEXT(r, entries);
                 /* Port/flag criteria cannot be checked on a fragment. */
3730                 else if (pd->proto == IPPROTO_UDP &&
3731                     (r->src.port_op || r->dst.port_op))
3732                         r = TAILQ_NEXT(r, entries);
3733                 else if (pd->proto == IPPROTO_TCP &&
3734                     (r->src.port_op || r->dst.port_op || r->flagset))
3735                         r = TAILQ_NEXT(r, entries);
3736                 else if ((pd->proto == IPPROTO_ICMP ||
3737                     pd->proto == IPPROTO_ICMPV6) &&
3738                     (r->type || r->code))
3739                         r = TAILQ_NEXT(r, entries);
                 /* Probability match ("prob"): skip with chance 1 - prob. */
3740                 else if (r->prob && r->prob <=
3741                     (arc4random() % (UINT_MAX - 1) + 1))
3742                         r = TAILQ_NEXT(r, entries);
3743                 else if (r->match_tag && !pf_match_tag(m, r, &tag,
3744                     pd->pf_mtag ? pd->pf_mtag->tag : 0))
3745                         r = TAILQ_NEXT(r, entries);
3746                 else {
                         /* Rule matched: record it, or descend into anchor. */
3747                         if (r->anchor == NULL) {
3748                                 match = 1;
3749                                 *rm = r;
3750                                 *am = a;
3751                                 *rsm = ruleset;
                                 /* "quick" ends evaluation immediately. */
3752                                 if ((*rm)->quick)
3753                                         break;
3754                                 r = TAILQ_NEXT(r, entries);
3755                         } else
3756                                 pf_step_into_anchor(anchor_stack, &asd,
3757                                     &ruleset, PF_RULESET_FILTER, &r, &a,
3758                                     &match);
3759                 }
3760                 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3761                     &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3762                         break;
3763         }
         /* Last matching rule wins (pf semantics without "quick"). */
3764         r = *rm;
3765         a = *am;
3766         ruleset = *rsm;
3767
3768         REASON_SET(&reason, PFRES_MATCH);
3769
3770         if (r->log)
3771                 PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd,
3772                     1);
3773
3774         if (r->action != PF_PASS)
3775                 return (PF_DROP);
3776
         /* Tagging failure means we ran out of memory for the mbuf tag. */
3777         if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3778                 REASON_SET(&reason, PFRES_MEMORY);
3779                 return (PF_DROP);
3780         }
3781
3782         return (PF_PASS);
3783 }
3784
3785 static int
3786 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
3787         struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off,
3788         struct pf_pdesc *pd, u_short *reason, int *copyback)
3789 {
3790         struct tcphdr           *th = pd->hdr.tcp;
3791         u_int16_t                win = ntohs(th->th_win);
3792         u_int32_t                ack, end, seq, orig_seq;
3793         u_int8_t                 sws, dws;
3794         int                      ackskew;
3795
3796         if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
3797                 sws = src->wscale & PF_WSCALE_MASK;
3798                 dws = dst->wscale & PF_WSCALE_MASK;
3799         } else
3800                 sws = dws = 0;
3801
3802         /*
3803          * Sequence tracking algorithm from Guido van Rooij's paper:
3804          *   http://www.madison-gurkha.com/publications/tcp_filtering/
3805          *      tcp_filtering.ps
3806          */
3807
3808         orig_seq = seq = ntohl(th->th_seq);
3809         if (src->seqlo == 0) {
3810                 /* First packet from this end. Set its state */
3811
3812                 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
3813                     src->scrub == NULL) {
3814                         if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
3815                                 REASON_SET(reason, PFRES_MEMORY);
3816                                 return (PF_DROP);
3817                         }
3818                 }
3819
3820                 /* Deferred generation of sequence number modulator */
3821                 if (dst->seqdiff && !src->seqdiff) {
3822                         /* use random iss for the TCP server */
3823                         while ((src->seqdiff = arc4random() - seq) == 0)
3824                                 ;
3825                         ack = ntohl(th->th_ack) - dst->seqdiff;
3826                         pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
3827                             src->seqdiff), 0);
3828                         pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
3829                         *copyback = 1;
3830                 } else {
3831                         ack = ntohl(th->th_ack);
3832                 }
3833
3834                 end = seq + pd->p_len;
3835                 if (th->th_flags & TH_SYN) {
3836                         end++;
3837                         if (dst->wscale & PF_WSCALE_FLAG) {
3838                                 src->wscale = pf_get_wscale(m, off, th->th_off,
3839                                     pd->af);
3840                                 if (src->wscale & PF_WSCALE_FLAG) {
3841                                         /* Remove scale factor from initial
3842                                          * window */
3843                                         sws = src->wscale & PF_WSCALE_MASK;
3844                                         win = ((u_int32_t)win + (1 << sws) - 1)
3845                                             >> sws;
3846                                         dws = dst->wscale & PF_WSCALE_MASK;
3847                                 } else {
3848                                         /* fixup other window */
3849                                         dst->max_win <<= dst->wscale &
3850                                             PF_WSCALE_MASK;
3851                                         /* in case of a retrans SYN|ACK */
3852                                         dst->wscale = 0;
3853                                 }
3854                         }
3855                 }
3856                 if (th->th_flags & TH_FIN)
3857                         end++;
3858
3859                 src->seqlo = seq;
3860                 if (src->state < TCPS_SYN_SENT)
3861                         src->state = TCPS_SYN_SENT;
3862
3863                 /*
3864                  * May need to slide the window (seqhi may have been set by
3865                  * the crappy stack check or if we picked up the connection
3866                  * after establishment)
3867                  */
3868                 if (src->seqhi == 1 ||
3869                     SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
3870                         src->seqhi = end + MAX(1, dst->max_win << dws);
3871                 if (win > src->max_win)
3872                         src->max_win = win;
3873
3874         } else {
3875                 ack = ntohl(th->th_ack) - dst->seqdiff;
3876                 if (src->seqdiff) {
3877                         /* Modulate sequence numbers */
3878                         pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
3879                             src->seqdiff), 0);
3880                         pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
3881                         *copyback = 1;
3882                 }
3883                 end = seq + pd->p_len;
3884                 if (th->th_flags & TH_SYN)
3885                         end++;
3886                 if (th->th_flags & TH_FIN)
3887                         end++;
3888         }
3889
3890         if ((th->th_flags & TH_ACK) == 0) {
3891                 /* Let it pass through the ack skew check */
3892                 ack = dst->seqlo;
3893         } else if ((ack == 0 &&
3894             (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
3895             /* broken tcp stacks do not set ack */
3896             (dst->state < TCPS_SYN_SENT)) {
3897                 /*
3898                  * Many stacks (ours included) will set the ACK number in an
3899                  * FIN|ACK if the SYN times out -- no sequence to ACK.
3900                  */
3901                 ack = dst->seqlo;
3902         }
3903
3904         if (seq == end) {
3905                 /* Ease sequencing restrictions on no data packets */
3906                 seq = src->seqlo;
3907                 end = seq;
3908         }
3909
3910         ackskew = dst->seqlo - ack;
3911
3912
3913         /*
3914          * Need to demodulate the sequence numbers in any TCP SACK options
3915          * (Selective ACK). We could optionally validate the SACK values
3916          * against the current ACK window, either forwards or backwards, but
3917          * I'm not confident that SACK has been implemented properly
3918          * everywhere. It wouldn't surprise me if several stacks accidently
3919          * SACK too far backwards of previously ACKed data. There really aren't
3920          * any security implications of bad SACKing unless the target stack
3921          * doesn't validate the option length correctly. Someone trying to
3922          * spoof into a TCP connection won't bother blindly sending SACK
3923          * options anyway.
3924          */
3925         if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
3926                 if (pf_modulate_sack(m, off, pd, th, dst))
3927                         *copyback = 1;
3928         }
3929
3930
3931 #define MAXACKWINDOW (0xffff + 1500)    /* 1500 is an arbitrary fudge factor */
3932         if (SEQ_GEQ(src->seqhi, end) &&
3933             /* Last octet inside other's window space */
3934             SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
3935             /* Retrans: not more than one window back */
3936             (ackskew >= -MAXACKWINDOW) &&
3937             /* Acking not more than one reassembled fragment backwards */
3938             (ackskew <= (MAXACKWINDOW << sws)) &&
3939             /* Acking not more than one window forward */
3940             ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
3941             (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
3942             (pd->flags & PFDESC_IP_REAS) == 0)) {
3943             /* Require an exact/+1 sequence match on resets when possible */
3944
3945                 if (dst->scrub || src->scrub) {
3946                         if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
3947                             *state, src, dst, copyback))
3948                                 return (PF_DROP);
3949                 }
3950
3951                 /* update max window */
3952                 if (src->max_win < win)
3953                         src->max_win = win;
3954                 /* synchronize sequencing */
3955                 if (SEQ_GT(end, src->seqlo))
3956                         src->seqlo = end;
3957                 /* slide the window of what the other end can send */
3958                 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
3959                         dst->seqhi = ack + MAX((win << sws), 1);
3960
3961
3962                 /* update states */
3963                 if (th->th_flags & TH_SYN)
3964                         if (src->state < TCPS_SYN_SENT)
3965                                 src->state = TCPS_SYN_SENT;
3966                 if (th->th_flags & TH_FIN)
3967                         if (src->state < TCPS_CLOSING)
3968                                 src->state = TCPS_CLOSING;
3969                 if (th->th_flags & TH_ACK) {
3970                         if (dst->state == TCPS_SYN_SENT) {
3971                                 dst->state = TCPS_ESTABLISHED;
3972                                 if (src->state == TCPS_ESTABLISHED &&
3973                                     (*state)->src_node != NULL &&
3974                                     pf_src_connlimit(state)) {
3975                                         REASON_SET(reason, PFRES_SRCLIMIT);
3976                                         return (PF_DROP);
3977                                 }
3978                         } else if (dst->state == TCPS_CLOSING)
3979                                 dst->state = TCPS_FIN_WAIT_2;
3980                 }
3981                 if (th->th_flags & TH_RST)
3982                         src->state = dst->state = TCPS_TIME_WAIT;
3983
3984                 /* update expire time */
3985                 (*state)->expire = time_uptime;
3986                 if (src->state >= TCPS_FIN_WAIT_2 &&
3987                     dst->state >= TCPS_FIN_WAIT_2)
3988                         (*state)->timeout = PFTM_TCP_CLOSED;
3989                 else if (src->state >= TCPS_CLOSING &&
3990                     dst->state >= TCPS_CLOSING)
3991                         (*state)->timeout = PFTM_TCP_FIN_WAIT;
3992                 else if (src->state < TCPS_ESTABLISHED ||
3993                     dst->state < TCPS_ESTABLISHED)
3994                         (*state)->timeout = PFTM_TCP_OPENING;
3995                 else if (src->state >= TCPS_CLOSING ||
3996                     dst->state >= TCPS_CLOSING)
3997                         (*state)->timeout = PFTM_TCP_CLOSING;
3998                 else
3999                         (*state)->timeout = PFTM_TCP_ESTABLISHED;
4000
4001                 /* Fall through to PASS packet */
4002
4003         } else if ((dst->state < TCPS_SYN_SENT ||
4004                 dst->state >= TCPS_FIN_WAIT_2 ||
4005                 src->state >= TCPS_FIN_WAIT_2) &&
4006             SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
4007             /* Within a window forward of the originating packet */
4008             SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
4009             /* Within a window backward of the originating packet */
4010
4011                 /*
4012                  * This currently handles three situations:
4013                  *  1) Stupid stacks will shotgun SYNs before their peer
4014                  *     replies.
4015                  *  2) When PF catches an already established stream (the
4016                  *     firewall rebooted, the state table was flushed, routes
4017                  *     changed...)
4018                  *  3) Packets get funky immediately after the connection
4019                  *     closes (this should catch Solaris spurious ACK|FINs
4020                  *     that web servers like to spew after a close)
4021                  *
4022                  * This must be a little more careful than the above code
4023                  * since packet floods will also be caught here. We don't
4024                  * update the TTL here to mitigate the damage of a packet
4025                  * flood and so the same code can handle awkward establishment
4026                  * and a loosened connection close.
4027                  * In the establishment case, a correct peer response will
4028                  * validate the connection, go through the normal state code
4029                  * and keep updating the state TTL.
4030                  */
4031
4032                 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4033                         printf("pf: loose state match: ");
4034                         pf_print_state(*state);
4035                         pf_print_flags(th->th_flags);
4036                         printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4037                             "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
4038                             pd->p_len, ackskew, (unsigned long long)(*state)->packets[0],
4039                             (unsigned long long)(*state)->packets[1],
4040                             pd->dir == PF_IN ? "in" : "out",
4041                             pd->dir == (*state)->direction ? "fwd" : "rev");
4042                 }
4043
4044                 if (dst->scrub || src->scrub) {
4045                         if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4046                             *state, src, dst, copyback))
4047                                 return (PF_DROP);
4048                 }
4049
4050                 /* update max window */
4051                 if (src->max_win < win)
4052                         src->max_win = win;
4053                 /* synchronize sequencing */
4054                 if (SEQ_GT(end, src->seqlo))
4055                         src->seqlo = end;
4056                 /* slide the window of what the other end can send */
4057                 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4058                         dst->seqhi = ack + MAX((win << sws), 1);
4059
4060                 /*
4061                  * Cannot set dst->seqhi here since this could be a shotgunned
4062                  * SYN and not an already established connection.
4063                  */
4064
4065                 if (th->th_flags & TH_FIN)
4066                         if (src->state < TCPS_CLOSING)
4067                                 src->state = TCPS_CLOSING;
4068                 if (th->th_flags & TH_RST)
4069                         src->state = dst->state = TCPS_TIME_WAIT;
4070
4071                 /* Fall through to PASS packet */
4072
4073         } else {
4074                 if ((*state)->dst.state == TCPS_SYN_SENT &&
4075                     (*state)->src.state == TCPS_SYN_SENT) {
4076                         /* Send RST for state mismatches during handshake */
4077                         if (!(th->th_flags & TH_RST))
4078                                 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4079                                     pd->dst, pd->src, th->th_dport,
4080                                     th->th_sport, ntohl(th->th_ack), 0,
4081                                     TH_RST, 0, 0,
4082                                     (*state)->rule.ptr->return_ttl, 1, 0,
4083                                     kif->pfik_ifp);
4084                         src->seqlo = 0;
4085                         src->seqhi = 1;
4086                         src->max_win = 1;
4087                 } else if (V_pf_status.debug >= PF_DEBUG_MISC) {
4088                         printf("pf: BAD state: ");
4089                         pf_print_state(*state);
4090                         pf_print_flags(th->th_flags);
4091                         printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4092                             "pkts=%llu:%llu dir=%s,%s\n",
4093                             seq, orig_seq, ack, pd->p_len, ackskew,
4094                             (unsigned long long)(*state)->packets[0],
4095                             (unsigned long long)(*state)->packets[1],
4096                             pd->dir == PF_IN ? "in" : "out",
4097                             pd->dir == (*state)->direction ? "fwd" : "rev");
4098                         printf("pf: State failure on: %c %c %c %c | %c %c\n",
4099                             SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4100                             SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4101                             ' ': '2',
4102                             (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4103                             (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4104                             SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4105                             SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4106                 }
4107                 REASON_SET(reason, PFRES_BADSTATE);
4108                 return (PF_DROP);
4109         }
4110
4111         return (PF_PASS);
4112 }
4113
/*
 * Sloppy TCP state tracker: advance the two peers' states and refresh the
 * state entry's timeout based solely on the packet's TCP flags, with no
 * sequence-number window validation.  Used for states carrying the
 * PFSTATE_SLOPPY flag (see pf_test_state_tcp()).
 *
 * Returns PF_PASS, or PF_DROP with *reason = PFRES_SRCLIMIT when the
 * per-source connection limit check fails.
 */
static int
pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
	struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
{
	struct tcphdr		*th = pd->hdr.tcp;

	/* A SYN from this peer opens (or re-opens) its side. */
	if (th->th_flags & TH_SYN)
		if (src->state < TCPS_SYN_SENT)
			src->state = TCPS_SYN_SENT;
	/* A FIN from this peer starts closing its side. */
	if (th->th_flags & TH_FIN)
		if (src->state < TCPS_CLOSING)
			src->state = TCPS_CLOSING;
	if (th->th_flags & TH_ACK) {
		if (dst->state == TCPS_SYN_SENT) {
			/* This ACK completes the peer's handshake. */
			dst->state = TCPS_ESTABLISHED;
			/*
			 * Connection just became fully established; enforce
			 * the per-source-node connection limit, if any.
			 */
			if (src->state == TCPS_ESTABLISHED &&
			    (*state)->src_node != NULL &&
			    pf_src_connlimit(state)) {
				REASON_SET(reason, PFRES_SRCLIMIT);
				return (PF_DROP);
			}
		} else if (dst->state == TCPS_CLOSING) {
			/* ACK of the peer's FIN. */
			dst->state = TCPS_FIN_WAIT_2;
		} else if (src->state == TCPS_SYN_SENT &&
		    dst->state < TCPS_SYN_SENT) {
			/*
			 * Handle a special sloppy case where we only see one
			 * half of the connection. If there is a ACK after
			 * the initial SYN without ever seeing a packet from
			 * the destination, set the connection to established.
			 */
			dst->state = src->state = TCPS_ESTABLISHED;
			if ((*state)->src_node != NULL &&
			    pf_src_connlimit(state)) {
				REASON_SET(reason, PFRES_SRCLIMIT);
				return (PF_DROP);
			}
		} else if (src->state == TCPS_CLOSING &&
		    dst->state == TCPS_ESTABLISHED &&
		    dst->seqlo == 0) {
			/*
			 * Handle the closing of half connections where we
			 * don't see the full bidirectional FIN/ACK+ACK
			 * handshake.
			 */
			dst->state = TCPS_CLOSING;
		}
	}
	/* RST tears down both sides at once. */
	if (th->th_flags & TH_RST)
		src->state = dst->state = TCPS_TIME_WAIT;

	/* update expire time */
	(*state)->expire = time_uptime;
	/*
	 * Select the idle timeout for the connection's current phase.
	 * Order matters: the most-closed combinations are checked first.
	 */
	if (src->state >= TCPS_FIN_WAIT_2 &&
	    dst->state >= TCPS_FIN_WAIT_2)
		(*state)->timeout = PFTM_TCP_CLOSED;
	else if (src->state >= TCPS_CLOSING &&
	    dst->state >= TCPS_CLOSING)
		(*state)->timeout = PFTM_TCP_FIN_WAIT;
	else if (src->state < TCPS_ESTABLISHED ||
	    dst->state < TCPS_ESTABLISHED)
		(*state)->timeout = PFTM_TCP_OPENING;
	else if (src->state >= TCPS_CLOSING ||
	    dst->state >= TCPS_CLOSING)
		(*state)->timeout = PFTM_TCP_CLOSING;
	else
		(*state)->timeout = PFTM_TCP_ESTABLISHED;

	return (PF_PASS);
}
4184
/*
 * State lookup and tracking for a TCP packet.  Builds the state key from
 * the packet (slot order depends on wire vs. stack side), looks up the
 * state, runs the synproxy handshake logic when active, detects reuse of
 * a finished state by a fresh SYN, dispatches to the sloppy or full
 * tracker, applies NAT rewriting if the wire and stack keys differ, and
 * copies any modified TCP header back into the mbuf.
 *
 * Returns PF_PASS, PF_DROP (with *reason set), or PF_SYNPROXY_DROP when
 * the synproxy logic consumed the packet.
 */
static int
pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
    struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
    u_short *reason)
{
	struct pf_state_key_cmp	 key;
	struct tcphdr		*th = pd->hdr.tcp;
	int			 copyback = 0;	/* header changed; copy back to mbuf */
	struct pf_state_peer	*src, *dst;
	struct pf_state_key	*sk;

	bzero(&key, sizeof(key));
	key.af = pd->af;
	key.proto = IPPROTO_TCP;
	if (direction == PF_IN) {	/* wire side, straight */
		PF_ACPY(&key.addr[0], pd->src, key.af);
		PF_ACPY(&key.addr[1], pd->dst, key.af);
		key.port[0] = th->th_sport;
		key.port[1] = th->th_dport;
	} else {			/* stack side, reverse */
		PF_ACPY(&key.addr[1], pd->src, key.af);
		PF_ACPY(&key.addr[0], pd->dst, key.af);
		key.port[1] = th->th_sport;
		key.port[0] = th->th_dport;
	}

	/*
	 * NOTE(review): STATE_LOOKUP is a macro defined elsewhere in this
	 * file; it appears to handle the lookup-miss case itself (control
	 * only falls through here when *state was found) -- confirm.
	 */
	STATE_LOOKUP(kif, &key, direction, *state, pd);

	/* Orient src/dst peers relative to this packet's direction. */
	if (direction == (*state)->direction) {
		src = &(*state)->src;
		dst = &(*state)->dst;
	} else {
		src = &(*state)->dst;
		dst = &(*state)->src;
	}

	sk = (*state)->key[pd->didx];

	/*
	 * Synproxy phase 1: complete the three-way handshake with the
	 * connection originator ourselves before touching the destination.
	 */
	if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
		if (direction != (*state)->direction) {
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_SYNPROXY_DROP);
		}
		if (th->th_flags & TH_SYN) {
			if (ntohl(th->th_seq) != (*state)->src.seqlo) {
				REASON_SET(reason, PFRES_SYNPROXY);
				return (PF_DROP);
			}
			/* Answer the (possibly retransmitted) SYN with our
			 * own SYN|ACK and swallow the original packet. */
			pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
			    pd->src, th->th_dport, th->th_sport,
			    (*state)->src.seqhi, ntohl(th->th_seq) + 1,
			    TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0, NULL);
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_SYNPROXY_DROP);
		} else if (!(th->th_flags & TH_ACK) ||
		    (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
		    (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
			/* Not the exact final ACK of our SYN|ACK. */
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_DROP);
		} else if ((*state)->src_node != NULL &&
		    pf_src_connlimit(state)) {
			REASON_SET(reason, PFRES_SRCLIMIT);
			return (PF_DROP);
		} else
			/* Originator proven; move on to the server side. */
			(*state)->src.state = PF_TCPS_PROXY_DST;
	}
	/*
	 * Synproxy phase 2: open the real connection to the destination,
	 * then stitch both halves together with sequence modulation.
	 */
	if ((*state)->src.state == PF_TCPS_PROXY_DST) {
		if (direction == (*state)->direction) {
			if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
			    (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
			    (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
				REASON_SET(reason, PFRES_SYNPROXY);
				return (PF_DROP);
			}
			(*state)->src.max_win = MAX(ntohs(th->th_win), 1);
			if ((*state)->dst.seqhi == 1)
				(*state)->dst.seqhi = htonl(arc4random());
			/* Send our SYN to the real destination. */
			pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
			    &sk->addr[pd->sidx], &sk->addr[pd->didx],
			    sk->port[pd->sidx], sk->port[pd->didx],
			    (*state)->dst.seqhi, 0, TH_SYN, 0,
			    (*state)->src.mss, 0, 0, (*state)->tag, NULL);
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_SYNPROXY_DROP);
		} else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
		    (TH_SYN|TH_ACK)) ||
		    (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
			/* Not the destination's matching SYN|ACK. */
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_DROP);
		} else {
			(*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
			(*state)->dst.seqlo = ntohl(th->th_seq);
			/* ACK the destination's SYN|ACK ... */
			pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
			    pd->src, th->th_dport, th->th_sport,
			    ntohl(th->th_ack), ntohl(th->th_seq) + 1,
			    TH_ACK, (*state)->src.max_win, 0, 0, 0,
			    (*state)->tag, NULL);
			/* ... and replay the final ACK toward the client. */
			pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
			    &sk->addr[pd->sidx], &sk->addr[pd->didx],
			    sk->port[pd->sidx], sk->port[pd->didx],
			    (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
			    TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0, NULL);
			/*
			 * Record the sequence offsets between the two proxied
			 * handshakes so later segments can be modulated, and
			 * mark the connection established.
			 */
			(*state)->src.seqdiff = (*state)->dst.seqhi -
			    (*state)->src.seqlo;
			(*state)->dst.seqdiff = (*state)->src.seqhi -
			    (*state)->dst.seqlo;
			(*state)->src.seqhi = (*state)->src.seqlo +
			    (*state)->dst.max_win;
			(*state)->dst.seqhi = (*state)->dst.seqlo +
			    (*state)->src.max_win;
			(*state)->src.wscale = (*state)->dst.wscale = 0;
			(*state)->src.state = (*state)->dst.state =
			    TCPS_ESTABLISHED;
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_SYNPROXY_DROP);
		}
	}

	/*
	 * A pure SYN on a connection both of whose sides are already past
	 * FIN_WAIT_2 means the old state is being reused: unlink it and
	 * drop this packet (a retransmitted SYN can then create a new
	 * state).
	 */
	if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
	    dst->state >= TCPS_FIN_WAIT_2 &&
	    src->state >= TCPS_FIN_WAIT_2) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("pf: state reuse ");
			pf_print_state(*state);
			pf_print_flags(th->th_flags);
			printf("\n");
		}
		/* XXX make sure it's the same direction ?? */
		(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
		pf_unlink_state(*state, PF_ENTER_LOCKED);
		*state = NULL;
		return (PF_DROP);
	}

	/* Flag-only or full sequence-window tracking, per state flags. */
	if ((*state)->state_flags & PFSTATE_SLOPPY) {
		if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP)
			return (PF_DROP);
	} else {
		if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason,
		    &copyback) == PF_DROP)
			return (PF_DROP);
	}

	/* translate source/destination address, if necessary */
	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
		struct pf_state_key *nk = (*state)->key[pd->didx];

		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
		    nk->port[pd->sidx] != th->th_sport)
			pf_change_ap(pd->src, &th->th_sport, pd->ip_sum,
			    &th->th_sum, &nk->addr[pd->sidx],
			    nk->port[pd->sidx], 0, pd->af);

		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
		    nk->port[pd->didx] != th->th_dport)
			pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum,
			    &th->th_sum, &nk->addr[pd->didx],
			    nk->port[pd->didx], 0, pd->af);
		copyback = 1;
	}

	/* Copyback sequence modulation or stateful scrub changes if needed */
	if (copyback)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);
}
4352
4353 static int
4354 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
4355     struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4356 {
4357         struct pf_state_peer    *src, *dst;
4358         struct pf_state_key_cmp  key;
4359         struct udphdr           *uh = pd->hdr.udp;
4360
4361         bzero(&key, sizeof(key));
4362         key.af = pd->af;
4363         key.proto = IPPROTO_UDP;
4364         if (direction == PF_IN) {       /* wire side, straight */
4365                 PF_ACPY(&key.addr[0], pd->src, key.af);
4366                 PF_ACPY(&key.addr[1], pd->dst, key.af);
4367                 key.port[0] = uh->uh_sport;
4368                 key.port[1] = uh->uh_dport;
4369         } else {                        /* stack side, reverse */
4370                 PF_ACPY(&key.addr[1], pd->src, key.af);
4371                 PF_ACPY(&key.addr[0], pd->dst, key.af);
4372                 key.port[1] = uh->uh_sport;
4373                 key.port[0] = uh->uh_dport;
4374         }
4375
4376         STATE_LOOKUP(kif, &key, direction, *state, pd);
4377
4378         if (direction == (*state)->direction) {
4379                 src = &(*state)->src;
4380                 dst = &(*state)->dst;
4381         } else {
4382                 src = &(*state)->dst;
4383                 dst = &(*state)->src;
4384         }
4385
4386         /* update states */
4387         if (src->state < PFUDPS_SINGLE)
4388                 src->state = PFUDPS_SINGLE;
4389         if (dst->state == PFUDPS_SINGLE)
4390                 dst->state = PFUDPS_MULTIPLE;
4391
4392         /* update expire time */
4393         (*state)->expire = time_uptime;
4394         if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4395                 (*state)->timeout = PFTM_UDP_MULTIPLE;
4396         else
4397                 (*state)->timeout = PFTM_UDP_SINGLE;
4398
4399         /* translate source/destination address, if necessary */
4400         if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4401                 struct pf_state_key *nk = (*state)->key[pd->didx];
4402
4403                 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4404                     nk->port[pd->sidx] != uh->uh_sport)
4405                         pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum,
4406                             &uh->uh_sum, &nk->addr[pd->sidx],
4407                             nk->port[pd->sidx], 1, pd->af);
4408
4409                 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4410                     nk->port[pd->didx] != uh->uh_dport)
4411                         pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum,
4412                             &uh->uh_sum, &nk->addr[pd->didx],
4413                             nk->port[pd->didx], 1, pd->af);
4414                 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
4415         }
4416
4417         return (PF_PASS);
4418 }
4419
4420 static int
4421 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
4422     struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
4423 {
4424         struct pf_addr  *saddr = pd->src, *daddr = pd->dst;
4425         u_int16_t        icmpid = 0, *icmpsum;
4426         u_int8_t         icmptype;
4427         int              state_icmp = 0;
4428         struct pf_state_key_cmp key;
4429
4430         bzero(&key, sizeof(key));
4431         switch (pd->proto) {
4432 #ifdef INET
4433         case IPPROTO_ICMP:
4434                 icmptype = pd->hdr.icmp->icmp_type;
4435                 icmpid = pd->hdr.icmp->icmp_id;
4436                 icmpsum = &pd->hdr.icmp->icmp_cksum;
4437
4438                 if (icmptype == ICMP_UNREACH ||
4439                     icmptype == ICMP_SOURCEQUENCH ||
4440                     icmptype == ICMP_REDIRECT ||
4441                     icmptype == ICMP_TIMXCEED ||
4442                     icmptype == ICMP_PARAMPROB)
4443                         state_icmp++;
4444                 break;
4445 #endif /* INET */
4446 #ifdef INET6
4447         case IPPROTO_ICMPV6:
4448                 icmptype = pd->hdr.icmp6->icmp6_type;
4449                 icmpid = pd->hdr.icmp6->icmp6_id;
4450                 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4451
4452                 if (icmptype == ICMP6_DST_UNREACH ||
4453                     icmptype == ICMP6_PACKET_TOO_BIG ||
4454                     icmptype == ICMP6_TIME_EXCEEDED ||
4455                     icmptype == ICMP6_PARAM_PROB)
4456                         state_icmp++;
4457                 break;
4458 #endif /* INET6 */
4459         }
4460
4461         if (!state_icmp) {
4462
4463                 /*
4464                  * ICMP query/reply message not related to a TCP/UDP packet.
4465                  * Search for an ICMP state.
4466                  */
4467                 key.af = pd->af;
4468                 key.proto = pd->proto;
4469                 key.port[0] = key.port[1] = icmpid;
4470                 if (direction == PF_IN) {       /* wire side, straight */
4471                         PF_ACPY(&key.addr[0], pd->src, key.af);
4472                         PF_ACPY(&key.addr[1], pd->dst, key.af);
4473                 } else {                        /* stack side, reverse */
4474                         PF_ACPY(&key.addr[1], pd->src, key.af);
4475                         PF_ACPY(&key.addr[0], pd->dst, key.af);
4476                 }
4477
4478                 STATE_LOOKUP(kif, &key, direction, *state, pd);
4479
4480                 (*state)->expire = time_uptime;
4481                 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
4482
4483                 /* translate source/destination address, if necessary */
4484                 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4485                         struct pf_state_key *nk = (*state)->key[pd->didx];
4486
4487                         switch (pd->af) {
4488 #ifdef INET
4489                         case AF_INET:
4490                                 if (PF_ANEQ(pd->src,
4491                                     &nk->addr[pd->sidx], AF_INET))
4492                                         pf_change_a(&saddr->v4.s_addr,
4493                                             pd->ip_sum,
4494                                             nk->addr[pd->sidx].v4.s_addr, 0);
4495
4496                                 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
4497                                     AF_INET))
4498                                         pf_change_a(&daddr->v4.s_addr,
4499                                             pd->ip_sum,
4500                                             nk->addr[pd->didx].v4.s_addr, 0);
4501
4502                                 if (nk->port[0] !=
4503                                     pd->hdr.icmp->icmp_id) {
4504                                         pd->hdr.icmp->icmp_cksum =
4505                                             pf_cksum_fixup(
4506                                             pd->hdr.icmp->icmp_cksum, icmpid,
4507                                             nk->port[pd->sidx], 0);
4508                                         pd->hdr.icmp->icmp_id =
4509                                             nk->port[pd->sidx];
4510                                 }
4511
4512                                 m_copyback(m, off, ICMP_MINLEN,
4513                                     (caddr_t )pd->hdr.icmp);
4514                                 break;
4515 #endif /* INET */
4516 #ifdef INET6
4517                         case AF_INET6:
4518                                 if (PF_ANEQ(pd->src,
4519                                     &nk->addr[pd->sidx], AF_INET6))
4520                                         pf_change_a6(saddr,
4521                                             &pd->hdr.icmp6->icmp6_cksum,
4522                                             &nk->addr[pd->sidx], 0);
4523
4524                                 if (PF_ANEQ(pd->dst,
4525                                     &nk->addr[pd->didx], AF_INET6))
4526                                         pf_change_a6(daddr,
4527                                             &pd->hdr.icmp6->icmp6_cksum,
4528                                             &nk->addr[pd->didx], 0);
4529
4530                                 m_copyback(m, off, sizeof(struct icmp6_hdr),
4531                                     (caddr_t )pd->hdr.icmp6);
4532                                 break;
4533 #endif /* INET6 */
4534                         }
4535                 }
4536                 return (PF_PASS);
4537
4538         } else {
4539                 /*
4540                  * ICMP error message in response to a TCP/UDP packet.
4541                  * Extract the inner TCP/UDP header and search for that state.
4542                  */
4543
4544                 struct pf_pdesc pd2;
4545                 bzero(&pd2, sizeof pd2);
4546 #ifdef INET
4547                 struct ip       h2;
4548 #endif /* INET */
4549 #ifdef INET6
4550                 struct ip6_hdr  h2_6;
4551                 int             terminal = 0;
4552 #endif /* INET6 */
4553                 int             ipoff2 = 0;
4554                 int             off2 = 0;
4555
4556                 pd2.af = pd->af;
4557                 /* Payload packet is from the opposite direction. */
4558                 pd2.sidx = (direction == PF_IN) ? 1 : 0;
4559                 pd2.didx = (direction == PF_IN) ? 0 : 1;
4560                 switch (pd->af) {
4561 #ifdef INET
4562                 case AF_INET:
4563                         /* offset of h2 in mbuf chain */
4564                         ipoff2 = off + ICMP_MINLEN;
4565
4566                         if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
4567                             NULL, reason, pd2.af)) {
4568                                 DPFPRINTF(PF_DEBUG_MISC,
4569                                     ("pf: ICMP error message too short "
4570                                     "(ip)\n"));
4571                                 return (PF_DROP);
4572                         }
4573                         /*
4574                          * ICMP error messages don't refer to non-first
4575                          * fragments
4576                          */
4577                         if (h2.ip_off & htons(IP_OFFMASK)) {
4578                                 REASON_SET(reason, PFRES_FRAG);
4579                                 return (PF_DROP);
4580                         }
4581
4582                         /* offset of protocol header that follows h2 */
4583                         off2 = ipoff2 + (h2.ip_hl << 2);
4584
4585                         pd2.proto = h2.ip_p;
4586                         pd2.src = (struct pf_addr *)&h2.ip_src;
4587                         pd2.dst = (struct pf_addr *)&h2.ip_dst;
4588                         pd2.ip_sum = &h2.ip_sum;
4589                         break;
4590 #endif /* INET */
4591 #ifdef INET6
4592                 case AF_INET6:
4593                         ipoff2 = off + sizeof(struct icmp6_hdr);
4594
4595                         if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
4596                             NULL, reason, pd2.af)) {
4597                                 DPFPRINTF(PF_DEBUG_MISC,
4598                                     ("pf: ICMP error message too short "
4599                                     "(ip6)\n"));
4600                                 return (PF_DROP);
4601                         }
4602                         pd2.proto = h2_6.ip6_nxt;
4603                         pd2.src = (struct pf_addr *)&h2_6.ip6_src;
4604                         pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
4605                         pd2.ip_sum = NULL;
4606                         off2 = ipoff2 + sizeof(h2_6);
4607                         do {
4608                                 switch (pd2.proto) {
4609                                 case IPPROTO_FRAGMENT:
4610                                         /*
4611                                          * ICMPv6 error messages for
4612                                          * non-first fragments
4613                                          */
4614                                         REASON_SET(reason, PFRES_FRAG);
4615                                         return (PF_DROP);
4616                                 case IPPROTO_AH:
4617                                 case IPPROTO_HOPOPTS:
4618                                 case IPPROTO_ROUTING:
4619                                 case IPPROTO_DSTOPTS: {
4620                                         /* get next header and header length */
4621                                         struct ip6_ext opt6;
4622
4623                                         if (!pf_pull_hdr(m, off2, &opt6,
4624                                             sizeof(opt6), NULL, reason,
4625                                             pd2.af)) {
4626                                                 DPFPRINTF(PF_DEBUG_MISC,
4627                                                     ("pf: ICMPv6 short opt\n"));
4628                                                 return (PF_DROP);
4629                                         }
4630                                         if (pd2.proto == IPPROTO_AH)
4631                                                 off2 += (opt6.ip6e_len + 2) * 4;
4632                                         else
4633                                                 off2 += (opt6.ip6e_len + 1) * 8;
4634                                         pd2.proto = opt6.ip6e_nxt;
4635                                         /* goto the next header */
4636                                         break;
4637                                 }
4638                                 default:
4639                                         terminal++;
4640                                         break;
4641                                 }
4642                         } while (!terminal);
4643                         break;
4644 #endif /* INET6 */
4645                 }
4646
4647                 switch (pd2.proto) {
4648                 case IPPROTO_TCP: {
4649                         struct tcphdr            th;
4650                         u_int32_t                seq;
4651                         struct pf_state_peer    *src, *dst;
4652                         u_int8_t                 dws;
4653                         int                      copyback = 0;
4654
4655                         /*
4656                          * Only the first 8 bytes of the TCP header can be
4657                          * expected. Don't access any TCP header fields after
4658                          * th_seq, an ackskew test is not possible.
4659                          */
4660                         if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
4661                             pd2.af)) {
4662                                 DPFPRINTF(PF_DEBUG_MISC,
4663                                     ("pf: ICMP error message too short "
4664                                     "(tcp)\n"));
4665                                 return (PF_DROP);
4666                         }
4667
4668                         key.af = pd2.af;
4669                         key.proto = IPPROTO_TCP;
4670                         PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4671                         PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4672                         key.port[pd2.sidx] = th.th_sport;
4673                         key.port[pd2.didx] = th.th_dport;
4674
4675                         STATE_LOOKUP(kif, &key, direction, *state, pd);
4676
4677                         if (direction == (*state)->direction) {
4678                                 src = &(*state)->dst;
4679                                 dst = &(*state)->src;
4680                         } else {
4681                                 src = &(*state)->src;
4682                                 dst = &(*state)->dst;
4683                         }
4684
4685                         if (src->wscale && dst->wscale)
4686                                 dws = dst->wscale & PF_WSCALE_MASK;
4687                         else
4688                                 dws = 0;
4689
4690                         /* Demodulate sequence number */
4691                         seq = ntohl(th.th_seq) - src->seqdiff;
4692                         if (src->seqdiff) {
4693                                 pf_change_a(&th.th_seq, icmpsum,
4694                                     htonl(seq), 0);
4695                                 copyback = 1;
4696                         }
4697
4698                         if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
4699                             (!SEQ_GEQ(src->seqhi, seq) ||
4700                             !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
4701                                 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4702                                         printf("pf: BAD ICMP %d:%d ",
4703                                             icmptype, pd->hdr.icmp->icmp_code);
4704                                         pf_print_host(pd->src, 0, pd->af);
4705                                         printf(" -> ");
4706                                         pf_print_host(pd->dst, 0, pd->af);
4707                                         printf(" state: ");
4708                                         pf_print_state(*state);
4709                                         printf(" seq=%u\n", seq);
4710                                 }
4711                                 REASON_SET(reason, PFRES_BADSTATE);
4712                                 return (PF_DROP);
4713                         } else {
4714                                 if (V_pf_status.debug >= PF_DEBUG_MISC) {
4715                                         printf("pf: OK ICMP %d:%d ",
4716                                             icmptype, pd->hdr.icmp->icmp_code);
4717                                         pf_print_host(pd->src, 0, pd->af);
4718                                         printf(" -> ");
4719                                         pf_print_host(pd->dst, 0, pd->af);
4720                                         printf(" state: ");
4721                                         pf_print_state(*state);
4722                                         printf(" seq=%u\n", seq);
4723                                 }
4724                         }
4725
4726                         /* translate source/destination address, if necessary */
4727                         if ((*state)->key[PF_SK_WIRE] !=
4728                             (*state)->key[PF_SK_STACK]) {
4729                                 struct pf_state_key *nk =
4730                                     (*state)->key[pd->didx];
4731
4732                                 if (PF_ANEQ(pd2.src,
4733                                     &nk->addr[pd2.sidx], pd2.af) ||
4734                                     nk->port[pd2.sidx] != th.th_sport)
4735                                         pf_change_icmp(pd2.src, &th.th_sport,
4736                                             daddr, &nk->addr[pd2.sidx],
4737                                             nk->port[pd2.sidx], NULL,
4738                                             pd2.ip_sum, icmpsum,
4739                                             pd->ip_sum, 0, pd2.af);
4740
4741                                 if (PF_ANEQ(pd2.dst,
4742                                     &nk->addr[pd2.didx], pd2.af) ||
4743                                     nk->port[pd2.didx] != th.th_dport)
4744                                         pf_change_icmp(pd2.dst, &th.th_dport,
4745                                             NULL, /* XXX Inbound NAT? */
4746                                             &nk->addr[pd2.didx],
4747                                             nk->port[pd2.didx], NULL,
4748                                             pd2.ip_sum, icmpsum,
4749                                             pd->ip_sum, 0, pd2.af);
4750                                 copyback = 1;
4751                         }
4752
4753                         if (copyback) {
4754                                 switch (pd2.af) {
4755 #ifdef INET
4756                                 case AF_INET:
4757                                         m_copyback(m, off, ICMP_MINLEN,
4758                                             (caddr_t )pd->hdr.icmp);
4759                                         m_copyback(m, ipoff2, sizeof(h2),
4760                                             (caddr_t )&h2);
4761                                         break;
4762 #endif /* INET */
4763 #ifdef INET6
4764                                 case AF_INET6:
4765                                         m_copyback(m, off,
4766                                             sizeof(struct icmp6_hdr),
4767                                             (caddr_t )pd->hdr.icmp6);
4768                                         m_copyback(m, ipoff2, sizeof(h2_6),
4769                                             (caddr_t )&h2_6);
4770                                         break;
4771 #endif /* INET6 */
4772                                 }
4773                                 m_copyback(m, off2, 8, (caddr_t)&th);
4774                         }
4775
4776                         return (PF_PASS);
4777                         break;
4778                 }
4779                 case IPPROTO_UDP: {
4780                         struct udphdr           uh;
4781
4782                         if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
4783                             NULL, reason, pd2.af)) {
4784                                 DPFPRINTF(PF_DEBUG_MISC,
4785                                     ("pf: ICMP error message too short "
4786                                     "(udp)\n"));
4787                                 return (PF_DROP);
4788                         }
4789
4790                         key.af = pd2.af;
4791                         key.proto = IPPROTO_UDP;
4792                         PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4793                         PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4794                         key.port[pd2.sidx] = uh.uh_sport;
4795                         key.port[pd2.didx] = uh.uh_dport;
4796
4797                         STATE_LOOKUP(kif, &key, direction, *state, pd);
4798
4799                         /* translate source/destination address, if necessary */
4800                         if ((*state)->key[PF_SK_WIRE] !=
4801                             (*state)->key[PF_SK_STACK]) {
4802                                 struct pf_state_key *nk =
4803                                     (*state)->key[pd->didx];
4804
4805                                 if (PF_ANEQ(pd2.src,
4806                                     &nk->addr[pd2.sidx], pd2.af) ||
4807                                     nk->port[pd2.sidx] != uh.uh_sport)
4808                                         pf_change_icmp(pd2.src, &uh.uh_sport,
4809                                             daddr, &nk->addr[pd2.sidx],
4810                                             nk->port[pd2.sidx], &uh.uh_sum,
4811                                             pd2.ip_sum, icmpsum,
4812                                             pd->ip_sum, 1, pd2.af);
4813
4814                                 if (PF_ANEQ(pd2.dst,
4815                                     &nk->addr[pd2.didx], pd2.af) ||
4816                                     nk->port[pd2.didx] != uh.uh_dport)
4817                                         pf_change_icmp(pd2.dst, &uh.uh_dport,
4818                                             NULL, /* XXX Inbound NAT? */
4819                                             &nk->addr[pd2.didx],
4820                                             nk->port[pd2.didx], &uh.uh_sum,
4821                                             pd2.ip_sum, icmpsum,
4822                                             pd->ip_sum, 1, pd2.af);
4823
4824                                 switch (pd2.af) {
4825 #ifdef INET
4826                                 case AF_INET:
4827                                         m_copyback(m, off, ICMP_MINLEN,
4828                                             (caddr_t )pd->hdr.icmp);
4829                                         m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4830                                         break;
4831 #endif /* INET */
4832 #ifdef INET6
4833                                 case AF_INET6:
4834                                         m_copyback(m, off,
4835                                             sizeof(struct icmp6_hdr),
4836                                             (caddr_t )pd->hdr.icmp6);
4837                                         m_copyback(m, ipoff2, sizeof(h2_6),
4838                                             (caddr_t )&h2_6);
4839                                         break;
4840 #endif /* INET6 */
4841                                 }
4842                                 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
4843                         }
4844                         return (PF_PASS);
4845                         break;
4846                 }
4847 #ifdef INET
4848                 case IPPROTO_ICMP: {
4849                         struct icmp             iih;
4850
4851                         if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
4852                             NULL, reason, pd2.af)) {
4853                                 DPFPRINTF(PF_DEBUG_MISC,
4854                                     ("pf: ICMP error message too short i"
4855                                     "(icmp)\n"));
4856                                 return (PF_DROP);
4857                         }
4858
4859                         key.af = pd2.af;
4860                         key.proto = IPPROTO_ICMP;
4861                         PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4862                         PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4863                         key.port[0] = key.port[1] = iih.icmp_id;
4864
4865                         STATE_LOOKUP(kif, &key, direction, *state, pd);
4866
4867                         /* translate source/destination address, if necessary */
4868                         if ((*state)->key[PF_SK_WIRE] !=
4869                             (*state)->key[PF_SK_STACK]) {
4870                                 struct pf_state_key *nk =
4871                                     (*state)->key[pd->didx];
4872
4873                                 if (PF_ANEQ(pd2.src,
4874                                     &nk->addr[pd2.sidx], pd2.af) ||
4875                                     nk->port[pd2.sidx] != iih.icmp_id)
4876                                         pf_change_icmp(pd2.src, &iih.icmp_id,
4877                                             daddr, &nk->addr[pd2.sidx],
4878                                             nk->port[pd2.sidx], NULL,
4879                                             pd2.ip_sum, icmpsum,
4880                                             pd->ip_sum, 0, AF_INET);
4881
4882                                 if (PF_ANEQ(pd2.dst,
4883                                     &nk->addr[pd2.didx], pd2.af) ||
4884                                     nk->port[pd2.didx] != iih.icmp_id)
4885                                         pf_change_icmp(pd2.dst, &iih.icmp_id,
4886                                             NULL, /* XXX Inbound NAT? */
4887                                             &nk->addr[pd2.didx],
4888                                             nk->port[pd2.didx], NULL,
4889                                             pd2.ip_sum, icmpsum,
4890                                             pd->ip_sum, 0, AF_INET);
4891
4892                                 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
4893                                 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4894                                 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
4895                         }
4896                         return (PF_PASS);
4897                         break;
4898                 }
4899 #endif /* INET */
4900 #ifdef INET6
4901                 case IPPROTO_ICMPV6: {
4902                         struct icmp6_hdr        iih;
4903
4904                         if (!pf_pull_hdr(m, off2, &iih,
4905                             sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
4906                                 DPFPRINTF(PF_DEBUG_MISC,
4907                                     ("pf: ICMP error message too short "
4908                                     "(icmp6)\n"));
4909                                 return (PF_DROP);
4910                         }
4911
4912                         key.af = pd2.af;
4913                         key.proto = IPPROTO_ICMPV6;
4914                         PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4915                         PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4916                         key.port[0] = key.port[1] = iih.icmp6_id;
4917
4918                         STATE_LOOKUP(kif, &key, direction, *state, pd);
4919
4920                         /* translate source/destination address, if necessary */
4921                         if ((*state)->key[PF_SK_WIRE] !=
4922                             (*state)->key[PF_SK_STACK]) {
4923                                 struct pf_state_key *nk =
4924                                     (*state)->key[pd->didx];
4925
4926                                 if (PF_ANEQ(pd2.src,
4927                                     &nk->addr[pd2.sidx], pd2.af) ||
4928                                     nk->port[pd2.sidx] != iih.icmp6_id)
4929                                         pf_change_icmp(pd2.src, &iih.icmp6_id,
4930                                             daddr, &nk->addr[pd2.sidx],
4931                                             nk->port[pd2.sidx], NULL,
4932                                             pd2.ip_sum, icmpsum,
4933                                             pd->ip_sum, 0, AF_INET6);
4934
4935                                 if (PF_ANEQ(pd2.dst,
4936                                     &nk->addr[pd2.didx], pd2.af) ||
4937                                     nk->port[pd2.didx] != iih.icmp6_id)
4938                                         pf_change_icmp(pd2.dst, &iih.icmp6_id,
4939                                             NULL, /* XXX Inbound NAT? */
4940                                             &nk->addr[pd2.didx],
4941                                             nk->port[pd2.didx], NULL,
4942                                             pd2.ip_sum, icmpsum,
4943                                             pd->ip_sum, 0, AF_INET6);
4944
4945                                 m_copyback(m, off, sizeof(struct icmp6_hdr),
4946                                     (caddr_t)pd->hdr.icmp6);
4947                                 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
4948                                 m_copyback(m, off2, sizeof(struct icmp6_hdr),
4949                                     (caddr_t)&iih);
4950                         }
4951                         return (PF_PASS);
4952                         break;
4953                 }
4954 #endif /* INET6 */
4955                 default: {
4956                         key.af = pd2.af;
4957                         key.proto = pd2.proto;
4958                         PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4959                         PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4960                         key.port[0] = key.port[1] = 0;
4961
4962                         STATE_LOOKUP(kif, &key, direction, *state, pd);
4963
4964                         /* translate source/destination address, if necessary */
4965                         if ((*state)->key[PF_SK_WIRE] !=
4966                             (*state)->key[PF_SK_STACK]) {
4967                                 struct pf_state_key *nk =
4968                                     (*state)->key[pd->didx];
4969
4970                                 if (PF_ANEQ(pd2.src,
4971                                     &nk->addr[pd2.sidx], pd2.af))
4972                                         pf_change_icmp(pd2.src, NULL, daddr,
4973                                             &nk->addr[pd2.sidx], 0, NULL,
4974                                             pd2.ip_sum, icmpsum,
4975                                             pd->ip_sum, 0, pd2.af);
4976
4977                                 if (PF_ANEQ(pd2.dst,
4978                                     &nk->addr[pd2.didx], pd2.af))
4979                                         pf_change_icmp(pd2.src, NULL,
4980                                             NULL, /* XXX Inbound NAT? */
4981                                             &nk->addr[pd2.didx], 0, NULL,
4982                                             pd2.ip_sum, icmpsum,
4983                                             pd->ip_sum, 0, pd2.af);
4984
4985                                 switch (pd2.af) {
4986 #ifdef INET
4987                                 case AF_INET:
4988                                         m_copyback(m, off, ICMP_MINLEN,
4989                                             (caddr_t)pd->hdr.icmp);
4990                                         m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4991                                         break;
4992 #endif /* INET */
4993 #ifdef INET6
4994                                 case AF_INET6:
4995                                         m_copyback(m, off,
4996                                             sizeof(struct icmp6_hdr),
4997                                             (caddr_t )pd->hdr.icmp6);
4998                                         m_copyback(m, ipoff2, sizeof(h2_6),
4999                                             (caddr_t )&h2_6);
5000                                         break;
5001 #endif /* INET6 */
5002                                 }
5003                         }
5004                         return (PF_PASS);
5005                         break;
5006                 }
5007                 }
5008         }
5009 }
5010
5011 static int
5012 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
5013     struct mbuf *m, struct pf_pdesc *pd)
5014 {
5015         struct pf_state_peer    *src, *dst;
5016         struct pf_state_key_cmp  key;
5017
5018         bzero(&key, sizeof(key));
5019         key.af = pd->af;
5020         key.proto = pd->proto;
5021         if (direction == PF_IN) {
5022                 PF_ACPY(&key.addr[0], pd->src, key.af);
5023                 PF_ACPY(&key.addr[1], pd->dst, key.af);
5024                 key.port[0] = key.port[1] = 0;
5025         } else {
5026                 PF_ACPY(&key.addr[1], pd->src, key.af);
5027                 PF_ACPY(&key.addr[0], pd->dst, key.af);
5028                 key.port[1] = key.port[0] = 0;
5029         }
5030
5031         STATE_LOOKUP(kif, &key, direction, *state, pd);
5032
5033         if (direction == (*state)->direction) {
5034                 src = &(*state)->src;
5035                 dst = &(*state)->dst;
5036         } else {
5037                 src = &(*state)->dst;
5038                 dst = &(*state)->src;
5039         }
5040
5041         /* update states */
5042         if (src->state < PFOTHERS_SINGLE)
5043                 src->state = PFOTHERS_SINGLE;
5044         if (dst->state == PFOTHERS_SINGLE)
5045                 dst->state = PFOTHERS_MULTIPLE;
5046
5047         /* update expire time */
5048         (*state)->expire = time_uptime;
5049         if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
5050                 (*state)->timeout = PFTM_OTHER_MULTIPLE;
5051         else
5052                 (*state)->timeout = PFTM_OTHER_SINGLE;
5053
5054         /* translate source/destination address, if necessary */
5055         if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5056                 struct pf_state_key *nk = (*state)->key[pd->didx];
5057
5058                 KASSERT(nk, ("%s: nk is null", __func__));
5059                 KASSERT(pd, ("%s: pd is null", __func__));
5060                 KASSERT(pd->src, ("%s: pd->src is null", __func__));
5061                 KASSERT(pd->dst, ("%s: pd->dst is null", __func__));
5062                 switch (pd->af) {
5063 #ifdef INET
5064                 case AF_INET:
5065                         if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5066                                 pf_change_a(&pd->src->v4.s_addr,
5067                                     pd->ip_sum,
5068                                     nk->addr[pd->sidx].v4.s_addr,
5069                                     0);
5070
5071
5072                         if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5073                                 pf_change_a(&pd->dst->v4.s_addr,
5074                                     pd->ip_sum,
5075                                     nk->addr[pd->didx].v4.s_addr,
5076                                     0);
5077
5078                                 break;
5079 #endif /* INET */
5080 #ifdef INET6
5081                 case AF_INET6:
5082                         if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5083                                 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
5084
5085                         if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5086                                 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
5087 #endif /* INET6 */
5088                 }
5089         }
5090         return (PF_PASS);
5091 }
5092
5093 /*
5094  * ipoff and off are measured from the start of the mbuf chain.
5095  * h must be at "ipoff" on the mbuf chain.
5096  */
5097 void *
5098 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
5099     u_short *actionp, u_short *reasonp, sa_family_t af)
5100 {
5101         switch (af) {
5102 #ifdef INET
5103         case AF_INET: {
5104                 struct ip       *h = mtod(m, struct ip *);
5105                 u_int16_t        fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
5106
5107                 if (fragoff) {
5108                         if (fragoff >= len)
5109                                 ACTION_SET(actionp, PF_PASS);
5110                         else {
5111                                 ACTION_SET(actionp, PF_DROP);
5112                                 REASON_SET(reasonp, PFRES_FRAG);
5113                         }
5114                         return (NULL);
5115                 }
5116                 if (m->m_pkthdr.len < off + len ||
5117                     ntohs(h->ip_len) < off + len) {
5118                         ACTION_SET(actionp, PF_DROP);
5119                         REASON_SET(reasonp, PFRES_SHORT);
5120                         return (NULL);
5121                 }
5122                 break;
5123         }
5124 #endif /* INET */
5125 #ifdef INET6
5126         case AF_INET6: {
5127                 struct ip6_hdr  *h = mtod(m, struct ip6_hdr *);
5128
5129                 if (m->m_pkthdr.len < off + len ||
5130                     (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
5131                     (unsigned)(off + len)) {
5132                         ACTION_SET(actionp, PF_DROP);
5133                         REASON_SET(reasonp, PFRES_SHORT);
5134                         return (NULL);
5135                 }
5136                 break;
5137         }
5138 #endif /* INET6 */
5139         }
5140         m_copydata(m, off, len, p);
5141         return (p);
5142 }
5143
/*
 * Return 1 if "addr" is routable in address family "af", 0 otherwise.
 * When "kif" is non-NULL, additionally perform a uRPF check: the route's
 * egress interface must match kif (any one path, for multipath routes).
 * IPv6 scope-embedded addresses and IPsec enc(4) interfaces are always
 * accepted without a lookup.
 */
int
pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
    int rtableid)
{
#ifdef RADIX_MPATH
	struct radix_node_head	*rnh;
#endif
	struct sockaddr_in	*dst;
	int			 ret = 1;	/* default: routable */
	int			 check_mpath;
#ifdef INET6
	struct sockaddr_in6	*dst6;
	struct route_in6	 ro;
#else
	struct route		 ro;
#endif
	struct radix_node	*rn;
	struct rtentry		*rt;
	struct ifnet		*ifp;

	check_mpath = 0;
#ifdef RADIX_MPATH
	/* XXX: stick to table 0 for now */
	rnh = rt_tables_get_rnh(0, af);
	if (rnh != NULL && rn_mpath_capable(rnh))
		check_mpath = 1;
#endif
	bzero(&ro, sizeof(ro));
	/* Build the destination sockaddr for the route lookup. */
	switch (af) {
	case AF_INET:
		dst = satosin(&ro.ro_dst);
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof(*dst);
		dst->sin_addr = addr->v4;
		break;
#ifdef INET6
	case AF_INET6:
		/*
		 * Skip check for addresses with embedded interface scope,
		 * as they would always match anyway.
		 */
		if (IN6_IS_SCOPE_EMBED(&addr->v6))
			goto out;
		dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
		dst6->sin6_family = AF_INET6;
		dst6->sin6_len = sizeof(*dst6);
		dst6->sin6_addr = addr->v6;
		break;
#endif /* INET6 */
	default:
		return (0);
	}

	/* Skip checks for ipsec interfaces */
	if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
		goto out;

	/* Look the address up in the requested FIB. */
	switch (af) {
#ifdef INET6
	case AF_INET6:
		in6_rtalloc_ign(&ro, 0, rtableid);
		break;
#endif
#ifdef INET
	case AF_INET:
		in_rtalloc_ign((struct route *)&ro, 0, rtableid);
		break;
#endif
	default:
		rtalloc_ign((struct route *)&ro, 0);	/* No/default FIB. */
		break;
	}

	if (ro.ro_rt != NULL) {
		/* No interface given, this is a no-route check */
		if (kif == NULL)
			goto out;

		if (kif->pfik_ifp == NULL) {
			ret = 0;
			goto out;
		}

		/* Perform uRPF check if passed input interface */
		ret = 0;
		rn = (struct radix_node *)ro.ro_rt;
		do {
			rt = (struct rtentry *)rn;
			ifp = rt->rt_ifp;

			/* Any matching path satisfies uRPF. */
			if (kif->pfik_ifp == ifp)
				ret = 1;
#ifdef RADIX_MPATH
			rn = rn_mpath_next(rn);
#endif
		} while (check_mpath == 1 && rn != NULL && ret == 0);
	} else
		ret = 0;	/* no route at all */
out:
	/* Release the rtentry reference taken by the lookup. */
	if (ro.ro_rt != NULL)
		RTFREE(ro.ro_rt);
	return (ret);
}
5247
5248 #ifdef INET
5249 static void
5250 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5251     struct pf_state *s, struct pf_pdesc *pd)
5252 {
5253         struct mbuf             *m0, *m1;
5254         struct sockaddr_in      dst;
5255         struct ip               *ip;
5256         struct ifnet            *ifp = NULL;
5257         struct pf_addr           naddr;
5258         struct pf_src_node      *sn = NULL;
5259         int                      error = 0;
5260         uint16_t                 ip_len, ip_off;
5261
5262         KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5263         KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5264             __func__));
5265
5266         if ((pd->pf_mtag == NULL &&
5267             ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5268             pd->pf_mtag->routed++ > 3) {
5269                 m0 = *m;
5270                 *m = NULL;
5271                 goto bad_locked;
5272         }
5273
5274         if (r->rt == PF_DUPTO) {
5275                 if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
5276                         if (s)
5277                                 PF_STATE_UNLOCK(s);
5278                         return;
5279                 }
5280         } else {
5281                 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5282                         if (s)
5283                                 PF_STATE_UNLOCK(s);
5284                         return;
5285                 }
5286                 m0 = *m;
5287         }
5288
5289         ip = mtod(m0, struct ip *);
5290
5291         bzero(&dst, sizeof(dst));
5292         dst.sin_family = AF_INET;
5293         dst.sin_len = sizeof(dst);
5294         dst.sin_addr = ip->ip_dst;
5295
5296         if (r->rt == PF_FASTROUTE) {
5297                 struct rtentry *rt;
5298
5299                 if (s)
5300                         PF_STATE_UNLOCK(s);
5301                 rt = rtalloc1_fib(sintosa(&dst), 0, 0, M_GETFIB(m0));
5302                 if (rt == NULL) {
5303                         KMOD_IPSTAT_INC(ips_noroute);
5304                         error = EHOSTUNREACH;
5305                         goto bad;
5306                 }
5307
5308                 ifp = rt->rt_ifp;
5309                 counter_u64_add(rt->rt_pksent, 1);
5310
5311                 if (rt->rt_flags & RTF_GATEWAY)
5312                         bcopy(satosin(rt->rt_gateway), &dst, sizeof(dst));
5313                 RTFREE_LOCKED(rt);
5314         } else {
5315                 if (TAILQ_EMPTY(&r->rpool.list)) {
5316                         DPFPRINTF(PF_DEBUG_URGENT,
5317                             ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5318                         goto bad_locked;
5319                 }
5320                 if (s == NULL) {
5321                         pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
5322                             &naddr, NULL, &sn);
5323                         if (!PF_AZERO(&naddr, AF_INET))
5324                                 dst.sin_addr.s_addr = naddr.v4.s_addr;
5325                         ifp = r->rpool.cur->kif ?
5326                             r->rpool.cur->kif->pfik_ifp : NULL;
5327                 } else {
5328                         if (!PF_AZERO(&s->rt_addr, AF_INET))
5329                                 dst.sin_addr.s_addr =
5330                                     s->rt_addr.v4.s_addr;
5331                         ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5332                         PF_STATE_UNLOCK(s);
5333                 }
5334         }
5335         if (ifp == NULL)
5336                 goto bad;
5337
5338         if (oifp != ifp) {
5339                 if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS)
5340                         goto bad;
5341                 else if (m0 == NULL)
5342                         goto done;
5343                 if (m0->m_len < sizeof(struct ip)) {
5344                         DPFPRINTF(PF_DEBUG_URGENT,
5345                             ("%s: m0->m_len < sizeof(struct ip)\n", __func__));
5346                         goto bad;
5347                 }
5348                 ip = mtod(m0, struct ip *);
5349         }
5350
5351         if (ifp->if_flags & IFF_LOOPBACK)
5352                 m0->m_flags |= M_SKIP_FIREWALL;
5353
5354         ip_len = ntohs(ip->ip_len);
5355         ip_off = ntohs(ip->ip_off);
5356
5357         /* Copied from FreeBSD 10.0-CURRENT ip_output. */
5358         m0->m_pkthdr.csum_flags |= CSUM_IP;
5359         if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) {
5360                 in_delayed_cksum(m0);
5361                 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
5362         }
5363 #ifdef SCTP
5364         if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) {
5365                 sctp_delayed_cksum(m, (uint32_t)(ip->ip_hl << 2));
5366                 m0->m_pkthdr.csum_flags &= ~CSUM_SCTP;
5367         }
5368 #endif
5369
5370         /*
5371          * If small enough for interface, or the interface will take
5372          * care of the fragmentation for us, we can just send directly.
5373          */
5374         if (ip_len <= ifp->if_mtu ||
5375             (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0 ||
5376             ((ip_off & IP_DF) == 0 && (ifp->if_hwassist & CSUM_FRAGMENT))) {
5377                 ip->ip_sum = 0;
5378                 if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) {
5379                         ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
5380                         m0->m_pkthdr.csum_flags &= ~CSUM_IP;
5381                 }
5382                 m_clrprotoflags(m0);    /* Avoid confusing lower layers. */
5383                 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5384                 goto done;
5385         }
5386
5387         /* Balk when DF bit is set or the interface didn't support TSO. */
5388         if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5389                 error = EMSGSIZE;
5390                 KMOD_IPSTAT_INC(ips_cantfrag);
5391                 if (r->rt != PF_DUPTO) {
5392                         icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
5393                             ifp->if_mtu);
5394                         goto done;
5395                 } else
5396                         goto bad;
5397         }
5398
5399         error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist);
5400         if (error)
5401                 goto bad;
5402
5403         for (; m0; m0 = m1) {
5404                 m1 = m0->m_nextpkt;
5405                 m0->m_nextpkt = NULL;
5406                 if (error == 0) {
5407                         m_clrprotoflags(m0);
5408                         error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5409                 } else
5410                         m_freem(m0);
5411         }
5412
5413         if (error == 0)
5414                 KMOD_IPSTAT_INC(ips_fragmented);
5415
5416 done:
5417         if (r->rt != PF_DUPTO)
5418                 *m = NULL;
5419         return;
5420
5421 bad_locked:
5422         if (s)
5423                 PF_STATE_UNLOCK(s);
5424 bad:
5425         m_freem(m0);
5426         goto done;
5427 }
5428 #endif /* INET */
5429
5430 #ifdef INET6
/*
 * IPv6 counterpart of pf_route: forward a packet according to a
 * route-to/reply-to/dup-to/fastroute rule.  Entered with the state
 * lock held when s != NULL; the lock is dropped before transmission.
 * On return *m is set to NULL unless the rule is PF_DUPTO, in which
 * case only the duplicate is routed here.
 */
static void
pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s, struct pf_pdesc *pd)
{
	struct mbuf		*m0;
	struct sockaddr_in6	dst;
	struct ip6_hdr		*ip6;
	struct ifnet		*ifp = NULL;
	struct pf_addr		 naddr;
	struct pf_src_node	*sn = NULL;

	KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
	KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
	    __func__));

	/* Guard against routing loops: give up after 3 re-routings. */
	if ((pd->pf_mtag == NULL &&
	    ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
	    pd->pf_mtag->routed++ > 3) {
		m0 = *m;
		*m = NULL;
		goto bad_locked;
	}

	if (r->rt == PF_DUPTO) {
		/* dup-to: route a copy, the original stays untouched. */
		if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
			if (s)
				PF_STATE_UNLOCK(s);
			return;
		}
	} else {
		/* reply-to applies on the opposite direction only. */
		if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
			if (s)
				PF_STATE_UNLOCK(s);
			return;
		}
		m0 = *m;
	}

	ip6 = mtod(m0, struct ip6_hdr *);

	bzero(&dst, sizeof(dst));
	dst.sin6_family = AF_INET6;
	dst.sin6_len = sizeof(dst);
	dst.sin6_addr = ip6->ip6_dst;

	/* Cheat. XXX why only in the v6 case??? */
	if (r->rt == PF_FASTROUTE) {
		if (s)
			PF_STATE_UNLOCK(s);
		m0->m_flags |= M_SKIP_FIREWALL;
		ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
		*m = NULL;
		return;
	}

	if (TAILQ_EMPTY(&r->rpool.list)) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
		goto bad_locked;
	}
	if (s == NULL) {
		/* Stateless: pick a gateway from the rule's pool. */
		pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
		    &naddr, NULL, &sn);
		if (!PF_AZERO(&naddr, AF_INET6))
			PF_ACPY((struct pf_addr *)&dst.sin6_addr,
			    &naddr, AF_INET6);
		ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
	} else {
		/* Stateful: gateway was recorded in the state. */
		if (!PF_AZERO(&s->rt_addr, AF_INET6))
			PF_ACPY((struct pf_addr *)&dst.sin6_addr,
			    &s->rt_addr, AF_INET6);
		ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
	}

	if (s)
		PF_STATE_UNLOCK(s);

	if (ifp == NULL)
		goto bad;

	/* Re-run the ruleset if we are switching interfaces. */
	if (oifp != ifp) {
		if (pf_test6(PF_FWD, ifp, &m0, NULL) != PF_PASS)
			goto bad;
		else if (m0 == NULL)
			goto done;
		if (m0->m_len < sizeof(struct ip6_hdr)) {
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("%s: m0->m_len < sizeof(struct ip6_hdr)\n",
			    __func__));
			goto bad;
		}
		ip6 = mtod(m0, struct ip6_hdr *);
	}

	if (ifp->if_flags & IFF_LOOPBACK)
		m0->m_flags |= M_SKIP_FIREWALL;

	/*
	 * If the packet is too large for the outgoing interface,
	 * send back an icmp6 error.
	 */
	if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr))
		dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
	if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu)
		nd6_output(ifp, ifp, m0, &dst, NULL);
	else {
		in6_ifstat_inc(ifp, ifs6_in_toobig);
		if (r->rt != PF_DUPTO)
			icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
		else
			goto bad;
	}

done:
	if (r->rt != PF_DUPTO)
		*m = NULL;
	return;

bad_locked:
	if (s)
		PF_STATE_UNLOCK(s);
bad:
	m_freem(m0);
	goto done;
}
5556 #endif /* INET6 */
5557
5558 /*
5559  * FreeBSD supports cksum offloads for the following drivers.
5560  *  em(4), fxp(4), ixgb(4), lge(4), ndis(4), nge(4), re(4),
5561  *   ti(4), txp(4), xl(4)
5562  *
5563  * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
5564  *  network driver performed cksum including pseudo header, need to verify
5565  *   csum_data
5566  * CSUM_DATA_VALID :
 *  network driver performed the cksum but an additional pseudo-header
 *  cksum computation with the partial csum_data is needed (i.e. lack of
 *  H/W support for the pseudo header, for instance hme(4), sk(4) and
 *  possibly gem(4))
5570  *
5571  * After validating the cksum of packet, set both flag CSUM_DATA_VALID and
5572  * CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in upper
5573  * TCP/UDP layer.
5574  * Also, set csum_data to 0xffff to force cksum validation.
5575  */
5576 static int
5577 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
5578 {
5579         u_int16_t sum = 0;
5580         int hw_assist = 0;
5581         struct ip *ip;
5582
5583         if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
5584                 return (1);
5585         if (m->m_pkthdr.len < off + len)
5586                 return (1);
5587
5588         switch (p) {
5589         case IPPROTO_TCP:
5590                 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5591                         if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5592                                 sum = m->m_pkthdr.csum_data;
5593                         } else {
5594                                 ip = mtod(m, struct ip *);
5595                                 sum = in_pseudo(ip->ip_src.s_addr,
5596                                 ip->ip_dst.s_addr, htonl((u_short)len +
5597                                 m->m_pkthdr.csum_data + IPPROTO_TCP));
5598                         }
5599                         sum ^= 0xffff;
5600                         ++hw_assist;
5601                 }
5602                 break;
5603         case IPPROTO_UDP:
5604                 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5605                         if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5606                                 sum = m->m_pkthdr.csum_data;
5607                         } else {
5608                                 ip = mtod(m, struct ip *);
5609                                 sum = in_pseudo(ip->ip_src.s_addr,
5610                                 ip->ip_dst.s_addr, htonl((u_short)len +
5611                                 m->m_pkthdr.csum_data + IPPROTO_UDP));
5612                         }
5613                         sum ^= 0xffff;
5614                         ++hw_assist;
5615                 }
5616                 break;
5617         case IPPROTO_ICMP:
5618 #ifdef INET6
5619         case IPPROTO_ICMPV6:
5620 #endif /* INET6 */
5621                 break;
5622         default:
5623                 return (1);
5624         }
5625
5626         if (!hw_assist) {
5627                 switch (af) {
5628                 case AF_INET:
5629                         if (p == IPPROTO_ICMP) {
5630                                 if (m->m_len < off)
5631                                         return (1);
5632                                 m->m_data += off;
5633                                 m->m_len -= off;
5634                                 sum = in_cksum(m, len);
5635                                 m->m_data -= off;
5636                                 m->m_len += off;
5637                         } else {
5638                                 if (m->m_len < sizeof(struct ip))
5639                                         return (1);
5640                                 sum = in4_cksum(m, p, off, len);
5641                         }
5642                         break;
5643 #ifdef INET6
5644                 case AF_INET6:
5645                         if (m->m_len < sizeof(struct ip6_hdr))
5646                                 return (1);
5647                         sum = in6_cksum(m, p, off, len);
5648                         break;
5649 #endif /* INET6 */
5650                 default:
5651                         return (1);
5652                 }
5653         }
5654         if (sum) {
5655                 switch (p) {
5656                 case IPPROTO_TCP:
5657                     {
5658                         KMOD_TCPSTAT_INC(tcps_rcvbadsum);
5659                         break;
5660                     }
5661                 case IPPROTO_UDP:
5662                     {
5663                         KMOD_UDPSTAT_INC(udps_badsum);
5664                         break;
5665                     }
5666 #ifdef INET
5667                 case IPPROTO_ICMP:
5668                     {
5669                         KMOD_ICMPSTAT_INC(icps_checksum);
5670                         break;
5671                     }
5672 #endif
5673 #ifdef INET6
5674                 case IPPROTO_ICMPV6:
5675                     {
5676                         KMOD_ICMP6STAT_INC(icp6s_checksum);
5677                         break;
5678                     }
5679 #endif /* INET6 */
5680                 }
5681                 return (1);
5682         } else {
5683                 if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
5684                         m->m_pkthdr.csum_flags |=
5685                             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
5686                         m->m_pkthdr.csum_data = 0xffff;
5687                 }
5688         }
5689         return (0);
5690 }
5691
5692
5693 #ifdef INET
5694 int
5695 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
5696 {
5697         struct pfi_kif          *kif;
5698         u_short                  action, reason = 0, log = 0;
5699         struct mbuf             *m = *m0;
5700         struct ip               *h = NULL;
5701         struct m_tag            *ipfwtag;
5702         struct pf_rule          *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
5703         struct pf_state         *s = NULL;
5704         struct pf_ruleset       *ruleset = NULL;
5705         struct pf_pdesc          pd;
5706         int                      off, dirndx, pqid = 0;
5707
5708         M_ASSERTPKTHDR(m);
5709
5710         if (!V_pf_status.running)
5711                 return (PF_PASS);
5712
5713         memset(&pd, 0, sizeof(pd));
5714
5715         kif = (struct pfi_kif *)ifp->if_pf_kif;
5716
5717         if (kif == NULL) {
5718                 DPFPRINTF(PF_DEBUG_URGENT,
5719                     ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
5720                 return (PF_DROP);
5721         }
5722         if (kif->pfik_flags & PFI_IFLAG_SKIP)
5723                 return (PF_PASS);
5724
5725         if (m->m_flags & M_SKIP_FIREWALL)
5726                 return (PF_PASS);
5727
5728         pd.pf_mtag = pf_find_mtag(m);
5729
5730         PF_RULES_RLOCK();
5731
5732         if (ip_divert_ptr != NULL &&
5733             ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
5734                 struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
5735                 if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
5736                         if (pd.pf_mtag == NULL &&
5737                             ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
5738                                 action = PF_DROP;
5739                                 goto done;
5740                         }
5741                         pd.pf_mtag->flags |= PF_PACKET_LOOPED;
5742                         m_tag_delete(m, ipfwtag);
5743                 }
5744                 if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
5745                         m->m_flags |= M_FASTFWD_OURS;
5746                         pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
5747                 }
5748         } else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
5749                 /* We do IP header normalization and packet reassembly here */
5750                 action = PF_DROP;
5751                 goto done;
5752         }
5753         m = *m0;        /* pf_normalize messes with m0 */
5754         h = mtod(m, struct ip *);
5755
5756         off = h->ip_hl << 2;
5757         if (off < (int)sizeof(struct ip)) {
5758                 action = PF_DROP;
5759                 REASON_SET(&reason, PFRES_SHORT);
5760                 log = 1;
5761                 goto done;
5762         }
5763
5764         pd.src = (struct pf_addr *)&h->ip_src;
5765         pd.dst = (struct pf_addr *)&h->ip_dst;
5766         pd.sport = pd.dport = NULL;
5767         pd.ip_sum = &h->ip_sum;
5768         pd.proto_sum = NULL;
5769         pd.proto = h->ip_p;
5770         pd.dir = dir;
5771         pd.sidx = (dir == PF_IN) ? 0 : 1;
5772         pd.didx = (dir == PF_IN) ? 1 : 0;
5773         pd.af = AF_INET;
5774         pd.tos = h->ip_tos;
5775         pd.tot_len = ntohs(h->ip_len);
5776
5777         /* handle fragments that didn't get reassembled by normalization */
5778         if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
5779                 action = pf_test_fragment(&r, dir, kif, m, h,
5780                     &pd, &a, &ruleset);
5781                 goto done;
5782         }
5783
5784         switch (h->ip_p) {
5785
5786         case IPPROTO_TCP: {
5787                 struct tcphdr   th;
5788
5789                 pd.hdr.tcp = &th;
5790                 if (!pf_pull_hdr(m, off, &th, sizeof(th),
5791                     &action, &reason, AF_INET)) {
5792                         log = action != PF_PASS;
5793                         goto done;
5794                 }
5795                 pd.p_len = pd.tot_len - off - (th.th_off << 2);
5796                 if ((th.th_flags & TH_ACK) && pd.p_len == 0)
5797                         pqid = 1;
5798                 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
5799                 if (action == PF_DROP)
5800                         goto done;
5801                 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
5802                     &reason);
5803                 if (action == PF_PASS) {
5804                         if (pfsync_update_state_ptr != NULL)
5805                                 pfsync_update_state_ptr(s);
5806                         r = s->rule.ptr;
5807                         a = s->anchor.ptr;
5808                         log = s->log;
5809                 } else if (s == NULL)
5810                         action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5811                             &a, &ruleset, inp);
5812                 break;
5813         }
5814
5815         case IPPROTO_UDP: {
5816                 struct udphdr   uh;
5817
5818                 pd.hdr.udp = &uh;
5819                 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
5820                     &action, &reason, AF_INET)) {
5821                         log = action != PF_PASS;
5822                         goto done;
5823                 }
5824                 if (uh.uh_dport == 0 ||
5825                     ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
5826                     ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
5827                         action = PF_DROP;
5828                         REASON_SET(&reason, PFRES_SHORT);
5829                         goto done;
5830                 }
5831                 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
5832                 if (action == PF_PASS) {
5833                         if (pfsync_update_state_ptr != NULL)
5834                                 pfsync_update_state_ptr(s);
5835                         r = s->rule.ptr;
5836                         a = s->anchor.ptr;
5837                         log = s->log;
5838                 } else if (s == NULL)
5839                         action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5840                             &a, &ruleset, inp);
5841                 break;
5842         }
5843
5844         case IPPROTO_ICMP: {
5845                 struct icmp     ih;
5846
5847                 pd.hdr.icmp = &ih;
5848                 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
5849                     &action, &reason, AF_INET)) {
5850                         log = action != PF_PASS;
5851                         goto done;
5852                 }
5853                 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
5854                     &reason);
5855                 if (action == PF_PASS) {
5856                         if (pfsync_update_state_ptr != NULL)
5857                                 pfsync_update_state_ptr(s);
5858                         r = s->rule.ptr;
5859                         a = s->anchor.ptr;
5860                         log = s->log;
5861                 } else if (s == NULL)
5862                         action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5863                             &a, &ruleset, inp);
5864                 break;
5865         }
5866
5867 #ifdef INET6
5868         case IPPROTO_ICMPV6: {
5869                 action = PF_DROP;
5870                 DPFPRINTF(PF_DEBUG_MISC,
5871                     ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
5872                 goto done;
5873         }
5874 #endif
5875
5876         default:
5877                 action = pf_test_state_other(&s, dir, kif, m, &pd);
5878                 if (action == PF_PASS) {
5879                         if (pfsync_update_state_ptr != NULL)
5880                                 pfsync_update_state_ptr(s);
5881                         r = s->rule.ptr;
5882                         a = s->anchor.ptr;
5883                         log = s->log;
5884                 } else if (s == NULL)
5885                         action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5886                             &a, &ruleset, inp);
5887                 break;
5888         }
5889
5890 done:
5891         PF_RULES_RUNLOCK();
5892         if (action == PF_PASS && h->ip_hl > 5 &&
5893             !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
5894                 action = PF_DROP;
5895                 REASON_SET(&reason, PFRES_IPOPTIONS);
5896                 log = 1;
5897                 DPFPRINTF(PF_DEBUG_MISC,
5898                     ("pf: dropping packet with ip options\n"));
5899         }
5900
5901         if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
5902                 action = PF_DROP;
5903                 REASON_SET(&reason, PFRES_MEMORY);
5904         }
5905         if (r->rtableid >= 0)
5906                 M_SETFIB(m, r->rtableid);
5907
5908 #ifdef ALTQ
5909         if (action == PF_PASS && r->qid) {
5910                 if (pd.pf_mtag == NULL &&
5911                     ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
5912                         action = PF_DROP;
5913                         REASON_SET(&reason, PFRES_MEMORY);
5914                 }
5915                 if (pqid || (pd.tos & IPTOS_LOWDELAY))
5916                         pd.pf_mtag->qid = r->pqid;
5917                 else
5918                         pd.pf_mtag->qid = r->qid;
5919                 /* add hints for ecn */
5920                 pd.pf_mtag->hdr = h;
5921
5922         }
5923 #endif /* ALTQ */
5924
5925         /*
5926          * connections redirected to loopback should not match sockets
5927          * bound specifically to loopback due to security implications,
5928          * see tcp_input() and in_pcblookup_listen().
5929          */
5930         if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
5931             pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
5932             (s->nat_rule.ptr->action == PF_RDR ||
5933             s->nat_rule.ptr->action == PF_BINAT) &&
5934             (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
5935                 m->m_flags |= M_SKIP_FIREWALL;
5936
5937         if (action == PF_PASS && r->divert.port && ip_divert_ptr != NULL &&
5938             !PACKET_LOOPED(&pd)) {
5939
5940                 ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
5941                     sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
5942                 if (ipfwtag != NULL) {
5943                         ((struct ipfw_rule_ref *)(ipfwtag+1))->info =
5944                             ntohs(r->divert.port);
5945                         ((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir;
5946
5947                         if (s)
5948                                 PF_STATE_UNLOCK(s);
5949
5950                         m_tag_prepend(m, ipfwtag);
5951                         if (m->m_flags & M_FASTFWD_OURS) {
5952                                 if (pd.pf_mtag == NULL &&
5953                                     ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
5954                                         action = PF_DROP;
5955                                         REASON_SET(&reason, PFRES_MEMORY);
5956                                         log = 1;
5957                                         DPFPRINTF(PF_DEBUG_MISC,
5958                                             ("pf: failed to allocate tag\n"));
5959                                 }
5960                                 pd.pf_mtag->flags |= PF_FASTFWD_OURS_PRESENT;
5961                                 m->m_flags &= ~M_FASTFWD_OURS;
5962                         }
5963                         ip_divert_ptr(*m0, dir ==  PF_IN ? DIR_IN : DIR_OUT);
5964                         *m0 = NULL;
5965
5966                         return (action);
5967                 } else {
5968                         /* XXX: ipfw has the same behaviour! */
5969                         action = PF_DROP;
5970                         REASON_SET(&reason, PFRES_MEMORY);
5971                         log = 1;
5972                         DPFPRINTF(PF_DEBUG_MISC,
5973                             ("pf: failed to allocate divert tag\n"));
5974                 }
5975         }
5976
5977         if (log) {
5978                 struct pf_rule *lr;
5979
5980                 if (s != NULL && s->nat_rule.ptr != NULL &&
5981                     s->nat_rule.ptr->log & PF_LOG_ALL)
5982                         lr = s->nat_rule.ptr;
5983                 else
5984                         lr = r;
5985                 PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, ruleset, &pd,
5986                     (s == NULL));
5987         }
5988
5989         kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
5990         kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
5991
5992         if (action == PF_PASS || r->action == PF_DROP) {
5993                 dirndx = (dir == PF_OUT);
5994                 r->packets[dirndx]++;
5995                 r->bytes[dirndx] += pd.tot_len;
5996                 if (a != NULL) {
5997                         a->packets[dirndx]++;
5998                         a->bytes[dirndx] += pd.tot_len;
5999                 }
6000                 if (s != NULL) {
6001                         if (s->nat_rule.ptr != NULL) {
6002                                 s->nat_rule.ptr->packets[dirndx]++;
6003                                 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6004                         }
6005                         if (s->src_node != NULL) {
6006                                 s->src_node->packets[dirndx]++;
6007                                 s->src_node->bytes[dirndx] += pd.tot_len;
6008                         }
6009                         if (s->nat_src_node != NULL) {
6010                                 s->nat_src_node->packets[dirndx]++;
6011                                 s->nat_src_node->bytes[dirndx] += pd.tot_len;
6012                         }
6013                         dirndx = (dir == s->direction) ? 0 : 1;
6014                         s->packets[dirndx]++;
6015                         s->bytes[dirndx] += pd.tot_len;
6016                 }
6017                 tr = r;
6018                 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6019                 if (nr != NULL && r == &V_pf_default_rule)
6020                         tr = nr;
6021                 if (tr->src.addr.type == PF_ADDR_TABLE)
6022                         pfr_update_stats(tr->src.addr.p.tbl,
6023                             (s == NULL) ? pd.src :
6024                             &s->key[(s->direction == PF_IN)]->
6025                                 addr[(s->direction == PF_OUT)],
6026                             pd.af, pd.tot_len, dir == PF_OUT,
6027                             r->action == PF_PASS, tr->src.neg);
6028                 if (tr->dst.addr.type == PF_ADDR_TABLE)
6029                         pfr_update_stats(tr->dst.addr.p.tbl,
6030                             (s == NULL) ? pd.dst :
6031                             &s->key[(s->direction == PF_IN)]->
6032                                 addr[(s->direction == PF_IN)],
6033                             pd.af, pd.tot_len, dir == PF_OUT,
6034                             r->action == PF_PASS, tr->dst.neg);
6035         }
6036
6037         switch (action) {
6038         case PF_SYNPROXY_DROP:
6039                 m_freem(*m0);
6040         case PF_DEFER:
6041                 *m0 = NULL;
6042                 action = PF_PASS;
6043                 break;
6044         case PF_DROP:
6045                 m_freem(*m0);
6046                 *m0 = NULL;
6047                 break;
6048         default:
6049                 /* pf_route() returns unlocked. */
6050                 if (r->rt) {
6051                         pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
6052                         return (action);
6053                 }
6054                 break;
6055         }
6056         if (s)
6057                 PF_STATE_UNLOCK(s);
6058
6059         return (action);
6060 }
6061 #endif /* INET */
6062
6063 #ifdef INET6
6064 int
6065 pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
6066 {
6067         struct pfi_kif          *kif;
6068         u_short                  action, reason = 0, log = 0;
6069         struct mbuf             *m = *m0, *n = NULL;
6070         struct m_tag            *mtag;
6071         struct ip6_hdr          *h = NULL;
6072         struct pf_rule          *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
6073         struct pf_state         *s = NULL;
6074         struct pf_ruleset       *ruleset = NULL;
6075         struct pf_pdesc          pd;
6076         int                      off, terminal = 0, dirndx, rh_cnt = 0;
6077         int                      fwdir = dir;
6078
6079         M_ASSERTPKTHDR(m);
6080
6081         if (dir == PF_OUT && m->m_pkthdr.rcvif && ifp != m->m_pkthdr.rcvif)
6082                 fwdir = PF_FWD;
6083
6084         if (!V_pf_status.running)
6085                 return (PF_PASS);
6086
6087         memset(&pd, 0, sizeof(pd));
6088         pd.pf_mtag = pf_find_mtag(m);
6089
6090         if (pd.pf_mtag && pd.pf_mtag->flags & PF_TAG_GENERATED)
6091                 return (PF_PASS);
6092
6093         kif = (struct pfi_kif *)ifp->if_pf_kif;
6094         if (kif == NULL) {
6095                 DPFPRINTF(PF_DEBUG_URGENT,
6096                     ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
6097                 return (PF_DROP);
6098         }
6099         if (kif->pfik_flags & PFI_IFLAG_SKIP)
6100                 return (PF_PASS);
6101
6102         if (m->m_flags & M_SKIP_FIREWALL)
6103                 return (PF_PASS);
6104
6105         PF_RULES_RLOCK();
6106
6107         /* We do IP header normalization and packet reassembly here */
6108         if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
6109                 action = PF_DROP;
6110                 goto done;
6111         }
6112         m = *m0;        /* pf_normalize messes with m0 */
6113         h = mtod(m, struct ip6_hdr *);
6114
6115 #if 1
6116         /*
6117          * we do not support jumbogram yet.  if we keep going, zero ip6_plen
6118          * will do something bad, so drop the packet for now.
6119          */
6120         if (htons(h->ip6_plen) == 0) {
6121                 action = PF_DROP;
6122                 REASON_SET(&reason, PFRES_NORM);        /*XXX*/
6123                 goto done;
6124         }
6125 #endif
6126
6127         pd.src = (struct pf_addr *)&h->ip6_src;
6128         pd.dst = (struct pf_addr *)&h->ip6_dst;
6129         pd.sport = pd.dport = NULL;
6130         pd.ip_sum = NULL;
6131         pd.proto_sum = NULL;
6132         pd.dir = dir;
6133         pd.sidx = (dir == PF_IN) ? 0 : 1;
6134         pd.didx = (dir == PF_IN) ? 1 : 0;
6135         pd.af = AF_INET6;
6136         pd.tos = 0;
6137         pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
6138
6139         off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
6140         pd.proto = h->ip6_nxt;
6141         do {
6142                 switch (pd.proto) {
6143                 case IPPROTO_FRAGMENT:
6144                         action = pf_test_fragment(&r, dir, kif, m, h,
6145                             &pd, &a, &ruleset);
6146                         if (action == PF_DROP)
6147                                 REASON_SET(&reason, PFRES_FRAG);
6148                         goto done;
6149                 case IPPROTO_ROUTING: {
6150                         struct ip6_rthdr rthdr;
6151
6152                         if (rh_cnt++) {
6153                                 DPFPRINTF(PF_DEBUG_MISC,
6154                                     ("pf: IPv6 more than one rthdr\n"));
6155                                 action = PF_DROP;
6156                                 REASON_SET(&reason, PFRES_IPOPTIONS);
6157                                 log = 1;
6158                                 goto done;
6159                         }
6160                         if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
6161                             &reason, pd.af)) {
6162                                 DPFPRINTF(PF_DEBUG_MISC,
6163                                     ("pf: IPv6 short rthdr\n"));
6164                                 action = PF_DROP;
6165                                 REASON_SET(&reason, PFRES_SHORT);
6166                                 log = 1;
6167                                 goto done;
6168                         }
6169                         if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
6170                                 DPFPRINTF(PF_DEBUG_MISC,
6171                                     ("pf: IPv6 rthdr0\n"));
6172                                 action = PF_DROP;
6173                                 REASON_SET(&reason, PFRES_IPOPTIONS);
6174                                 log = 1;
6175                                 goto done;
6176                         }
6177                         /* FALLTHROUGH */
6178                 }
6179                 case IPPROTO_AH:
6180                 case IPPROTO_HOPOPTS:
6181                 case IPPROTO_DSTOPTS: {
6182                         /* get next header and header length */
6183                         struct ip6_ext  opt6;
6184
6185                         if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
6186                             NULL, &reason, pd.af)) {
6187                                 DPFPRINTF(PF_DEBUG_MISC,
6188                                     ("pf: IPv6 short opt\n"));
6189                                 action = PF_DROP;
6190                                 log = 1;
6191                                 goto done;
6192                         }
6193                         if (pd.proto == IPPROTO_AH)
6194                                 off += (opt6.ip6e_len + 2) * 4;
6195                         else
6196                                 off += (opt6.ip6e_len + 1) * 8;
6197                         pd.proto = opt6.ip6e_nxt;
6198                         /* goto the next header */
6199                         break;
6200                 }
6201                 default:
6202                         terminal++;
6203                         break;
6204                 }
6205         } while (!terminal);
6206
6207         /* if there's no routing header, use unmodified mbuf for checksumming */
6208         if (!n)
6209                 n = m;
6210
6211         switch (pd.proto) {
6212
6213         case IPPROTO_TCP: {
6214                 struct tcphdr   th;
6215
6216                 pd.hdr.tcp = &th;
6217                 if (!pf_pull_hdr(m, off, &th, sizeof(th),
6218                     &action, &reason, AF_INET6)) {
6219                         log = action != PF_PASS;
6220                         goto done;
6221                 }
6222                 pd.p_len = pd.tot_len - off - (th.th_off << 2);
6223                 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6224                 if (action == PF_DROP)
6225                         goto done;
6226                 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6227                     &reason);
6228                 if (action == PF_PASS) {
6229                         if (pfsync_update_state_ptr != NULL)
6230                                 pfsync_update_state_ptr(s);
6231                         r = s->rule.ptr;
6232                         a = s->anchor.ptr;
6233                         log = s->log;
6234                 } else if (s == NULL)
6235                         action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6236                             &a, &ruleset, inp);
6237                 break;
6238         }
6239
6240         case IPPROTO_UDP: {
6241                 struct udphdr   uh;
6242
6243                 pd.hdr.udp = &uh;
6244                 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6245                     &action, &reason, AF_INET6)) {
6246                         log = action != PF_PASS;
6247                         goto done;
6248                 }
6249                 if (uh.uh_dport == 0 ||
6250                     ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6251                     ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6252                         action = PF_DROP;
6253                         REASON_SET(&reason, PFRES_SHORT);
6254                         goto done;
6255                 }
6256                 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6257                 if (action == PF_PASS) {
6258                         if (pfsync_update_state_ptr != NULL)
6259                                 pfsync_update_state_ptr(s);
6260                         r = s->rule.ptr;
6261                         a = s->anchor.ptr;
6262                         log = s->log;
6263                 } else if (s == NULL)
6264                         action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6265                             &a, &ruleset, inp);
6266                 break;
6267         }
6268
6269         case IPPROTO_ICMP: {
6270                 action = PF_DROP;
6271                 DPFPRINTF(PF_DEBUG_MISC,
6272                     ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
6273                 goto done;
6274         }
6275
6276         case IPPROTO_ICMPV6: {
6277                 struct icmp6_hdr        ih;
6278
6279                 pd.hdr.icmp6 = &ih;
6280                 if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
6281                     &action, &reason, AF_INET6)) {
6282                         log = action != PF_PASS;
6283                         goto done;
6284                 }
6285                 action = pf_test_state_icmp(&s, dir, kif,
6286                     m, off, h, &pd, &reason);
6287                 if (action == PF_PASS) {
6288                         if (pfsync_update_state_ptr != NULL)
6289                                 pfsync_update_state_ptr(s);
6290                         r = s->rule.ptr;
6291                         a = s->anchor.ptr;
6292                         log = s->log;
6293                 } else if (s == NULL)
6294                         action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6295                             &a, &ruleset, inp);
6296                 break;
6297         }
6298
6299         default:
6300                 action = pf_test_state_other(&s, dir, kif, m, &pd);
6301                 if (action == PF_PASS) {
6302                         if (pfsync_update_state_ptr != NULL)
6303                                 pfsync_update_state_ptr(s);
6304                         r = s->rule.ptr;
6305                         a = s->anchor.ptr;
6306                         log = s->log;
6307                 } else if (s == NULL)
6308                         action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6309                             &a, &ruleset, inp);
6310                 break;
6311         }
6312
6313 done:
6314         PF_RULES_RUNLOCK();
6315         if (n != m) {
6316                 m_freem(n);
6317                 n = NULL;
6318         }
6319
6320         /* handle dangerous IPv6 extension headers. */
6321         if (action == PF_PASS && rh_cnt &&
6322             !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6323                 action = PF_DROP;
6324                 REASON_SET(&reason, PFRES_IPOPTIONS);
6325                 log = 1;
6326                 DPFPRINTF(PF_DEBUG_MISC,
6327                     ("pf: dropping packet with dangerous v6 headers\n"));
6328         }
6329
6330         if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
6331                 action = PF_DROP;
6332                 REASON_SET(&reason, PFRES_MEMORY);
6333         }
6334         if (r->rtableid >= 0)
6335                 M_SETFIB(m, r->rtableid);
6336
6337 #ifdef ALTQ
6338         if (action == PF_PASS && r->qid) {
6339                 if (pd.pf_mtag == NULL &&
6340                     ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6341                         action = PF_DROP;
6342                         REASON_SET(&reason, PFRES_MEMORY);
6343                 }
6344                 if (pd.tos & IPTOS_LOWDELAY)
6345                         pd.pf_mtag->qid = r->pqid;
6346                 else
6347                         pd.pf_mtag->qid = r->qid;
6348                 /* add hints for ecn */
6349                 pd.pf_mtag->hdr = h;
6350         }
6351 #endif /* ALTQ */
6352
6353         if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6354             pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6355             (s->nat_rule.ptr->action == PF_RDR ||
6356             s->nat_rule.ptr->action == PF_BINAT) &&
6357             IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
6358                 m->m_flags |= M_SKIP_FIREWALL;
6359
6360         /* XXX: Anybody working on it?! */
6361         if (r->divert.port)
6362                 printf("pf: divert(9) is not supported for IPv6\n");
6363
6364         if (log) {
6365                 struct pf_rule *lr;
6366
6367                 if (s != NULL && s->nat_rule.ptr != NULL &&
6368                     s->nat_rule.ptr->log & PF_LOG_ALL)
6369                         lr = s->nat_rule.ptr;
6370                 else
6371                         lr = r;
6372                 PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, ruleset,
6373                     &pd, (s == NULL));
6374         }
6375
6376         kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6377         kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
6378
6379         if (action == PF_PASS || r->action == PF_DROP) {
6380                 dirndx = (dir == PF_OUT);
6381                 r->packets[dirndx]++;
6382                 r->bytes[dirndx] += pd.tot_len;
6383                 if (a != NULL) {
6384                         a->packets[dirndx]++;
6385                         a->bytes[dirndx] += pd.tot_len;
6386                 }
6387                 if (s != NULL) {
6388                         if (s->nat_rule.ptr != NULL) {
6389                                 s->nat_rule.ptr->packets[dirndx]++;
6390                                 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6391                         }
6392                         if (s->src_node != NULL) {
6393                                 s->src_node->packets[dirndx]++;
6394                                 s->src_node->bytes[dirndx] += pd.tot_len;
6395                         }
6396                         if (s->nat_src_node != NULL) {
6397                                 s->nat_src_node->packets[dirndx]++;
6398                                 s->nat_src_node->bytes[dirndx] += pd.tot_len;
6399                         }
6400                         dirndx = (dir == s->direction) ? 0 : 1;
6401                         s->packets[dirndx]++;
6402                         s->bytes[dirndx] += pd.tot_len;
6403                 }
6404                 tr = r;
6405                 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6406                 if (nr != NULL && r == &V_pf_default_rule)
6407                         tr = nr;
6408                 if (tr->src.addr.type == PF_ADDR_TABLE)
6409                         pfr_update_stats(tr->src.addr.p.tbl,
6410                             (s == NULL) ? pd.src :
6411                             &s->key[(s->direction == PF_IN)]->addr[0],
6412                             pd.af, pd.tot_len, dir == PF_OUT,
6413                             r->action == PF_PASS, tr->src.neg);
6414                 if (tr->dst.addr.type == PF_ADDR_TABLE)
6415                         pfr_update_stats(tr->dst.addr.p.tbl,
6416                             (s == NULL) ? pd.dst :
6417                             &s->key[(s->direction == PF_IN)]->addr[1],
6418                             pd.af, pd.tot_len, dir == PF_OUT,
6419                             r->action == PF_PASS, tr->dst.neg);
6420         }
6421
6422         switch (action) {
6423         case PF_SYNPROXY_DROP:
6424                 m_freem(*m0);
6425         case PF_DEFER:
6426                 *m0 = NULL;
6427                 action = PF_PASS;
6428                 break;
6429         case PF_DROP:
6430                 m_freem(*m0);
6431                 *m0 = NULL;
6432                 break;
6433         default:
6434                 /* pf_route6() returns unlocked. */
6435                 if (r->rt) {
6436                         pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
6437                         return (action);
6438                 }
6439                 break;
6440         }
6441
6442         if (s)
6443                 PF_STATE_UNLOCK(s);
6444
6445         /* If reassembled packet passed, create new fragments. */
6446         if (action == PF_PASS && *m0 && fwdir == PF_FWD &&
6447             (mtag = m_tag_find(m, PF_REASSEMBLED, NULL)) != NULL)
6448                 action = pf_refragment6(ifp, m0, mtag);
6449
6450         return (action);
6451 }
6452 #endif /* INET6 */