/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2015-2019 Yandex LLC
 * Copyright (c) 2015 Alexander V. Chernikov <melifaro@FreeBSD.org>
 * Copyright (c) 2016-2019 Andrey V. Elsukov <ae@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rmlock.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>

#include <net/if_var.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/ip_fw_nat64.h>

#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/pf/pf.h>

MALLOC_DEFINE(M_NAT64LSN, "NAT64LSN", "NAT64LSN");

static void nat64lsn_periodic(void *data);
#define	PERIODIC_DELAY	4
static uint8_t nat64lsn_proto_map[256];
uint8_t nat64lsn_rproto_map[NAT_MAX_PROTO];

#define	NAT64_FLAG_FIN		0x01	/* FIN was seen */
#define	NAT64_FLAG_SYN		0x02	/* First syn in->out */
#define	NAT64_FLAG_ESTAB	0x04	/* Packet with Ack */
#define	NAT64_FLAGS_TCP	(NAT64_FLAG_SYN|NAT64_FLAG_ESTAB|NAT64_FLAG_FIN)

#define	NAT64_FLAG_RDR		0x80	/* Port redirect */
#define	NAT64_LOOKUP(chain, cmd)	\
	(struct nat64lsn_cfg *)SRV_OBJECT((chain), (cmd)->arg1)
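
/*
 * NAT64_LOOKUP() resolves the instance bound to a rule: (cmd)->arg1
 * holds the kernel index of the named object, and SRV_OBJECT() returns
 * the per-chain pointer stored at that index.  Usage sketch (assuming
 * a valid chain @ch and O_EXTERNAL_INSTANCE opcode @icmd, as in
 * ipfw_nat64lsn() below):
 *
 *	struct nat64lsn_cfg *cfg = NAT64_LOOKUP(ch, icmd);
 *	if (cfg == NULL)
 *		... bail out, no instance is attached to this rule ...
 */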

/*
 * Delayed job queue, used to create new hosts
 */
struct nat64lsn_job_item {
	TAILQ_ENTRY(nat64lsn_job_item)	next;
	enum nat64lsn_jtype	jtype;
	struct nat64lsn_host	*nh;
	struct nat64lsn_portgroup	*pg;
	struct in6_addr		haddr;
	unsigned int		fhash;	/* Flow hash */
	uint32_t		aaddr;	/* Last used address (net) */
	struct ipfw_flow_id	f_id;
	uint64_t		delmask[NAT64LSN_PGPTRNMASK];
};

static struct mtx jmtx;
#define	JQUEUE_LOCK_INIT()	mtx_init(&jmtx, "qlock", NULL, MTX_DEF)
#define	JQUEUE_LOCK_DESTROY()	mtx_destroy(&jmtx)
#define	JQUEUE_LOCK()		mtx_lock(&jmtx)
#define	JQUEUE_UNLOCK()		mtx_unlock(&jmtx)

static void nat64lsn_enqueue_job(struct nat64lsn_cfg *cfg,
    struct nat64lsn_job_item *ji);
static void nat64lsn_enqueue_jobs(struct nat64lsn_cfg *cfg,
    struct nat64lsn_job_head *jhead, int jlen);

static struct nat64lsn_job_item *nat64lsn_create_job(struct nat64lsn_cfg *cfg,
    const struct ipfw_flow_id *f_id, int jtype);
static int nat64lsn_request_portgroup(struct nat64lsn_cfg *cfg,
    const struct ipfw_flow_id *f_id, struct mbuf **pm, uint32_t aaddr,
    int needs_idx);
static int nat64lsn_request_host(struct nat64lsn_cfg *cfg,
    const struct ipfw_flow_id *f_id, struct mbuf **pm);
static int nat64lsn_translate4(struct nat64lsn_cfg *cfg,
    const struct ipfw_flow_id *f_id, struct mbuf **pm);
static int nat64lsn_translate6(struct nat64lsn_cfg *cfg,
    struct ipfw_flow_id *f_id, struct mbuf **pm);

static int alloc_portgroup(struct nat64lsn_job_item *ji);
static void destroy_portgroup(struct nat64lsn_portgroup *pg);
static void destroy_host6(struct nat64lsn_host *nh);
static int alloc_host6(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji);

static int attach_portgroup(struct nat64lsn_cfg *cfg,
    struct nat64lsn_job_item *ji);
static int attach_host6(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji);

static uma_zone_t nat64lsn_host_zone;
static uma_zone_t nat64lsn_pg_zone;
static uma_zone_t nat64lsn_pgidx_zone;

static unsigned int nat64lsn_periodic_chkstates(struct nat64lsn_cfg *cfg,
    struct nat64lsn_host *nh);

#define	I6_hash(x)		(djb_hash((const unsigned char *)(x), 16))
#define	I6_first(_ph, h)	(_ph)[h]
#define	I6_next(x)		(x)->next
#define	I6_val(x)		(&(x)->addr)
#define	I6_cmp(a, b)		IN6_ARE_ADDR_EQUAL(a, b)
#define	I6_lock(a, b)
#define	I6_unlock(a, b)

#define	I6HASH_FIND(_cfg, _res, _a) \
	CHT_FIND(_cfg->ih, _cfg->ihsize, I6_, _res, _a)
#define	I6HASH_INSERT(_cfg, _i)	\
	CHT_INSERT_HEAD(_cfg->ih, _cfg->ihsize, I6_, _i)
#define	I6HASH_REMOVE(_cfg, _res, _tmp, _a)	\
	CHT_REMOVE(_cfg->ih, _cfg->ihsize, I6_, _res, _tmp, _a)

#define	I6HASH_FOREACH_SAFE(_cfg, _x, _tmp, _cb, _arg)	\
	CHT_FOREACH_SAFE(_cfg->ih, _cfg->ihsize, I6_, _x, _tmp, _cb, _arg)
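
/*
 * The I6HASH_* wrappers instantiate the generic chained-hash (CHT_*)
 * macros for the table of nat64lsn_host entries keyed by IPv6 source
 * address.  A typical lookup (sketch, mirroring nat64lsn_translate6()):
 *
 *	struct nat64lsn_host *nh;
 *
 *	I6HASH_FIND(cfg, nh, &f_id->src_ip6);
 *	if (nh == NULL)
 *		... no host yet: enqueue a JTYPE_NEWHOST job ...
 */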

#define	HASH_IN4(x)	djb_hash((const unsigned char *)(x), 8)

static unsigned int
djb_hash(const unsigned char *h, const int len)
{
	unsigned int result = 0;
	int i;

	for (i = 0; i < len; i++)
		result = 33 * result ^ h[i];

	return (result);
}
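
/*
 * djb_hash() is Bernstein's multiplicative string hash with XOR
 * mixing: h = h * 33 ^ c for each input byte.  I6_hash() feeds it the
 * 16-byte IPv6 address; HASH_IN4() the 8-byte state key (foreign
 * address/port plus local port).  Worked example: hashing the two
 * bytes {1, 2} yields ((0 * 33 ^ 1) * 33) ^ 2 = 35.
 */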

bitmask_size(size_t num, int *level)

	for (c = 0, x = num; num > 1; num /= 64, c++)

bitmask_prepare(uint64_t *pmask, size_t bufsize, int level)

	memset(pmask, 0xFF, bufsize);
	for (x = 0, z = 1; level > 1; x += z, z *= 64, level--)
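
/*
 * The free-port bitmask is kept as a small radix tree of 64-bit
 * words: bitmask_size() computes how many words (and how many levels)
 * are needed to cover @num items, and bitmask_prepare() marks
 * everything free (all ones) before linking the upper levels.  E.g.
 * covering 4096 states takes two levels: one summary word whose 64
 * bits each describe one 64-bit leaf word.
 */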

static void
nat64lsn_log(struct pfloghdr *plog, struct mbuf *m, sa_family_t family,
    uint32_t n, uint32_t sn)
{

	memset(plog, 0, sizeof(*plog));
	plog->length = PFLOG_REAL_HDRLEN;
	plog->af = family;
	plog->action = PF_NAT;
	plog->rulenr = htonl(n);
	plog->subrulenr = htonl(sn);
	plog->ruleset[0] = '\0';
	strlcpy(plog->ifname, "NAT64LSN", sizeof(plog->ifname));
	ipfw_bpf_mtap2(plog, PFLOG_HDRLEN, m);
}
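
/*
 * The completed pflog header is handed to ipfw_bpf_mtap2(), which taps
 * the packet in pflog format, so translations can be observed from
 * userland; e.g. (assuming logging is enabled on the instance)
 * something like "tcpdump -ni ipfwlog0" should show NAT64 events.
 */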

/*
 * Inspect an ICMP packet to see if the message contains a different
 * packet header, in which case we need to alter @addr and @port.
 */
static NAT64NOINLINE int
inspect_icmp_mbuf(struct mbuf **m, uint8_t *nat_proto, uint32_t *addr,
    uint16_t *port)
{
	struct icmphdr *icmp;

	ip = mtod(*m, struct ip *);	/* Outer IP header */
	off = (ip->ip_hl << 2) + ICMP_MINLEN;
	if ((*m)->m_len < off)
		*m = m_pullup(*m, off);

	ip = mtod(*m, struct ip *);	/* Outer IP header */
	icmp = L3HDR(ip, struct icmphdr *);
	switch (icmp->icmp_type) {
		/* Use ICMP ID as distinguisher */
		*port = ntohs(*((uint16_t *)(icmp + 1)));
		/*
		 * ICMP_UNREACH and ICMP_TIMXCEED contain an IP header
		 * plus 64 bits of the original datagram.
		 */
		if ((*m)->m_pkthdr.len < off + sizeof(struct ip) + ICMP_MINLEN)
		if ((*m)->m_len < off + sizeof(struct ip) + ICMP_MINLEN)
			*m = m_pullup(*m, off + sizeof(struct ip) +
			    ICMP_MINLEN);

		ip = mtodo(*m, off);	/* Inner IP header */
		off += ip->ip_hl << 2;	/* Skip inner IP header */
		*addr = ntohl(ip->ip_src.s_addr);
		if ((*m)->m_len < off + ICMP_MINLEN)
			*m = m_pullup(*m, off + ICMP_MINLEN);

		tcp = mtodo(*m, off);
		*nat_proto = NAT_PROTO_TCP;
		*port = ntohs(tcp->th_sport);

		udp = mtodo(*m, off);
		*nat_proto = NAT_PROTO_UDP;
		*port = ntohs(udp->uh_sport);

		/*
		 * We will translate only ICMP errors for our ICMP
		 */
		icmp = mtodo(*m, off);
		if (icmp->icmp_type != ICMP_ECHO)
		*port = ntohs(*((uint16_t *)(icmp + 1)));

static inline uint8_t
convert_tcp_flags(uint8_t flags)
{
	uint8_t result;

	result = flags & (TH_FIN|TH_SYN);
	result |= (flags & TH_RST) >> 2;	/* Treat RST as FIN */
	result |= (flags & TH_ACK) >> 2;	/* Treat ACK as estab */

	return (result);
}
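
/*
 * Worked example: a SYN|ACK segment (0x12) maps to NAT64_FLAG_SYN |
 * NAT64_FLAG_ESTAB (0x06); a bare RST (0x04) maps to NAT64_FLAG_FIN
 * (0x01), so reset connections age out with the short close TTL.
 */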

static NAT64NOINLINE int
nat64lsn_translate4(struct nat64lsn_cfg *cfg, const struct ipfw_flow_id *f_id,
    struct mbuf **pm)
{
	struct pfloghdr loghdr, *logdata;
	struct in6_addr src6;
	struct nat64lsn_portgroup *pg;
	struct nat64lsn_host *nh;
	struct nat64lsn_state *st;
	uint16_t state_flags, state_ts;
	uint16_t port, lport;

	port = f_id->dst_port;
	if (addr < cfg->prefix4 || addr > cfg->pmask4) {
		NAT64STAT_INC(&cfg->base.stats, nomatch4);
		return (cfg->nomatch_verdict);

	/* Check if protocol is supported and get its short id */
	nat_proto = nat64lsn_proto_map[f_id->proto];
	if (nat_proto == 0) {
		NAT64STAT_INC(&cfg->base.stats, noproto);
		return (cfg->nomatch_verdict);

	/* We might need to handle ICMP differently */
	if (nat_proto == NAT_PROTO_ICMP) {
		ret = inspect_icmp_mbuf(pm, &nat_proto, &addr, &port);
			NAT64STAT_INC(&cfg->base.stats, nomem);
			NAT64STAT_INC(&cfg->base.stats, noproto);
			return (cfg->nomatch_verdict);
		/* XXX: Check addr for validity */
		if (addr < cfg->prefix4 || addr > cfg->pmask4) {
			NAT64STAT_INC(&cfg->base.stats, nomatch4);
			return (cfg->nomatch_verdict);

	/* Calc portgroup offset w.r.t. protocol */
	pg = GET_PORTGROUP(cfg, addr, nat_proto, port);

	/* Check if this port is occupied by any portgroup */
		NAT64STAT_INC(&cfg->base.stats, nomatch4);
		DPRINTF(DP_STATE, "NOMATCH %u %d %d (%d)", addr, nat_proto, port,
		    _GET_PORTGROUP_IDX(cfg, addr, nat_proto, port));
		return (cfg->nomatch_verdict);

	/* TODO: Check flags to see if we need to do some static mapping */

	/* Prepare some fields we might need to update */
	ip = mtod(*pm, struct ip *);
	if (ip->ip_p == IPPROTO_TCP)
		state_flags = convert_tcp_flags(
		    L3HDR(ip, struct tcphdr *)->th_flags);

	/* Lock host and get port mapping */
	st = &pg->states[port & (NAT64_CHUNK_SIZE - 1)];
	if (st->timestamp != state_ts)
		st->timestamp = state_ts;
	if ((st->flags & state_flags) != state_flags)
		st->flags |= state_flags;
	lport = htons(st->u.s.lport);

	if (cfg->base.flags & NAT64_LOG) {
		nat64lsn_log(logdata, *pm, AF_INET, pg->idx, st->cur.off);

	nat64_embed_ip4(&src6, cfg->base.plat_plen, htonl(f_id->src_ip));
	ret = nat64_do_handle_ip4(*pm, &src6, &nh->addr, lport,
	    &cfg->base, logdata);

	if (ret == NAT64SKIP)
		return (cfg->nomatch_verdict);
	if (ret == NAT64MFREE)

static void
nat64lsn_dump_state(const struct nat64lsn_cfg *cfg,
    const struct nat64lsn_portgroup *pg, const struct nat64lsn_state *st,
    const char *px, int off)
{
	char s[INET6_ADDRSTRLEN], a[INET_ADDRSTRLEN], d[INET_ADDRSTRLEN];

	if ((V_nat64_debug & DP_STATE) == 0)

	inet_ntop(AF_INET6, &pg->host->addr, s, sizeof(s));
	inet_ntop(AF_INET, &pg->aaddr, a, sizeof(a));
	inet_ntop(AF_INET, &st->u.s.faddr, d, sizeof(d));

	DPRINTF(DP_STATE, "%s: PG %d ST [%p|%d]: %s:%d/%d <%s:%d> "
	    "%s:%d AGE %d", px, pg->idx, st, off,
	    s, st->u.s.lport, pg->nat_proto, a, pg->aport + off,
	    d, st->u.s.fport, GET_AGE(st->timestamp));

/*
 * Check if a particular TCP state is stale and should be deleted.
 * Return 1 if true, 0 otherwise.
 */
static int
nat64lsn_periodic_check_tcp(const struct nat64lsn_cfg *cfg,
    const struct nat64lsn_state *st, int age)
{

	if (st->flags & NAT64_FLAG_FIN)
		ttl = cfg->st_close_ttl;
	else if (st->flags & NAT64_FLAG_ESTAB)
		ttl = cfg->st_estab_ttl;
	else if (st->flags & NAT64_FLAG_SYN)
		ttl = cfg->st_syn_ttl;
	else
		ttl = cfg->st_syn_ttl;
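
	/*
	 * TTL selection follows the most advanced flag seen: a state
	 * that saw FIN (or RST, which maps to FIN) uses st_close_ttl,
	 * an established state uses st_estab_ttl, and a half-open
	 * state that saw only a SYN keeps the short st_syn_ttl, so
	 * embryonic connections expire early.
	 */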

/*
 * Check if NAT state @st is stale and should be deleted.
 * Return 1 if true, 0 otherwise.
 */
static NAT64NOINLINE int
nat64lsn_periodic_chkstate(const struct nat64lsn_cfg *cfg,
    const struct nat64lsn_portgroup *pg, const struct nat64lsn_state *st)
{

	age = GET_AGE(st->timestamp);

	/* Skip immutable records */
	if (st->flags & NAT64_FLAG_RDR)

	switch (pg->nat_proto) {
		delete = nat64lsn_periodic_check_tcp(cfg, st, age);
		if (age > cfg->st_udp_ttl)
		if (age > cfg->st_icmp_ttl)

/*
 * The following structures and functions are used to perform an
 * SLIST_FOREACH_SAFE() analog for states identified by struct st_ptr.
 */
struct st_idx {
	struct nat64lsn_portgroup *pg;
	struct nat64lsn_state *st;
	struct st_ptr sidx_next;
};

static struct st_idx *
st_first(const struct nat64lsn_cfg *cfg, const struct nat64lsn_host *nh,
    struct st_ptr *sidx, struct st_idx *si)
{
	struct nat64lsn_portgroup *pg;
	struct nat64lsn_state *st;

	if (sidx->idx == 0) {
		memset(si, 0, sizeof(*si));

	pg = PORTGROUP_BYSIDX(cfg, nh, sidx->idx);
	st = &pg->states[sidx->off];

	si->sidx_next = st->next;

static struct st_idx *
st_next(const struct nat64lsn_cfg *cfg, const struct nat64lsn_host *nh,
    struct st_idx *si)
{
	struct nat64lsn_portgroup *pg;
	struct nat64lsn_state *st;

	sidx = si->sidx_next;
		memset(si, 0, sizeof(*si));

	pg = PORTGROUP_BYSIDX(cfg, nh, sidx.idx);
	st = &pg->states[sidx.off];

	si->sidx_next = st->next;

static struct st_idx *
st_save_cond(struct st_idx *si_dst, struct st_idx *si)
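
/*
 * Together st_first()/st_next() iterate a per-host state hash chain
 * while st_save_cond() tracks the previous live element so the
 * current one can be unlinked safely.  The pattern, used below in
 * nat64lsn_periodic_chkstates(), is roughly:
 *
 *	for (st_first(cfg, nh, &nh->phash[i], &si); si.st != NULL;
 *	    st_save_cond(&si_prev, &si), st_next(cfg, nh, &si))
 *		...
 */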

static unsigned int
nat64lsn_periodic_chkstates(struct nat64lsn_cfg *cfg, struct nat64lsn_host *nh)
{
	struct st_idx si, si_prev;
	unsigned int delcount;

	for (i = 0; i < nh->hsize; i++) {
		memset(&si_prev, 0, sizeof(si_prev));
		for (st_first(cfg, nh, &nh->phash[i], &si);
		    st_save_cond(&si_prev, &si), st_next(cfg, nh, &si)) {
			if (nat64lsn_periodic_chkstate(cfg, si.pg, si.st) == 0)

			nat64lsn_dump_state(cfg, si.pg, si.st, "DELETE STATE",

			/* Unlink from hash */
			if (si_prev.st != NULL)
				si_prev.st->next = si.st->next;
			else
				nh->phash[i] = si.st->next;

			/* Delete state and free its data */
			PG_MARK_FREE_IDX(si.pg, si.st->cur.off);
			memset(si.st, 0, sizeof(struct nat64lsn_state));

			/* Update portgroup timestamp */
			SET_AGE(si.pg->timestamp);

	NAT64STAT_ADD(&cfg->base.stats, sdeleted, delcount);

/*
 * Check if a portgroup is unused and can be deleted.
 * Return 1 if stale, 0 otherwise.
 */
static int
stale_pg(const struct nat64lsn_cfg *cfg, const struct nat64lsn_portgroup *pg)
{

	if (!PG_IS_EMPTY(pg))
	if (GET_AGE(pg->timestamp) < cfg->pg_delete_delay)

/*
 * Check if a host record is unused and can be deleted.
 * Return 1 if stale, 0 otherwise.
 */
static int
stale_nh(const struct nat64lsn_cfg *cfg, const struct nat64lsn_host *nh)
{

	if (nh->pg_used != 0)
	if (GET_AGE(nh->timestamp) < cfg->nh_delete_delay)

struct nat64lsn_periodic_data {
	struct nat64lsn_cfg *cfg;
	struct nat64lsn_job_head jhead;
};

static NAT64NOINLINE int
nat64lsn_periodic_chkhost(struct nat64lsn_host *nh,
    struct nat64lsn_periodic_data *d)
{
	struct nat64lsn_portgroup *pg;
	struct nat64lsn_job_item *ji;
	uint64_t delmask[NAT64LSN_PGPTRNMASK];

	memset(delmask, 0, sizeof(delmask));

	if (V_nat64_debug & DP_JQUEUE) {
		char a[INET6_ADDRSTRLEN];

		inet_ntop(AF_INET6, &nh->addr, a, sizeof(a));
		DPRINTF(DP_JQUEUE, "Checking %s host %s on cpu %d",
		    stale_nh(d->cfg, nh) ? "stale" : "non-stale", a, curcpu);

	if (!stale_nh(d->cfg, nh)) {
		/* Non-stale host. Inspect internals */

		/* Stage 1: Check & expire states */
		if (nat64lsn_periodic_chkstates(d->cfg, nh) != 0)
			SET_AGE(nh->timestamp);

		/* Stage 2: Check if we need to expire */
		for (i = 0; i < nh->pg_used; i++) {
			pg = PORTGROUP_BYSIDX(d->cfg, nh, i + 1);

			/* Check if we can delete this portgroup */
			if (stale_pg(d->cfg, pg) == 0)

			DPRINTF(DP_JQUEUE, "Check PG %d", i);
			delmask[i / 64] |= ((uint64_t)1 << (i % 64));
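			/* E.g. PG index i = 70 sets bit 6 of delmask[1]. */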

	DPRINTF(DP_JQUEUE, "Queueing %d portgroups for deletion", delcount);

	/* We have something to delete - add it to the queue */
	ji = nat64lsn_create_job(d->cfg, NULL, JTYPE_DELPORTGROUP);

	ji->haddr = nh->addr;
	ji->delcount = delcount;
	memcpy(ji->delmask, delmask, sizeof(ji->delmask));

	TAILQ_INSERT_TAIL(&d->jhead, ji, next);

/*
 * This procedure is used to perform various maintenance
 * on the dynamic hash list.  It currently runs every
 * PERIODIC_DELAY (4) seconds.
 */
static void
nat64lsn_periodic(void *data)
{
	struct ip_fw_chain *ch;
	struct nat64lsn_cfg *cfg;
	struct nat64lsn_periodic_data d;
	struct nat64lsn_host *nh, *tmp;

	cfg = (struct nat64lsn_cfg *) data;
	CURVNET_SET(cfg->vp);

	memset(&d, 0, sizeof(d));
	TAILQ_INIT(&d.jhead);

	/* Stage 1: foreach host, check all its portgroups */
	I6HASH_FOREACH_SAFE(cfg, nh, tmp, nat64lsn_periodic_chkhost, &d);

	/* Enqueue everything we have requested */
	nat64lsn_enqueue_jobs(cfg, &d.jhead, d.jlen);

	callout_schedule(&cfg->periodic, hz * PERIODIC_DELAY);

static NAT64NOINLINE void
reinject_mbuf(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji)
{

	/* Request has failed or packet type is wrong */
	if (ji->f_id.addr_type != 6 || ji->done == 0) {
		NAT64STAT_INC(&cfg->base.stats, dropped);
		DPRINTF(DP_DROPS, "mbuf dropped: type %d, done %d",
		    ji->jtype, ji->done);

	/*
	 * XXX: Limit recursion level
	 */
	NAT64STAT_INC(&cfg->base.stats, jreinjected);
	DPRINTF(DP_JQUEUE, "Reinject mbuf");
	nat64lsn_translate6(cfg, &ji->f_id, &ji->m);

static void
destroy_portgroup(struct nat64lsn_portgroup *pg)
{

	DPRINTF(DP_OBJ, "DESTROY PORTGROUP %d %p", pg->idx, pg);
	uma_zfree(nat64lsn_pg_zone, pg);

static NAT64NOINLINE int
alloc_portgroup(struct nat64lsn_job_item *ji)
{
	struct nat64lsn_portgroup *pg;

	pg = uma_zalloc(nat64lsn_pg_zone, M_NOWAIT);

	if (ji->needs_idx != 0) {
		ji->spare_idx = uma_zalloc(nat64lsn_pgidx_zone, M_NOWAIT);
		/* Failed alloc isn't always fatal, so don't check */

	memset(&pg->freemask, 0xFF, sizeof(pg->freemask));
	pg->nat_proto = ji->nat_proto;

static void
destroy_host6(struct nat64lsn_host *nh)
{
	char a[INET6_ADDRSTRLEN];

	inet_ntop(AF_INET6, &nh->addr, a, sizeof(a));
	DPRINTF(DP_OBJ, "DESTROY HOST %s %p (pg used %d)", a, nh,
	NAT64_LOCK_DESTROY(nh);
	for (i = 0; i < nh->pg_allocated / NAT64LSN_PGIDX_CHUNK; i++)
		uma_zfree(nat64lsn_pgidx_zone, PORTGROUP_CHUNK(nh, i));
	uma_zfree(nat64lsn_host_zone, nh);

static NAT64NOINLINE int
alloc_host6(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji)
{
	struct nat64lsn_host *nh;
	char a[INET6_ADDRSTRLEN];

	nh = uma_zalloc(nat64lsn_host_zone, M_NOWAIT);
	PORTGROUP_CHUNK(nh, 0) = uma_zalloc(nat64lsn_pgidx_zone, M_NOWAIT);
	if (PORTGROUP_CHUNK(nh, 0) == NULL) {
		uma_zfree(nat64lsn_host_zone, nh);
	if (alloc_portgroup(ji) != 0) {
		NAT64STAT_INC(&cfg->base.stats, jportfails);
		uma_zfree(nat64lsn_pgidx_zone, PORTGROUP_CHUNK(nh, 0));
		uma_zfree(nat64lsn_host_zone, nh);

	nh->addr = ji->haddr;
	nh->hsize = NAT64LSN_HSIZE;	/* XXX: hardcoded size */
	nh->pg_allocated = NAT64LSN_PGIDX_CHUNK;

	inet_ntop(AF_INET6, &nh->addr, a, sizeof(a));
	DPRINTF(DP_OBJ, "ALLOC HOST %s %p", a, ji->nh);

/*
 * Find a free @pg index inside @nh.
 */
static NAT64NOINLINE int
find_nh_pg_idx(struct nat64lsn_cfg *cfg, struct nat64lsn_host *nh, int *idx)
{

	for (i = 0; i < nh->pg_allocated; i++) {
		if (PORTGROUP_BYSIDX(cfg, nh, i + 1) == NULL) {

static NAT64NOINLINE int
attach_host6(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji)
{
	char a[INET6_ADDRSTRLEN];
	struct nat64lsn_host *nh;

	I6HASH_FIND(cfg, nh, &ji->haddr);
		/* Add new host to list */
		I6HASH_INSERT(cfg, nh);

		inet_ntop(AF_INET6, &nh->addr, a, sizeof(a));
		DPRINTF(DP_OBJ, "ATTACH HOST %s %p", a, nh);
		/*
		 * Try to add portgroup.
		 * Note it will automatically set 'done' on @ji
		 * if successful.
		 */
		if (attach_portgroup(cfg, ji) != 0) {
			DPRINTF(DP_DROPS, "%s %p failed to attach PG",
			NAT64STAT_INC(&cfg->base.stats, jportfails);

	/*
	 * @nh isn't NULL. This probably means we had several simultaneous
	 * host requests. A previous request has already attached this
	 * host. Requeue the attached mbuf and mark the job as done, but
	 * leave the nh and pg pointers unchanged, so nat64lsn_do_request()
	 * will release all allocated resources.
	 */
	inet_ntop(AF_INET6, &nh->addr, a, sizeof(a));
	DPRINTF(DP_OBJ, "%s %p is already attached as %p",

static NAT64NOINLINE int
find_pg_place_addr(const struct nat64lsn_cfg *cfg, int addr_off,
    int nat_proto, uint16_t *aport, int *ppg_idx)
{

	pg_idx = addr_off * _ADDR_PG_COUNT +
	    (nat_proto - 1) * _ADDR_PG_PROTO_COUNT;

	for (j = NAT64_MIN_CHUNK; j < _ADDR_PG_PROTO_COUNT; j++) {
		if (cfg->pg[pg_idx + j] != NULL)

		*aport = j * NAT64_CHUNK_SIZE;
		*ppg_idx = pg_idx + j;

/*
 * XXX: This function needs to be rewritten to
 * use a free bitmask for faster pg finding;
 * additionally, it should take into consideration
 * a) randomization and
 * b) previous addresses allocated to the given nat instance.
 */
static NAT64NOINLINE int
find_portgroup_place(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji,
    uint32_t *aaddr, uint16_t *aport, int *ppg_idx)
{

	/*
	 * XXX: Use a bitmask index to be able to find/check if an IP
	 * address has some spare PGs.
	 */
	nat_proto = ji->nat_proto;

	/* First, try to use the same address */
	if (ji->aaddr != 0) {
		i = ntohl(ji->aaddr) - cfg->prefix4;
		if (find_pg_place_addr(cfg, i, nat_proto, aport,
			*aaddr = htonl(cfg->prefix4 + i);

	/* Next, try to use a random address based on the flow hash */
	i = ji->fhash % (1 << (32 - cfg->plen4));
	if (find_pg_place_addr(cfg, i, nat_proto, aport, ppg_idx) != 0) {
		*aaddr = htonl(cfg->prefix4 + i);

	/* Last one: simply find ANY available */
	for (i = 0; i < (1 << (32 - cfg->plen4)); i++) {
		if (find_pg_place_addr(cfg, i, nat_proto, aport,
			*aaddr = htonl(cfg->prefix4 + i);

static NAT64NOINLINE int
attach_portgroup(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji)
{
	char a[INET6_ADDRSTRLEN];
	struct nat64lsn_portgroup *pg;
	struct nat64lsn_host *nh;
	int nh_pg_idx, pg_idx;

	/*
	 * Find source host and bind: we can't rely on
	 */
	I6HASH_FIND(cfg, nh, &ji->haddr);

	/* Find spare port chunk */
	if (find_portgroup_place(cfg, ji, &aaddr, &aport, &pg_idx) != 0) {
		inet_ntop(AF_INET6, &nh->addr, a, sizeof(a));
		DPRINTF(DP_OBJ | DP_DROPS, "empty PG not found for %s", a);

	/* Expand PG indexes if needed */
	if (nh->pg_allocated < cfg->max_chunks && ji->spare_idx != NULL) {
		PORTGROUP_CHUNK(nh, nh->pg_allocated / NAT64LSN_PGIDX_CHUNK) =
		nh->pg_allocated += NAT64LSN_PGIDX_CHUNK;
		ji->spare_idx = NULL;

	/* Find an empty index to store the PG in @nh */
	if (find_nh_pg_idx(cfg, nh, &nh_pg_idx) != 0) {
		inet_ntop(AF_INET6, &nh->addr, a, sizeof(a));
		DPRINTF(DP_OBJ | DP_DROPS, "free PG index not found for %s",

	cfg->pg[pg_idx] = pg;
	cfg->protochunks[pg->nat_proto]++;
	NAT64STAT_INC(&cfg->base.stats, spgcreated);

	SET_AGE(pg->timestamp);
	PORTGROUP_BYSIDX(cfg, nh, nh_pg_idx + 1) = pg;
	if (nh->pg_used == nh_pg_idx)
	SET_AGE(nh->timestamp);

static NAT64NOINLINE void
consider_del_portgroup(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji)
{
	struct nat64lsn_host *nh, *nh_tmp;
	struct nat64lsn_portgroup *pg, *pg_list[256];
	int i, pg_lidx, idx;

	/* Find source host */
	I6HASH_FIND(cfg, nh, &ji->haddr);
	if (nh == NULL || nh->pg_used == 0)

	memset(pg_list, 0, sizeof(pg_list));

	for (i = nh->pg_used - 1; i >= 0; i--) {
		if ((ji->delmask[i / 64] & ((uint64_t)1 << (i % 64))) == 0)
		pg = PORTGROUP_BYSIDX(cfg, nh, i + 1);

		/* Check that the PG isn't busy */
		if (stale_pg(cfg, pg) == 0)

		pg_list[pg_lidx++] = pg;
		PORTGROUP_BYSIDX(cfg, nh, i + 1) = NULL;

		idx = _GET_PORTGROUP_IDX(cfg, ntohl(pg->aaddr), pg->nat_proto,
		KASSERT(cfg->pg[idx] == pg, ("Non matched pg"));
		cfg->pg[idx] = NULL;
		cfg->protochunks[pg->nat_proto]--;
		NAT64STAT_INC(&cfg->base.stats, spgdeleted);

		/* Decrease pg_used */
		while (nh->pg_used > 0 &&
		    PORTGROUP_BYSIDX(cfg, nh, nh->pg_used) == NULL)

		/* Check if the on-stack buffer is full */
		if (pg_lidx == nitems(pg_list))

	if (stale_nh(cfg, nh)) {
		I6HASH_REMOVE(cfg, nh, nh_tmp, &ji->haddr);
		KASSERT(nh != NULL, ("Unable to find address"));
		I6HASH_FIND(cfg, nh, &ji->haddr);
		KASSERT(nh == NULL, ("Failed to delete address"));

	/* TODO: Delay freeing portgroups */
	while (pg_lidx > 0) {
		pg_lidx--;
		NAT64STAT_INC(&cfg->base.stats, spgdeleted);
		destroy_portgroup(pg_list[pg_lidx]);

/*
 * Main request handler.
 * Responsible for handling the job queue: creating new hosts,
 * adding/deleting portgroups.
 */
static NAT64NOINLINE void
nat64lsn_do_request(void *data)
{
	struct nat64lsn_job_head jhead;
	struct nat64lsn_job_item *ji;
	struct nat64lsn_cfg *cfg = (struct nat64lsn_cfg *) data;
	struct ip_fw_chain *ch;

	CURVNET_SET(cfg->vp);

	/* XXX: We're running unlocked here */

	TAILQ_SWAP(&jhead, &cfg->jhead, nat64lsn_job_item, next);

	/* Check if we need to resize the hash */
	if (cfg->ihcount > cfg->ihsize && cfg->ihsize < 65536) {
		nhsize = cfg->ihsize;
		for ( ; cfg->ihcount > nhsize && nhsize < 65536; nhsize *= 2)
	} else if (cfg->ihcount < cfg->ihsize * 4) {
		nhsize = cfg->ihsize;
		for ( ; cfg->ihcount < nhsize * 4 && nhsize > 32; nhsize /= 2)

	if (TAILQ_EMPTY(&jhead)) {

	NAT64STAT_INC(&cfg->base.stats, jcalls);
	DPRINTF(DP_JQUEUE, "count=%d", jcount);

	/*
	 * What we should do here is to build a hash
	 * to ensure we don't have lots of duplicate requests.
	 * Skip this for now.
	 *
	 * TODO: Limit per-call number of items
	 */

	/* Pre-allocate everything for the entire chain */
	TAILQ_FOREACH(ji, &jhead, next) {
		switch (ji->jtype) {
			if (alloc_host6(cfg, ji) != 0)
				NAT64STAT_INC(&cfg->base.stats,
		case JTYPE_NEWPORTGROUP:
			if (alloc_portgroup(ji) != 0)
				NAT64STAT_INC(&cfg->base.stats,
		case JTYPE_DELPORTGROUP:
			delcount += ji->delcount;

	/*
	 * TODO: Alloc new hash
	 */

	/* Apply all changes in batch */
	TAILQ_FOREACH(ji, &jhead, next) {
		switch (ji->jtype) {
			attach_host6(cfg, ji);
		case JTYPE_NEWPORTGROUP:
			if (ji->pg != NULL &&
			    attach_portgroup(cfg, ji) != 0)
				NAT64STAT_INC(&cfg->base.stats,
		case JTYPE_DELPORTGROUP:
			consider_del_portgroup(cfg, ji);

	/* XXX: Move everything to the new hash */

	IPFW_UH_WUNLOCK(ch);

	/* Flush unused entries */
	while (!TAILQ_EMPTY(&jhead)) {
		ji = TAILQ_FIRST(&jhead);
		TAILQ_REMOVE(&jhead, ji, next);
			destroy_host6(ji->nh);
			destroy_portgroup(ji->pg);
		reinject_mbuf(cfg, ji);
		if (ji->spare_idx != NULL)
			uma_zfree(nat64lsn_pgidx_zone, ji->spare_idx);

static NAT64NOINLINE struct nat64lsn_job_item *
nat64lsn_create_job(struct nat64lsn_cfg *cfg, const struct ipfw_flow_id *f_id,
    int jtype)
{
	struct nat64lsn_job_item *ji;
	struct in6_addr haddr;

	/*
	 * Do not try to lock a possibly contested mutex if we're near the
	 * limit. Drop the packet instead.
	 */
	if (cfg->jlen >= cfg->jmaxlen) {
		NAT64STAT_INC(&cfg->base.stats, jmaxlen);

	memset(&haddr, 0, sizeof(haddr));
		haddr = f_id->src_ip6;
		nat_proto = nat64lsn_proto_map[f_id->proto];

		DPRINTF(DP_JQUEUE, "REQUEST pg nat_proto %d on proto %d",
		    nat_proto, f_id->proto);

	ji = malloc(sizeof(struct nat64lsn_job_item), M_IPFW,
	    M_NOWAIT | M_ZERO);
		NAT64STAT_INC(&cfg->base.stats, jnomem);

	ji->nat_proto = nat_proto;

static NAT64NOINLINE void
nat64lsn_enqueue_job(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji)
{

	TAILQ_INSERT_TAIL(&cfg->jhead, ji, next);
	NAT64STAT_INC(&cfg->base.stats, jrequests);

	if (callout_pending(&cfg->jcallout) == 0)
		callout_reset(&cfg->jcallout, 1, nat64lsn_do_request, cfg);

static NAT64NOINLINE void
nat64lsn_enqueue_jobs(struct nat64lsn_cfg *cfg,
    struct nat64lsn_job_head *jhead, int jlen)
{

	if (TAILQ_EMPTY(jhead))

	/* Attach the current queue to the execution one */
	TAILQ_CONCAT(&cfg->jhead, jhead, next);
	NAT64STAT_ADD(&cfg->base.stats, jrequests, jlen);

	if (callout_pending(&cfg->jcallout) == 0)
		callout_reset(&cfg->jcallout, 1, nat64lsn_do_request, cfg);

static unsigned int
flow6_hash(const struct ipfw_flow_id *f_id)
{
	unsigned char hbuf[36];

	memcpy(hbuf, &f_id->dst_ip6, 16);
	memcpy(&hbuf[16], &f_id->src_ip6, 16);
	memcpy(&hbuf[32], &f_id->dst_port, 2);
	memcpy(&hbuf[34], &f_id->src_port, 2);

	return (djb_hash(hbuf, sizeof(hbuf)));
}
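
/*
 * The flow hash packs destination/source IPv6 addresses and both
 * ports into a 36-byte buffer and runs djb_hash() over it.  The
 * result seeds ji->fhash, which find_portgroup_place() reduces modulo
 * the pool size to pick a pseudo-random alias IPv4 address.
 */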

static NAT64NOINLINE int
nat64lsn_request_host(struct nat64lsn_cfg *cfg,
    const struct ipfw_flow_id *f_id, struct mbuf **pm)
{
	struct nat64lsn_job_item *ji;

	ji = nat64lsn_create_job(cfg, f_id, JTYPE_NEWHOST);
		NAT64STAT_INC(&cfg->base.stats, dropped);
		DPRINTF(DP_DROPS, "failed to create job");

		/* Provide a pseudo-random value based on the flow */
		ji->fhash = flow6_hash(f_id);
		nat64lsn_enqueue_job(cfg, ji);
		NAT64STAT_INC(&cfg->base.stats, jhostsreq);

	return (IP_FW_DENY);

static NAT64NOINLINE int
nat64lsn_request_portgroup(struct nat64lsn_cfg *cfg,
    const struct ipfw_flow_id *f_id, struct mbuf **pm, uint32_t aaddr,
    int needs_idx)
{
	struct nat64lsn_job_item *ji;

	ji = nat64lsn_create_job(cfg, f_id, JTYPE_NEWPORTGROUP);
		NAT64STAT_INC(&cfg->base.stats, dropped);
		DPRINTF(DP_DROPS, "failed to create job");

		/* Provide a pseudo-random value based on the flow */
		ji->fhash = flow6_hash(f_id);
		ji->needs_idx = needs_idx;
		nat64lsn_enqueue_job(cfg, ji);
		NAT64STAT_INC(&cfg->base.stats, jportreq);

	return (IP_FW_DENY);

static NAT64NOINLINE struct nat64lsn_state *
nat64lsn_create_state(struct nat64lsn_cfg *cfg, struct nat64lsn_host *nh,
    int nat_proto, struct nat64lsn_state *kst, uint32_t *aaddr)
{
	struct nat64lsn_portgroup *pg;
	struct nat64lsn_state *st;

	/* XXX: create an additional bitmask for selecting the proper PG */
	for (i = 0; i < nh->pg_used; i++) {
		pg = PORTGROUP_BYSIDX(cfg, nh, i + 1);
		if (pg->nat_proto != nat_proto)

		off = PG_GET_FREE_IDX(pg);
			/* We have found a spare state. Use it */
			PG_MARK_BUSY_IDX(pg, off);
			st = &pg->states[off];

			/*
			 * Fill in new info. Assume state was zeroed.
			 * Timestamp and flags will be filled by the caller.
			 */
			st->cur.idx = i + 1;

			/* Insert into host hash table */
			hval = HASH_IN4(&st->u.hkey) & (nh->hsize - 1);
			st->next = nh->phash[hval];
			nh->phash[hval] = st->cur;

			nat64lsn_dump_state(cfg, pg, st, "ALLOC STATE", off);
			NAT64STAT_INC(&cfg->base.stats, screated);

	/* Save last used alias address */

static NAT64NOINLINE int
nat64lsn_translate6(struct nat64lsn_cfg *cfg, struct ipfw_flow_id *f_id,
    struct mbuf **pm)
{
	struct pfloghdr loghdr, *logdata;
	char a[INET6_ADDRSTRLEN];
	struct nat64lsn_host *nh;
	struct nat64lsn_state *st, kst;
	struct nat64lsn_portgroup *pg;
	struct icmp6_hdr *icmp6;
	int action, hval, nat_proto, proto;
	uint16_t aport, state_ts, state_flags;

	/* Check if af/protocol is supported and get its short id */
	nat_proto = nat64lsn_proto_map[f_id->proto];
	if (nat_proto == 0) {
		/*
		 * Since we can be called from the jobs handler, we need
		 * to free the mbuf ourselves; do not leave this task to
		 * ipfw_check_packet().
		 */
		NAT64STAT_INC(&cfg->base.stats, noproto);

	/* Try to find the host first */
	I6HASH_FIND(cfg, nh, &f_id->src_ip6);
		return (nat64lsn_request_host(cfg, f_id, pm));

	/* Fill in on-stack state structure */
	kst.u.s.faddr = nat64_extract_ip4(&f_id->dst_ip6,
	    cfg->base.plat_plen);
	if (kst.u.s.faddr == 0 ||
	    nat64_check_private_ip4(&cfg->base, kst.u.s.faddr) != 0) {
		NAT64STAT_INC(&cfg->base.stats, dropped);
	kst.u.s.fport = f_id->dst_port;
	kst.u.s.lport = f_id->src_port;

	/* Prepare some fields we might need to update */
	proto = nat64_getlasthdr(*pm, &hval);
		NAT64STAT_INC(&cfg->base.stats, dropped);
		DPRINTF(DP_DROPS, "dropped due to non-contiguous mbuf");

	if (proto == IPPROTO_TCP)
		state_flags = convert_tcp_flags(
		    TCP(mtodo(*pm, hval))->th_flags);

	if (proto == IPPROTO_ICMPV6) {
		/* Alter local port data */
		icmp6 = mtodo(*pm, hval);
		if (icmp6->icmp6_type == ICMP6_ECHO_REQUEST ||
		    icmp6->icmp6_type == ICMP6_ECHO_REPLY)
			kst.u.s.lport = ntohs(icmp6->icmp6_id);

	hval = HASH_IN4(&kst.u.hkey) & (nh->hsize - 1);

	/* OK, let's find the state in the host hash */
	sidx = nh->phash[hval];
	while (sidx.idx != 0) {
		pg = PORTGROUP_BYSIDX(cfg, nh, sidx.idx);
		st = &pg->states[sidx.off];
		//DPRINTF("SIDX: %d/%d next: %d/%d", sidx.idx, sidx.off,
		//    st->next.idx, st->next.off);
		if (st->u.hkey == kst.u.hkey && pg->nat_proto == nat_proto)

		DPRINTF(DP_ALL, "XXX: too long %d/%d %d/%d\n",
		    sidx.idx, sidx.off, st->next.idx, st->next.off);
		DPRINTF(DP_GENERIC, "TR host %s %p on cpu %d",
		    inet_ntop(AF_INET6, &nh->addr, a, sizeof(a)),

	if (sidx.idx == 0) {
		st = nat64lsn_create_state(cfg, nh, nat_proto, &kst, &aaddr);
			/* No free states. Request more if we can */
			if (nh->pg_used >= cfg->max_chunks) {
				DPRINTF(DP_DROPS, "PG limit reached "
				    "for host %s (used %u, allocated %u, "
				    "limit %u)", inet_ntop(AF_INET6,
				    &nh->addr, a, sizeof(a)),
				    nh->pg_used * NAT64_CHUNK_SIZE,
				    nh->pg_allocated * NAT64_CHUNK_SIZE,
				    cfg->max_chunks * NAT64_CHUNK_SIZE);
				NAT64STAT_INC(&cfg->base.stats, dropped);

			if ((nh->pg_allocated <=
			    nh->pg_used + NAT64LSN_REMAININGPG) &&
			    nh->pg_allocated < cfg->max_chunks)
				action = 1;	/* Request new indexes */

			//DPRINTF("No state, unlock for %p", nh);
			return (nat64lsn_request_portgroup(cfg, f_id,
			    pm, aaddr, action));

		/* We've got a new state. */
		pg = PORTGROUP_BYSIDX(cfg, nh, sidx.idx);

	/* Okay, state found */

	/* Update necessary fields */
	if (st->timestamp != state_ts)
		st->timestamp = state_ts;
	if ((st->flags & state_flags) != state_flags)
		st->flags |= state_flags;

	/* Copy needed state data */
	aport = htons(pg->aport + sidx.off);

	if (cfg->base.flags & NAT64_LOG) {
		nat64lsn_log(logdata, *pm, AF_INET6, pg->idx, st->cur.off);

	action = nat64_do_handle_ip6(*pm, aaddr, aport, &cfg->base, logdata);
	if (action == NAT64SKIP)
		return (cfg->nomatch_verdict);
	if (action == NAT64MFREE) {
	*pm = NULL;	/* mark mbuf as consumed */
	return (IP_FW_DENY);

/*
 * Main dataplane entry point.
 */
int
ipfw_nat64lsn(struct ip_fw_chain *ch, struct ip_fw_args *args,
    ipfw_insn *cmd, int *done)
{
	struct nat64lsn_cfg *cfg;

	IPFW_RLOCK_ASSERT(ch);

	*done = 1;	/* terminate the search */

	if (cmd->opcode != O_EXTERNAL_ACTION ||
	    cmd->arg1 != V_nat64lsn_eid ||
	    icmd->opcode != O_EXTERNAL_INSTANCE ||
	    (cfg = NAT64_LOOKUP(ch, icmd)) == NULL)

	switch (args->f_id.addr_type) {
		ret = nat64lsn_translate4(cfg, &args->f_id, &args->m);
		ret = nat64lsn_translate6(cfg, &args->f_id, &args->m);
		return (cfg->nomatch_verdict);

static int
nat64lsn_ctor_host(void *mem, int size, void *arg, int flags)
{
	struct nat64lsn_host *nh;

	nh = (struct nat64lsn_host *)mem;
	memset(nh->pg_ptr, 0, sizeof(nh->pg_ptr));
	memset(nh->phash, 0, sizeof(nh->phash));

static int
nat64lsn_ctor_pgidx(void *mem, int size, void *arg, int flags)
{

	memset(mem, 0, size);

void
nat64lsn_init_internal(void)
{

	memset(nat64lsn_proto_map, 0, sizeof(nat64lsn_proto_map));
	/* Set up supported protocol map */
	nat64lsn_proto_map[IPPROTO_TCP] = NAT_PROTO_TCP;
	nat64lsn_proto_map[IPPROTO_UDP] = NAT_PROTO_UDP;
	nat64lsn_proto_map[IPPROTO_ICMP] = NAT_PROTO_ICMP;
	nat64lsn_proto_map[IPPROTO_ICMPV6] = NAT_PROTO_ICMP;
	/* Fill in reverse proto map */
	memset(nat64lsn_rproto_map, 0, sizeof(nat64lsn_rproto_map));
	nat64lsn_rproto_map[NAT_PROTO_TCP] = IPPROTO_TCP;
	nat64lsn_rproto_map[NAT_PROTO_UDP] = IPPROTO_UDP;
	nat64lsn_rproto_map[NAT_PROTO_ICMP] = IPPROTO_ICMPV6;
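
	/*
	 * Note the asymmetry: both IPPROTO_ICMP and IPPROTO_ICMPV6
	 * collapse into the single NAT_PROTO_ICMP class, while the
	 * reverse map reports NAT_PROTO_ICMP as IPPROTO_ICMPV6, i.e.
	 * the IPv6-side view of such a state.
	 */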

	nat64lsn_host_zone = uma_zcreate("NAT64 hosts zone",
	    sizeof(struct nat64lsn_host), nat64lsn_ctor_host, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	nat64lsn_pg_zone = uma_zcreate("NAT64 portgroups zone",
	    sizeof(struct nat64lsn_portgroup), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	nat64lsn_pgidx_zone = uma_zcreate("NAT64 portgroup indexes zone",
	    sizeof(struct nat64lsn_portgroup *) * NAT64LSN_PGIDX_CHUNK,
	    nat64lsn_ctor_pgidx, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

void
nat64lsn_uninit_internal(void)
{

	JQUEUE_LOCK_DESTROY();
	uma_zdestroy(nat64lsn_host_zone);
	uma_zdestroy(nat64lsn_pg_zone);
	uma_zdestroy(nat64lsn_pgidx_zone);
}

void
nat64lsn_start_instance(struct nat64lsn_cfg *cfg)
{

	callout_reset(&cfg->periodic, hz * PERIODIC_DELAY,
	    nat64lsn_periodic, cfg);
}

struct nat64lsn_cfg *
nat64lsn_init_instance(struct ip_fw_chain *ch, size_t numaddr)
{
	struct nat64lsn_cfg *cfg;

	cfg = malloc(sizeof(struct nat64lsn_cfg), M_IPFW, M_WAITOK | M_ZERO);
	TAILQ_INIT(&cfg->jhead);
	COUNTER_ARRAY_ALLOC(cfg->base.stats.cnt, NAT64STATS, M_WAITOK);

	cfg->ihsize = NAT64LSN_HSIZE;
	cfg->ih = malloc(sizeof(void *) * cfg->ihsize, M_IPFW,
	    M_WAITOK | M_ZERO);

	cfg->pg = malloc(sizeof(void *) * numaddr * _ADDR_PG_COUNT, M_IPFW,
	    M_WAITOK | M_ZERO);

	callout_init(&cfg->periodic, CALLOUT_MPSAFE);
	callout_init(&cfg->jcallout, CALLOUT_MPSAFE);

/*
 * Destroy all hosts callback.
 * Called on module unload when all activity has already finished, so
 * it can work without any locks.
 */
static NAT64NOINLINE int
nat64lsn_destroy_host(struct nat64lsn_host *nh, struct nat64lsn_cfg *cfg)
{
	struct nat64lsn_portgroup *pg;

	for (i = nh->pg_used; i > 0; i--) {
		pg = PORTGROUP_BYSIDX(cfg, nh, i);
		cfg->pg[pg->idx] = NULL;
		destroy_portgroup(pg);

void
nat64lsn_destroy_instance(struct nat64lsn_cfg *cfg)
{
	struct nat64lsn_host *nh, *tmp;

	callout_drain(&cfg->jcallout);
	callout_drain(&cfg->periodic);
	I6HASH_FOREACH_SAFE(cfg, nh, tmp, nat64lsn_destroy_host, cfg);
	DPRINTF(DP_OBJ, "instance %s: hosts %d", cfg->name, cfg->ihcount);

	COUNTER_ARRAY_FREE(cfg->base.stats.cnt, NAT64STATS);
	free(cfg->ih, M_IPFW);
	free(cfg->pg, M_IPFW);