2 * Copyright (c) 2015-2016 Yandex LLC
3 * Copyright (c) 2015 Alexander V. Chernikov <melifaro@FreeBSD.org>
4 * Copyright (c) 2016 Andrey V. Elsukov <ae@FreeBSD.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/counter.h>
35 #include <sys/errno.h>
36 #include <sys/kernel.h>
38 #include <sys/malloc.h>
40 #include <sys/module.h>
41 #include <sys/rmlock.h>
42 #include <sys/rwlock.h>
43 #include <sys/socket.h>
44 #include <sys/queue.h>
45 #include <sys/syslog.h>
46 #include <sys/sysctl.h>
49 #include <net/if_var.h>
50 #include <net/if_pflog.h>
53 #include <netinet/in.h>
54 #include <netinet/ip.h>
55 #include <netinet/ip_var.h>
56 #include <netinet/ip_fw.h>
57 #include <netinet/ip6.h>
58 #include <netinet/icmp6.h>
59 #include <netinet/ip_icmp.h>
60 #include <netinet/tcp.h>
61 #include <netinet/udp.h>
62 #include <netinet6/in6_var.h>
63 #include <netinet6/ip6_var.h>
64 #include <netinet6/ip_fw_nat64.h>
66 #include <netpfil/ipfw/ip_fw_private.h>
67 #include <netpfil/pf/pf.h>
71 MALLOC_DEFINE(M_NAT64LSN, "NAT64LSN", "NAT64LSN");
73 static void nat64lsn_periodic(void *data);
74 #define PERIODIC_DELAY 4
75 static uint8_t nat64lsn_proto_map[256];
76 uint8_t nat64lsn_rproto_map[NAT_MAX_PROTO];
78 #define NAT64_FLAG_FIN 0x01 /* FIN was seen */
79 #define NAT64_FLAG_SYN 0x02 /* First syn in->out */
80 #define NAT64_FLAG_ESTAB 0x04 /* Packet with Ack */
81 #define NAT64_FLAGS_TCP (NAT64_FLAG_SYN|NAT64_FLAG_ESTAB|NAT64_FLAG_FIN)
83 #define NAT64_FLAG_RDR 0x80 /* Port redirect */
84 #define NAT64_LOOKUP(chain, cmd) \
85 (struct nat64lsn_cfg *)SRV_OBJECT((chain), (cmd)->arg1)
87 * Delayed job queue, used to create new hosts
96 struct nat64lsn_job_item {
97 TAILQ_ENTRY(nat64lsn_job_item) next;
98 enum nat64lsn_jtype jtype;
99 struct nat64lsn_host *nh;
100 struct nat64lsn_portgroup *pg;
102 struct in6_addr haddr;
107 unsigned int fhash; /* Flow hash */
108 uint32_t aaddr; /* Last used address (net) */
110 struct ipfw_flow_id f_id;
111 uint64_t delmask[NAT64LSN_PGPTRNMASK];
114 static struct mtx jmtx;
115 #define JQUEUE_LOCK_INIT() mtx_init(&jmtx, "qlock", NULL, MTX_DEF)
116 #define JQUEUE_LOCK_DESTROY() mtx_destroy(&jmtx)
117 #define JQUEUE_LOCK() mtx_lock(&jmtx)
118 #define JQUEUE_UNLOCK() mtx_unlock(&jmtx)
120 static void nat64lsn_enqueue_job(struct nat64lsn_cfg *cfg,
121 struct nat64lsn_job_item *ji);
122 static void nat64lsn_enqueue_jobs(struct nat64lsn_cfg *cfg,
123 struct nat64lsn_job_head *jhead, int jlen);
125 static struct nat64lsn_job_item *nat64lsn_create_job(struct nat64lsn_cfg *cfg,
126 const struct ipfw_flow_id *f_id, int jtype);
127 static int nat64lsn_request_portgroup(struct nat64lsn_cfg *cfg,
128 const struct ipfw_flow_id *f_id, struct mbuf **pm, uint32_t aaddr,
130 static int nat64lsn_request_host(struct nat64lsn_cfg *cfg,
131 const struct ipfw_flow_id *f_id, struct mbuf **pm);
132 static int nat64lsn_translate4(struct nat64lsn_cfg *cfg,
133 const struct ipfw_flow_id *f_id, struct mbuf **pm);
134 static int nat64lsn_translate6(struct nat64lsn_cfg *cfg,
135 struct ipfw_flow_id *f_id, struct mbuf **pm);
137 static int alloc_portgroup(struct nat64lsn_job_item *ji);
138 static void destroy_portgroup(struct nat64lsn_portgroup *pg);
139 static void destroy_host6(struct nat64lsn_host *nh);
140 static int alloc_host6(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji);
142 static int attach_portgroup(struct nat64lsn_cfg *cfg,
143 struct nat64lsn_job_item *ji);
144 static int attach_host6(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji);
148 static uma_zone_t nat64lsn_host_zone;
149 static uma_zone_t nat64lsn_pg_zone;
150 static uma_zone_t nat64lsn_pgidx_zone;
152 static unsigned int nat64lsn_periodic_chkstates(struct nat64lsn_cfg *cfg,
153 struct nat64lsn_host *nh);
155 #define I6_hash(x) (djb_hash((const unsigned char *)(x), 16))
156 #define I6_first(_ph, h) (_ph)[h]
157 #define I6_next(x) (x)->next
158 #define I6_val(x) (&(x)->addr)
159 #define I6_cmp(a, b) IN6_ARE_ADDR_EQUAL(a, b)
160 #define I6_lock(a, b)
161 #define I6_unlock(a, b)
163 #define I6HASH_FIND(_cfg, _res, _a) \
164 CHT_FIND(_cfg->ih, _cfg->ihsize, I6_, _res, _a)
165 #define I6HASH_INSERT(_cfg, _i) \
166 CHT_INSERT_HEAD(_cfg->ih, _cfg->ihsize, I6_, _i)
167 #define I6HASH_REMOVE(_cfg, _res, _tmp, _a) \
168 CHT_REMOVE(_cfg->ih, _cfg->ihsize, I6_, _res, _tmp, _a)
170 #define I6HASH_FOREACH_SAFE(_cfg, _x, _tmp, _cb, _arg) \
171 CHT_FOREACH_SAFE(_cfg->ih, _cfg->ihsize, I6_, _x, _tmp, _cb, _arg)
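/*
 * The I6_* accessors above adapt the generic CHT_* chained-hash-table
 * macros to a table of nat64lsn_host entries keyed by IPv6 source address.
 * Conceptually, a lookup such as I6HASH_FIND(cfg, nh, &addr) behaves
 * roughly like the sketch below (illustrative only; the exact expansion is
 * defined by the CHT_FIND macro):
 *
 *	hash = I6_hash(&addr) % cfg->ihsize;
 *	for (nh = I6_first(cfg->ih, hash); nh != NULL; nh = I6_next(nh))
 *		if (I6_cmp(I6_val(nh), &addr))
 *			break;
 */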
173 #define HASH_IN4(x) djb_hash((const unsigned char *)(x), 8)
176 djb_hash(const unsigned char *h, const int len)
178 unsigned int result = 0;
181 for (i = 0; i < len; i++)
182 result = 33 * result ^ h[i];
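/*
 * This is a DJB2-style multiply-by-33 hash, seeded with 0 and folding in
 * each byte with XOR.  For example, hashing the two bytes 'a' (97) and
 * 'b' (98) proceeds as:
 *
 *	result = 33 * 0  ^ 97 = 97
 *	result = 33 * 97 ^ 98 = 3201 ^ 98 = 3299
 *
 * I6_hash() feeds it the 16 bytes of an IPv6 address, HASH_IN4() the
 * 8-byte state key (u.hkey).
 */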
189 bitmask_size(size_t num, int *level)
194 for (c = 0, x = num; num > 1; num /= 64, c++)
201 bitmask_prepare(uint64_t *pmask, size_t bufsize, int level)
205 memset(pmask, 0xFF, bufsize);
206 for (x = 0, z = 1; level > 1; x += z, z *= 64, level--)
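/*
 * Sketch of intent for the two helpers above (the elided loop bodies carry
 * the details): the free-port bitmask is hierarchical with a fan-out of 64,
 * i.e. one uint64_t word summarizes 64 entries of the level below.
 * bitmask_size() counts how many times @num must be divided by 64 to reach
 * 1 -- e.g. num = 4096 yields two levels (64 summary bits over 4096 leaf
 * bits) -- and bitmask_prepare() marks the whole buffer as free (all ones)
 * before fixing up the per-level words.
 */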
213 nat64lsn_log(struct pfloghdr *plog, struct mbuf *m, sa_family_t family,
214 uint32_t n, uint32_t sn)
217 memset(plog, 0, sizeof(*plog));
218 plog->length = PFLOG_REAL_HDRLEN;
220 plog->action = PF_NAT;
222 plog->rulenr = htonl(n);
223 plog->subrulenr = htonl(sn);
224 plog->ruleset[0] = '\0';
225 strlcpy(plog->ifname, "NAT64LSN", sizeof(plog->ifname));
226 ipfw_bpf_mtap2(plog, PFLOG_HDRLEN, m);
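/*
 * nat64lsn_log() synthesizes a pflog(4)-style header for each translated
 * packet: @n (the portgroup index at the call sites) goes into rulenr,
 * @sn (the state offset) into subrulenr, and "NAT64LSN" is recorded as the
 * interface name.  ipfw_bpf_mtap2() then hands the header plus the mbuf to
 * bpf listeners, so translations can be observed with a pflog-aware capture
 * tool such as tcpdump on ipfw's logging interface (assuming logging is
 * enabled on the instance via NAT64_LOG).
 */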
229 * Inspects ICMP packets to see if the message contains a different
230 * packet header, in which case we need to alter @addr and @port.
233 inspect_icmp_mbuf(struct mbuf **m, uint8_t *nat_proto, uint32_t *addr,
239 struct icmphdr *icmp;
243 ip = mtod(*m, struct ip *); /* Outer IP header */
244 off = (ip->ip_hl << 2) + ICMP_MINLEN;
245 if ((*m)->m_len < off)
246 *m = m_pullup(*m, off);
250 ip = mtod(*m, struct ip *); /* Outer IP header */
251 icmp = L3HDR(ip, struct icmphdr *);
252 switch (icmp->icmp_type) {
255 /* Use icmp ID as distinguisher */
256 *port = ntohs(*((uint16_t *)(icmp + 1)));
265 * ICMP_UNREACH and ICMP_TIMXCEED contain an IP header + 64 bits
268 if ((*m)->m_pkthdr.len < off + sizeof(struct ip) + ICMP_MINLEN)
270 if ((*m)->m_len < off + sizeof(struct ip) + ICMP_MINLEN)
271 *m = m_pullup(*m, off + sizeof(struct ip) + ICMP_MINLEN);
274 ip = mtodo(*m, off); /* Inner IP header */
276 off += ip->ip_hl << 2; /* Skip inner IP header */
277 *addr = ntohl(ip->ip_src.s_addr);
278 if ((*m)->m_len < off + ICMP_MINLEN)
279 *m = m_pullup(*m, off + ICMP_MINLEN);
284 tcp = mtodo(*m, off);
285 *nat_proto = NAT_PROTO_TCP;
286 *port = ntohs(tcp->th_sport);
289 udp = mtodo(*m, off);
290 *nat_proto = NAT_PROTO_UDP;
291 *port = ntohs(udp->uh_sport);
295 * We will translate only ICMP errors for our ICMP
298 icmp = mtodo(*m, off);
299 if (icmp->icmp_type != ICMP_ECHO)
301 *port = ntohs(*((uint16_t *)(icmp + 1)));
307 static inline uint8_t
308 convert_tcp_flags(uint8_t flags)
312 result = flags & (TH_FIN|TH_SYN);
313 result |= (flags & TH_RST) >> 2; /* Treat RST as FIN */
314 result |= (flags & TH_ACK) >> 2; /* Treat ACK as estab */
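/*
 * convert_tcp_flags() squeezes the TCP header flags into the NAT64_FLAG_*
 * bits used for state expiry: TH_FIN (0x01) and TH_SYN (0x02) map directly,
 * TH_RST (0x04) is shifted down onto NAT64_FLAG_FIN, and TH_ACK (0x10) is
 * shifted down onto NAT64_FLAG_ESTAB (0x04).  For example, a SYN|ACK
 * segment (0x12) yields NAT64_FLAG_SYN | NAT64_FLAG_ESTAB (0x06).
 */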
319 static NAT64NOINLINE int
320 nat64lsn_translate4(struct nat64lsn_cfg *cfg, const struct ipfw_flow_id *f_id,
323 struct pfloghdr loghdr, *logdata;
324 struct in6_addr src6;
325 struct nat64lsn_portgroup *pg;
326 struct nat64lsn_host *nh;
327 struct nat64lsn_state *st;
330 uint16_t state_flags, state_ts;
331 uint16_t port, lport;
336 port = f_id->dst_port;
337 if (addr < cfg->prefix4 || addr > cfg->pmask4) {
338 NAT64STAT_INC(&cfg->base.stats, nomatch4);
339 return (cfg->nomatch_verdict);
342 /* Check if protocol is supported and get its short id */
343 nat_proto = nat64lsn_proto_map[f_id->proto];
344 if (nat_proto == 0) {
345 NAT64STAT_INC(&cfg->base.stats, noproto);
346 return (cfg->nomatch_verdict);
349 /* We might need to handle icmp differently */
350 if (nat_proto == NAT_PROTO_ICMP) {
351 ret = inspect_icmp_mbuf(pm, &nat_proto, &addr, &port);
354 NAT64STAT_INC(&cfg->base.stats, nomem);
357 NAT64STAT_INC(&cfg->base.stats, noproto);
358 return (cfg->nomatch_verdict);
360 /* XXX: Check addr for validity */
361 if (addr < cfg->prefix4 || addr > cfg->pmask4) {
362 NAT64STAT_INC(&cfg->base.stats, nomatch4);
363 return (cfg->nomatch_verdict);
367 /* Calc portgroup offset w.r.t protocol */
368 pg = GET_PORTGROUP(cfg, addr, nat_proto, port);
370 /* Check if this port is occupied by any portgroup */
372 NAT64STAT_INC(&cfg->base.stats, nomatch4);
374 DPRINTF(DP_STATE, "NOMATCH %u %d %d (%d)", addr, nat_proto, port,
375 _GET_PORTGROUP_IDX(cfg, addr, nat_proto, port));
377 return (cfg->nomatch_verdict);
380 /* TODO: Check flags to see if we need to do some static mapping */
383 /* Prepare some fields we might need to update */
385 ip = mtod(*pm, struct ip *);
386 if (ip->ip_p == IPPROTO_TCP)
387 state_flags = convert_tcp_flags(
388 L3HDR(ip, struct tcphdr *)->th_flags);
392 /* Lock host and get port mapping */
395 st = &pg->states[port & (NAT64_CHUNK_SIZE - 1)];
396 if (st->timestamp != state_ts)
397 st->timestamp = state_ts;
398 if ((st->flags & state_flags) != state_flags)
399 st->flags |= state_flags;
400 lport = htons(st->u.s.lport);
404 if (cfg->base.flags & NAT64_LOG) {
406 nat64lsn_log(logdata, *pm, AF_INET, pg->idx, st->cur.off);
410 nat64_embed_ip4(&cfg->base, htonl(f_id->src_ip), &src6);
411 ret = nat64_do_handle_ip4(*pm, &src6, &nh->addr, lport,
412 &cfg->base, logdata);
414 if (ret == NAT64SKIP)
415 return (cfg->nomatch_verdict);
416 if (ret == NAT64MFREE)
424 nat64lsn_dump_state(const struct nat64lsn_cfg *cfg,
425 const struct nat64lsn_portgroup *pg, const struct nat64lsn_state *st,
426 const char *px, int off)
428 char s[INET6_ADDRSTRLEN], a[INET_ADDRSTRLEN], d[INET_ADDRSTRLEN];
430 if ((V_nat64_debug & DP_STATE) == 0)
432 inet_ntop(AF_INET6, &pg->host->addr, s, sizeof(s));
433 inet_ntop(AF_INET, &pg->aaddr, a, sizeof(a));
434 inet_ntop(AF_INET, &st->u.s.faddr, d, sizeof(d));
436 DPRINTF(DP_STATE, "%s: PG %d ST [%p|%d]: %s:%d/%d <%s:%d> "
437 "%s:%d AGE %d", px, pg->idx, st, off,
438 s, st->u.s.lport, pg->nat_proto, a, pg->aport + off,
439 d, st->u.s.fport, GET_AGE(st->timestamp));
443 * Check if particular TCP state is stale and should be deleted.
444 * Return 1 if true, 0 otherwise.
447 nat64lsn_periodic_check_tcp(const struct nat64lsn_cfg *cfg,
448 const struct nat64lsn_state *st, int age)
452 if (st->flags & NAT64_FLAG_FIN)
453 ttl = cfg->st_close_ttl;
454 else if (st->flags & NAT64_FLAG_ESTAB)
455 ttl = cfg->st_estab_ttl;
456 else if (st->flags & NAT64_FLAG_SYN)
457 ttl = cfg->st_syn_ttl;
459 ttl = cfg->st_syn_ttl;
467 * Check if nat state @st is stale and should be deleted.
468 * Return 1 if true, 0 otherwise.
470 static NAT64NOINLINE int
471 nat64lsn_periodic_chkstate(const struct nat64lsn_cfg *cfg,
472 const struct nat64lsn_portgroup *pg, const struct nat64lsn_state *st)
476 age = GET_AGE(st->timestamp);
479 /* Skip immutable records */
480 if (st->flags & NAT64_FLAG_RDR)
483 switch (pg->nat_proto) {
485 delete = nat64lsn_periodic_check_tcp(cfg, st, age);
488 if (age > cfg->st_udp_ttl)
492 if (age > cfg->st_icmp_ttl)
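/*
 * Expiry summary for the checks above (ages are measured against
 * st->timestamp via GET_AGE()):
 *
 *	TCP, FIN/RST seen	-> cfg->st_close_ttl
 *	TCP, established	-> cfg->st_estab_ttl
 *	TCP, SYN only / other	-> cfg->st_syn_ttl
 *	UDP			-> cfg->st_udp_ttl
 *	ICMP			-> cfg->st_icmp_ttl
 *	NAT64_FLAG_RDR states	-> skipped (static port redirects)
 */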
502 * The following structures and functions
503 * are used to implement an SLIST_FOREACH_SAFE()
504 * analog for states identified by struct st_ptr.
508 struct nat64lsn_portgroup *pg;
509 struct nat64lsn_state *st;
510 struct st_ptr sidx_next;
513 static struct st_idx *
514 st_first(const struct nat64lsn_cfg *cfg, const struct nat64lsn_host *nh,
515 struct st_ptr *sidx, struct st_idx *si)
517 struct nat64lsn_portgroup *pg;
518 struct nat64lsn_state *st;
520 if (sidx->idx == 0) {
521 memset(si, 0, sizeof(*si));
525 pg = PORTGROUP_BYSIDX(cfg, nh, sidx->idx);
526 st = &pg->states[sidx->off];
530 si->sidx_next = st->next;
535 static struct st_idx *
536 st_next(const struct nat64lsn_cfg *cfg, const struct nat64lsn_host *nh,
540 struct nat64lsn_portgroup *pg;
541 struct nat64lsn_state *st;
543 sidx = si->sidx_next;
545 memset(si, 0, sizeof(*si));
551 pg = PORTGROUP_BYSIDX(cfg, nh, sidx.idx);
552 st = &pg->states[sidx.off];
556 si->sidx_next = st->next;
561 static struct st_idx *
562 st_save_cond(struct st_idx *si_dst, struct st_idx *si)
571 nat64lsn_periodic_chkstates(struct nat64lsn_cfg *cfg, struct nat64lsn_host *nh)
573 struct st_idx si, si_prev;
575 unsigned int delcount;
578 for (i = 0; i < nh->hsize; i++) {
579 memset(&si_prev, 0, sizeof(si_prev));
580 for (st_first(cfg, nh, &nh->phash[i], &si);
582 st_save_cond(&si_prev, &si), st_next(cfg, nh, &si)) {
583 if (nat64lsn_periodic_chkstate(cfg, si.pg, si.st) == 0)
585 nat64lsn_dump_state(cfg, si.pg, si.st, "DELETE STATE",
587 /* Unlink from hash */
588 if (si_prev.st != NULL)
589 si_prev.st->next = si.st->next;
591 nh->phash[i] = si.st->next;
592 /* Delete state and free its data */
593 PG_MARK_FREE_IDX(si.pg, si.st->cur.off);
594 memset(si.st, 0, sizeof(struct nat64lsn_state));
598 /* Update portgroup timestamp */
599 SET_AGE(si.pg->timestamp);
602 NAT64STAT_ADD(&cfg->base.stats, sdeleted, delcount);
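/*
 * Rough shape of the traversal used above: states are chained by
 * struct st_ptr {portgroup index, offset} pairs rather than by pointers,
 * so every step must be resolved through PORTGROUP_BYSIDX():
 *
 *	st_first()      resolve nh->phash[i] into (pg, st)
 *	st_next()       follow st->next and resolve it the same way
 *	st_save_cond()  remember the last surviving entry, so a stale state
 *	                can be unlinked much like SLIST_REMOVE() would do
 *
 * This is the SLIST_FOREACH_SAFE() analog mentioned above.
 */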
607 * Checks if the portgroup is unused and can be deleted.
608 * Returns 1 if stale, 0 otherwise.
611 stale_pg(const struct nat64lsn_cfg *cfg, const struct nat64lsn_portgroup *pg)
614 if (!PG_IS_EMPTY(pg))
616 if (GET_AGE(pg->timestamp) < cfg->pg_delete_delay)
622 * Checks if the host record is unused and can be deleted.
623 * Returns 1 if stale, 0 otherwise.
626 stale_nh(const struct nat64lsn_cfg *cfg, const struct nat64lsn_host *nh)
629 if (nh->pg_used != 0)
631 if (GET_AGE(nh->timestamp) < cfg->nh_delete_delay)
636 struct nat64lsn_periodic_data {
637 struct nat64lsn_cfg *cfg;
638 struct nat64lsn_job_head jhead;
642 static NAT64NOINLINE int
643 nat64lsn_periodic_chkhost(struct nat64lsn_host *nh,
644 struct nat64lsn_periodic_data *d)
646 char a[INET6_ADDRSTRLEN];
647 struct nat64lsn_portgroup *pg;
648 struct nat64lsn_job_item *ji;
649 uint64_t delmask[NAT64LSN_PGPTRNMASK];
653 memset(delmask, 0, sizeof(delmask));
655 inet_ntop(AF_INET6, &nh->addr, a, sizeof(a));
656 DPRINTF(DP_JQUEUE, "Checking %s host %s on cpu %d",
657 stale_nh(d->cfg, nh) ? "stale" : "non-stale", a, curcpu);
658 if (!stale_nh(d->cfg, nh)) {
659 /* Non-stale host. Inspect internals */
662 /* Stage 1: Check&expire states */
663 if (nat64lsn_periodic_chkstates(d->cfg, nh) != 0)
664 SET_AGE(nh->timestamp);
666 /* Stage 2: Check if we need to expire */
667 for (i = 0; i < nh->pg_used; i++) {
668 pg = PORTGROUP_BYSIDX(d->cfg, nh, i + 1);
672 /* Check if we can delete portgroup */
673 if (stale_pg(d->cfg, pg) == 0)
676 DPRINTF(DP_JQUEUE, "Check PG %d", i);
677 delmask[i / 64] |= ((uint64_t)1 << (i % 64));
686 DPRINTF(DP_JQUEUE, "Queueing %d portgroups for deletion", delcount);
687 /* We have something to delete - add it to queue */
688 ji = nat64lsn_create_job(d->cfg, NULL, JTYPE_DELPORTGROUP);
692 ji->haddr = nh->addr;
693 ji->delcount = delcount;
694 memcpy(ji->delmask, delmask, sizeof(ji->delmask));
696 TAILQ_INSERT_TAIL(&d->jhead, ji, next);
702 * This procedure is used to perform various maintenance
703 * on the dynamic hash list. It is scheduled every PERIODIC_DELAY (4) seconds.
706 nat64lsn_periodic(void *data)
708 struct ip_fw_chain *ch;
710 struct nat64lsn_cfg *cfg;
711 struct nat64lsn_periodic_data d;
712 struct nat64lsn_host *nh, *tmp;
714 cfg = (struct nat64lsn_cfg *) data;
716 CURVNET_SET(cfg->vp);
718 memset(&d, 0, sizeof(d));
720 TAILQ_INIT(&d.jhead);
724 /* Stage 1: foreach host, check all its portgroups */
725 I6HASH_FOREACH_SAFE(cfg, nh, tmp, nat64lsn_periodic_chkhost, &d);
727 /* Enqueue everything we have requested */
728 nat64lsn_enqueue_jobs(cfg, &d.jhead, d.jlen);
730 callout_schedule(&cfg->periodic, hz * PERIODIC_DELAY);
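/*
 * One periodic pass therefore does only bookkeeping: it walks every host
 * (I6HASH_FOREACH_SAFE), expires stale states via
 * nat64lsn_periodic_chkhost(), collects deletable portgroups into
 * JTYPE_DELPORTGROUP jobs and hands them to nat64lsn_enqueue_jobs(); the
 * actual teardown happens later in nat64lsn_do_request(), after the
 * callout has re-armed itself.
 */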
737 static NAT64NOINLINE void
738 reinject_mbuf(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji)
744 /* Request has failed or packet type is wrong */
745 if (ji->f_id.addr_type != 6 || ji->done == 0) {
748 NAT64STAT_INC(&cfg->base.stats, dropped);
749 DPRINTF(DP_DROPS, "mbuf dropped: type %d, done %d",
750 ji->jtype, ji->done);
755 * XXX: Limit recursion level
758 NAT64STAT_INC(&cfg->base.stats, jreinjected);
759 DPRINTF(DP_JQUEUE, "Reinject mbuf");
760 nat64lsn_translate6(cfg, &ji->f_id, &ji->m);
764 destroy_portgroup(struct nat64lsn_portgroup *pg)
767 DPRINTF(DP_OBJ, "DESTROY PORTGROUP %d %p", pg->idx, pg);
768 uma_zfree(nat64lsn_pg_zone, pg);
771 static NAT64NOINLINE int
772 alloc_portgroup(struct nat64lsn_job_item *ji)
774 struct nat64lsn_portgroup *pg;
776 pg = uma_zalloc(nat64lsn_pg_zone, M_NOWAIT);
780 if (ji->needs_idx != 0) {
781 ji->spare_idx = uma_zalloc(nat64lsn_pgidx_zone, M_NOWAIT);
782 /* Failed alloc isn't always fatal, so don't check */
784 memset(&pg->freemask, 0xFF, sizeof(pg->freemask));
785 pg->nat_proto = ji->nat_proto;
792 destroy_host6(struct nat64lsn_host *nh)
794 char a[INET6_ADDRSTRLEN];
797 inet_ntop(AF_INET6, &nh->addr, a, sizeof(a));
798 DPRINTF(DP_OBJ, "DESTROY HOST %s %p (pg used %d)", a, nh,
800 NAT64_LOCK_DESTROY(nh);
801 for (i = 0; i < nh->pg_allocated / NAT64LSN_PGIDX_CHUNK; i++)
802 uma_zfree(nat64lsn_pgidx_zone, PORTGROUP_CHUNK(nh, i));
803 uma_zfree(nat64lsn_host_zone, nh);
806 static NAT64NOINLINE int
807 alloc_host6(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji)
809 struct nat64lsn_host *nh;
810 char a[INET6_ADDRSTRLEN];
812 nh = uma_zalloc(nat64lsn_host_zone, M_NOWAIT);
815 PORTGROUP_CHUNK(nh, 0) = uma_zalloc(nat64lsn_pgidx_zone, M_NOWAIT);
816 if (PORTGROUP_CHUNK(nh, 0) == NULL) {
817 uma_zfree(nat64lsn_host_zone, nh);
820 if (alloc_portgroup(ji) != 0) {
821 NAT64STAT_INC(&cfg->base.stats, jportfails);
822 uma_zfree(nat64lsn_pgidx_zone, PORTGROUP_CHUNK(nh, 0));
823 uma_zfree(nat64lsn_host_zone, nh);
828 nh->addr = ji->haddr;
829 nh->hsize = NAT64LSN_HSIZE; /* XXX: hardcoded size */
830 nh->pg_allocated = NAT64LSN_PGIDX_CHUNK;
834 inet_ntop(AF_INET6, &nh->addr, a, sizeof(a));
835 DPRINTF(DP_OBJ, "ALLOC HOST %s %p", a, ji->nh);
840 * Finds free @pg index inside @nh
842 static NAT64NOINLINE int
843 find_nh_pg_idx(struct nat64lsn_cfg *cfg, struct nat64lsn_host *nh, int *idx)
847 for (i = 0; i < nh->pg_allocated; i++) {
848 if (PORTGROUP_BYSIDX(cfg, nh, i + 1) == NULL) {
856 static NAT64NOINLINE int
857 attach_host6(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji)
859 char a[INET6_ADDRSTRLEN];
860 struct nat64lsn_host *nh;
862 I6HASH_FIND(cfg, nh, &ji->haddr);
864 /* Add new host to list */
866 I6HASH_INSERT(cfg, nh);
870 inet_ntop(AF_INET6, &nh->addr, a, sizeof(a));
871 DPRINTF(DP_OBJ, "ATTACH HOST %s %p", a, nh);
873 * Try to add portgroup.
874 * Note it will automatically set
875 * 'done' on ji if successful.
877 if (attach_portgroup(cfg, ji) != 0) {
878 DPRINTF(DP_DROPS, "%s %p failed to attach PG",
880 NAT64STAT_INC(&cfg->base.stats, jportfails);
887 * nh isn't NULL. This probably means we had several simultaneous
888 * host requests. The previous request has already attached
889 * this host. Requeue the attached mbuf and mark the job as done, but
890 * leave the nh and pg pointers unchanged, so nat64lsn_do_request()
891 * will release all allocated resources.
893 inet_ntop(AF_INET6, &nh->addr, a, sizeof(a));
894 DPRINTF(DP_OBJ, "%s %p is already attached as %p",
900 static NAT64NOINLINE int
901 find_pg_place_addr(const struct nat64lsn_cfg *cfg, int addr_off,
902 int nat_proto, uint16_t *aport, int *ppg_idx)
906 pg_idx = addr_off * _ADDR_PG_COUNT +
907 (nat_proto - 1) * _ADDR_PG_PROTO_COUNT;
909 for (j = NAT64_MIN_CHUNK; j < _ADDR_PG_PROTO_COUNT; j++) {
910 if (cfg->pg[pg_idx + j] != NULL)
913 *aport = j * NAT64_CHUNK_SIZE;
914 *ppg_idx = pg_idx + j;
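/*
 * Portgroup placement arithmetic used above (interpretation of the index
 * math; the _ADDR_PG_* constants are defined elsewhere): each alias IPv4
 * address appears to own _ADDR_PG_COUNT consecutive slots in cfg->pg[],
 * subdivided per NAT protocol into _ADDR_PG_PROTO_COUNT chunks of
 * NAT64_CHUNK_SIZE ports, so that
 *
 *	pg_idx = addr_off * _ADDR_PG_COUNT
 *	    + (nat_proto - 1) * _ADDR_PG_PROTO_COUNT + j;
 *	aport  = j * NAT64_CHUNK_SIZE;
 *
 * The scan starts at j = NAT64_MIN_CHUNK, presumably to keep the lowest
 * port chunks (well-known ports) out of the translation pool.
 */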
922 * XXX: This function needs to be rewritten to
923 * use the free bitmask for faster pg lookup;
924 * additionally, it should take into consideration
925 * a) randomization and
926 * b) previous addresses allocated to the given nat instance
929 static NAT64NOINLINE int
930 find_portgroup_place(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji,
931 uint32_t *aaddr, uint16_t *aport, int *ppg_idx)
936 * XXX: Use bitmask index to be able to find/check if IP address
937 * has some spare pg's
939 nat_proto = ji->nat_proto;
941 /* First, try to use same address */
942 if (ji->aaddr != 0) {
943 i = ntohl(ji->aaddr) - cfg->prefix4;
944 if (find_pg_place_addr(cfg, i, nat_proto, aport,
947 *aaddr = htonl(cfg->prefix4 + i);
952 /* Next, try to use random address based on flow hash */
953 i = ji->fhash % (1 << (32 - cfg->plen4));
954 if (find_pg_place_addr(cfg, i, nat_proto, aport, ppg_idx) != 0) {
956 *aaddr = htonl(cfg->prefix4 + i);
961 /* Last one: simply find ANY available */
962 for (i = 0; i < (1 << (32 - cfg->plen4)); i++) {
963 if (find_pg_place_addr(cfg, i, nat_proto, aport,
966 *aaddr = htonl(cfg->prefix4 + i);
974 static NAT64NOINLINE int
975 attach_portgroup(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji)
977 char a[INET6_ADDRSTRLEN];
978 struct nat64lsn_portgroup *pg;
979 struct nat64lsn_host *nh;
982 int nh_pg_idx, pg_idx;
987 * Find source host and bind: we can't rely on
990 I6HASH_FIND(cfg, nh, &ji->haddr);
994 /* Find spare port chunk */
995 if (find_portgroup_place(cfg, ji, &aaddr, &aport, &pg_idx) != 0) {
996 inet_ntop(AF_INET6, &nh->addr, a, sizeof(a));
997 DPRINTF(DP_OBJ | DP_DROPS, "empty PG not found for %s", a);
1001 /* Expand PG indexes if needed */
1002 if (nh->pg_allocated < cfg->max_chunks && ji->spare_idx != NULL) {
1003 PORTGROUP_CHUNK(nh, nh->pg_allocated / NAT64LSN_PGIDX_CHUNK) =
1005 nh->pg_allocated += NAT64LSN_PGIDX_CHUNK;
1006 ji->spare_idx = NULL;
1009 /* Find empty index to store PG in the @nh */
1010 if (find_nh_pg_idx(cfg, nh, &nh_pg_idx) != 0) {
1011 inet_ntop(AF_INET6, &nh->addr, a, sizeof(a));
1012 DPRINTF(DP_OBJ | DP_DROPS, "free PG index not found for %s",
1017 cfg->pg[pg_idx] = pg;
1018 cfg->protochunks[pg->nat_proto]++;
1019 NAT64STAT_INC(&cfg->base.stats, spgcreated);
1025 SET_AGE(pg->timestamp);
1027 PORTGROUP_BYSIDX(cfg, nh, nh_pg_idx + 1) = pg;
1028 if (nh->pg_used == nh_pg_idx)
1030 SET_AGE(nh->timestamp);
1038 static NAT64NOINLINE void
1039 consider_del_portgroup(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji)
1041 struct nat64lsn_host *nh, *nh_tmp;
1042 struct nat64lsn_portgroup *pg, *pg_list[256];
1043 int i, pg_lidx, idx;
1045 /* Find source host */
1046 I6HASH_FIND(cfg, nh, &ji->haddr);
1047 if (nh == NULL || nh->pg_used == 0)
1050 memset(pg_list, 0, sizeof(pg_list));
1055 for (i = nh->pg_used - 1; i >= 0; i--) {
1056 if ((ji->delmask[i / 64] & ((uint64_t)1 << (i % 64))) == 0)
1058 pg = PORTGROUP_BYSIDX(cfg, nh, i + 1);
1060 /* Check that PG isn't busy. */
1061 if (stale_pg(cfg, pg) == 0)
1065 pg_list[pg_lidx++] = pg;
1066 PORTGROUP_BYSIDX(cfg, nh, i + 1) = NULL;
1068 idx = _GET_PORTGROUP_IDX(cfg, ntohl(pg->aaddr), pg->nat_proto,
1070 KASSERT(cfg->pg[idx] == pg, ("Non matched pg"));
1071 cfg->pg[idx] = NULL;
1072 cfg->protochunks[pg->nat_proto]--;
1073 NAT64STAT_INC(&cfg->base.stats, spgdeleted);
1075 /* Decrease pg_used */
1076 while (nh->pg_used > 0 &&
1077 PORTGROUP_BYSIDX(cfg, nh, nh->pg_used) == NULL)
1080 /* Check if the on-stack buffer is full */
1081 if (pg_lidx == nitems(pg_list))
1087 if (stale_nh(cfg, nh)) {
1088 I6HASH_REMOVE(cfg, nh, nh_tmp, &ji->haddr);
1089 KASSERT(nh != NULL, ("Unable to find address"));
1092 I6HASH_FIND(cfg, nh, &ji->haddr);
1093 KASSERT(nh == NULL, ("Failed to delete address"));
1096 /* TODO: Delay freeing portgroups */
1097 while (pg_lidx > 0) {
1099 NAT64STAT_INC(&cfg->base.stats, spgdeleted);
1100 destroy_portgroup(pg_list[pg_lidx]);
1105 * Main request handler.
1106 * Responsible for handling the jqueue, e.g.
1107 * creating new hosts and adding/deleting portgroups.
1109 static NAT64NOINLINE void
1110 nat64lsn_do_request(void *data)
1113 struct nat64lsn_job_head jhead;
1114 struct nat64lsn_job_item *ji;
1116 struct nat64lsn_cfg *cfg = (struct nat64lsn_cfg *) data;
1117 struct ip_fw_chain *ch;
1120 CURVNET_SET(cfg->vp);
1124 /* XXX: We're running unlocked here */
1132 TAILQ_SWAP(&jhead, &cfg->jhead, nat64lsn_job_item, next);
1137 /* check if we need to resize hash */
1139 if (cfg->ihcount > cfg->ihsize && cfg->ihsize < 65536) {
1140 nhsize = cfg->ihsize;
1141 for ( ; cfg->ihcount > nhsize && nhsize < 65536; nhsize *= 2)
1143 } else if (cfg->ihcount < cfg->ihsize * 4) {
1144 nhsize = cfg->ihsize;
1145 for ( ; cfg->ihcount < nhsize * 4 && nhsize > 32; nhsize /= 2)
1151 if (TAILQ_EMPTY(&jhead)) {
1156 NAT64STAT_INC(&cfg->base.stats, jcalls);
1157 DPRINTF(DP_JQUEUE, "count=%d", jcount);
1161 * What we should do here is to build a hash
1162 * to ensure we don't have lots of duplicate requests.
1163 * Skip this for now.
1165 * TODO: Limit per-call number of items
1168 /* Pre-allocate everything for entire chain */
1169 TAILQ_FOREACH(ji, &jhead, next) {
1170 switch (ji->jtype) {
1172 if (alloc_host6(cfg, ji) != 0)
1173 NAT64STAT_INC(&cfg->base.stats,
1176 case JTYPE_NEWPORTGROUP:
1177 if (alloc_portgroup(ji) != 0)
1178 NAT64STAT_INC(&cfg->base.stats,
1181 case JTYPE_DELPORTGROUP:
1182 delcount += ji->delcount;
1190 * TODO: Alloc new hash
1197 /* Apply all changes in batch */
1201 TAILQ_FOREACH(ji, &jhead, next) {
1202 switch (ji->jtype) {
1205 attach_host6(cfg, ji);
1207 case JTYPE_NEWPORTGROUP:
1208 if (ji->pg != NULL &&
1209 attach_portgroup(cfg, ji) != 0)
1210 NAT64STAT_INC(&cfg->base.stats,
1213 case JTYPE_DELPORTGROUP:
1214 consider_del_portgroup(cfg, ji);
1220 /* XXX: Move everything to new hash */
1224 IPFW_UH_WUNLOCK(ch);
1226 /* Flush unused entries */
1227 while (!TAILQ_EMPTY(&jhead)) {
1228 ji = TAILQ_FIRST(&jhead);
1229 TAILQ_REMOVE(&jhead, ji, next);
1231 destroy_host6(ji->nh);
1233 destroy_portgroup(ji->pg);
1235 reinject_mbuf(cfg, ji);
1236 if (ji->spare_idx != NULL)
1237 uma_zfree(nat64lsn_pgidx_zone, ji->spare_idx);
1243 static NAT64NOINLINE struct nat64lsn_job_item *
1244 nat64lsn_create_job(struct nat64lsn_cfg *cfg, const struct ipfw_flow_id *f_id,
1247 struct nat64lsn_job_item *ji;
1248 struct in6_addr haddr;
1252 * Do not try to lock possibly contested mutex if we're near the limit.
1253 * Drop packet instead.
1255 if (cfg->jlen >= cfg->jmaxlen) {
1256 NAT64STAT_INC(&cfg->base.stats, jmaxlen);
1260 memset(&haddr, 0, sizeof(haddr));
1263 haddr = f_id->src_ip6;
1264 nat_proto = nat64lsn_proto_map[f_id->proto];
1266 DPRINTF(DP_JQUEUE, "REQUEST pg nat_proto %d on proto %d",
1267 nat_proto, f_id->proto);
1273 ji = malloc(sizeof(struct nat64lsn_job_item), M_IPFW,
1277 NAT64STAT_INC(&cfg->base.stats, jnomem);
1286 ji->nat_proto = nat_proto;
1292 static NAT64NOINLINE void
1293 nat64lsn_enqueue_job(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji)
1300 TAILQ_INSERT_TAIL(&cfg->jhead, ji, next);
1302 NAT64STAT_INC(&cfg->base.stats, jrequests);
1304 if (callout_pending(&cfg->jcallout) == 0)
1305 callout_reset(&cfg->jcallout, 1, nat64lsn_do_request, cfg);
1309 static NAT64NOINLINE void
1310 nat64lsn_enqueue_jobs(struct nat64lsn_cfg *cfg,
1311 struct nat64lsn_job_head *jhead, int jlen)
1314 if (TAILQ_EMPTY(jhead))
1317 /* Attach current queue to execution one */
1319 TAILQ_CONCAT(&cfg->jhead, jhead, next);
1321 NAT64STAT_ADD(&cfg->base.stats, jrequests, jlen);
1323 if (callout_pending(&cfg->jcallout) == 0)
1324 callout_reset(&cfg->jcallout, 1, nat64lsn_do_request, cfg);
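/*
 * Both enqueue paths above share the same pattern: append the job(s) to
 * cfg->jhead (serialized by the jmtx job-queue mutex) and, if no run is
 * already pending, arm cfg->jcallout to fire on the next tick.
 * nat64lsn_do_request() then swaps the whole queue out in one batch,
 * pre-allocates memory outside the locks, and applies the changes under
 * the ipfw UH write lock, so the expensive work (host/portgroup creation,
 * hash resize, teardown) never happens in the per-packet path.
 */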
1329 flow6_hash(const struct ipfw_flow_id *f_id)
1331 unsigned char hbuf[36];
1333 memcpy(hbuf, &f_id->dst_ip6, 16);
1334 memcpy(&hbuf[16], &f_id->src_ip6, 16);
1335 memcpy(&hbuf[32], &f_id->dst_port, 2);
1336 memcpy(&hbuf[34], &f_id->src_port, 2);
1338 return (djb_hash(hbuf, sizeof(hbuf)));
1341 static NAT64NOINLINE int
1342 nat64lsn_request_host(struct nat64lsn_cfg *cfg,
1343 const struct ipfw_flow_id *f_id, struct mbuf **pm)
1345 struct nat64lsn_job_item *ji;
1351 ji = nat64lsn_create_job(cfg, f_id, JTYPE_NEWHOST);
1354 NAT64STAT_INC(&cfg->base.stats, dropped);
1355 DPRINTF(DP_DROPS, "failed to create job");
1358 /* Provide pseudo-random value based on flow */
1359 ji->fhash = flow6_hash(f_id);
1360 nat64lsn_enqueue_job(cfg, ji);
1361 NAT64STAT_INC(&cfg->base.stats, jhostsreq);
1364 return (IP_FW_DENY);
1367 static NAT64NOINLINE int
1368 nat64lsn_request_portgroup(struct nat64lsn_cfg *cfg,
1369 const struct ipfw_flow_id *f_id, struct mbuf **pm, uint32_t aaddr,
1372 struct nat64lsn_job_item *ji;
1378 ji = nat64lsn_create_job(cfg, f_id, JTYPE_NEWPORTGROUP);
1381 NAT64STAT_INC(&cfg->base.stats, dropped);
1382 DPRINTF(DP_DROPS, "failed to create job");
1385 /* Provide pseudo-random value based on flow */
1386 ji->fhash = flow6_hash(f_id);
1388 ji->needs_idx = needs_idx;
1389 nat64lsn_enqueue_job(cfg, ji);
1390 NAT64STAT_INC(&cfg->base.stats, jportreq);
1393 return (IP_FW_DENY);
1396 static NAT64NOINLINE struct nat64lsn_state *
1397 nat64lsn_create_state(struct nat64lsn_cfg *cfg, struct nat64lsn_host *nh,
1398 int nat_proto, struct nat64lsn_state *kst, uint32_t *aaddr)
1400 struct nat64lsn_portgroup *pg;
1401 struct nat64lsn_state *st;
1404 /* XXX: create additional bitmask for selecting proper portgroup */
1405 for (i = 0; i < nh->pg_used; i++) {
1406 pg = PORTGROUP_BYSIDX(cfg, nh, i + 1);
1411 if (pg->nat_proto != nat_proto)
1414 off = PG_GET_FREE_IDX(pg);
1416 /* We have found spare state. Use it */
1418 PG_MARK_BUSY_IDX(pg, off);
1419 st = &pg->states[off];
1422 * Fill in new info. Assume state was zeroed.
1423 * Timestamp and flags will be filled by caller.
1426 st->cur.idx = i + 1;
1429 /* Insert into host hash table */
1430 hval = HASH_IN4(&st->u.hkey) & (nh->hsize - 1);
1431 st->next = nh->phash[hval];
1432 nh->phash[hval] = st->cur;
1434 nat64lsn_dump_state(cfg, pg, st, "ALLOC STATE", off);
1436 NAT64STAT_INC(&cfg->base.stats, screated);
1440 /* Save last used alias address */
1447 static NAT64NOINLINE int
1448 nat64lsn_translate6(struct nat64lsn_cfg *cfg, struct ipfw_flow_id *f_id,
1451 struct pfloghdr loghdr, *logdata;
1452 char a[INET6_ADDRSTRLEN];
1453 struct nat64lsn_host *nh;
1455 struct nat64lsn_state *st, kst;
1456 struct nat64lsn_portgroup *pg;
1457 struct icmp6_hdr *icmp6;
1459 int action, hval, nat_proto, proto;
1460 uint16_t aport, state_ts, state_flags;
1462 /* Check if af/protocol is supported and get its short id */
1463 nat_proto = nat64lsn_proto_map[f_id->proto];
1464 if (nat_proto == 0) {
1466 * Since we can be called from the jobs handler, we need
1467 * to free the mbuf ourselves rather than leave this task to
1468 * ipfw_check_packet().
1470 NAT64STAT_INC(&cfg->base.stats, noproto);
1474 /* Try to find host first */
1475 I6HASH_FIND(cfg, nh, &f_id->src_ip6);
1478 return (nat64lsn_request_host(cfg, f_id, pm));
1480 /* Fill-in on-stack state structure */
1481 kst.u.s.faddr = nat64_extract_ip4(&cfg->base, &f_id->dst_ip6);
1482 if (kst.u.s.faddr == 0) {
1483 NAT64STAT_INC(&cfg->base.stats, dropped);
1486 kst.u.s.fport = f_id->dst_port;
1487 kst.u.s.lport = f_id->src_port;
1489 /* Prepare some fields we might need to update */
1491 proto = nat64_getlasthdr(*pm, &hval);
1493 NAT64STAT_INC(&cfg->base.stats, dropped);
1494 DPRINTF(DP_DROPS, "dropped due to mbuf isn't contiguous");
1499 if (proto == IPPROTO_TCP)
1500 state_flags = convert_tcp_flags(
1501 TCP(mtodo(*pm, hval))->th_flags);
1504 if (proto == IPPROTO_ICMPV6) {
1505 /* Alter local port data */
1506 icmp6 = mtodo(*pm, hval);
1507 if (icmp6->icmp6_type == ICMP6_ECHO_REQUEST ||
1508 icmp6->icmp6_type == ICMP6_ECHO_REPLY)
1509 kst.u.s.lport = ntohs(icmp6->icmp6_id);
1512 hval = HASH_IN4(&kst.u.hkey) & (nh->hsize - 1);
1516 /* OK, let's find state in host hash */
1518 sidx = nh->phash[hval];
1520 while (sidx.idx != 0) {
1521 pg = PORTGROUP_BYSIDX(cfg, nh, sidx.idx);
1522 st = &pg->states[sidx.off];
1523 //DPRINTF("SISX: %d/%d next: %d/%d", sidx.idx, sidx.off,
1524 //st->next.idx, st->next.off);
1525 if (st->u.hkey == kst.u.hkey && pg->nat_proto == nat_proto)
1528 DPRINTF(DP_ALL, "XXX: too long %d/%d %d/%d\n",
1529 sidx.idx, sidx.off, st->next.idx, st->next.off);
1530 inet_ntop(AF_INET6, &nh->addr, a, sizeof(a));
1531 DPRINTF(DP_GENERIC, "TR host %s %p on cpu %d",
1538 if (sidx.idx == 0) {
1540 st = nat64lsn_create_state(cfg, nh, nat_proto, &kst, &aaddr);
1542 /* No free states. Request more if we can */
1543 if (nh->pg_used >= cfg->max_chunks) {
1545 inet_ntop(AF_INET6, &nh->addr, a, sizeof(a));
1546 DPRINTF(DP_DROPS, "PG limit reached "
1547 " for host %s (used %u, allocated %u, "
1549 nh->pg_used * NAT64_CHUNK_SIZE,
1550 nh->pg_allocated * NAT64_CHUNK_SIZE,
1551 cfg->max_chunks * NAT64_CHUNK_SIZE);
1553 NAT64STAT_INC(&cfg->base.stats, dropped);
1556 if ((nh->pg_allocated <=
1557 nh->pg_used + NAT64LSN_REMAININGPG) &&
1558 nh->pg_allocated < cfg->max_chunks)
1559 action = 1; /* Request new indexes */
1563 //DPRINTF("No state, unlock for %p", nh);
1564 return (nat64lsn_request_portgroup(cfg, f_id,
1565 pm, aaddr, action));
1568 /* We've got new state. */
1570 pg = PORTGROUP_BYSIDX(cfg, nh, sidx.idx);
1573 /* Okay, state found */
1575 /* Update necessary fields */
1576 if (st->timestamp != state_ts)
1577 st->timestamp = state_ts;
1578 if ((st->flags & state_flags) != state_flags)
1579 st->flags |= state_flags;
1581 /* Copy needed state data */
1583 aport = htons(pg->aport + sidx.off);
1587 if (cfg->base.flags & NAT64_LOG) {
1589 nat64lsn_log(logdata, *pm, AF_INET6, pg->idx, st->cur.off);
1593 action = nat64_do_handle_ip6(*pm, aaddr, aport, &cfg->base, logdata);
1594 if (action == NAT64SKIP)
1595 return (cfg->nomatch_verdict);
1596 if (action == NAT64MFREE) {
1600 *pm = NULL; /* mark mbuf as consumed */
1601 return (IP_FW_DENY);
1605 * Main dataplane entry point.
1608 ipfw_nat64lsn(struct ip_fw_chain *ch, struct ip_fw_args *args,
1609 ipfw_insn *cmd, int *done)
1612 struct nat64lsn_cfg *cfg;
1615 IPFW_RLOCK_ASSERT(ch);
1617 *done = 1; /* terminate the search */
1619 if (cmd->opcode != O_EXTERNAL_ACTION ||
1620 cmd->arg1 != V_nat64lsn_eid ||
1621 icmd->opcode != O_EXTERNAL_INSTANCE ||
1622 (cfg = NAT64_LOOKUP(ch, icmd)) == NULL)
1625 switch (args->f_id.addr_type) {
1627 ret = nat64lsn_translate4(cfg, &args->f_id, &args->m);
1630 ret = nat64lsn_translate6(cfg, &args->f_id, &args->m);
1633 return (cfg->nomatch_verdict);
1639 nat64lsn_ctor_host(void *mem, int size, void *arg, int flags)
1641 struct nat64lsn_host *nh;
1643 nh = (struct nat64lsn_host *)mem;
1644 memset(nh->pg_ptr, 0, sizeof(nh->pg_ptr));
1645 memset(nh->phash, 0, sizeof(nh->phash));
1650 nat64lsn_ctor_pgidx(void *mem, int size, void *arg, int flags)
1653 memset(mem, 0, size);
1658 nat64lsn_init_internal(void)
1661 memset(nat64lsn_proto_map, 0, sizeof(nat64lsn_proto_map));
1662 /* Set up supported protocol map */
1663 nat64lsn_proto_map[IPPROTO_TCP] = NAT_PROTO_TCP;
1664 nat64lsn_proto_map[IPPROTO_UDP] = NAT_PROTO_UDP;
1665 nat64lsn_proto_map[IPPROTO_ICMP] = NAT_PROTO_ICMP;
1666 nat64lsn_proto_map[IPPROTO_ICMPV6] = NAT_PROTO_ICMP;
1667 /* Fill in reverse proto map */
1668 memset(nat64lsn_rproto_map, 0, sizeof(nat64lsn_rproto_map));
1669 nat64lsn_rproto_map[NAT_PROTO_TCP] = IPPROTO_TCP;
1670 nat64lsn_rproto_map[NAT_PROTO_UDP] = IPPROTO_UDP;
1671 nat64lsn_rproto_map[NAT_PROTO_ICMP] = IPPROTO_ICMPV6;
1674 nat64lsn_host_zone = uma_zcreate("NAT64 hosts zone",
1675 sizeof(struct nat64lsn_host), nat64lsn_ctor_host, NULL,
1676 NULL, NULL, UMA_ALIGN_PTR, 0);
1677 nat64lsn_pg_zone = uma_zcreate("NAT64 portgroups zone",
1678 sizeof(struct nat64lsn_portgroup), NULL, NULL, NULL, NULL,
1680 nat64lsn_pgidx_zone = uma_zcreate("NAT64 portgroup indexes zone",
1681 sizeof(struct nat64lsn_portgroup *) * NAT64LSN_PGIDX_CHUNK,
1682 nat64lsn_ctor_pgidx, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1686 nat64lsn_uninit_internal(void)
1689 JQUEUE_LOCK_DESTROY();
1690 uma_zdestroy(nat64lsn_host_zone);
1691 uma_zdestroy(nat64lsn_pg_zone);
1692 uma_zdestroy(nat64lsn_pgidx_zone);
1696 nat64lsn_start_instance(struct nat64lsn_cfg *cfg)
1699 callout_reset(&cfg->periodic, hz * PERIODIC_DELAY,
1700 nat64lsn_periodic, cfg);
1703 struct nat64lsn_cfg *
1704 nat64lsn_init_instance(struct ip_fw_chain *ch, size_t numaddr)
1706 struct nat64lsn_cfg *cfg;
1708 cfg = malloc(sizeof(struct nat64lsn_cfg), M_IPFW, M_WAITOK | M_ZERO);
1709 TAILQ_INIT(&cfg->jhead);
1712 COUNTER_ARRAY_ALLOC(cfg->base.stats.cnt, NAT64STATS, M_WAITOK);
1714 cfg->ihsize = NAT64LSN_HSIZE;
1715 cfg->ih = malloc(sizeof(void *) * cfg->ihsize, M_IPFW,
1718 cfg->pg = malloc(sizeof(void *) * numaddr * _ADDR_PG_COUNT, M_IPFW,
1721 callout_init(&cfg->periodic, CALLOUT_MPSAFE);
1722 callout_init(&cfg->jcallout, CALLOUT_MPSAFE);
1728 * Destroy all hosts callback.
1729 * Called on module unload when all activity has already finished, so
1730 * it can work without any locks.
1732 static NAT64NOINLINE int
1733 nat64lsn_destroy_host(struct nat64lsn_host *nh, struct nat64lsn_cfg *cfg)
1735 struct nat64lsn_portgroup *pg;
1738 for (i = nh->pg_used; i > 0; i--) {
1739 pg = PORTGROUP_BYSIDX(cfg, nh, i);
1742 cfg->pg[pg->idx] = NULL;
1743 destroy_portgroup(pg);
1752 nat64lsn_destroy_instance(struct nat64lsn_cfg *cfg)
1754 struct nat64lsn_host *nh, *tmp;
1756 callout_drain(&cfg->jcallout);
1757 callout_drain(&cfg->periodic);
1758 I6HASH_FOREACH_SAFE(cfg, nh, tmp, nat64lsn_destroy_host, cfg);
1759 DPRINTF(DP_OBJ, "instance %s: hosts %d", cfg->name, cfg->ihcount);
1761 COUNTER_ARRAY_FREE(cfg->base.stats.cnt, NAT64STATS);
1762 free(cfg->ih, M_IPFW);
1763 free(cfg->pg, M_IPFW);