/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2015-2019 Yandex LLC
 * Copyright (c) 2015 Alexander V. Chernikov <melifaro@FreeBSD.org>
 * Copyright (c) 2016-2019 Andrey V. Elsukov <ae@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/ck.h>
#include <sys/epoch.h>
#include <sys/errno.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/ip_fw_nat64.h>

#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/pf/pf.h>

#include "nat64lsn.h"

MALLOC_DEFINE(M_NAT64LSN, "NAT64LSN", "NAT64LSN");
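
/*
 * Concurrency model (as can be seen from the code below): the translate
 * paths run under the network epoch (the NAT64LSN_EPOCH_* wrappers around
 * NET_EPOCH_*), lookups use ck(3) lock-free lists and ck_pr atomics, and
 * hosts/portgroups are released only via deferred epoch callbacks.  New
 * hosts and portgroups are allocated from the UMA zones declared below by
 * a jobs handler that runs from a callout.
 */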
#define	NAT64LSN_EPOCH_ENTER(et)	NET_EPOCH_ENTER(et)
#define	NAT64LSN_EPOCH_EXIT(et)		NET_EPOCH_EXIT(et)
#define	NAT64LSN_EPOCH_ASSERT()		NET_EPOCH_ASSERT()
#define	NAT64LSN_EPOCH_CALL(c, f)	NET_EPOCH_CALL((f), (c))

static uma_zone_t nat64lsn_host_zone;
static uma_zone_t nat64lsn_pgchunk_zone;
static uma_zone_t nat64lsn_pg_zone;
static uma_zone_t nat64lsn_aliaslink_zone;
static uma_zone_t nat64lsn_state_zone;
static uma_zone_t nat64lsn_job_zone;

static void nat64lsn_periodic(void *data);
#define	PERIODIC_DELAY		4
#define	NAT64_LOOKUP(chain, cmd)	\
    (struct nat64lsn_cfg *)SRV_OBJECT((chain), (cmd)->arg1)
/*
 * Delayed job queue, used to create new hosts
 * and portgroups.
 */
enum nat64lsn_jtype {
	JTYPE_NEWHOST = 1,
	JTYPE_NEWPORTGROUP,
	JTYPE_DESTROY,
};

struct nat64lsn_job_item {
	STAILQ_ENTRY(nat64lsn_job_item)	entries;
	enum nat64lsn_jtype	jtype;

	union {
		struct { /* used by JTYPE_NEWHOST, JTYPE_NEWPORTGROUP */
			struct mbuf		*m;
			struct nat64lsn_host	*host;
			struct nat64lsn_state	*state;
			uint32_t		src6_hval;
			uint32_t		state_hval;
			struct ipfw_flow_id	f_id;
			in_addr_t		faddr;
			uint16_t		port;
			uint8_t			proto;
			uint8_t			done;
		};
		struct { /* used by JTYPE_DESTROY */
			struct nat64lsn_hosts_slist	hosts;
			struct nat64lsn_pg_slist	portgroups;
			struct nat64lsn_pgchunk		*pgchunk;
			struct epoch_context		epoch_ctx;
		};
	};
};

static struct mtx jmtx;
#define	JQUEUE_LOCK_INIT()	mtx_init(&jmtx, "qlock", NULL, MTX_DEF)
#define	JQUEUE_LOCK_DESTROY()	mtx_destroy(&jmtx)
#define	JQUEUE_LOCK()		mtx_lock(&jmtx)
#define	JQUEUE_UNLOCK()		mtx_unlock(&jmtx)
static int nat64lsn_alloc_host(struct nat64lsn_cfg *cfg,
    struct nat64lsn_job_item *ji);
static int nat64lsn_alloc_pg(struct nat64lsn_cfg *cfg,
    struct nat64lsn_job_item *ji);
static struct nat64lsn_job_item *nat64lsn_create_job(
    struct nat64lsn_cfg *cfg, int jtype);
static void nat64lsn_enqueue_job(struct nat64lsn_cfg *cfg,
    struct nat64lsn_job_item *ji);
static void nat64lsn_job_destroy(epoch_context_t ctx);
static void nat64lsn_destroy_host(struct nat64lsn_host *host);
static void nat64lsn_destroy_pg(struct nat64lsn_pg *pg);

static int nat64lsn_translate4(struct nat64lsn_cfg *cfg,
    const struct ipfw_flow_id *f_id, struct mbuf **mp);
static int nat64lsn_translate6(struct nat64lsn_cfg *cfg,
    struct ipfw_flow_id *f_id, struct mbuf **mp);
static int nat64lsn_translate6_internal(struct nat64lsn_cfg *cfg,
    struct mbuf **mp, struct nat64lsn_state *state, uint8_t flags);

#define	NAT64_BIT_TCP_FIN	0	/* FIN was seen */
#define	NAT64_BIT_TCP_SYN	1	/* First syn in->out */
#define	NAT64_BIT_TCP_ESTAB	2	/* Packet with Ack */
#define	NAT64_BIT_READY_IPV4	6	/* state is ready for translate4 */
#define	NAT64_BIT_STALE		7	/* state is going to be expired */

#define	NAT64_FLAG_FIN		(1 << NAT64_BIT_TCP_FIN)
#define	NAT64_FLAG_SYN		(1 << NAT64_BIT_TCP_SYN)
#define	NAT64_FLAG_ESTAB	(1 << NAT64_BIT_TCP_ESTAB)
#define	NAT64_FLAGS_TCP	(NAT64_FLAG_SYN|NAT64_FLAG_ESTAB|NAT64_FLAG_FIN)

#define	NAT64_FLAG_READY	(1 << NAT64_BIT_READY_IPV4)
#define	NAT64_FLAG_STALE	(1 << NAT64_BIT_STALE)
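
/*
 * State flags live in a 32-bit word that is modified from the lockless
 * translate paths, hence the ck_pr_bts_32()/ck_pr_btr_32() accesses used
 * below: READY_IPV4 is set once a state is fully initialized and may be
 * used by translate4, STALE marks a state that the periodic job is about
 * to release.
 */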
static inline uint8_t
convert_tcp_flags(uint8_t flags)
{
	uint8_t result;

	result = flags & (TH_FIN|TH_SYN);
	result |= (flags & TH_RST) >> 2;	/* Treat RST as FIN */
	result |= (flags & TH_ACK) >> 2;	/* Treat ACK as estab */

	return (result);
}
static void
nat64lsn_log(struct pfloghdr *plog, struct mbuf *m, sa_family_t family,
    struct nat64lsn_state *state)
{

	memset(plog, 0, sizeof(*plog));
	plog->length = PFLOG_REAL_HDRLEN;
	plog->af = family;
	plog->action = PF_NAT;
	plog->dir = PF_OUT;
	plog->rulenr = htonl(state->ip_src);
	plog->subrulenr = htonl((uint32_t)(state->aport << 16) |
	    (state->proto << 8) | (state->ip_dst & 0xff));
	plog->ruleset[0] = '\0';
	strlcpy(plog->ifname, "NAT64LSN", sizeof(plog->ifname));
	ipfw_bpf_mtap2(plog, PFLOG_HDRLEN, m);
}
#define	HVAL(p, n, s)	jenkins_hash32((const uint32_t *)(p), (n), (s))
#define	HOST_HVAL(c, a)	HVAL((a),\
    sizeof(struct in6_addr) / sizeof(uint32_t), (c)->hash_seed)
#define	HOSTS(c, v)	((c)->hosts_hash[(v) & ((c)->hosts_hashsize - 1)])

#define	ALIASLINK_HVAL(c, f)	HVAL(&(f)->dst_ip6,\
    sizeof(struct in6_addr) * 2 / sizeof(uint32_t), (c)->hash_seed)
#define	ALIAS_BYHASH(c, v)	\
    ((c)->aliases[(v) & ((1 << (32 - (c)->plen4)) - 1)])
static struct nat64lsn_aliaslink*
nat64lsn_get_aliaslink(struct nat64lsn_cfg *cfg __unused,
    struct nat64lsn_host *host, const struct ipfw_flow_id *f_id __unused)
{

	/*
	 * Different algorithms for selecting an alias address
	 * could be implemented here.
	 * XXX: for now we use the first available.
	 */
	return (CK_SLIST_FIRST(&host->aliases));
}
#define	STATE_HVAL(c, d)	HVAL((d), 2, (c)->hash_seed)
#define	STATE_HASH(h, v)	\
    ((h)->states_hash[(v) & ((h)->states_hashsize - 1)])
#define	STATES_CHUNK(p, v)	\
    ((p)->chunks_count == 1 ? (p)->states : \
	((p)->states_chunk[CHUNK_BY_FADDR(p, v)]))
#ifdef __LP64__
#define	FREEMASK_FFSLL(pg, faddr)		\
    ffsll(*FREEMASK_CHUNK((pg), (faddr)))
#define	FREEMASK_BTR(pg, faddr, bit)	\
    ck_pr_btr_64(FREEMASK_CHUNK((pg), (faddr)), (bit))
#define	FREEMASK_BTS(pg, faddr, bit)	\
    ck_pr_bts_64(FREEMASK_CHUNK((pg), (faddr)), (bit))
#define	FREEMASK_ISSET(pg, faddr, bit)	\
    ISSET64(*FREEMASK_CHUNK((pg), (faddr)), (bit))
#define	FREEMASK_COPY(pg, n, out)	\
    (out) = ck_pr_load_64(FREEMASK_CHUNK((pg), (n)))
#else
static inline int
freemask_ffsll(uint32_t *freemask)
{
	int i;

	if ((i = ffsl(freemask[0])) != 0)
		return (i);
	if ((i = ffsl(freemask[1])) != 0)
		return (i + 32);
	return (0);
}
#define	FREEMASK_FFSLL(pg, faddr)		\
    freemask_ffsll(FREEMASK_CHUNK((pg), (faddr)))
#define	FREEMASK_BTR(pg, faddr, bit)	\
    ck_pr_btr_32(FREEMASK_CHUNK((pg), (faddr)) + (bit) / 32, (bit) % 32)
#define	FREEMASK_BTS(pg, faddr, bit)	\
    ck_pr_bts_32(FREEMASK_CHUNK((pg), (faddr)) + (bit) / 32, (bit) % 32)
#define	FREEMASK_ISSET(pg, faddr, bit)	\
    ISSET32(*(FREEMASK_CHUNK((pg), (faddr)) + (bit) / 32), (bit) % 32)
#define	FREEMASK_COPY(pg, n, out)	\
    (out) = ck_pr_load_32(FREEMASK_CHUNK((pg), (n))) | \
	((uint64_t)ck_pr_load_32(FREEMASK_CHUNK((pg), (n)) + 1) << 32)
#endif /* !__LP64__ */
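
/*
 * Each portgroup keeps a 64-bit "freemask" per remote-address chunk: a set
 * bit means the corresponding state slot is free.  On LP64 the mask is
 * manipulated with 64-bit ck_pr atomics; on 32-bit platforms it is split
 * into two 32-bit words, which is what the freemask_ffsll() helper and the
 * 32-bit macro variants above implement.
 */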
#define	NAT64LSN_TRY_PGCNT	32
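/*
 * Search for a portgroup with free states for the given remote address:
 * the last used PG (cached in *pgptr/*pgidx) is tried first, then up to
 * NAT64LSN_TRY_PGCNT portgroups are scanned starting from the cached
 * index.  Returns NULL when nothing suitable is found, in which case the
 * caller falls back to requesting a new PG.
 */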
static struct nat64lsn_pg*
nat64lsn_get_pg(uint32_t *chunkmask, uint32_t *pgmask,
    struct nat64lsn_pgchunk **chunks, struct nat64lsn_pg **pgptr,
    uint32_t *pgidx, in_addr_t faddr)
{
	struct nat64lsn_pg *pg, *oldpg;
	uint32_t idx, oldidx;
	int cnt;

	cnt = 0;
	/* First try last used PG */
	oldpg = pg = ck_pr_load_ptr(pgptr);
	idx = oldidx = ck_pr_load_32(pgidx);
	/* If pgidx is out of range, reset it to the first pgchunk */
	if (!ISSET32(*chunkmask, idx / 32))
		idx = 0;

	do {
		if (pg != NULL && FREEMASK_BITCOUNT(pg, faddr) > 0) {
			/*
			 * This PG has free states; if it is not the last
			 * used PG, try to update the cached pointer.
			 * NOTE: it can already be updated by the jobs
			 * handler, thus we use a CAS operation.
			 */
			ck_pr_cas_ptr(pgptr, oldpg, pg);
			return (pg);
		}
		/* Stop if idx is out of range */
		if (!ISSET32(*chunkmask, idx / 32))
			break;

		if (ISSET32(pgmask[idx / 32], idx % 32))
			pg = ck_pr_load_ptr(
			    &chunks[idx / 32]->pgptr[idx % 32]);
		else
			pg = NULL;

		idx++;
	} while (++cnt < NAT64LSN_TRY_PGCNT);

	/* If pgidx is out of range, reset it to the first pgchunk */
	if (!ISSET32(*chunkmask, idx / 32))
		idx = 0;
	ck_pr_cas_32(pgidx, oldidx, idx);
	return (NULL);
}
static struct nat64lsn_state*
nat64lsn_get_state6to4(struct nat64lsn_cfg *cfg, struct nat64lsn_host *host,
    const struct ipfw_flow_id *f_id, uint32_t hval, in_addr_t faddr,
    uint16_t port, uint8_t proto)
{
	struct nat64lsn_aliaslink *link;
	struct nat64lsn_state *state;
	struct nat64lsn_pg *pg;
	int i, offset;

	NAT64LSN_EPOCH_ASSERT();

	/* Check that we already have state for given arguments */
	CK_SLIST_FOREACH(state, &STATE_HASH(host, hval), entries) {
		if (state->proto == proto && state->ip_dst == faddr &&
		    state->sport == port && state->dport == f_id->dst_port)
			return (state);
	}

	link = nat64lsn_get_aliaslink(cfg, host, f_id);
	if (link == NULL)
		return (NULL);

	switch (proto) {
	case IPPROTO_TCP:
		pg = nat64lsn_get_pg(
		    &link->alias->tcp_chunkmask, link->alias->tcp_pgmask,
		    link->alias->tcp, &link->alias->tcp_pg,
		    &link->alias->tcp_pgidx, faddr);
		break;
	case IPPROTO_UDP:
		pg = nat64lsn_get_pg(
		    &link->alias->udp_chunkmask, link->alias->udp_pgmask,
		    link->alias->udp, &link->alias->udp_pg,
		    &link->alias->udp_pgidx, faddr);
		break;
	case IPPROTO_ICMP:
		pg = nat64lsn_get_pg(
		    &link->alias->icmp_chunkmask, link->alias->icmp_pgmask,
		    link->alias->icmp, &link->alias->icmp_pg,
		    &link->alias->icmp_pgidx, faddr);
		break;
	default:
		panic("%s: wrong proto %d", __func__, proto);
	}
	if (pg == NULL)
		return (NULL);

	/* Check that PG has some free states */
	state = NULL;
	i = FREEMASK_BITCOUNT(pg, faddr);
	while (i-- > 0) {
		offset = FREEMASK_FFSLL(pg, faddr);
		if (offset == 0) {
			/*
			 * No more free states in this PG.
			 */
			break;
		}

		/* Let's try to atomically grab the state */
		if (FREEMASK_BTR(pg, faddr, offset - 1)) {
			state = &STATES_CHUNK(pg, faddr)->state[offset - 1];
			/* Initialize the new state */
			state->flags = proto != IPPROTO_TCP ? 0 :
			    convert_tcp_flags(f_id->_flags);
			state->proto = proto;
			state->aport = pg->base_port + offset - 1;
			state->dport = f_id->dst_port;
			state->sport = port;
			state->ip6_dst = f_id->dst_ip6;
			state->ip_dst = faddr;
			state->ip_src = link->alias->addr;
			state->hval = hval;
			state->host = host;
			SET_AGE(state->timestamp);

			/* Insert new state into host's hash table */
			HOST_LOCK(host);
			CK_SLIST_INSERT_HEAD(&STATE_HASH(host, hval),
			    state, entries);
			host->states_count++;
			/*
			 * XXX: In case if host is going to be expired,
			 * reset NAT64LSN_DEADHOST flag.
			 */
			host->flags &= ~NAT64LSN_DEADHOST;
			HOST_UNLOCK(host);
			NAT64STAT_INC(&cfg->base.stats, screated);
			/* Mark the state as ready for translate4 */
			ck_pr_fence_store();
			ck_pr_bts_32(&state->flags, NAT64_BIT_READY_IPV4);
			break;
		}
	}
	return (state);
}
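
/*
 * Note the ordering above: a freshly initialized state becomes visible to
 * the IPv4->IPv6 path only after NAT64_BIT_READY_IPV4 has been set, so
 * translate4 never observes a partially filled state.
 */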
/*
 * Inspects icmp packets to see if the message contains different
 * packet header so we need to alter @addr and @port.
 */
static int
inspect_icmp_mbuf(struct mbuf **mp, uint8_t *proto, uint32_t *addr,
    uint16_t *port)
{
	struct icmp *icmp;
	struct ip *ip;
	int off;
	uint8_t inner_proto;

	ip = mtod(*mp, struct ip *); /* Outer IP header */
	off = (ip->ip_hl << 2) + ICMP_MINLEN;
	if ((*mp)->m_len < off)
		*mp = m_pullup(*mp, off);
	if (*mp == NULL)
		return (ENOMEM);

	ip = mtod(*mp, struct ip *); /* Outer IP header */
	icmp = L3HDR(ip, struct icmp *);
	switch (icmp->icmp_type) {
	case ICMP_ECHO:
	case ICMP_ECHOREPLY:
		/* Use icmp ID as distinguisher */
		*port = ntohs(icmp->icmp_id);
		return (0);
	case ICMP_UNREACH:
	case ICMP_TIMXCEED:
		break;
	default:
		return (EOPNOTSUPP);
	}
	/*
	 * ICMP_UNREACH and ICMP_TIMXCEED contains IP header + 64 bits
	 * of ULP header.
	 */
	if ((*mp)->m_pkthdr.len < off + sizeof(struct ip) + ICMP_MINLEN)
		return (EINVAL);
	if ((*mp)->m_len < off + sizeof(struct ip) + ICMP_MINLEN)
		*mp = m_pullup(*mp, off + sizeof(struct ip) + ICMP_MINLEN);
	if (*mp == NULL)
		return (ENOMEM);
	ip = mtodo(*mp, off); /* Inner IP header */
	inner_proto = ip->ip_p;
	off += ip->ip_hl << 2; /* Skip inner IP header */
	*addr = ntohl(ip->ip_src.s_addr);
	if ((*mp)->m_len < off + ICMP_MINLEN)
		*mp = m_pullup(*mp, off + ICMP_MINLEN);
	if (*mp == NULL)
		return (ENOMEM);
	switch (inner_proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		/* Copy source port from the header */
		*port = ntohs(*((uint16_t *)mtodo(*mp, off)));
		*proto = inner_proto;
		return (0);
	case IPPROTO_ICMP:
		/*
		 * We will translate only ICMP errors for our ICMP
		 * echo requests.
		 */
		icmp = mtodo(*mp, off);
		if (icmp->icmp_type != ICMP_ECHO)
			return (EOPNOTSUPP);
		*port = ntohs(icmp->icmp_id);
		return (0);
	}
	return (EOPNOTSUPP);
}
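
/*
 * External (IPv4) ports map directly onto the alias's PG structures:
 * e.g. port 10000 gives 10000 - 1024 = 8976, i.e. chunk_idx 4 (8976 / 2048),
 * pg_idx 12 ((8976 - 4 * 2048) / 64) and state_idx 16 within that PG,
 * matching the index math in nat64lsn_get_state4to6() below (assuming
 * NAT64_MIN_PORT is 1024).
 */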
static struct nat64lsn_state*
nat64lsn_get_state4to6(struct nat64lsn_cfg *cfg, struct nat64lsn_alias *alias,
    in_addr_t faddr, uint16_t port, uint8_t proto)
{
	struct nat64lsn_state *state;
	struct nat64lsn_pg *pg;
	int chunk_idx, pg_idx, state_idx;

	NAT64LSN_EPOCH_ASSERT();

	if (port < NAT64_MIN_PORT)
		return (NULL);
	/*
	 * Alias keeps 32 pgchunks for each protocol.
	 * Each pgchunk has 32 pointers to portgroup.
	 * Each portgroup has 64 states for ports.
	 */
	port -= NAT64_MIN_PORT;
	chunk_idx = port / 2048;

	port -= chunk_idx * 2048;
	pg_idx = port / 64;
	state_idx = port % 64;

	/*
	 * First check in proto_chunkmask that we have allocated PG chunk.
	 * Then check in proto_pgmask that we have valid PG pointer.
	 */
	switch (proto) {
	case IPPROTO_TCP:
		if (ISSET32(alias->tcp_chunkmask, chunk_idx) &&
		    ISSET32(alias->tcp_pgmask[chunk_idx], pg_idx)) {
			pg = alias->tcp[chunk_idx]->pgptr[pg_idx];
			break;
		}
		return (NULL);
	case IPPROTO_UDP:
		if (ISSET32(alias->udp_chunkmask, chunk_idx) &&
		    ISSET32(alias->udp_pgmask[chunk_idx], pg_idx)) {
			pg = alias->udp[chunk_idx]->pgptr[pg_idx];
			break;
		}
		return (NULL);
	case IPPROTO_ICMP:
		if (ISSET32(alias->icmp_chunkmask, chunk_idx) &&
		    ISSET32(alias->icmp_pgmask[chunk_idx], pg_idx)) {
			pg = alias->icmp[chunk_idx]->pgptr[pg_idx];
			break;
		}
		return (NULL);
	default:
		panic("%s: wrong proto %d", __func__, proto);
	}

	if (FREEMASK_ISSET(pg, faddr, state_idx))
		return (NULL);

	state = &STATES_CHUNK(pg, faddr)->state[state_idx];
	if (ck_pr_load_32(&state->flags) & NAT64_FLAG_READY)
		return (state);
	return (NULL);
}
static int
nat64lsn_translate4(struct nat64lsn_cfg *cfg,
    const struct ipfw_flow_id *f_id, struct mbuf **mp)
{
	struct pfloghdr loghdr, *logdata;
	struct in6_addr src6;
	struct nat64lsn_state *state;
	struct nat64lsn_alias *alias;
	uint32_t addr, flags;
	uint16_t port, ts;
	int ret;
	uint8_t proto;

	addr = f_id->dst_ip;
	port = f_id->dst_port;
	proto = f_id->proto;
	if (addr < cfg->prefix4 || addr > cfg->pmask4) {
		NAT64STAT_INC(&cfg->base.stats, nomatch4);
		return (cfg->nomatch_verdict);
	}

	/* Check if protocol is supported */
	switch (proto) {
	case IPPROTO_ICMP:
		ret = inspect_icmp_mbuf(mp, &proto, &addr, &port);
		if (ret != 0) {
			if (ret == ENOMEM) {
				NAT64STAT_INC(&cfg->base.stats, nomem);
				return (IP_FW_DENY);
			}
			NAT64STAT_INC(&cfg->base.stats, noproto);
			return (cfg->nomatch_verdict);
		}
		if (addr < cfg->prefix4 || addr > cfg->pmask4) {
			NAT64STAT_INC(&cfg->base.stats, nomatch4);
			return (cfg->nomatch_verdict);
		}
		/* FALLTHROUGH */
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		break;
	default:
		NAT64STAT_INC(&cfg->base.stats, noproto);
		return (cfg->nomatch_verdict);
	}

	alias = &ALIAS_BYHASH(cfg, addr);
	MPASS(addr == alias->addr);

	/* Check that we have state for this port */
	state = nat64lsn_get_state4to6(cfg, alias, f_id->src_ip,
	    port, proto);
	if (state == NULL) {
		NAT64STAT_INC(&cfg->base.stats, nomatch4);
		return (cfg->nomatch_verdict);
	}

	/* TODO: Check flags to see if we need to do some static mapping */

	/* Update some state fields if needed */
	SET_AGE(ts);
	if (f_id->proto == IPPROTO_TCP)
		flags = convert_tcp_flags(f_id->_flags);
	else
		flags = 0;
	if (state->timestamp != ts)
		state->timestamp = ts;
	if ((state->flags & flags) != flags)
		state->flags |= flags;

	port = htons(state->sport);
	src6 = state->ip6_dst;

	if (cfg->base.flags & NAT64_LOG) {
		logdata = &loghdr;
		nat64lsn_log(logdata, *mp, AF_INET, state);
	} else
		logdata = NULL;

	/*
	 * We already have src6 with embedded address, but it is possible,
	 * that src_ip is different than state->ip_dst, this is why we
	 * do embedding again.
	 */
	nat64_embed_ip4(&src6, cfg->base.plat_plen, htonl(f_id->src_ip));
	ret = nat64_do_handle_ip4(*mp, &src6, &state->host->addr, port,
	    &cfg->base, logdata);
	if (ret == NAT64SKIP)
		return (cfg->nomatch_verdict);
	if (ret == NAT64RETURN)
		*mp = NULL;
	return (IP_FW_DENY);
}
/*
 * Check if particular state is stale and should be deleted.
 * Return 1 if true, 0 otherwise.
 */
static int
nat64lsn_check_state(struct nat64lsn_cfg *cfg, struct nat64lsn_state *state)
{
	int age, ttl;

	/* State was marked as stale in previous pass. */
	if (ISSET32(state->flags, NAT64_BIT_STALE))
		return (1);

	/* State is not yet initialized, it is going to be READY */
	if (!ISSET32(state->flags, NAT64_BIT_READY_IPV4))
		return (0);

	age = GET_AGE(state->timestamp);
	switch (state->proto) {
	case IPPROTO_TCP:
		if (ISSET32(state->flags, NAT64_BIT_TCP_FIN))
			ttl = cfg->st_close_ttl;
		else if (ISSET32(state->flags, NAT64_BIT_TCP_ESTAB))
			ttl = cfg->st_estab_ttl;
		else if (ISSET32(state->flags, NAT64_BIT_TCP_SYN))
			ttl = cfg->st_syn_ttl;
		else
			ttl = cfg->st_syn_ttl;
		if (age > ttl)
			return (1);
		break;
	case IPPROTO_UDP:
		if (age > cfg->st_udp_ttl)
			return (1);
		break;
	case IPPROTO_ICMP:
		if (age > cfg->st_icmp_ttl)
			return (1);
		break;
	}
	return (0);
}
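
/*
 * Expiration is two-pass: the first pass marks a state STALE and unlinks it
 * from the host's hash, a later pass actually returns the slot to the
 * freemask.  This gives concurrent translate4 users time to stop using the
 * state before its slot can be reused.
 */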
static int
nat64lsn_maintain_pg(struct nat64lsn_cfg *cfg, struct nat64lsn_pg *pg)
{
	struct nat64lsn_state *state;
	struct nat64lsn_host *host;
	uint64_t freemask;
	int c, i, update_age;

	update_age = 0;
	for (c = 0; c < pg->chunks_count; c++) {
		FREEMASK_COPY(pg, c, freemask);
		for (i = 0; i < 64; i++) {
			if (ISSET64(freemask, i))
				continue;
			state = &STATES_CHUNK(pg, c)->state[i];
			if (nat64lsn_check_state(cfg, state) == 0) {
				update_age = 1;
				continue;
			}
			/*
			 * Expire state:
			 * 1. Mark as STALE and unlink from host's hash.
			 * 2. Set bit in freemask.
			 */
			if (ISSET32(state->flags, NAT64_BIT_STALE)) {
				/*
				 * State was marked as STALE in previous
				 * pass. Now it is safe to release it.
				 */
				state->flags = 0;
				ck_pr_fence_store();
				FREEMASK_BTS(pg, c, i);
				NAT64STAT_INC(&cfg->base.stats, sdeleted);
				continue;
			}
			MPASS(state->flags & NAT64_FLAG_READY);

			host = state->host;
			HOST_LOCK(host);
			CK_SLIST_REMOVE(&STATE_HASH(host, state->hval),
			    state, nat64lsn_state, entries);
			host->states_count--;
			HOST_UNLOCK(host);

			/* Reset READY flag */
			ck_pr_btr_32(&state->flags, NAT64_BIT_READY_IPV4);
			/* And set STALE flag */
			ck_pr_bts_32(&state->flags, NAT64_BIT_STALE);
			/*
			 * Now translate6 will not use this state, wait
			 * until it becomes safe for translate4, then mark
			 * the state as free.
			 */
		}
	}

	/*
	 * We have some alive states, update timestamp.
	 */
	if (update_age)
		SET_AGE(pg->timestamp);

	if (GET_AGE(pg->timestamp) < cfg->pg_delete_delay)
		return (0);
	return (1);
}
static void
nat64lsn_expire_portgroups(struct nat64lsn_cfg *cfg,
    struct nat64lsn_pg_slist *portgroups)
{
	struct nat64lsn_alias *alias;
	struct nat64lsn_pg *pg, *tpg, *firstpg, **pgptr;
	uint32_t *pgmask, *pgidx;
	int i, idx;

	for (i = 0; i < 1 << (32 - cfg->plen4); i++) {
		alias = &cfg->aliases[i];
		CK_SLIST_FOREACH_SAFE(pg, &alias->portgroups, entries, tpg) {
			if (nat64lsn_maintain_pg(cfg, pg) == 0)
				continue;
			/* Always keep first PG */
			if (pg->base_port == NAT64_MIN_PORT)
				continue;
			/*
			 * PG is expired, unlink it and schedule for
			 * deferred destroying.
			 */
			idx = (pg->base_port - NAT64_MIN_PORT) / 64;
			switch (pg->proto) {
			case IPPROTO_TCP:
				pgmask = alias->tcp_pgmask;
				pgptr = &alias->tcp_pg;
				pgidx = &alias->tcp_pgidx;
				firstpg = alias->tcp[0]->pgptr[0];
				break;
			case IPPROTO_UDP:
				pgmask = alias->udp_pgmask;
				pgptr = &alias->udp_pg;
				pgidx = &alias->udp_pgidx;
				firstpg = alias->udp[0]->pgptr[0];
				break;
			case IPPROTO_ICMP:
				pgmask = alias->icmp_pgmask;
				pgptr = &alias->icmp_pg;
				pgidx = &alias->icmp_pgidx;
				firstpg = alias->icmp[0]->pgptr[0];
				break;
			}
			/* Reset the corresponding bit in pgmask array. */
			ck_pr_btr_32(&pgmask[idx / 32], idx % 32);
			ck_pr_fence_store();
			/* If last used PG points to this PG, reset it. */
			ck_pr_cas_ptr(pgptr, pg, firstpg);
			ck_pr_cas_32(pgidx, idx, 0);
			/* Unlink PG from alias's chain */
			ALIAS_LOCK(alias);
			CK_SLIST_REMOVE(&alias->portgroups, pg,
			    nat64lsn_pg, entries);
			alias->portgroups_count--;
			ALIAS_UNLOCK(alias);
			/* And link to job's chain for deferred destroying */
			NAT64STAT_INC(&cfg->base.stats, spgdeleted);
			CK_SLIST_INSERT_HEAD(portgroups, pg, entries);
		}
	}
}
static void
nat64lsn_expire_hosts(struct nat64lsn_cfg *cfg,
    struct nat64lsn_hosts_slist *hosts)
{
	struct nat64lsn_host *host, *tmp;
	int i;

	for (i = 0; i < cfg->hosts_hashsize; i++) {
		CK_SLIST_FOREACH_SAFE(host, &cfg->hosts_hash[i],
		    entries, tmp) {
			/* Was the host marked in the previous call? */
			if (host->flags & NAT64LSN_DEADHOST) {
				if (host->states_count > 0) {
					host->flags &= ~NAT64LSN_DEADHOST;
					continue;
				}
				/*
				 * Unlink host from hash table and schedule
				 * it for deferred destroying.
				 */
				CFG_LOCK(cfg);
				CK_SLIST_REMOVE(&cfg->hosts_hash[i], host,
				    nat64lsn_host, entries);
				cfg->hosts_count--;
				CFG_UNLOCK(cfg);
				CK_SLIST_INSERT_HEAD(hosts, host, entries);
				continue;
			}
			if (GET_AGE(host->timestamp) < cfg->host_delete_delay)
				continue;
			if (host->states_count > 0)
				continue;
			/* Mark host as going to be expired in next pass */
			host->flags |= NAT64LSN_DEADHOST;
		}
	}
}
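
/*
 * nat64lsn_expire_pgchunk() below scans the aliases for pgchunks that no
 * longer carry any portgroups (tcp_pgmask[c] == 0) once an alias has been
 * idle for pgchunk_delete_delay, clearing the corresponding chunkmask bit;
 * only the first chunk is always kept allocated.
 */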
static struct nat64lsn_pgchunk*
nat64lsn_expire_pgchunk(struct nat64lsn_cfg *cfg)
{
	struct nat64lsn_alias *alias;
	struct nat64lsn_pgchunk *chunk;
	int i, c;

	for (i = 0; i < 1 << (32 - cfg->plen4); i++) {
		alias = &cfg->aliases[i];
		if (GET_AGE(alias->timestamp) < cfg->pgchunk_delete_delay)
			continue;
		/* Always keep single chunk allocated */
		for (c = 1; c < 32; c++) {
			if ((alias->tcp_chunkmask & (1 << c)) == 0)
				break;
			chunk = ck_pr_load_ptr(&alias->tcp[c]);
			if (ck_pr_load_32(&alias->tcp_pgmask[c]) != 0)
				continue;
			ck_pr_btr_32(&alias->tcp_chunkmask, c);
			ck_pr_fence_load();
			if (ck_pr_load_32(&alias->tcp_pgmask[c]) != 0)
				continue;
			return (chunk);
		}
	}
	return (NULL);
}
static void
nat64lsn_maintain_hosts(struct nat64lsn_cfg *cfg)
{
	struct nat64lsn_host *h;
	struct nat64lsn_states_slist *hash;
	int i, j, hsize;

	for (i = 0; i < cfg->hosts_hashsize; i++) {
		CK_SLIST_FOREACH(h, &cfg->hosts_hash[i], entries) {
			if (h->states_count / 2 < h->states_hashsize ||
			    h->states_hashsize >= NAT64LSN_MAX_HSIZE)
				continue;
			hsize = h->states_hashsize * 2;
			hash = malloc(sizeof(*hash) * hsize, M_NAT64LSN,
			    M_NOWAIT);
			if (hash == NULL)
				continue;
			for (j = 0; j < hsize; j++)
				CK_SLIST_INIT(&hash[j]);

			ck_pr_bts_32(&h->flags, NAT64LSN_GROWHASH);
		}
	}
}
/*
 * This procedure is used to perform various maintenance
 * on dynamic hash lists. Currently it is called every 4 seconds.
 */
static void
nat64lsn_periodic(void *data)
{
	struct nat64lsn_job_item *ji;
	struct nat64lsn_cfg *cfg;

	cfg = (struct nat64lsn_cfg *) data;
	CURVNET_SET(cfg->vp);
	if (cfg->hosts_count > 0) {
		ji = uma_zalloc(nat64lsn_job_zone, M_NOWAIT);
		if (ji != NULL) {
			ji->jtype = JTYPE_DESTROY;
			CK_SLIST_INIT(&ji->hosts);
			CK_SLIST_INIT(&ji->portgroups);
			nat64lsn_expire_hosts(cfg, &ji->hosts);
			nat64lsn_expire_portgroups(cfg, &ji->portgroups);
			ji->pgchunk = nat64lsn_expire_pgchunk(cfg);
			NAT64LSN_EPOCH_CALL(&ji->epoch_ctx,
			    nat64lsn_job_destroy);
		} else
			NAT64STAT_INC(&cfg->base.stats, jnomem);
	}
	callout_schedule(&cfg->periodic, hz * PERIODIC_DELAY);
	CURVNET_RESTORE();
}
#define	ALLOC_ERROR(stage, type)	((stage) ? 10 * (type) + (stage): 0)
#define	HOST_ERROR(stage)	ALLOC_ERROR(stage, 1)
#define	PG_ERROR(stage)		ALLOC_ERROR(stage, 2)
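
/*
 * HOST_ERROR()/PG_ERROR() encode both the allocation type (1 = host,
 * 2 = portgroup) and the stage at which it failed, so e.g. HOST_ERROR(2)
 * is 12 and PG_ERROR(3) is 23, while HOST_ERROR(0)/PG_ERROR(0) are plain 0
 * and mean success.
 */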
static int
nat64lsn_alloc_host(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji)
{
	char a[INET6_ADDRSTRLEN];
	struct nat64lsn_aliaslink *link;
	struct nat64lsn_host *host;
	struct nat64lsn_state *state;
	uint32_t hval, data[2];
	int i;

	/* Check that host was not yet added. */
	NAT64LSN_EPOCH_ASSERT();
	CK_SLIST_FOREACH(host, &HOSTS(cfg, ji->src6_hval), entries) {
		if (IN6_ARE_ADDR_EQUAL(&ji->f_id.src_ip6, &host->addr)) {
			/* The host was allocated in previous call. */
			ji->host = host;
			goto get_state;
		}
	}

	host = ji->host = uma_zalloc(nat64lsn_host_zone, M_NOWAIT);
	if (ji->host == NULL)
		return (HOST_ERROR(1));

	host->states_hashsize = NAT64LSN_HSIZE;
	host->states_hash = malloc(sizeof(struct nat64lsn_states_slist) *
	    host->states_hashsize, M_NAT64LSN, M_NOWAIT);
	if (host->states_hash == NULL) {
		uma_zfree(nat64lsn_host_zone, host);
		return (HOST_ERROR(2));
	}

	link = uma_zalloc(nat64lsn_aliaslink_zone, M_NOWAIT);
	if (link == NULL) {
		free(host->states_hash, M_NAT64LSN);
		uma_zfree(nat64lsn_host_zone, host);
		return (HOST_ERROR(3));
	}

	/* Initialize the new host. */
	HOST_LOCK_INIT(host);
	SET_AGE(host->timestamp);
	host->addr = ji->f_id.src_ip6;
	host->hval = ji->src6_hval;
	host->flags = 0;
	host->states_count = 0;
	host->states_hashsize = NAT64LSN_HSIZE;
	CK_SLIST_INIT(&host->aliases);
	for (i = 0; i < host->states_hashsize; i++)
		CK_SLIST_INIT(&host->states_hash[i]);

	/* Determine alias from flow hash. */
	hval = ALIASLINK_HVAL(cfg, &ji->f_id);
	link->alias = &ALIAS_BYHASH(cfg, hval);
	CK_SLIST_INSERT_HEAD(&host->aliases, link, host_entries);

	ALIAS_LOCK(link->alias);
	CK_SLIST_INSERT_HEAD(&link->alias->hosts, link, alias_entries);
	link->alias->hosts_count++;
	ALIAS_UNLOCK(link->alias);

	CFG_LOCK(cfg);
	CK_SLIST_INSERT_HEAD(&HOSTS(cfg, ji->src6_hval), host, entries);
	cfg->hosts_count++;
	CFG_UNLOCK(cfg);

get_state:
	data[0] = ji->faddr;
	data[1] = (ji->f_id.dst_port << 16) | ji->port;
	ji->state_hval = hval = STATE_HVAL(cfg, data);
	state = nat64lsn_get_state6to4(cfg, host, &ji->f_id, hval,
	    ji->faddr, ji->port, ji->proto);
	/*
	 * If we failed to obtain a new state, the used alias needs a new PG.
	 * XXX: or another alias should be used.
	 */
	if (state == NULL) {
		/* Try to allocate new PG */
		if (nat64lsn_alloc_pg(cfg, ji) != PG_ERROR(0))
			return (HOST_ERROR(4));
		/* We assume that nat64lsn_alloc_pg() got state */
	} else {
		ji->state = state;
		ji->done = 1;
	}

	DPRINTF(DP_OBJ, "ALLOC HOST %s %p",
	    inet_ntop(AF_INET6, &host->addr, a, sizeof(a)), host);
	return (HOST_ERROR(0));
}
static int
nat64lsn_find_pg_place(uint32_t *data)
{
	int i;

	for (i = 0; i < 32; i++) {
		if (~data[i] == 0)
			continue;
		return (i * 32 + ffs(~data[i]) - 1);
	}
	return (-1);
}
static int
nat64lsn_alloc_proto_pg(struct nat64lsn_cfg *cfg,
    struct nat64lsn_alias *alias, uint32_t *chunkmask,
    uint32_t *pgmask, struct nat64lsn_pgchunk **chunks,
    struct nat64lsn_pg **pgptr, uint8_t proto)
{
	struct nat64lsn_pg *pg;
	int i, pg_idx, chunk_idx;

	/* Find place in pgchunk where PG can be added */
	pg_idx = nat64lsn_find_pg_place(pgmask);
	if (pg_idx < 0)	/* no more PGs */
		return (PG_ERROR(1));
	/* Check that we have allocated pgchunk for given PG index */
	chunk_idx = pg_idx / 32;
	if (!ISSET32(*chunkmask, chunk_idx)) {
		chunks[chunk_idx] = uma_zalloc(nat64lsn_pgchunk_zone,
		    M_NOWAIT);
		if (chunks[chunk_idx] == NULL)
			return (PG_ERROR(2));
		ck_pr_bts_32(chunkmask, chunk_idx);
		ck_pr_fence_store();
	}
	/* Allocate PG and states chunks */
	pg = uma_zalloc(nat64lsn_pg_zone, M_NOWAIT);
	if (pg == NULL)
		return (PG_ERROR(3));
	pg->chunks_count = cfg->states_chunks;
	if (pg->chunks_count > 1) {
		pg->freemask_chunk = malloc(pg->chunks_count *
		    sizeof(uint64_t), M_NAT64LSN, M_NOWAIT);
		if (pg->freemask_chunk == NULL) {
			uma_zfree(nat64lsn_pg_zone, pg);
			return (PG_ERROR(4));
		}
		pg->states_chunk = malloc(pg->chunks_count *
		    sizeof(struct nat64lsn_states_chunk *), M_NAT64LSN,
		    M_NOWAIT | M_ZERO);
		if (pg->states_chunk == NULL) {
			free(pg->freemask_chunk, M_NAT64LSN);
			uma_zfree(nat64lsn_pg_zone, pg);
			return (PG_ERROR(5));
		}
		for (i = 0; i < pg->chunks_count; i++) {
			pg->states_chunk[i] = uma_zalloc(
			    nat64lsn_state_zone, M_NOWAIT);
			if (pg->states_chunk[i] == NULL)
				goto states_failed;
		}
		memset(pg->freemask_chunk, 0xff,
		    sizeof(uint64_t) * pg->chunks_count);
	} else {
		pg->states = uma_zalloc(nat64lsn_state_zone, M_NOWAIT);
		if (pg->states == NULL) {
			uma_zfree(nat64lsn_pg_zone, pg);
			return (PG_ERROR(6));
		}
		memset(&pg->freemask64, 0xff, sizeof(uint64_t));
	}

	/* Initialize PG and hook it to pgchunk */
	SET_AGE(pg->timestamp);
	pg->proto = proto;
	pg->base_port = NAT64_MIN_PORT + 64 * pg_idx;
	ck_pr_store_ptr(&chunks[chunk_idx]->pgptr[pg_idx % 32], pg);
	ck_pr_fence_store();
	ck_pr_bts_32(&pgmask[pg_idx / 32], pg_idx % 32);
	ck_pr_store_ptr(pgptr, pg);

	ALIAS_LOCK(alias);
	CK_SLIST_INSERT_HEAD(&alias->portgroups, pg, entries);
	SET_AGE(alias->timestamp);
	alias->portgroups_count++;
	ALIAS_UNLOCK(alias);
	NAT64STAT_INC(&cfg->base.stats, spgcreated);
	return (PG_ERROR(0));

states_failed:
	for (i = 0; i < pg->chunks_count; i++)
		uma_zfree(nat64lsn_state_zone, pg->states_chunk[i]);
	free(pg->freemask_chunk, M_NAT64LSN);
	free(pg->states_chunk, M_NAT64LSN);
	uma_zfree(nat64lsn_pg_zone, pg);
	return (PG_ERROR(7));
}
static int
nat64lsn_alloc_pg(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji)
{
	struct nat64lsn_aliaslink *link;
	struct nat64lsn_alias *alias;
	int ret;

	link = nat64lsn_get_aliaslink(cfg, ji->host, &ji->f_id);
	if (link == NULL)
		return (PG_ERROR(1));

	/*
	 * TODO: check that we did not already allocate a PG in
	 * a previous call.
	 */

	alias = link->alias;
	/* Find place in pgchunk where PG can be added */
	switch (ji->proto) {
	case IPPROTO_TCP:
		ret = nat64lsn_alloc_proto_pg(cfg, alias,
		    &alias->tcp_chunkmask, alias->tcp_pgmask,
		    alias->tcp, &alias->tcp_pg, ji->proto);
		break;
	case IPPROTO_UDP:
		ret = nat64lsn_alloc_proto_pg(cfg, alias,
		    &alias->udp_chunkmask, alias->udp_pgmask,
		    alias->udp, &alias->udp_pg, ji->proto);
		break;
	case IPPROTO_ICMP:
		ret = nat64lsn_alloc_proto_pg(cfg, alias,
		    &alias->icmp_chunkmask, alias->icmp_pgmask,
		    alias->icmp, &alias->icmp_pg, ji->proto);
		break;
	default:
		panic("%s: wrong proto %d", __func__, ji->proto);
	}
	if (ret == PG_ERROR(1)) {
		/*
		 * PG_ERROR(1) means that alias lacks free PGs.
		 * XXX: try next alias.
		 */
		printf("NAT64LSN: %s: failed to obtain PG\n",
		    __func__);
		return (ret);
	}
	if (ret == PG_ERROR(0)) {
		ji->state = nat64lsn_get_state6to4(cfg, ji->host, &ji->f_id,
		    ji->state_hval, ji->faddr, ji->port, ji->proto);
		if (ji->state == NULL)
			ret = PG_ERROR(8);
		else
			ji->done = 1;
	}
	return (ret);
}
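
/*
 * Jobs handler.  When the fast path cannot find a host or a free state it
 * queues a job (keeping the mbuf) and returns; this callout-driven handler
 * drains the queue under the epoch, allocates the missing host/portgroup
 * and re-injects the stored packet through nat64lsn_translate6_internal().
 */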
static void
nat64lsn_do_request(void *data)
{
	struct epoch_tracker et;
	struct nat64lsn_job_head jhead;
	struct nat64lsn_job_item *ji, *ji2;
	struct nat64lsn_cfg *cfg;
	int jcount;
	uint8_t flags;

	cfg = (struct nat64lsn_cfg *)data;
	if (cfg->jlen == 0)
		return;

	CURVNET_SET(cfg->vp);
	STAILQ_INIT(&jhead);

	/* Grab queue */
	JQUEUE_LOCK();
	STAILQ_SWAP(&jhead, &cfg->jhead, nat64lsn_job_item);
	jcount = cfg->jlen;
	cfg->jlen = 0;
	JQUEUE_UNLOCK();

	/* TODO: check if we need to resize hash */

	NAT64STAT_INC(&cfg->base.stats, jcalls);
	DPRINTF(DP_JQUEUE, "count=%d", jcount);

	/*
	 * TODO:
	 * What we should do here is to build a hash
	 * to ensure we don't have lots of duplicate requests.
	 * Skip this for now.
	 *
	 * TODO: Limit per-call number of items
	 */

	NAT64LSN_EPOCH_ENTER(et);
	STAILQ_FOREACH(ji, &jhead, entries) {
		switch (ji->jtype) {
		case JTYPE_NEWHOST:
			if (nat64lsn_alloc_host(cfg, ji) != HOST_ERROR(0))
				NAT64STAT_INC(&cfg->base.stats, jhostfails);
			break;
		case JTYPE_NEWPORTGROUP:
			if (nat64lsn_alloc_pg(cfg, ji) != PG_ERROR(0))
				NAT64STAT_INC(&cfg->base.stats, jportfails);
			break;
		default:
			continue;
		}
		if (ji->done != 0) {
			flags = ji->proto != IPPROTO_TCP ? 0 :
			    convert_tcp_flags(ji->f_id._flags);
			nat64lsn_translate6_internal(cfg, &ji->m,
			    ji->state, flags);
			NAT64STAT_INC(&cfg->base.stats, jreinjected);
		}
	}
	NAT64LSN_EPOCH_EXIT(et);

	ji = STAILQ_FIRST(&jhead);
	while (ji != NULL) {
		ji2 = STAILQ_NEXT(ji, entries);
		/*
		 * In any case we must free the mbuf if the
		 * translator did not consume it.
		 */
		m_freem(ji->m);
		uma_zfree(nat64lsn_job_zone, ji);
		ji = ji2;
	}
	CURVNET_RESTORE();
}
static struct nat64lsn_job_item *
nat64lsn_create_job(struct nat64lsn_cfg *cfg, int jtype)
{
	struct nat64lsn_job_item *ji;

	/*
	 * Do not try to lock possibly contested mutex if we're near the
	 * limit. Drop packet instead.
	 */
	ji = NULL;
	if (cfg->jlen >= cfg->jmaxlen)
		NAT64STAT_INC(&cfg->base.stats, jmaxlen);
	else {
		ji = uma_zalloc(nat64lsn_job_zone, M_NOWAIT);
		if (ji == NULL)
			NAT64STAT_INC(&cfg->base.stats, jnomem);
	}
	if (ji == NULL) {
		NAT64STAT_INC(&cfg->base.stats, dropped);
		DPRINTF(DP_DROPS, "failed to create job");
	} else
		ji->jtype = jtype;
	return (ji);
}
static void
nat64lsn_enqueue_job(struct nat64lsn_cfg *cfg, struct nat64lsn_job_item *ji)
{

	JQUEUE_LOCK();
	STAILQ_INSERT_TAIL(&cfg->jhead, ji, entries);
	NAT64STAT_INC(&cfg->base.stats, jrequests);
	cfg->jlen++;

	if (callout_pending(&cfg->jcallout) == 0)
		callout_reset(&cfg->jcallout, 1, nat64lsn_do_request, cfg);
	JQUEUE_UNLOCK();
}
static void
nat64lsn_job_destroy(epoch_context_t ctx)
{
	struct nat64lsn_job_item *ji;
	struct nat64lsn_host *host;
	struct nat64lsn_pg *pg;
	int i;

	ji = __containerof(ctx, struct nat64lsn_job_item, epoch_ctx);
	MPASS(ji->jtype == JTYPE_DESTROY);
	while (!CK_SLIST_EMPTY(&ji->hosts)) {
		host = CK_SLIST_FIRST(&ji->hosts);
		CK_SLIST_REMOVE_HEAD(&ji->hosts, entries);
		if (host->states_count > 0) {
			/*
			 * XXX: The state has been created
			 * during host deletion.
			 */
			printf("NAT64LSN: %s: destroying host with %d "
			    "states\n", __func__, host->states_count);
		}
		nat64lsn_destroy_host(host);
	}
	while (!CK_SLIST_EMPTY(&ji->portgroups)) {
		pg = CK_SLIST_FIRST(&ji->portgroups);
		CK_SLIST_REMOVE_HEAD(&ji->portgroups, entries);
		for (i = 0; i < pg->chunks_count; i++) {
			if (FREEMASK_BITCOUNT(pg, i) != 64) {
				/*
				 * XXX: The state has been created during
				 * PG deletion.
				 */
				printf("NAT64LSN: %s: destroying PG %p "
				    "with non-empty chunk %d\n", __func__,
				    pg, i);
			}
		}
		nat64lsn_destroy_pg(pg);
	}
	uma_zfree(nat64lsn_pgchunk_zone, ji->pgchunk);
	uma_zfree(nat64lsn_job_zone, ji);
}
static int
nat64lsn_request_host(struct nat64lsn_cfg *cfg,
    const struct ipfw_flow_id *f_id, struct mbuf **mp, uint32_t hval,
    in_addr_t faddr, uint16_t port, uint8_t proto)
{
	struct nat64lsn_job_item *ji;

	ji = nat64lsn_create_job(cfg, JTYPE_NEWHOST);
	if (ji != NULL) {
		ji->m = *mp;
		ji->f_id = *f_id;
		ji->faddr = faddr;
		ji->port = port;
		ji->proto = proto;
		ji->src6_hval = hval;

		nat64lsn_enqueue_job(cfg, ji);
		NAT64STAT_INC(&cfg->base.stats, jhostsreq);
		*mp = NULL;
	}
	return (IP_FW_DENY);
}
static int
nat64lsn_request_pg(struct nat64lsn_cfg *cfg, struct nat64lsn_host *host,
    const struct ipfw_flow_id *f_id, struct mbuf **mp, uint32_t hval,
    in_addr_t faddr, uint16_t port, uint8_t proto)
{
	struct nat64lsn_job_item *ji;

	ji = nat64lsn_create_job(cfg, JTYPE_NEWPORTGROUP);
	if (ji != NULL) {
		ji->m = *mp;
		ji->f_id = *f_id;
		ji->faddr = faddr;
		ji->port = port;
		ji->proto = proto;
		ji->host = host;
		ji->state_hval = hval;

		nat64lsn_enqueue_job(cfg, ji);
		NAT64STAT_INC(&cfg->base.stats, jportreq);
		*mp = NULL;
	}
	return (IP_FW_DENY);
}
static int
nat64lsn_translate6_internal(struct nat64lsn_cfg *cfg, struct mbuf **mp,
    struct nat64lsn_state *state, uint8_t flags)
{
	struct pfloghdr loghdr, *logdata;
	uint16_t ts;
	int ret;

	/* Update timestamp and flags if needed */
	SET_AGE(ts);
	if (state->timestamp != ts)
		state->timestamp = ts;
	if ((state->flags & flags) != flags)
		state->flags |= flags;

	if (cfg->base.flags & NAT64_LOG) {
		logdata = &loghdr;
		nat64lsn_log(logdata, *mp, AF_INET6, state);
	} else
		logdata = NULL;

	ret = nat64_do_handle_ip6(*mp, htonl(state->ip_src),
	    htons(state->aport), &cfg->base, logdata);
	if (ret == NAT64SKIP)
		return (cfg->nomatch_verdict);
	if (ret == NAT64RETURN)
		*mp = NULL;
	return (IP_FW_DENY);
}
static int
nat64lsn_translate6(struct nat64lsn_cfg *cfg, struct ipfw_flow_id *f_id,
    struct mbuf **mp)
{
	struct nat64lsn_state *state;
	struct nat64lsn_host *host;
	struct icmp6_hdr *icmp6;
	uint32_t addr, hval, data[2];
	int offset, proto;
	uint16_t port;
	uint8_t flags;

	/* Check if protocol is supported */
	port = f_id->src_port;
	proto = f_id->proto;
	switch (f_id->proto) {
	case IPPROTO_ICMPV6:
		/*
		 * For ICMPv6 echo reply/request we use icmp6_id as
		 * local port.
		 */
		offset = 0;
		proto = nat64_getlasthdr(*mp, &offset);
		if (proto < 0) {
			NAT64STAT_INC(&cfg->base.stats, dropped);
			DPRINTF(DP_DROPS, "mbuf isn't contigious");
			return (IP_FW_DENY);
		}
		if (proto == IPPROTO_ICMPV6) {
			icmp6 = mtodo(*mp, offset);
			if (icmp6->icmp6_type == ICMP6_ECHO_REQUEST ||
			    icmp6->icmp6_type == ICMP6_ECHO_REPLY)
				port = ntohs(icmp6->icmp6_id);
		}
		proto = IPPROTO_ICMP;
		/* FALLTHROUGH */
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		break;
	default:
		NAT64STAT_INC(&cfg->base.stats, noproto);
		return (cfg->nomatch_verdict);
	}

	/* Extract IPv4 from destination IPv6 address */
	addr = nat64_extract_ip4(&f_id->dst_ip6, cfg->base.plat_plen);
	if (addr == 0 || nat64_check_private_ip4(&cfg->base, addr) != 0) {
		char a[INET_ADDRSTRLEN];

		NAT64STAT_INC(&cfg->base.stats, dropped);
		DPRINTF(DP_DROPS, "dropped due to embedded IPv4 address %s",
		    inet_ntop(AF_INET, &addr, a, sizeof(a)));
		return (IP_FW_DENY); /* XXX: add extra stats? */
	}

	/* Try to find host */
	hval = HOST_HVAL(cfg, &f_id->src_ip6);
	CK_SLIST_FOREACH(host, &HOSTS(cfg, hval), entries) {
		if (IN6_ARE_ADDR_EQUAL(&f_id->src_ip6, &host->addr))
			break;
	}
	/* We use IPv4 address in host byte order */
	addr = ntohl(addr);
	if (host == NULL)
		return (nat64lsn_request_host(cfg, f_id, mp,
		    hval, addr, port, proto));

	flags = proto != IPPROTO_TCP ? 0 : convert_tcp_flags(f_id->_flags);

	data[0] = addr;
	data[1] = (f_id->dst_port << 16) | port;
	hval = STATE_HVAL(cfg, data);
	state = nat64lsn_get_state6to4(cfg, host, f_id, hval, addr,
	    port, proto);
	if (state == NULL)
		return (nat64lsn_request_pg(cfg, host, f_id, mp, hval, addr,
		    port, proto));
	return (nat64lsn_translate6_internal(cfg, mp, state, flags));
}
/*
 * Main dataplane entry point.
 */
int
ipfw_nat64lsn(struct ip_fw_chain *ch, struct ip_fw_args *args,
    ipfw_insn *cmd, int *done)
{
	struct nat64lsn_cfg *cfg;
	ipfw_insn *icmd;
	int ret;

	IPFW_RLOCK_ASSERT(ch);

	*done = 0;	/* continue the search in case of failure */
	icmd = cmd + 1;
	if (cmd->opcode != O_EXTERNAL_ACTION ||
	    cmd->arg1 != V_nat64lsn_eid ||
	    icmd->opcode != O_EXTERNAL_INSTANCE ||
	    (cfg = NAT64_LOOKUP(ch, icmd)) == NULL)
		return (IP_FW_DENY);

	*done = 1;	/* terminate the search */

	switch (args->f_id.addr_type) {
	case 4:
		ret = nat64lsn_translate4(cfg, &args->f_id, &args->m);
		break;
	case 6:
		/*
		 * Check that destination IPv6 address matches our prefix6.
		 */
		if ((cfg->base.flags & NAT64LSN_ANYPREFIX) == 0 &&
		    memcmp(&args->f_id.dst_ip6, &cfg->base.plat_prefix,
		    cfg->base.plat_plen / 8) != 0) {
			ret = cfg->nomatch_verdict;
			break;
		}
		ret = nat64lsn_translate6(cfg, &args->f_id, &args->m);
		break;
	default:
		ret = cfg->nomatch_verdict;
	}

	if (ret != IP_FW_PASS && args->m != NULL) {
		m_freem(args->m);
		args->m = NULL;
	}
	return (ret);
}
static int
nat64lsn_state_ctor(void *mem, int size, void *arg, int flags)
{
	struct nat64lsn_states_chunk *chunk;
	int i;

	chunk = (struct nat64lsn_states_chunk *)mem;
	for (i = 0; i < 64; i++)
		chunk->state[i].flags = 0;
	return (0);
}
void
nat64lsn_init_internal(void)
{

	nat64lsn_host_zone = uma_zcreate("NAT64LSN hosts",
	    sizeof(struct nat64lsn_host), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	nat64lsn_pgchunk_zone = uma_zcreate("NAT64LSN portgroup chunks",
	    sizeof(struct nat64lsn_pgchunk), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	nat64lsn_pg_zone = uma_zcreate("NAT64LSN portgroups",
	    sizeof(struct nat64lsn_pg), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	nat64lsn_aliaslink_zone = uma_zcreate("NAT64LSN links",
	    sizeof(struct nat64lsn_aliaslink), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	nat64lsn_state_zone = uma_zcreate("NAT64LSN states",
	    sizeof(struct nat64lsn_states_chunk), nat64lsn_state_ctor,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	nat64lsn_job_zone = uma_zcreate("NAT64LSN jobs",
	    sizeof(struct nat64lsn_job_item), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	JQUEUE_LOCK_INIT();
}
void
nat64lsn_uninit_internal(void)
{

	/* XXX: epoch_task drain */
	JQUEUE_LOCK_DESTROY();
	uma_zdestroy(nat64lsn_host_zone);
	uma_zdestroy(nat64lsn_pgchunk_zone);
	uma_zdestroy(nat64lsn_pg_zone);
	uma_zdestroy(nat64lsn_aliaslink_zone);
	uma_zdestroy(nat64lsn_state_zone);
	uma_zdestroy(nat64lsn_job_zone);
}
void
nat64lsn_start_instance(struct nat64lsn_cfg *cfg)
{

	CALLOUT_LOCK(cfg);
	callout_reset(&cfg->periodic, hz * PERIODIC_DELAY,
	    nat64lsn_periodic, cfg);
	CALLOUT_UNLOCK(cfg);
}
struct nat64lsn_cfg *
nat64lsn_init_instance(struct ip_fw_chain *ch, in_addr_t prefix, int plen)
{
	struct nat64lsn_cfg *cfg;
	struct nat64lsn_alias *alias;
	int i, naddr;

	cfg = malloc(sizeof(struct nat64lsn_cfg), M_NAT64LSN,
	    M_WAITOK | M_ZERO);

	CFG_LOCK_INIT(cfg);
	CALLOUT_LOCK_INIT(cfg);
	STAILQ_INIT(&cfg->jhead);
	cfg->vp = curvnet;
	COUNTER_ARRAY_ALLOC(cfg->base.stats.cnt, NAT64STATS, M_WAITOK);

	cfg->hash_seed = arc4random();
	cfg->hosts_hashsize = NAT64LSN_HOSTS_HSIZE;
	cfg->hosts_hash = malloc(sizeof(struct nat64lsn_hosts_slist) *
	    cfg->hosts_hashsize, M_NAT64LSN, M_WAITOK | M_ZERO);
	for (i = 0; i < cfg->hosts_hashsize; i++)
		CK_SLIST_INIT(&cfg->hosts_hash[i]);

	naddr = 1 << (32 - plen);
	cfg->prefix4 = prefix;
	cfg->pmask4 = prefix | (naddr - 1);
	cfg->plen4 = plen;

	cfg->aliases = malloc(sizeof(struct nat64lsn_alias) * naddr,
	    M_NAT64LSN, M_WAITOK | M_ZERO);
	for (i = 0; i < naddr; i++) {
		alias = &cfg->aliases[i];
		alias->addr = prefix + i; /* host byte order */
		CK_SLIST_INIT(&alias->hosts);
		ALIAS_LOCK_INIT(alias);
	}

	callout_init_mtx(&cfg->periodic, &cfg->periodic_lock, 0);
	callout_init(&cfg->jcallout, CALLOUT_MPSAFE);

	return (cfg);
}
static void
nat64lsn_destroy_pg(struct nat64lsn_pg *pg)
{
	int i;

	if (pg->chunks_count == 1) {
		uma_zfree(nat64lsn_state_zone, pg->states);
	} else {
		for (i = 0; i < pg->chunks_count; i++)
			uma_zfree(nat64lsn_state_zone, pg->states_chunk[i]);
		free(pg->states_chunk, M_NAT64LSN);
		free(pg->freemask_chunk, M_NAT64LSN);
	}
	uma_zfree(nat64lsn_pg_zone, pg);
}
static void
nat64lsn_destroy_alias(struct nat64lsn_cfg *cfg,
    struct nat64lsn_alias *alias)
{
	struct nat64lsn_pg *pg;
	int i;

	while (!CK_SLIST_EMPTY(&alias->portgroups)) {
		pg = CK_SLIST_FIRST(&alias->portgroups);
		CK_SLIST_REMOVE_HEAD(&alias->portgroups, entries);
		nat64lsn_destroy_pg(pg);
	}
	for (i = 0; i < 32; i++) {
		if (ISSET32(alias->tcp_chunkmask, i))
			uma_zfree(nat64lsn_pgchunk_zone, alias->tcp[i]);
		if (ISSET32(alias->udp_chunkmask, i))
			uma_zfree(nat64lsn_pgchunk_zone, alias->udp[i]);
		if (ISSET32(alias->icmp_chunkmask, i))
			uma_zfree(nat64lsn_pgchunk_zone, alias->icmp[i]);
	}
	ALIAS_LOCK_DESTROY(alias);
}
static void
nat64lsn_destroy_host(struct nat64lsn_host *host)
{
	struct nat64lsn_aliaslink *link;

	while (!CK_SLIST_EMPTY(&host->aliases)) {
		link = CK_SLIST_FIRST(&host->aliases);
		CK_SLIST_REMOVE_HEAD(&host->aliases, host_entries);

		ALIAS_LOCK(link->alias);
		CK_SLIST_REMOVE(&link->alias->hosts, link,
		    nat64lsn_aliaslink, alias_entries);
		link->alias->hosts_count--;
		ALIAS_UNLOCK(link->alias);

		uma_zfree(nat64lsn_aliaslink_zone, link);
	}
	HOST_LOCK_DESTROY(host);
	free(host->states_hash, M_NAT64LSN);
	uma_zfree(nat64lsn_host_zone, host);
}
void
nat64lsn_destroy_instance(struct nat64lsn_cfg *cfg)
{
	struct nat64lsn_host *host;
	int i;

	CALLOUT_LOCK(cfg);
	callout_drain(&cfg->periodic);
	CALLOUT_UNLOCK(cfg);
	callout_drain(&cfg->jcallout);

	for (i = 0; i < cfg->hosts_hashsize; i++) {
		while (!CK_SLIST_EMPTY(&cfg->hosts_hash[i])) {
			host = CK_SLIST_FIRST(&cfg->hosts_hash[i]);
			CK_SLIST_REMOVE_HEAD(&cfg->hosts_hash[i], entries);
			nat64lsn_destroy_host(host);
		}
	}

	for (i = 0; i < (1 << (32 - cfg->plen4)); i++)
		nat64lsn_destroy_alias(cfg, &cfg->aliases[i]);

	CALLOUT_LOCK_DESTROY(cfg);
	CFG_LOCK_DESTROY(cfg);
	COUNTER_ARRAY_FREE(cfg->base.stats.cnt, NAT64STATS);
	free(cfg->hosts_hash, M_NAT64LSN);
	free(cfg->aliases, M_NAT64LSN);
	free(cfg, M_NAT64LSN);
}