2 * Copyright (c) 2001 McAfee, Inc.
3 * Copyright (c) 2006,2013 Andre Oppermann, Internet Business Solutions AG
6 * This software was developed for the FreeBSD Project by Jonathan Lemon
7 * and McAfee Research, the Security Research Division of McAfee, Inc. under
8 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
9 * DARPA CHATS research program. [2001 McAfee, Inc.]
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
37 #include "opt_inet6.h"
38 #include "opt_ipsec.h"
39 #include "opt_pcbgroup.h"
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/sysctl.h>
45 #include <sys/limits.h>
47 #include <sys/mutex.h>
48 #include <sys/malloc.h>
50 #include <sys/proc.h> /* for proc0 declaration */
51 #include <sys/random.h>
52 #include <sys/socket.h>
53 #include <sys/socketvar.h>
54 #include <sys/syslog.h>
55 #include <sys/ucred.h>
58 #include <crypto/siphash/siphash.h>
63 #include <net/route.h>
66 #include <netinet/in.h>
67 #include <netinet/in_systm.h>
68 #include <netinet/ip.h>
69 #include <netinet/in_var.h>
70 #include <netinet/in_pcb.h>
71 #include <netinet/ip_var.h>
72 #include <netinet/ip_options.h>
74 #include <netinet/ip6.h>
75 #include <netinet/icmp6.h>
76 #include <netinet6/nd6.h>
77 #include <netinet6/ip6_var.h>
78 #include <netinet6/in6_pcb.h>
80 #include <netinet/tcp.h>
81 #include <netinet/tcp_fsm.h>
82 #include <netinet/tcp_seq.h>
83 #include <netinet/tcp_timer.h>
84 #include <netinet/tcp_var.h>
85 #include <netinet/tcp_syncache.h>
87 #include <netinet6/tcp6_var.h>
90 #include <netinet/toecore.h>
94 #include <netipsec/ipsec.h>
96 #include <netipsec/ipsec6.h>
98 #include <netipsec/key.h>
101 #include <machine/in_cksum.h>
103 #include <security/mac/mac_framework.h>
105 static VNET_DEFINE(int, tcp_syncookies) = 1;
106 #define V_tcp_syncookies VNET(tcp_syncookies)
107 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
108 &VNET_NAME(tcp_syncookies), 0,
109 "Use TCP SYN cookies if the syncache overflows");
111 static VNET_DEFINE(int, tcp_syncookiesonly) = 0;
112 #define V_tcp_syncookiesonly VNET(tcp_syncookiesonly)
113 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, syncookies_only, CTLFLAG_RW,
114 &VNET_NAME(tcp_syncookiesonly), 0,
115 "Use only TCP SYN cookies");
118 #define ADDED_BY_TOE(sc) ((sc)->sc_tod != NULL)
121 static void syncache_drop(struct syncache *, struct syncache_head *);
122 static void syncache_free(struct syncache *);
123 static void syncache_insert(struct syncache *, struct syncache_head *);
124 static int syncache_respond(struct syncache *);
125 static struct socket *syncache_socket(struct syncache *, struct socket *,
127 static int syncache_sysctl_count(SYSCTL_HANDLER_ARGS);
128 static void syncache_timeout(struct syncache *sc, struct syncache_head *sch,
130 static void syncache_timer(void *);
132 static uint32_t syncookie_mac(struct in_conninfo *, tcp_seq, uint8_t,
133 uint8_t *, uintptr_t);
134 static tcp_seq syncookie_generate(struct syncache_head *, struct syncache *);
135 static struct syncache
136 *syncookie_lookup(struct in_conninfo *, struct syncache_head *,
137 struct syncache *, struct tcphdr *, struct tcpopt *,
139 static void syncookie_reseed(void *);
141 static int syncookie_cmp(struct in_conninfo *inc, struct syncache_head *sch,
142 struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
147 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
148 * 3 retransmits correspond to a timeout of 3 * (1 + 2 + 4 + 8) == 45 seconds;
149 * the odds are that the user has given up attempting to connect by then.
151 #define SYNCACHE_MAXREXMTS 3
153 /* Arbitrary values */
154 #define TCP_SYNCACHE_HASHSIZE 512
155 #define TCP_SYNCACHE_BUCKETLIMIT 30
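/*
 * With these defaults, syncache_init() below sizes the overall cache to
 * hashsize * bucket_limit = 512 * 30 = 15360 entries, unless the
 * corresponding loader tunables override it.
 */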
157 static VNET_DEFINE(struct tcp_syncache, tcp_syncache);
158 #define V_tcp_syncache VNET(tcp_syncache)
160 static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0,
163 SYSCTL_VNET_UINT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RDTUN,
164 &VNET_NAME(tcp_syncache.bucket_limit), 0,
165 "Per-bucket hash limit for syncache");
167 SYSCTL_VNET_UINT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RDTUN,
168 &VNET_NAME(tcp_syncache.cache_limit), 0,
169 "Overall entry limit for syncache");
171 SYSCTL_VNET_PROC(_net_inet_tcp_syncache, OID_AUTO, count, (CTLTYPE_UINT|CTLFLAG_RD),
172 NULL, 0, &syncache_sysctl_count, "IU",
173 "Current number of entries in syncache");
175 SYSCTL_VNET_UINT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RDTUN,
176 &VNET_NAME(tcp_syncache.hashsize), 0,
177 "Size of TCP syncache hashtable");
179 SYSCTL_VNET_UINT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
180 &VNET_NAME(tcp_syncache.rexmt_limit), 0,
181 "Limit on SYN/ACK retransmissions");
183 VNET_DEFINE(int, tcp_sc_rst_sock_fail) = 1;
184 SYSCTL_VNET_INT(_net_inet_tcp_syncache, OID_AUTO, rst_on_sock_fail,
185 CTLFLAG_RW, &VNET_NAME(tcp_sc_rst_sock_fail), 0,
186 "Send reset on socket allocation failure");
188 static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");
190 #define SYNCACHE_HASH(inc, mask) \
191 ((V_tcp_syncache.hash_secret ^ \
192 (inc)->inc_faddr.s_addr ^ \
193 ((inc)->inc_faddr.s_addr >> 16) ^ \
194 (inc)->inc_fport ^ (inc)->inc_lport) & mask)
196 #define SYNCACHE_HASH6(inc, mask) \
197 ((V_tcp_syncache.hash_secret ^ \
198 (inc)->inc6_faddr.s6_addr32[0] ^ \
199 (inc)->inc6_faddr.s6_addr32[3] ^ \
200 (inc)->inc_fport ^ (inc)->inc_lport) & mask)
202 #define ENDPTS_EQ(a, b) ( \
203 (a)->ie_fport == (b)->ie_fport && \
204 (a)->ie_lport == (b)->ie_lport && \
205 (a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr && \
206 (a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr \
209 #define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0)
211 #define SCH_LOCK(sch) mtx_lock(&(sch)->sch_mtx)
212 #define SCH_UNLOCK(sch) mtx_unlock(&(sch)->sch_mtx)
213 #define SCH_LOCK_ASSERT(sch) mtx_assert(&(sch)->sch_mtx, MA_OWNED)
216 * Requires the syncache entry to be already removed from the bucket list.
219 syncache_free(struct syncache *sc)
223 (void) m_free(sc->sc_ipopts);
227 mac_syncache_destroy(&sc->sc_label);
230 uma_zfree(V_tcp_syncache.zone, sc);
238 V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
239 V_tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
240 V_tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
241 V_tcp_syncache.hash_secret = arc4random();
243 TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
244 &V_tcp_syncache.hashsize);
245 TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
246 &V_tcp_syncache.bucket_limit);
247 if (!powerof2(V_tcp_syncache.hashsize) ||
248 V_tcp_syncache.hashsize == 0) {
249 printf("WARNING: syncache hash size is not a power of 2.\n");
250 V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
252 V_tcp_syncache.hashmask = V_tcp_syncache.hashsize - 1;
255 V_tcp_syncache.cache_limit =
256 V_tcp_syncache.hashsize * V_tcp_syncache.bucket_limit;
257 TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
258 &V_tcp_syncache.cache_limit);
260 /* Allocate the hash table. */
261 V_tcp_syncache.hashbase = malloc(V_tcp_syncache.hashsize *
262 sizeof(struct syncache_head), M_SYNCACHE, M_WAITOK | M_ZERO);
265 V_tcp_syncache.vnet = curvnet;
268 /* Initialize the hash buckets. */
269 for (i = 0; i < V_tcp_syncache.hashsize; i++) {
270 TAILQ_INIT(&V_tcp_syncache.hashbase[i].sch_bucket);
271 mtx_init(&V_tcp_syncache.hashbase[i].sch_mtx, "tcp_sc_head",
273 callout_init_mtx(&V_tcp_syncache.hashbase[i].sch_timer,
274 &V_tcp_syncache.hashbase[i].sch_mtx, 0);
275 V_tcp_syncache.hashbase[i].sch_length = 0;
276 V_tcp_syncache.hashbase[i].sch_sc = &V_tcp_syncache;
279 /* Create the syncache entry zone. */
280 V_tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
281 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
282 V_tcp_syncache.cache_limit = uma_zone_set_max(V_tcp_syncache.zone,
283 V_tcp_syncache.cache_limit);
285 /* Start the SYN cookie reseeder callout. */
286 callout_init(&V_tcp_syncache.secret.reseed, 1);
287 arc4rand(V_tcp_syncache.secret.key[0], SYNCOOKIE_SECRET_SIZE, 0);
288 arc4rand(V_tcp_syncache.secret.key[1], SYNCOOKIE_SECRET_SIZE, 0);
289 callout_reset(&V_tcp_syncache.secret.reseed, SYNCOOKIE_LIFETIME * hz,
290 syncookie_reseed, &V_tcp_syncache);
295 syncache_destroy(void)
297 struct syncache_head *sch;
298 struct syncache *sc, *nsc;
301 /* Cleanup hash buckets: stop timers, free entries, destroy locks. */
302 for (i = 0; i < V_tcp_syncache.hashsize; i++) {
304 sch = &V_tcp_syncache.hashbase[i];
305 callout_drain(&sch->sch_timer);
308 TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc)
309 syncache_drop(sc, sch);
311 KASSERT(TAILQ_EMPTY(&sch->sch_bucket),
312 ("%s: sch->sch_bucket not empty", __func__));
313 KASSERT(sch->sch_length == 0, ("%s: sch->sch_length %d not 0",
314 __func__, sch->sch_length));
315 mtx_destroy(&sch->sch_mtx);
318 KASSERT(uma_zone_get_cur(V_tcp_syncache.zone) == 0,
319 ("%s: cache_count not 0", __func__));
321 /* Free the allocated global resources. */
322 uma_zdestroy(V_tcp_syncache.zone);
323 free(V_tcp_syncache.hashbase, M_SYNCACHE);
325 callout_drain(&V_tcp_syncache.secret.reseed);
330 syncache_sysctl_count(SYSCTL_HANDLER_ARGS)
334 count = uma_zone_get_cur(V_tcp_syncache.zone);
335 return (sysctl_handle_int(oidp, &count, 0, req));
339 * Inserts a syncache entry into the specified bucket row.
340 * Locks and unlocks the syncache_head autonomously.
343 syncache_insert(struct syncache *sc, struct syncache_head *sch)
345 struct syncache *sc2;
350 * Make sure that we don't overflow the per-bucket limit.
351 * If the bucket is full, toss the oldest element.
353 if (sch->sch_length >= V_tcp_syncache.bucket_limit) {
354 KASSERT(!TAILQ_EMPTY(&sch->sch_bucket),
355 ("sch->sch_length incorrect"));
356 sc2 = TAILQ_LAST(&sch->sch_bucket, sch_head);
357 syncache_drop(sc2, sch);
358 TCPSTAT_INC(tcps_sc_bucketoverflow);
361 /* Put it into the bucket. */
362 TAILQ_INSERT_HEAD(&sch->sch_bucket, sc, sc_hash);
366 if (ADDED_BY_TOE(sc)) {
367 struct toedev *tod = sc->sc_tod;
369 tod->tod_syncache_added(tod, sc->sc_todctx);
373 /* Reinitialize the bucket row's timer. */
374 if (sch->sch_length == 1)
375 sch->sch_nextc = ticks + INT_MAX;
376 syncache_timeout(sc, sch, 1);
380 TCPSTAT_INC(tcps_sc_added);
384 * Remove and free entry from syncache bucket row.
385 * Expects locked syncache head.
388 syncache_drop(struct syncache *sc, struct syncache_head *sch)
391 SCH_LOCK_ASSERT(sch);
393 TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
397 if (ADDED_BY_TOE(sc)) {
398 struct toedev *tod = sc->sc_tod;
400 tod->tod_syncache_removed(tod, sc->sc_todctx);
408 * Engage/reengage timer on bucket row.
411 syncache_timeout(struct syncache *sc, struct syncache_head *sch, int docallout)
413 sc->sc_rxttime = ticks +
414 TCPTV_RTOBASE * (tcp_syn_backoff[sc->sc_rxmits]);
416 if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc)) {
417 sch->sch_nextc = sc->sc_rxttime;
419 callout_reset(&sch->sch_timer, sch->sch_nextc - ticks,
420 syncache_timer, (void *)sch);
425 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
426 * If we have retransmitted an entry the maximum number of times, expire it.
427 * One separate timer for each bucket row.
430 syncache_timer(void *xsch)
432 struct syncache_head *sch = (struct syncache_head *)xsch;
433 struct syncache *sc, *nsc;
437 CURVNET_SET(sch->sch_sc->vnet);
439 /* NB: syncache_head has already been locked by the callout. */
440 SCH_LOCK_ASSERT(sch);
443 * In the following cycle we may remove some entries and/or
444 * advance some timeouts, so re-initialize the bucket timer.
446 sch->sch_nextc = tick + INT_MAX;
448 TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc) {
450 * We do not check if the listen socket still exists
451 * and accept the case where the listen socket may be
452 * gone by the time we resend the SYN/ACK. We do
453 * not expect this to happen often. If it does,
454 * then the RST will be sent by the time the remote
455 * host does the SYN/ACK->ACK.
457 if (TSTMP_GT(sc->sc_rxttime, tick)) {
458 if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc))
459 sch->sch_nextc = sc->sc_rxttime;
462 if (sc->sc_rxmits > V_tcp_syncache.rexmt_limit) {
463 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
464 log(LOG_DEBUG, "%s; %s: Retransmits exhausted, "
465 "giving up and removing syncache entry\n",
469 syncache_drop(sc, sch);
470 TCPSTAT_INC(tcps_sc_stale);
473 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
474 log(LOG_DEBUG, "%s; %s: Response timeout, "
475 "retransmitting (%u) SYN|ACK\n",
476 s, __func__, sc->sc_rxmits);
480 (void) syncache_respond(sc);
481 TCPSTAT_INC(tcps_sc_retransmitted);
482 syncache_timeout(sc, sch, 0);
484 if (!TAILQ_EMPTY(&(sch)->sch_bucket))
485 callout_reset(&(sch)->sch_timer, (sch)->sch_nextc - tick,
486 syncache_timer, (void *)(sch));
491 * Find an entry in the syncache.
492 * Always returns with the syncache_head locked, plus a matching entry or NULL.
494 static struct syncache *
495 syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
498 struct syncache_head *sch;
501 if (inc->inc_flags & INC_ISIPV6) {
502 sch = &V_tcp_syncache.hashbase[
503 SYNCACHE_HASH6(inc, V_tcp_syncache.hashmask)];
508 /* Circle through bucket row to find matching entry. */
509 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
510 if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
516 sch = &V_tcp_syncache.hashbase[
517 SYNCACHE_HASH(inc, V_tcp_syncache.hashmask)];
522 /* Circle through bucket row to find matching entry. */
523 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
525 if (sc->sc_inc.inc_flags & INC_ISIPV6)
528 if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
532 SCH_LOCK_ASSERT(*schp);
533 return (NULL); /* always returns with locked sch */
537 * This function is called when we get a RST for a
538 * non-existent connection, so that we can see if the
539 * connection is in the syn cache. If it is, zap it.
542 syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th)
545 struct syncache_head *sch;
548 sc = syncache_lookup(inc, &sch); /* returns locked sch */
549 SCH_LOCK_ASSERT(sch);
552 * Any RST to our SYN|ACK must not carry ACK, SYN or FIN flags.
553 * See RFC 793 page 65, section SEGMENT ARRIVES.
555 if (th->th_flags & (TH_ACK|TH_SYN|TH_FIN)) {
556 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
557 log(LOG_DEBUG, "%s; %s: Spurious RST with ACK, SYN or "
558 "FIN flag set, segment ignored\n", s, __func__);
559 TCPSTAT_INC(tcps_badrst);
564 * No corresponding connection was found in syncache.
565 * If syncookies are enabled and possibly exclusively
566 * used, or we are under memory pressure, a valid RST
567 * may not find a syncache entry. In that case we're
568 * done and no SYN|ACK retransmissions will happen.
569 * Otherwise the RST was misdirected or spoofed.
572 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
573 log(LOG_DEBUG, "%s; %s: Spurious RST without matching "
574 "syncache entry (possibly syncookie only), "
575 "segment ignored\n", s, __func__);
576 TCPSTAT_INC(tcps_badrst);
581 * If the RST bit is set, check the sequence number to see
582 * if this is a valid reset segment.
584 * In all states except SYN-SENT, all reset (RST) segments
585 * are validated by checking their SEQ-fields. A reset is
586 * valid if its sequence number is in the window.
588 * The sequence number in the reset segment is normally an
589 * echo of our outgoing acknowledgement numbers, but some hosts
590 * send a reset with the sequence number at the rightmost edge
591 * of our receive window, and we have to handle this case.
593 if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
594 SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
595 syncache_drop(sc, sch);
596 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
597 log(LOG_DEBUG, "%s; %s: Our SYN|ACK was rejected, "
598 "connection attempt aborted by remote endpoint\n",
600 TCPSTAT_INC(tcps_sc_reset);
602 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
603 log(LOG_DEBUG, "%s; %s: RST with invalid SEQ %u != "
604 "IRS %u (+WND %u), segment ignored\n",
605 s, __func__, th->th_seq, sc->sc_irs, sc->sc_wnd);
606 TCPSTAT_INC(tcps_badrst);
616 syncache_badack(struct in_conninfo *inc)
619 struct syncache_head *sch;
621 sc = syncache_lookup(inc, &sch); /* returns locked sch */
622 SCH_LOCK_ASSERT(sch);
624 syncache_drop(sc, sch);
625 TCPSTAT_INC(tcps_sc_badack);
631 syncache_unreach(struct in_conninfo *inc, struct tcphdr *th)
634 struct syncache_head *sch;
636 sc = syncache_lookup(inc, &sch); /* returns locked sch */
637 SCH_LOCK_ASSERT(sch);
641 /* If the sequence number != sc_iss, then it's a bogus ICMP msg */
642 if (ntohl(th->th_seq) != sc->sc_iss)
646 * If we've retransmitted 3 times and this is our second error,
647 * we remove the entry. Otherwise, we allow it to continue on.
648 * This prevents us from incorrectly nuking an entry during a
649 * spurious network outage.
653 if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxmits < 3 + 1) {
654 sc->sc_flags |= SCF_UNREACH;
657 syncache_drop(sc, sch);
658 TCPSTAT_INC(tcps_sc_unreach);
664 * Build a new TCP socket structure from a syncache entry.
666 static struct socket *
667 syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
669 struct inpcb *inp = NULL;
675 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
678 * Ok, create the full blown connection, and set things up
679 * as they would have been set up if we had created the
680 * connection when the SYN arrived. If we can't create
681 * the connection, abort it.
683 so = sonewconn(lso, SS_ISCONNECTED);
686 * Drop the connection; we will either send a RST or
687 * have the peer retransmit its SYN again after its
690 TCPSTAT_INC(tcps_listendrop);
691 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
692 log(LOG_DEBUG, "%s; %s: Socket create failed "
693 "due to limits or memory shortage\n",
700 mac_socketpeer_set_from_mbuf(m, so);
704 inp->inp_inc.inc_fibnum = so->so_fibnum;
706 INP_HASH_WLOCK(&V_tcbinfo);
708 /* Insert new socket into PCB hash list. */
709 inp->inp_inc.inc_flags = sc->sc_inc.inc_flags;
711 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
712 inp->in6p_laddr = sc->sc_inc.inc6_laddr;
714 inp->inp_vflag &= ~INP_IPV6;
715 inp->inp_vflag |= INP_IPV4;
717 inp->inp_laddr = sc->sc_inc.inc_laddr;
723 * If there's an mbuf and it has a flowid, then let's initialise the
724 * inp with that particular flowid.
726 if (m != NULL && m->m_flags & M_FLOWID) {
727 inp->inp_flags |= INP_HW_FLOWID;
728 inp->inp_flags &= ~INP_SW_FLOWID;
729 inp->inp_flowid = m->m_pkthdr.flowid;
733 * Install in the reservation hash table for now, but don't yet
734 * install a connection group since the full 4-tuple isn't yet
737 inp->inp_lport = sc->sc_inc.inc_lport;
738 if ((error = in_pcbinshash_nopcbgroup(inp)) != 0) {
740 * Undo the assignments above if we failed to
741 * put the PCB on the hash lists.
744 if (sc->sc_inc.inc_flags & INC_ISIPV6)
745 inp->in6p_laddr = in6addr_any;
748 inp->inp_laddr.s_addr = INADDR_ANY;
750 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
751 log(LOG_DEBUG, "%s; %s: in_pcbinshash failed "
756 INP_HASH_WUNLOCK(&V_tcbinfo);
760 /* Copy old policy into new socket's. */
761 if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
762 printf("syncache_socket: could not copy policy\n");
765 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
766 struct inpcb *oinp = sotoinpcb(lso);
767 struct in6_addr laddr6;
768 struct sockaddr_in6 sin6;
770 * Inherit socket options from the listening socket.
771 * Note that in6p_inputopts are not (and should not be)
772 * copied, since it stores previously received options and is
773 * used to detect if each new option is different than the
774 * previous one and hence should be passed to a user.
775 * If we copied in6p_inputopts, a user would not be able to
776 * receive options just after calling the accept system call.
778 inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
779 if (oinp->in6p_outputopts)
780 inp->in6p_outputopts =
781 ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);
783 sin6.sin6_family = AF_INET6;
784 sin6.sin6_len = sizeof(sin6);
785 sin6.sin6_addr = sc->sc_inc.inc6_faddr;
786 sin6.sin6_port = sc->sc_inc.inc_fport;
787 sin6.sin6_flowinfo = sin6.sin6_scope_id = 0;
788 laddr6 = inp->in6p_laddr;
789 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
790 inp->in6p_laddr = sc->sc_inc.inc6_laddr;
791 if ((error = in6_pcbconnect_mbuf(inp, (struct sockaddr *)&sin6,
792 thread0.td_ucred, m)) != 0) {
793 inp->in6p_laddr = laddr6;
794 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
795 log(LOG_DEBUG, "%s; %s: in6_pcbconnect failed "
800 INP_HASH_WUNLOCK(&V_tcbinfo);
803 /* Override flowlabel from in6_pcbconnect. */
804 inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
805 inp->inp_flow |= sc->sc_flowlabel;
808 #if defined(INET) && defined(INET6)
813 struct in_addr laddr;
814 struct sockaddr_in sin;
816 inp->inp_options = (m) ? ip_srcroute(m) : NULL;
818 if (inp->inp_options == NULL) {
819 inp->inp_options = sc->sc_ipopts;
820 sc->sc_ipopts = NULL;
823 sin.sin_family = AF_INET;
824 sin.sin_len = sizeof(sin);
825 sin.sin_addr = sc->sc_inc.inc_faddr;
826 sin.sin_port = sc->sc_inc.inc_fport;
827 bzero((caddr_t)sin.sin_zero, sizeof(sin.sin_zero));
828 laddr = inp->inp_laddr;
829 if (inp->inp_laddr.s_addr == INADDR_ANY)
830 inp->inp_laddr = sc->sc_inc.inc_laddr;
831 if ((error = in_pcbconnect_mbuf(inp, (struct sockaddr *)&sin,
832 thread0.td_ucred, m)) != 0) {
833 inp->inp_laddr = laddr;
834 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
835 log(LOG_DEBUG, "%s; %s: in_pcbconnect failed "
840 INP_HASH_WUNLOCK(&V_tcbinfo);
845 INP_HASH_WUNLOCK(&V_tcbinfo);
847 tcp_state_change(tp, TCPS_SYN_RECEIVED);
848 tp->iss = sc->sc_iss;
849 tp->irs = sc->sc_irs;
852 tp->snd_wl1 = sc->sc_irs;
853 tp->snd_max = tp->iss + 1;
854 tp->snd_nxt = tp->iss + 1;
855 tp->rcv_up = sc->sc_irs + 1;
856 tp->rcv_wnd = sc->sc_wnd;
857 tp->rcv_adv += tp->rcv_wnd;
858 tp->last_ack_sent = tp->rcv_nxt;
860 tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
861 if (sc->sc_flags & SCF_NOOPT)
862 tp->t_flags |= TF_NOOPT;
864 if (sc->sc_flags & SCF_WINSCALE) {
865 tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
866 tp->snd_scale = sc->sc_requested_s_scale;
867 tp->request_r_scale = sc->sc_requested_r_scale;
869 if (sc->sc_flags & SCF_TIMESTAMP) {
870 tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
871 tp->ts_recent = sc->sc_tsreflect;
872 tp->ts_recent_age = tcp_ts_getticks();
873 tp->ts_offset = sc->sc_tsoff;
876 if (sc->sc_flags & SCF_SIGNATURE)
877 tp->t_flags |= TF_SIGNATURE;
879 if (sc->sc_flags & SCF_SACK)
880 tp->t_flags |= TF_SACK_PERMIT;
883 if (sc->sc_flags & SCF_ECN)
884 tp->t_flags |= TF_ECN_PERMIT;
887 * Set up MSS and get cached values from tcp_hostcache.
888 * This might overwrite some of the defaults we just set.
890 tcp_mss(tp, sc->sc_peer_mss);
893 * If the SYN,ACK was retransmitted, indicate that CWND should be
894 * limited to one segment in cc_conn_init().
895 * NB: sc_rxmits counts all SYN,ACK transmits, not just retransmits.
897 if (sc->sc_rxmits > 1)
902 * Allow a TOE driver to install its hooks. Note that we hold the
903 * pcbinfo lock too and that prevents tcp_usr_accept from accepting a
904 * new connection before the TOE driver has done its thing.
906 if (ADDED_BY_TOE(sc)) {
907 struct toedev *tod = sc->sc_tod;
909 tod->tod_offload_socket(tod, sc->sc_todctx, so);
913 * Copy and activate timers.
915 tp->t_keepinit = sototcpcb(lso)->t_keepinit;
916 tp->t_keepidle = sototcpcb(lso)->t_keepidle;
917 tp->t_keepintvl = sototcpcb(lso)->t_keepintvl;
918 tp->t_keepcnt = sototcpcb(lso)->t_keepcnt;
919 tcp_timer_activate(tp, TT_KEEP, TP_KEEPINIT(tp));
923 TCPSTAT_INC(tcps_accepts);
935 * This function gets called when we receive an ACK for a
936 * socket in the LISTEN state. We look up the connection
937 * in the syncache, and if it's there, we pull it out of
938 * the cache and turn it into a full-blown connection in
939 * the SYN-RECEIVED state.
942 syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
943 struct socket **lsop, struct mbuf *m)
946 struct syncache_head *sch;
951 * Global TCP locks are held because we manipulate the PCB lists
952 * and create a new socket.
954 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
955 KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK,
956 ("%s: can handle only ACK", __func__));
958 sc = syncache_lookup(inc, &sch); /* returns locked sch */
959 SCH_LOCK_ASSERT(sch);
963 * Test code for syncookies comparing the syncache stored
964 * values with the reconstructed values from the cookie.
967 syncookie_cmp(inc, sch, sc, th, to, *lsop);
972 * There is no syncache entry, so see if this ACK is
973 * a returning syncookie. To do this, first:
974 * A. See if this socket has had a syncache entry dropped in
975 * the past. We don't want to accept a bogus syncookie
976 * if we've never received a SYN.
977 * B. check that the syncookie is valid. If it is, then
978 * cobble up a fake syncache entry, and return.
980 if (!V_tcp_syncookies) {
982 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
983 log(LOG_DEBUG, "%s; %s: Spurious ACK, "
984 "segment rejected (syncookies disabled)\n",
988 bzero(&scs, sizeof(scs));
989 sc = syncookie_lookup(inc, sch, &scs, th, to, *lsop);
992 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
993 log(LOG_DEBUG, "%s; %s: Segment failed "
994 "SYNCOOKIE authentication, segment rejected "
995 "(probably spoofed)\n", s, __func__);
999 /* Pull out the entry to unlock the bucket row. */
1000 TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
1003 if (ADDED_BY_TOE(sc)) {
1004 struct toedev *tod = sc->sc_tod;
1006 tod->tod_syncache_removed(tod, sc->sc_todctx);
1013 * Segment validation:
1014 * ACK must match our initial sequence number + 1 (the SYN|ACK).
1016 if (th->th_ack != sc->sc_iss + 1) {
1017 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1018 log(LOG_DEBUG, "%s; %s: ACK %u != ISS+1 %u, segment "
1019 "rejected\n", s, __func__, th->th_ack, sc->sc_iss);
1024 * The SEQ must fall in the window starting at the received
1025 * initial receive sequence number + 1 (the SYN).
1027 if (SEQ_LEQ(th->th_seq, sc->sc_irs) ||
1028 SEQ_GT(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
1029 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1030 log(LOG_DEBUG, "%s; %s: SEQ %u != IRS+1 %u, segment "
1031 "rejected\n", s, __func__, th->th_seq, sc->sc_irs);
1036 * If timestamps were not negotiated during SYN/ACK they
1037 * must not appear on any segment during this session.
1039 if (!(sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS)) {
1040 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1041 log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
1042 "segment rejected\n", s, __func__);
1047 * If timestamps were negotiated during SYN/ACK they should
1048 * appear on every segment during this session.
1049 * XXXAO: This is only informal as there have been unverified
1050 * reports of non-compliant stacks.
1052 if ((sc->sc_flags & SCF_TIMESTAMP) && !(to->to_flags & TOF_TS)) {
1053 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1054 log(LOG_DEBUG, "%s; %s: Timestamp missing, "
1055 "no action\n", s, __func__);
1062 * If timestamps were negotiated the reflected timestamp
1063 * must be equal to what we actually sent in the SYN|ACK.
1065 if ((to->to_flags & TOF_TS) && to->to_tsecr != sc->sc_ts) {
1066 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1067 log(LOG_DEBUG, "%s; %s: TSECR %u != TS %u, "
1068 "segment rejected\n",
1069 s, __func__, to->to_tsecr, sc->sc_ts);
1073 *lsop = syncache_socket(sc, *lsop, m);
1076 TCPSTAT_INC(tcps_sc_aborted);
1078 TCPSTAT_INC(tcps_sc_completed);
1080 /* how do we find the inp for the new socket? */
1085 if (sc != NULL && sc != &scs)
1094 * Given a LISTEN socket and an inbound SYN request, add
1095 * this to the syn cache, and send back a segment:
1096 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
1099 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
1100 * Doing so would require that we hold onto the data and deliver it
1101 * to the application. However, if we are the target of a SYN-flood
1102 * DoS attack, an attacker could send data which would eventually
1103 * consume all available buffer space if it were ACKed. By not ACKing
1104 * the data, we avoid this DoS scenario.
1107 syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
1108 struct inpcb *inp, struct socket **lsop, struct mbuf *m, void *tod,
1113 struct syncache *sc = NULL;
1114 struct syncache_head *sch;
1115 struct mbuf *ipopts = NULL;
1117 int win, sb_hiwat, ip_ttl, ip_tos;
1120 int autoflowlabel = 0;
1123 struct label *maclabel;
1125 struct syncache scs;
1128 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1129 INP_WLOCK_ASSERT(inp); /* listen socket */
1130 KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_SYN,
1131 ("%s: unexpected tcp flags", __func__));
1134 * Combine all so/tp operations very early to drop the INP lock as
1139 cred = crhold(so->so_cred);
1142 if ((inc->inc_flags & INC_ISIPV6) &&
1143 (inp->inp_flags & IN6P_AUTOFLOWLABEL))
1146 ip_ttl = inp->inp_ip_ttl;
1147 ip_tos = inp->inp_ip_tos;
1148 win = sbspace(&so->so_rcv);
1149 sb_hiwat = so->so_rcv.sb_hiwat;
1150 ltflags = (tp->t_flags & (TF_NOOPT | TF_SIGNATURE));
1152 /* By the time we drop the lock these should no longer be used. */
1157 if (mac_syncache_init(&maclabel) != 0) {
1159 INP_INFO_WUNLOCK(&V_tcbinfo);
1162 mac_syncache_create(maclabel, inp);
1165 INP_INFO_WUNLOCK(&V_tcbinfo);
1168 * Remember the IP options, if any.
1171 if (!(inc->inc_flags & INC_ISIPV6))
1174 ipopts = (m) ? ip_srcroute(m) : NULL;
1180 * See if we already have an entry for this connection.
1181 * If we do, resend the SYN,ACK, and reset the retransmit timer.
1183 * XXX: should the syncache be re-initialized with the contents
1184 * of the new SYN here (which may have different options?)
1186 * XXX: We do not check the sequence number to see if this is a
1187 * real retransmit or a new connection attempt. The question is
1188 * how to handle such a case; either ignore it as spoofed, or
1189 * drop the current entry and create a new one?
1191 sc = syncache_lookup(inc, &sch); /* returns locked entry */
1192 SCH_LOCK_ASSERT(sch);
1194 TCPSTAT_INC(tcps_sc_dupsyn);
1197 * If we were remembering a previous source route,
1198 * forget it and use the new one we've been given.
1201 (void) m_free(sc->sc_ipopts);
1202 sc->sc_ipopts = ipopts;
1205 * Update timestamp if present.
1207 if ((sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS))
1208 sc->sc_tsreflect = to->to_tsval;
1210 sc->sc_flags &= ~SCF_TIMESTAMP;
1213 * Since we have already unconditionally allocated label
1214 * storage, free it up. The syncache entry will already
1215 * have an initialized label we can use.
1217 mac_syncache_destroy(&maclabel);
1219 /* Retransmit SYN|ACK and reset retransmit count. */
1220 if ((s = tcp_log_addrs(&sc->sc_inc, th, NULL, NULL))) {
1221 log(LOG_DEBUG, "%s; %s: Received duplicate SYN, "
1222 "resetting timer and retransmitting SYN|ACK\n",
1226 if (syncache_respond(sc) == 0) {
1228 syncache_timeout(sc, sch, 1);
1229 TCPSTAT_INC(tcps_sndacks);
1230 TCPSTAT_INC(tcps_sndtotal);
1236 sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
1239 * The zone allocator couldn't provide more entries.
1240 * Treat this as if the cache was full; drop the oldest
1241 * entry and insert the new one.
1243 TCPSTAT_INC(tcps_sc_zonefail);
1244 if ((sc = TAILQ_LAST(&sch->sch_bucket, sch_head)) != NULL)
1245 syncache_drop(sc, sch);
1246 sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
1248 if (V_tcp_syncookies) {
1249 bzero(&scs, sizeof(scs));
1254 (void) m_free(ipopts);
1261 * Fill in the syncache values.
1264 sc->sc_label = maclabel;
1268 sc->sc_ipopts = ipopts;
1269 bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
1271 if (!(inc->inc_flags & INC_ISIPV6))
1274 sc->sc_ip_tos = ip_tos;
1275 sc->sc_ip_ttl = ip_ttl;
1279 sc->sc_todctx = todctx;
1281 sc->sc_irs = th->th_seq;
1282 sc->sc_iss = arc4random();
1284 sc->sc_flowlabel = 0;
1287 * Initial receive window: clip sbspace to [0 .. TCP_MAXWIN].
1288 * win was derived from socket earlier in the function.
1291 win = imin(win, TCP_MAXWIN);
1294 if (V_tcp_do_rfc1323) {
1296 * A timestamp received in a SYN makes
1297 * it ok to send timestamp requests and replies.
1299 if (to->to_flags & TOF_TS) {
1300 sc->sc_tsreflect = to->to_tsval;
1301 sc->sc_ts = tcp_ts_getticks();
1302 sc->sc_flags |= SCF_TIMESTAMP;
1304 if (to->to_flags & TOF_SCALE) {
1308 * Pick the smallest possible scaling factor that
1309 * will still allow us to scale up to sb_max, aka
1310 * kern.ipc.maxsockbuf.
1312 * We do this because there are broken firewalls that
1313 * will corrupt the window scale option, leading to
1314 * the other endpoint believing that our advertised
1315 * window is unscaled. At scale factors larger than
1316 * 5 the unscaled window will drop below 1500 bytes,
1317 * leading to serious problems when traversing these
1320 * With the default maxsockbuf of 256K, a scale factor
1321 * of 3 will be chosen by this algorithm. Those who
1322 * choose a larger maxsockbuf should watch out
1323 * for the compatibility problems mentioned above.
1325 * RFC1323: The Window field in a SYN (i.e., a <SYN>
1326 * or <SYN,ACK>) segment itself is never scaled.
1328 while (wscale < TCP_MAX_WINSHIFT &&
1329 (TCP_MAXWIN << wscale) < sb_max)
1331 sc->sc_requested_r_scale = wscale;
1332 sc->sc_requested_s_scale = to->to_wscale;
1333 sc->sc_flags |= SCF_WINSCALE;
1336 #ifdef TCP_SIGNATURE
1338 * If listening socket requested TCP digests, and received SYN
1339 * contains the option, flag this in the syncache so that
1340 * syncache_respond() will do the right thing with the SYN+ACK.
1341 * XXX: Currently we always record the option by default and will
1342 * attempt to use it in syncache_respond().
1344 if (to->to_flags & TOF_SIGNATURE || ltflags & TF_SIGNATURE)
1345 sc->sc_flags |= SCF_SIGNATURE;
1347 if (to->to_flags & TOF_SACKPERM)
1348 sc->sc_flags |= SCF_SACK;
1349 if (to->to_flags & TOF_MSS)
1350 sc->sc_peer_mss = to->to_mss; /* peer mss may be zero */
1351 if (ltflags & TF_NOOPT)
1352 sc->sc_flags |= SCF_NOOPT;
1353 if ((th->th_flags & (TH_ECE|TH_CWR)) && V_tcp_do_ecn)
1354 sc->sc_flags |= SCF_ECN;
1356 if (V_tcp_syncookies)
1357 sc->sc_iss = syncookie_generate(sch, sc);
1359 if (autoflowlabel) {
1360 if (V_tcp_syncookies)
1361 sc->sc_flowlabel = sc->sc_iss;
1363 sc->sc_flowlabel = ip6_randomflowlabel();
1364 sc->sc_flowlabel = htonl(sc->sc_flowlabel) & IPV6_FLOWLABEL_MASK;
1370 * Do a standard 3-way handshake.
1372 if (syncache_respond(sc) == 0) {
1373 if (V_tcp_syncookies && V_tcp_syncookiesonly && sc != &scs)
1375 else if (sc != &scs)
1376 syncache_insert(sc, sch); /* locks and unlocks sch */
1377 TCPSTAT_INC(tcps_sndacks);
1378 TCPSTAT_INC(tcps_sndtotal);
1382 TCPSTAT_INC(tcps_sc_dropped);
1390 mac_syncache_destroy(&maclabel);
1400 syncache_respond(struct syncache *sc)
1402 struct ip *ip = NULL;
1404 struct tcphdr *th = NULL;
1405 int optlen, error = 0; /* Make compiler happy */
1406 u_int16_t hlen, tlen, mssopt;
1409 struct ip6_hdr *ip6 = NULL;
1414 (sc->sc_inc.inc_flags & INC_ISIPV6) ? sizeof(struct ip6_hdr) :
1417 tlen = hlen + sizeof(struct tcphdr);
1419 /* Determine MSS we advertise to the other end of the connection. */
1420 mssopt = tcp_mssopt(&sc->sc_inc);
1421 if (sc->sc_peer_mss)
1422 mssopt = max(min(sc->sc_peer_mss, mssopt), V_tcp_minmss);
1424 /* XXX: Assume that the entire packet will fit in a header mbuf. */
1425 KASSERT(max_linkhdr + tlen + TCP_MAXOLEN <= MHLEN,
1426 ("syncache: mbuf too small"));
1428 /* Create the IP+TCP header from scratch. */
1429 m = m_gethdr(M_NOWAIT, MT_DATA);
1433 mac_syncache_create_mbuf(sc->sc_label, m);
1435 m->m_data += max_linkhdr;
1437 m->m_pkthdr.len = tlen;
1438 m->m_pkthdr.rcvif = NULL;
1441 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
1442 ip6 = mtod(m, struct ip6_hdr *);
1443 ip6->ip6_vfc = IPV6_VERSION;
1444 ip6->ip6_nxt = IPPROTO_TCP;
1445 ip6->ip6_src = sc->sc_inc.inc6_laddr;
1446 ip6->ip6_dst = sc->sc_inc.inc6_faddr;
1447 ip6->ip6_plen = htons(tlen - hlen);
1448 /* ip6_hlim is set after checksum */
1449 ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
1450 ip6->ip6_flow |= sc->sc_flowlabel;
1452 th = (struct tcphdr *)(ip6 + 1);
1455 #if defined(INET6) && defined(INET)
1460 ip = mtod(m, struct ip *);
1461 ip->ip_v = IPVERSION;
1462 ip->ip_hl = sizeof(struct ip) >> 2;
1463 ip->ip_len = htons(tlen);
1467 ip->ip_p = IPPROTO_TCP;
1468 ip->ip_src = sc->sc_inc.inc_laddr;
1469 ip->ip_dst = sc->sc_inc.inc_faddr;
1470 ip->ip_ttl = sc->sc_ip_ttl;
1471 ip->ip_tos = sc->sc_ip_tos;
1474 * See if we should do MTU discovery. Route lookups are
1475 * expensive, so we will only unset the DF bit if either:
1477 * 1) path_mtu_discovery is disabled
1478 * 2) the SCF_UNREACH flag has been set
1480 if (V_path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
1481 ip->ip_off |= htons(IP_DF);
1483 th = (struct tcphdr *)(ip + 1);
1486 th->th_sport = sc->sc_inc.inc_lport;
1487 th->th_dport = sc->sc_inc.inc_fport;
1489 th->th_seq = htonl(sc->sc_iss);
1490 th->th_ack = htonl(sc->sc_irs + 1);
1491 th->th_off = sizeof(struct tcphdr) >> 2;
1493 th->th_flags = TH_SYN|TH_ACK;
1494 th->th_win = htons(sc->sc_wnd);
1497 if (sc->sc_flags & SCF_ECN) {
1498 th->th_flags |= TH_ECE;
1499 TCPSTAT_INC(tcps_ecn_shs);
1502 /* Tack on the TCP options. */
1503 if ((sc->sc_flags & SCF_NOOPT) == 0) {
1507 to.to_flags = TOF_MSS;
1508 if (sc->sc_flags & SCF_WINSCALE) {
1509 to.to_wscale = sc->sc_requested_r_scale;
1510 to.to_flags |= TOF_SCALE;
1512 if (sc->sc_flags & SCF_TIMESTAMP) {
1513 /* Virgin timestamp or TCP cookie enhanced one. */
1514 to.to_tsval = sc->sc_ts;
1515 to.to_tsecr = sc->sc_tsreflect;
1516 to.to_flags |= TOF_TS;
1518 if (sc->sc_flags & SCF_SACK)
1519 to.to_flags |= TOF_SACKPERM;
1520 #ifdef TCP_SIGNATURE
1521 if (sc->sc_flags & SCF_SIGNATURE)
1522 to.to_flags |= TOF_SIGNATURE;
1524 optlen = tcp_addoptions(&to, (u_char *)(th + 1));
1526 /* Adjust headers by option size. */
1527 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
1529 m->m_pkthdr.len += optlen;
1531 #ifdef TCP_SIGNATURE
1532 if (sc->sc_flags & SCF_SIGNATURE)
1533 tcp_signature_compute(m, 0, 0, optlen,
1534 to.to_signature, IPSEC_DIR_OUTBOUND);
1537 if (sc->sc_inc.inc_flags & INC_ISIPV6)
1538 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) + optlen);
1541 ip->ip_len = htons(ntohs(ip->ip_len) + optlen);
1545 M_SETFIB(m, sc->sc_inc.inc_fibnum);
1546 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
1548 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
1549 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
1550 th->th_sum = in6_cksum_pseudo(ip6, tlen + optlen - hlen,
1552 ip6->ip6_hlim = in6_selecthlim(NULL, NULL);
1554 if (ADDED_BY_TOE(sc)) {
1555 struct toedev *tod = sc->sc_tod;
1557 error = tod->tod_syncache_respond(tod, sc->sc_todctx, m);
1562 error = ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
1565 #if defined(INET6) && defined(INET)
1570 m->m_pkthdr.csum_flags = CSUM_TCP;
1571 th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1572 htons(tlen + optlen - hlen + IPPROTO_TCP));
1574 if (ADDED_BY_TOE(sc)) {
1575 struct toedev *tod = sc->sc_tod;
1577 error = tod->tod_syncache_respond(tod, sc->sc_todctx, m);
1582 error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, NULL);
1589 * The purpose of syncookies is to handle spoofed SYN flooding DoS attacks
1590 * that exceed the capacity of the syncache by avoiding the storage of any
1591 * of the SYNs we receive. Syncookies defend against blind SYN flooding
1592 * attacks where the attacker does not have access to our responses.
1594 * Syncookies encode and include all necessary information about the
1595 * connection setup within the SYN|ACK that we send back. That way we
1596 * can avoid keeping any local state until the ACK to our SYN|ACK returns
1597 * (if ever). Normally the syncache and syncookies are running in parallel
1598 * with the latter taking over when the former is exhausted. When a matching
1599 * syncache entry is found, the syncookie is ignored.
1601 * The only reliable information persisting through the 3WHS is our initial sequence
1602 * number ISS of 32 bits. Syncookies embed a cryptographically sufficiently
1603 * strong hash (MAC) value and a few bits of TCP SYN options in the ISS
1604 * of our SYN|ACK. The MAC can be recomputed when the ACK to our SYN|ACK
1605 * returns and signifies a legitimate connection if it matches the ACK.
1607 * The available space of 32 bits to store the hash and to encode the SYN
1608 * option information is very tight and we should have at least 24 bits for
1609 * the MAC to keep the number of guesses by blind spoofing reasonably high.
1611 * SYN option information we have to encode to fully restore a connection:
1612 * MSS: is important to choose an optimal segment size to avoid IP level
1613 * fragmentation along the path. The common MSS values can be encoded
1614 * in a 3-bit table. Uncommon values are captured by the next lower value
1615 * in the table leading to a slight increase in packetization overhead.
1616 * WSCALE: is necessary to allow large windows to be used for high delay-
1617 * bandwidth product links. Not scaling the window when it was initially
1618 * negotiated is bad for performance as lack of scaling further decreases
1619 * the apparent available send window. We only need to encode the WSCALE
1620 * we received from the remote end. Our end can be recalculated at any
1621 * time. The common WSCALE values can be encoded in a 3-bit table.
1622 * Uncommon values are captured by the next lower value in the table
1623 * making us under-estimate the available window size halving our
1624 * theoretically possible maximum throughput for that connection.
1625 * SACK: Greatly assists in packet loss recovery and requires 1 bit.
1626 * TIMESTAMP and SIGNATURE are not encoded because they are permanent options
1627 * that are included in all segments on a connection. We enable them when
1630 * Security of syncookies and attack vectors:
1632 * The MAC is computed over (faddr||laddr||fport||lport||irs||flags||secmod)
1633 * together with the global secret to make it unique per connection attempt.
1634 * Thus any change of any of those parameters results in a different MAC output
1635 * in an unpredictable way unless a collision is encountered. 24 bits of the
1636 * MAC are embedded into the ISS.
1638 * To prevent replay attacks two rotating global secrets are updated with a
1639 * new random value every 15 seconds. The life-time of a syncookie is thus
1642 * Vector 1: Attacking the secret. This requires finding a weakness in the
1643 * MAC itself or the way it is used here. The attacker can do a chosen plain
1644 * text attack by varying and testing all the parameters under his control.
1645 * The strength depends on the size and randomness of the secret, and the
1646 * cryptographic security of the MAC function. Due to the constant updating
1647 * of the secret the attacker has at most 29.999 seconds to find the secret
1648 * and launch spoofed connections. After that he has to start all over again.
1650 * Vector 2: Collision attack on the MAC of a single ACK. With a 24 bit MAC
1651 * size an average of 4,823 attempts are required for a 50% chance of success
1652 * to spoof a single syncookie (birthday collision paradox). However the
1653 * attacker is blind and doesn't know if one of his attempts succeeded unless
1654 * he has a side channel to infer success from. A single connection setup
1655 * success rate of 90% requires 8,790 packets, 99.99% requires 17,578 packets.
1656 * This many attempts are required for each blindly spoofed connection. For
1657 * every additional spoofed connection he has to launch another N attempts.
1658 * Thus, for a sustained rate of 100 spoofed connections per second, approximately
1659 * 1,800,000 packets per second would have to be sent.
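* (For reference, these figures follow from the usual birthday bound: with a
* t-bit MAC a collision probability p is reached after roughly
* n ~= sqrt(2 * 2^t * ln(1/(1 - p))) guesses, which for t = 24 gives about
* 4,823 packets for p = 0.5, 8,790 for p = 0.9 and 17,578 for p = 0.9999.)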
1661 * NB: The MAC function should be fast so that it doesn't become a CPU
1662 * exhaustion attack vector itself.
1665 * RFC4987 TCP SYN Flooding Attacks and Common Mitigations
1666 * SYN cookies were first proposed by cryptographer Dan J. Bernstein in 1996
1667 * http://cr.yp.to/syncookies.html (overview)
1668 * http://cr.yp.to/syncookies/archive (details)
1671 * Schematic construction of a syncookie enabled Initial Sequence Number:
1673 * 12345678901234567890123456789012
1674 * |xxxxxxxxxxxxxxxxxxxxxxxxWWWMMMSP|
1676 * x 24 MAC (truncated)
1677 * W 3 Send Window Scale index
1678 * M 3 MSS index
1679 * S 1 SACK permitted
1680 * P 1 Odd/even secret
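/*
 * Rough sketch of how this layout is applied (it mirrors what
 * syncookie_generate() and syncookie_lookup() below actually do): the low
 * byte of the ISS carries the 8 flag/index bits XORed with the top byte of
 * the hash,
 *
 *	iss = (hash & ~0xff) | (cookie.cookie ^ (hash >> 24));
 *
 * and the returning ACK (minus 1) lets the flag byte be recovered with
 *
 *	cookie.cookie = (ack & 0xff) ^ (ack >> 24);
 *
 * after which the upper 24 bits are compared against the recomputed MAC.
 */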
1684 * Distribution and probability of certain MSS values. Those in between are
1685 * rounded down to the next lower one.
1686 * [An Analysis of TCP Maximum Segment Sizes, S. Alcock and R. Nelson, 2011]
1687 * .2% .3% 5% 7% 7% 20% 15% 45%
1689 static int tcp_sc_msstab[] = { 216, 536, 1200, 1360, 1400, 1440, 1452, 1460 };
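/*
 * For illustration: a peer MSS of 1420 has no exact entry above and is
 * captured by the next lower value, 1400 (index 4); values below 216 end up
 * at index 0.
 */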
1692 * Distribution and probability of certain WSCALE values. We have to map the
1693 * (send) window scale (shift) option with a range of 0-14 from 4 bits into 3
1694 * bits based on prevalence of certain values. Values we don't have an exact
1695 * match for are rounded down to the next lower one, letting us under-estimate
1696 * the true available window. At the moment this would happen only for the
1697 * very uncommon values 3, 5 and those above 8 (more than 16MB socket buffer
1698 * and window size). The absence of the WSCALE option (no scaling in either
1699 * direction) is encoded with index zero.
1700 * [WSCALE values histograms, Allman, 2012]
1701 * X 10 10 35 5 6 14 10% by host
1702 * X 11 4 5 5 18 49 3% by connections
1704 static int tcp_sc_wstab[] = { 0, 0, 1, 2, 4, 6, 7, 8 };
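/*
 * For illustration: a received send window scale of 5 has no exact entry and
 * maps to index 4 (value 4), under-estimating the peer's scale by one shift;
 * index 0 doubles as "no WSCALE option seen".
 */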
1707 * Compute the MAC for the SYN cookie. SIPHASH-2-4 is chosen for its speed
1708 * and good cryptographic properties.
1711 syncookie_mac(struct in_conninfo *inc, tcp_seq irs, uint8_t flags,
1712 uint8_t *secbits, uintptr_t secmod)
1715 uint32_t siphash[2];
1717 SipHash24_Init(&ctx);
1718 SipHash_SetKey(&ctx, secbits);
1719 switch (inc->inc_flags & INC_ISIPV6) {
1722 SipHash_Update(&ctx, &inc->inc_faddr, sizeof(inc->inc_faddr));
1723 SipHash_Update(&ctx, &inc->inc_laddr, sizeof(inc->inc_laddr));
1728 SipHash_Update(&ctx, &inc->inc6_faddr, sizeof(inc->inc6_faddr));
1729 SipHash_Update(&ctx, &inc->inc6_laddr, sizeof(inc->inc6_laddr));
1733 SipHash_Update(&ctx, &inc->inc_fport, sizeof(inc->inc_fport));
1734 SipHash_Update(&ctx, &inc->inc_lport, sizeof(inc->inc_lport));
1735 SipHash_Update(&ctx, &flags, sizeof(flags));
1736 SipHash_Update(&ctx, &secmod, sizeof(secmod));
1737 SipHash_Final((u_int8_t *)&siphash, &ctx);
1739 return (siphash[0] ^ siphash[1]);
1743 syncookie_generate(struct syncache_head *sch, struct syncache *sc)
1745 u_int i, mss, secbit, wscale;
1748 union syncookie cookie;
1750 SCH_LOCK_ASSERT(sch);
1754 /* Map our computed MSS into the 3-bit index. */
1755 mss = min(tcp_mssopt(&sc->sc_inc), max(sc->sc_peer_mss, V_tcp_minmss));
1756 for (i = sizeof(tcp_sc_msstab) / sizeof(*tcp_sc_msstab) - 1;
1757 tcp_sc_msstab[i] > mss && i > 0;
1760 cookie.flags.mss_idx = i;
1763 * Map the send window scale into the 3-bit index but only if
1764 * the wscale option was received.
1766 if (sc->sc_flags & SCF_WINSCALE) {
1767 wscale = sc->sc_requested_s_scale;
1768 for (i = sizeof(tcp_sc_wstab) / sizeof(*tcp_sc_wstab) - 1;
1769 tcp_sc_wstab[i] > wscale && i > 0;
1772 cookie.flags.wscale_idx = i;
1775 /* Can we do SACK? */
1776 if (sc->sc_flags & SCF_SACK)
1777 cookie.flags.sack_ok = 1;
1779 /* Which of the two secrets to use. */
1780 secbit = sch->sch_sc->secret.oddeven & 0x1;
1781 cookie.flags.odd_even = secbit;
1783 secbits = sch->sch_sc->secret.key[secbit];
1784 hash = syncookie_mac(&sc->sc_inc, sc->sc_irs, cookie.cookie, secbits,
1788 * Put the flags into the hash and XOR them to get better ISS number
1789 * variance. This doesn't enhance the cryptographic strength and is
1790 * done to prevent the 8 cookie bits from showing up directly on the
1794 iss |= cookie.cookie ^ (hash >> 24);
1796 /* Randomize the timestamp. */
1797 if (sc->sc_flags & SCF_TIMESTAMP) {
1798 sc->sc_ts = arc4random();
1799 sc->sc_tsoff = sc->sc_ts - tcp_ts_getticks();
1802 TCPSTAT_INC(tcps_sc_sendcookie);
1806 static struct syncache *
1807 syncookie_lookup(struct in_conninfo *inc, struct syncache_head *sch,
1808 struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
1814 int wnd, wscale = 0;
1815 union syncookie cookie;
1817 SCH_LOCK_ASSERT(sch);
1820 * Pull information out of SYN-ACK/ACK and revert sequence number
1823 ack = th->th_ack - 1;
1824 seq = th->th_seq - 1;
1827 * Unpack the flags containing enough information to restore the
1830 cookie.cookie = (ack & 0xff) ^ (ack >> 24);
1832 /* Which of the two secrets to use. */
1833 secbits = sch->sch_sc->secret.key[cookie.flags.odd_even];
1835 hash = syncookie_mac(inc, seq, cookie.cookie, secbits, (uintptr_t)sch);
1837 /* The recomputed hash matches the ACK if this was a genuine cookie. */
1838 if ((ack & ~0xff) != (hash & ~0xff))
1841 /* Fill in the syncache values. */
1843 bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
1844 sc->sc_ipopts = NULL;
1849 switch (inc->inc_flags & INC_ISIPV6) {
1852 sc->sc_ip_ttl = sotoinpcb(lso)->inp_ip_ttl;
1853 sc->sc_ip_tos = sotoinpcb(lso)->inp_ip_tos;
1858 if (sotoinpcb(lso)->inp_flags & IN6P_AUTOFLOWLABEL)
1859 sc->sc_flowlabel = sc->sc_iss & IPV6_FLOWLABEL_MASK;
1864 sc->sc_peer_mss = tcp_sc_msstab[cookie.flags.mss_idx];
1866 /* We can simply recompute the receive window scale we sent earlier. */
1867 while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < sb_max)
1870 /* Only use wscale if it was enabled in the original SYN. */
1871 if (cookie.flags.wscale_idx > 0) {
1872 sc->sc_requested_r_scale = wscale;
1873 sc->sc_requested_s_scale = tcp_sc_wstab[cookie.flags.wscale_idx];
1874 sc->sc_flags |= SCF_WINSCALE;
1877 wnd = sbspace(&lso->so_rcv);
1879 wnd = imin(wnd, TCP_MAXWIN);
1882 if (cookie.flags.sack_ok)
1883 sc->sc_flags |= SCF_SACK;
1885 if (to->to_flags & TOF_TS) {
1886 sc->sc_flags |= SCF_TIMESTAMP;
1887 sc->sc_tsreflect = to->to_tsval;
1888 sc->sc_ts = to->to_tsecr;
1889 sc->sc_tsoff = to->to_tsecr - tcp_ts_getticks();
1892 if (to->to_flags & TOF_SIGNATURE)
1893 sc->sc_flags |= SCF_SIGNATURE;
1897 TCPSTAT_INC(tcps_sc_recvcookie);
1903 syncookie_cmp(struct in_conninfo *inc, struct syncache_head *sch,
1904 struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
1907 struct syncache scs, *scx;
1910 bzero(&scs, sizeof(scs));
1911 scx = syncookie_lookup(inc, sch, &scs, th, to, lso);
1913 if ((s = tcp_log_addrs(inc, th, NULL, NULL)) == NULL)
1917 if (sc->sc_peer_mss != scx->sc_peer_mss)
1918 log(LOG_DEBUG, "%s; %s: mss different %i vs %i\n",
1919 s, __func__, sc->sc_peer_mss, scx->sc_peer_mss);
1921 if (sc->sc_requested_r_scale != scx->sc_requested_r_scale)
1922 log(LOG_DEBUG, "%s; %s: rwscale different %i vs %i\n",
1923 s, __func__, sc->sc_requested_r_scale,
1924 scx->sc_requested_r_scale);
1926 if (sc->sc_requested_s_scale != scx->sc_requested_s_scale)
1927 log(LOG_DEBUG, "%s; %s: swscale different %i vs %i\n",
1928 s, __func__, sc->sc_requested_s_scale,
1929 scx->sc_requested_s_scale);
1931 if ((sc->sc_flags & SCF_SACK) != (scx->sc_flags & SCF_SACK))
1932 log(LOG_DEBUG, "%s; %s: SACK different\n", s, __func__);
1939 #endif /* INVARIANTS */
1942 syncookie_reseed(void *arg)
1944 struct tcp_syncache *sc = arg;
1949 * Reseeding the secret doesn't have to be protected by a lock.
1950 * It must only be ensured that the new random values are visible
1951 * to all CPUs in an SMP environment. The atomic with release
1952 * semantics ensures that.
1954 secbit = (sc->secret.oddeven & 0x1) ? 0 : 1;
1955 secbits = sc->secret.key[secbit];
1956 arc4rand(secbits, SYNCOOKIE_SECRET_SIZE, 0);
1957 atomic_add_rel_int(&sc->secret.oddeven, 1);
1959 /* Reschedule ourself. */
1960 callout_schedule(&sc->secret.reseed, SYNCOOKIE_LIFETIME * hz);
1964 * Returns the current number of syncache entries. This number
1965 * will probably change before you get around to calling
1969 syncache_pcbcount(void)
1971 struct syncache_head *sch;
1974 for (count = 0, i = 0; i < V_tcp_syncache.hashsize; i++) {
1975 /* No need to lock for a read. */
1976 sch = &V_tcp_syncache.hashbase[i];
1977 count += sch->sch_length;
1983 * Exports the syncache entries to userland so that netstat can display
1984 * them alongside the other sockets. This function is intended to be
1985 * called only from tcp_pcblist.
1987 * Due to concurrency on an active system, the number of pcbs exported
1988 * may have no relation to max_pcbs. max_pcbs merely indicates the
1989 * amount of space the caller allocated for this function to use.
1992 syncache_pcblist(struct sysctl_req *req, int max_pcbs, int *pcbs_exported)
1995 struct syncache *sc;
1996 struct syncache_head *sch;
1997 int count, error, i;
1999 for (count = 0, error = 0, i = 0; i < V_tcp_syncache.hashsize; i++) {
2000 sch = &V_tcp_syncache.hashbase[i];
2002 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
2003 if (count >= max_pcbs) {
2007 if (cr_cansee(req->td->td_ucred, sc->sc_cred) != 0)
2009 bzero(&xt, sizeof(xt));
2010 xt.xt_len = sizeof(xt);
2011 if (sc->sc_inc.inc_flags & INC_ISIPV6)
2012 xt.xt_inp.inp_vflag = INP_IPV6;
2014 xt.xt_inp.inp_vflag = INP_IPV4;
2015 bcopy(&sc->sc_inc, &xt.xt_inp.inp_inc, sizeof (struct in_conninfo));
2016 xt.xt_tp.t_inpcb = &xt.xt_inp;
2017 xt.xt_tp.t_state = TCPS_SYN_RECEIVED;
2018 xt.xt_socket.xso_protocol = IPPROTO_TCP;
2019 xt.xt_socket.xso_len = sizeof (struct xsocket);
2020 xt.xt_socket.so_type = SOCK_STREAM;
2021 xt.xt_socket.so_state = SS_ISCONNECTING;
2022 error = SYSCTL_OUT(req, &xt, sizeof xt);
2032 *pcbs_exported = count;