2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2001 McAfee, Inc.
5 * Copyright (c) 2006,2013 Andre Oppermann, Internet Business Solutions AG
8 * This software was developed for the FreeBSD Project by Jonathan Lemon
9 * and McAfee Research, the Security Research Division of McAfee, Inc. under
10 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
11 * DARPA CHATS research program. [2001 McAfee, Inc.]
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
39 #include "opt_inet6.h"
40 #include "opt_ipsec.h"
42 #include <sys/param.h>
43 #include <sys/systm.h>
45 #include <sys/refcount.h>
46 #include <sys/kernel.h>
47 #include <sys/sysctl.h>
48 #include <sys/limits.h>
50 #include <sys/mutex.h>
51 #include <sys/malloc.h>
53 #include <sys/proc.h> /* for proc0 declaration */
54 #include <sys/random.h>
55 #include <sys/socket.h>
56 #include <sys/socketvar.h>
57 #include <sys/syslog.h>
58 #include <sys/ucred.h>
61 #include <crypto/siphash/siphash.h>
66 #include <net/if_var.h>
67 #include <net/route.h>
70 #include <netinet/in.h>
71 #include <netinet/in_kdtrace.h>
72 #include <netinet/in_systm.h>
73 #include <netinet/ip.h>
74 #include <netinet/in_var.h>
75 #include <netinet/in_pcb.h>
76 #include <netinet/ip_var.h>
77 #include <netinet/ip_options.h>
79 #include <netinet/ip6.h>
80 #include <netinet/icmp6.h>
81 #include <netinet6/nd6.h>
82 #include <netinet6/ip6_var.h>
83 #include <netinet6/in6_pcb.h>
85 #include <netinet/tcp.h>
86 #include <netinet/tcp_fastopen.h>
87 #include <netinet/tcp_fsm.h>
88 #include <netinet/tcp_seq.h>
89 #include <netinet/tcp_timer.h>
90 #include <netinet/tcp_var.h>
91 #include <netinet/tcp_syncache.h>
92 #include <netinet/tcp_ecn.h>
94 #include <netinet6/tcp6_var.h>
97 #include <netinet/toecore.h>
99 #include <netinet/udp.h>
100 #include <netinet/udp_var.h>
102 #include <netipsec/ipsec_support.h>
104 #include <machine/in_cksum.h>
106 #include <security/mac/mac_framework.h>
108 VNET_DEFINE_STATIC(int, tcp_syncookies) = 1;
109 #define V_tcp_syncookies VNET(tcp_syncookies)
110 SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_VNET | CTLFLAG_RW,
111 &VNET_NAME(tcp_syncookies), 0,
112 "Use TCP SYN cookies if the syncache overflows");
114 VNET_DEFINE_STATIC(int, tcp_syncookiesonly) = 0;
115 #define V_tcp_syncookiesonly VNET(tcp_syncookiesonly)
116 SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies_only, CTLFLAG_VNET | CTLFLAG_RW,
117 &VNET_NAME(tcp_syncookiesonly), 0,
118 "Use only TCP SYN cookies");
120 VNET_DEFINE_STATIC(int, functions_inherit_listen_socket_stack) = 1;
121 #define V_functions_inherit_listen_socket_stack \
122 VNET(functions_inherit_listen_socket_stack)
123 SYSCTL_INT(_net_inet_tcp, OID_AUTO, functions_inherit_listen_socket_stack,
124 CTLFLAG_VNET | CTLFLAG_RW,
125 &VNET_NAME(functions_inherit_listen_socket_stack), 0,
126 "Inherit listen socket's stack");
129 #define ADDED_BY_TOE(sc) ((sc)->sc_tod != NULL)
132 static void syncache_drop(struct syncache *, struct syncache_head *);
133 static void syncache_free(struct syncache *);
134 static void syncache_insert(struct syncache *, struct syncache_head *);
135 static int syncache_respond(struct syncache *, const struct mbuf *, int);
136 static struct socket *syncache_socket(struct syncache *, struct socket *,
138 static void syncache_timeout(struct syncache *sc, struct syncache_head *sch,
140 static void syncache_timer(void *);
142 static uint32_t syncookie_mac(struct in_conninfo *, tcp_seq, uint8_t,
143 uint8_t *, uintptr_t);
144 static tcp_seq syncookie_generate(struct syncache_head *, struct syncache *);
145 static struct syncache
146 *syncookie_lookup(struct in_conninfo *, struct syncache_head *,
147 struct syncache *, struct tcphdr *, struct tcpopt *,
148 struct socket *, uint16_t);
149 static void syncache_pause(struct in_conninfo *);
150 static void syncache_unpause(void *);
151 static void syncookie_reseed(void *);
153 static int syncookie_cmp(struct in_conninfo *inc, struct syncache_head *sch,
154 struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
155 struct socket *lso, uint16_t port);
159 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
160 * 3 retransmits correspond to a timeout with default values of
161 * tcp_rexmit_initial * ( 1 +
162 * tcp_backoff[1] +
163 * tcp_backoff[2] +
164 * tcp_backoff[3]) + 3 * tcp_rexmit_slop,
165 * 1000 ms * (1 + 2 + 4 + 8) + 3 * 200 ms = 15600 ms,
166 * the odds are that the user has given up attempting to connect by then.
168 #define SYNCACHE_MAXREXMTS 3
170 /* Arbitrary values */
171 #define TCP_SYNCACHE_HASHSIZE 512
172 #define TCP_SYNCACHE_BUCKETLIMIT 30
174 VNET_DEFINE_STATIC(struct tcp_syncache, tcp_syncache);
175 #define V_tcp_syncache VNET(tcp_syncache)
177 static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache,
178 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
181 SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
182 &VNET_NAME(tcp_syncache.bucket_limit), 0,
183 "Per-bucket hash limit for syncache");
185 SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
186 &VNET_NAME(tcp_syncache.cache_limit), 0,
187 "Overall entry limit for syncache");
189 SYSCTL_UMA_CUR(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_VNET,
190 &VNET_NAME(tcp_syncache.zone), "Current number of entries in syncache");
192 SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
193 &VNET_NAME(tcp_syncache.hashsize), 0,
194 "Size of TCP syncache hashtable");
196 SYSCTL_BOOL(_net_inet_tcp_syncache, OID_AUTO, see_other, CTLFLAG_VNET |
197 CTLFLAG_RW, &VNET_NAME(tcp_syncache.see_other), 0,
198 "All syncache(4) entries are visible, ignoring UID/GID, jail(2) "
199 "and mac(4) checks");
202 sysctl_net_inet_tcp_syncache_rexmtlimit_check(SYSCTL_HANDLER_ARGS)
207 new = V_tcp_syncache.rexmt_limit;
208 error = sysctl_handle_int(oidp, &new, 0, req);
209 if ((error == 0) && (req->newptr != NULL)) {
210 if (new > TCP_MAXRXTSHIFT)
213 V_tcp_syncache.rexmt_limit = new;
218 SYSCTL_PROC(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit,
219 CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
220 &VNET_NAME(tcp_syncache.rexmt_limit), 0,
221 sysctl_net_inet_tcp_syncache_rexmtlimit_check, "UI",
222 "Limit on SYN/ACK retransmissions");
224 VNET_DEFINE(int, tcp_sc_rst_sock_fail) = 1;
225 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rst_on_sock_fail,
226 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_sc_rst_sock_fail), 0,
227 "Send reset on socket allocation failure");
229 static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");
231 #define SCH_LOCK(sch) mtx_lock(&(sch)->sch_mtx)
232 #define SCH_UNLOCK(sch) mtx_unlock(&(sch)->sch_mtx)
233 #define SCH_LOCK_ASSERT(sch) mtx_assert(&(sch)->sch_mtx, MA_OWNED)
236 * Requires the syncache entry to be already removed from the bucket list.
239 syncache_free(struct syncache *sc)
243 (void) m_free(sc->sc_ipopts);
247 mac_syncache_destroy(&sc->sc_label);
250 uma_zfree(V_tcp_syncache.zone, sc);
258 V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
259 V_tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
260 V_tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
261 V_tcp_syncache.hash_secret = arc4random();
263 TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
264 &V_tcp_syncache.hashsize);
265 TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
266 &V_tcp_syncache.bucket_limit);
267 if (!powerof2(V_tcp_syncache.hashsize) ||
268 V_tcp_syncache.hashsize == 0) {
269 printf("WARNING: syncache hash size is not a power of 2.\n");
270 V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
272 V_tcp_syncache.hashmask = V_tcp_syncache.hashsize - 1;
275 V_tcp_syncache.cache_limit =
276 V_tcp_syncache.hashsize * V_tcp_syncache.bucket_limit;
277 TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
278 &V_tcp_syncache.cache_limit);
280 /* Allocate the hash table. */
281 V_tcp_syncache.hashbase = malloc(V_tcp_syncache.hashsize *
282 sizeof(struct syncache_head), M_SYNCACHE, M_WAITOK | M_ZERO);
285 V_tcp_syncache.vnet = curvnet;
288 /* Initialize the hash buckets. */
289 for (i = 0; i < V_tcp_syncache.hashsize; i++) {
290 TAILQ_INIT(&V_tcp_syncache.hashbase[i].sch_bucket);
291 mtx_init(&V_tcp_syncache.hashbase[i].sch_mtx, "tcp_sc_head",
293 callout_init_mtx(&V_tcp_syncache.hashbase[i].sch_timer,
294 &V_tcp_syncache.hashbase[i].sch_mtx, 0);
295 V_tcp_syncache.hashbase[i].sch_length = 0;
296 V_tcp_syncache.hashbase[i].sch_sc = &V_tcp_syncache;
297 V_tcp_syncache.hashbase[i].sch_last_overflow =
298 -(SYNCOOKIE_LIFETIME + 1);
301 /* Create the syncache entry zone. */
302 V_tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
303 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
304 V_tcp_syncache.cache_limit = uma_zone_set_max(V_tcp_syncache.zone,
305 V_tcp_syncache.cache_limit);
307 /* Start the SYN cookie reseeder callout. */
308 callout_init(&V_tcp_syncache.secret.reseed, 1);
309 arc4rand(V_tcp_syncache.secret.key[0], SYNCOOKIE_SECRET_SIZE, 0);
310 arc4rand(V_tcp_syncache.secret.key[1], SYNCOOKIE_SECRET_SIZE, 0);
311 callout_reset(&V_tcp_syncache.secret.reseed, SYNCOOKIE_LIFETIME * hz,
312 syncookie_reseed, &V_tcp_syncache);
314 /* Initialize the pause machinery. */
315 mtx_init(&V_tcp_syncache.pause_mtx, "tcp_sc_pause", NULL, MTX_DEF);
316 callout_init_mtx(&V_tcp_syncache.pause_co, &V_tcp_syncache.pause_mtx,
318 V_tcp_syncache.pause_until = time_uptime - TCP_SYNCACHE_PAUSE_TIME;
319 V_tcp_syncache.pause_backoff = 0;
320 V_tcp_syncache.paused = false;
325 syncache_destroy(void)
327 struct syncache_head *sch;
328 struct syncache *sc, *nsc;
332 * Stop the re-seed timer before freeing resources. No need to
333 * possibly schedule it another time.
335 callout_drain(&V_tcp_syncache.secret.reseed);
337 /* Stop the SYN cache pause callout. */
338 mtx_lock(&V_tcp_syncache.pause_mtx);
339 if (callout_stop(&V_tcp_syncache.pause_co) == 0) {
340 mtx_unlock(&V_tcp_syncache.pause_mtx);
341 callout_drain(&V_tcp_syncache.pause_co);
343 mtx_unlock(&V_tcp_syncache.pause_mtx);
345 /* Cleanup hash buckets: stop timers, free entries, destroy locks. */
346 for (i = 0; i < V_tcp_syncache.hashsize; i++) {
347 sch = &V_tcp_syncache.hashbase[i];
348 callout_drain(&sch->sch_timer);
351 TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc)
352 syncache_drop(sc, sch);
354 KASSERT(TAILQ_EMPTY(&sch->sch_bucket),
355 ("%s: sch->sch_bucket not empty", __func__));
356 KASSERT(sch->sch_length == 0, ("%s: sch->sch_length %d not 0",
357 __func__, sch->sch_length));
358 mtx_destroy(&sch->sch_mtx);
361 KASSERT(uma_zone_get_cur(V_tcp_syncache.zone) == 0,
362 ("%s: cache_count not 0", __func__));
364 /* Free the allocated global resources. */
365 uma_zdestroy(V_tcp_syncache.zone);
366 free(V_tcp_syncache.hashbase, M_SYNCACHE);
367 mtx_destroy(&V_tcp_syncache.pause_mtx);
372 * Inserts a syncache entry into the specified bucket row.
373 * Locks and unlocks the syncache_head autonomously.
376 syncache_insert(struct syncache *sc, struct syncache_head *sch)
378 struct syncache *sc2;
383 * Make sure that we don't overflow the per-bucket limit.
384 * If the bucket is full, toss the oldest element.
386 if (sch->sch_length >= V_tcp_syncache.bucket_limit) {
387 KASSERT(!TAILQ_EMPTY(&sch->sch_bucket),
388 ("sch->sch_length incorrect"));
389 syncache_pause(&sc->sc_inc);
390 sc2 = TAILQ_LAST(&sch->sch_bucket, sch_head);
391 sch->sch_last_overflow = time_uptime;
392 syncache_drop(sc2, sch);
395 /* Put it into the bucket. */
396 TAILQ_INSERT_HEAD(&sch->sch_bucket, sc, sc_hash);
400 if (ADDED_BY_TOE(sc)) {
401 struct toedev *tod = sc->sc_tod;
403 tod->tod_syncache_added(tod, sc->sc_todctx);
407 /* Reinitialize the bucket row's timer. */
408 if (sch->sch_length == 1)
409 sch->sch_nextc = ticks + INT_MAX;
410 syncache_timeout(sc, sch, 1);
414 TCPSTATES_INC(TCPS_SYN_RECEIVED);
415 TCPSTAT_INC(tcps_sc_added);
419 * Remove and free entry from syncache bucket row.
420 * Expects locked syncache head.
423 syncache_drop(struct syncache *sc, struct syncache_head *sch)
426 SCH_LOCK_ASSERT(sch);
428 TCPSTATES_DEC(TCPS_SYN_RECEIVED);
429 TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
433 if (ADDED_BY_TOE(sc)) {
434 struct toedev *tod = sc->sc_tod;
436 tod->tod_syncache_removed(tod, sc->sc_todctx);
444 * Engage/reengage time on bucket row.
447 syncache_timeout(struct syncache *sc, struct syncache_head *sch, int docallout)
451 if (sc->sc_rxmits == 0)
452 rexmt = tcp_rexmit_initial;
455 tcp_rexmit_initial * tcp_backoff[sc->sc_rxmits],
456 tcp_rexmit_min, TCPTV_REXMTMAX);
457 sc->sc_rxttime = ticks + rexmt;
459 if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc)) {
460 sch->sch_nextc = sc->sc_rxttime;
462 callout_reset(&sch->sch_timer, sch->sch_nextc - ticks,
463 syncache_timer, (void *)sch);
468 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
469 * If we have retransmitted an entry the maximum number of times, expire it.
470 * One separate timer for each bucket row.
473 syncache_timer(void *xsch)
475 struct syncache_head *sch = (struct syncache_head *)xsch;
476 struct syncache *sc, *nsc;
477 struct epoch_tracker et;
482 CURVNET_SET(sch->sch_sc->vnet);
484 /* NB: syncache_head has already been locked by the callout. */
485 SCH_LOCK_ASSERT(sch);
488 * In the following cycle we may remove some entries and/or
489 * advance some timeouts, so re-initialize the bucket timer.
491 sch->sch_nextc = tick + INT_MAX;
494 * If we have paused processing, unconditionally remove
495 * all syncache entries.
497 mtx_lock(&V_tcp_syncache.pause_mtx);
498 paused = V_tcp_syncache.paused;
499 mtx_unlock(&V_tcp_syncache.pause_mtx);
501 TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc) {
503 syncache_drop(sc, sch);
507 * We do not check if the listen socket still exists
508 * and accept the case where the listen socket may be
509 * gone by the time we resend the SYN/ACK. We do
510 * not expect this to happen often. If it does,
511 * then the RST will be sent by the time the remote
512 * host does the SYN/ACK->ACK.
514 if (TSTMP_GT(sc->sc_rxttime, tick)) {
515 if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc))
516 sch->sch_nextc = sc->sc_rxttime;
519 if (sc->sc_rxmits > V_tcp_ecn_maxretries) {
520 sc->sc_flags &= ~SCF_ECN;
522 if (sc->sc_rxmits > V_tcp_syncache.rexmt_limit) {
523 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
524 log(LOG_DEBUG, "%s; %s: Retransmits exhausted, "
525 "giving up and removing syncache entry\n",
529 syncache_drop(sc, sch);
530 TCPSTAT_INC(tcps_sc_stale);
533 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
534 log(LOG_DEBUG, "%s; %s: Response timeout, "
535 "retransmitting (%u) SYN|ACK\n",
536 s, __func__, sc->sc_rxmits);
541 syncache_respond(sc, NULL, TH_SYN|TH_ACK);
543 TCPSTAT_INC(tcps_sc_retransmitted);
544 syncache_timeout(sc, sch, 0);
546 if (!TAILQ_EMPTY(&(sch)->sch_bucket))
547 callout_reset(&(sch)->sch_timer, (sch)->sch_nextc - tick,
548 syncache_timer, (void *)(sch));
553 * Returns true if the system is only using cookies at the moment.
554 * This could be due to a sysadmin decision to only use cookies, or it
555 * could be due to the system detecting an attack.
558 syncache_cookiesonly(void)
561 return (V_tcp_syncookies && (V_tcp_syncache.paused ||
562 V_tcp_syncookiesonly));
566 * Find the hash bucket for the given connection.
568 static struct syncache_head *
569 syncache_hashbucket(struct in_conninfo *inc)
574 * The hash is built on foreign port + local port + foreign address.
575 * We rely on the fact that struct in_conninfo starts with 16 bits
576 * of foreign port, then 16 bits of local port, followed by 128
577 * bits of foreign address. In the case of an IPv4 address, the first 3
578 * 32-bit words of the address are always zero.
580 hash = jenkins_hash32((uint32_t *)&inc->inc_ie, 5,
581 V_tcp_syncache.hash_secret) & V_tcp_syncache.hashmask;
583 return (&V_tcp_syncache.hashbase[hash]);
587 * Find an entry in the syncache.
588 * Always returns with a locked syncache_head plus a matching entry, or NULL.
590 static struct syncache *
591 syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
594 struct syncache_head *sch;
596 *schp = sch = syncache_hashbucket(inc);
599 /* Circle through bucket row to find matching entry. */
600 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash)
601 if (bcmp(&inc->inc_ie, &sc->sc_inc.inc_ie,
602 sizeof(struct in_endpoints)) == 0)
605 return (sc); /* Always returns with locked sch. */
609 * This function is called when we get a RST for a
610 * non-existent connection, so that we can see if the
611 * connection is in the syn cache. If it is, zap it.
612 * If required send a challenge ACK.
615 syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th, struct mbuf *m,
619 struct syncache_head *sch;
622 if (syncache_cookiesonly())
624 sc = syncache_lookup(inc, &sch); /* returns locked sch */
625 SCH_LOCK_ASSERT(sch);
628 * Any RST to our SYN|ACK must not carry ACK, SYN or FIN flags.
629 * See RFC 793 page 65, section SEGMENT ARRIVES.
631 if (tcp_get_flags(th) & (TH_ACK|TH_SYN|TH_FIN)) {
632 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
633 log(LOG_DEBUG, "%s; %s: Spurious RST with ACK, SYN or "
634 "FIN flag set, segment ignored\n", s, __func__);
635 TCPSTAT_INC(tcps_badrst);
640 * No corresponding connection was found in syncache.
641 * If syncookies are enabled and possibly exclusively
642 * used, or we are under memory pressure, a valid RST
643 * may not find a syncache entry. In that case we're
644 * done and no SYN|ACK retransmissions will happen.
645 * Otherwise the RST was misdirected or spoofed.
648 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
649 log(LOG_DEBUG, "%s; %s: Spurious RST without matching "
650 "syncache entry (possibly syncookie only), "
651 "segment ignored\n", s, __func__);
652 TCPSTAT_INC(tcps_badrst);
656 /* The remote UDP encaps port does not match. */
657 if (sc->sc_port != port) {
658 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
659 log(LOG_DEBUG, "%s; %s: Spurious RST with matching "
660 "syncache entry but non-matching UDP encaps port, "
661 "segment ignored\n", s, __func__);
662 TCPSTAT_INC(tcps_badrst);
667 * If the RST bit is set, check the sequence number to see
668 * if this is a valid reset segment.
671 * In all states except SYN-SENT, all reset (RST) segments
672 * are validated by checking their SEQ-fields. A reset is
673 * valid if its sequence number is in the window.
676 * There are four cases for the acceptability test for an incoming
679 * Segment Receive Test
681 * ------- ------- -------------------------------------------
682 * 0 0 SEG.SEQ = RCV.NXT
683 * 0 >0 RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
684 * >0 0 not acceptable
685 * >0 >0 RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
686 * or RCV.NXT =< SEG.SEQ+SEG.LEN-1 < RCV.NXT+RCV.WND
688 * Note that when receiving a SYN segment in the LISTEN state,
689 * IRS is set to SEG.SEQ and RCV.NXT is set to SEG.SEQ+1, as
690 * described in RFC 793, page 66.
692 if ((SEQ_GEQ(th->th_seq, sc->sc_irs + 1) &&
693 SEQ_LT(th->th_seq, sc->sc_irs + 1 + sc->sc_wnd)) ||
694 (sc->sc_wnd == 0 && th->th_seq == sc->sc_irs + 1)) {
695 if (V_tcp_insecure_rst ||
696 th->th_seq == sc->sc_irs + 1) {
697 syncache_drop(sc, sch);
698 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
700 "%s; %s: Our SYN|ACK was rejected, "
701 "connection attempt aborted by remote "
704 TCPSTAT_INC(tcps_sc_reset);
706 TCPSTAT_INC(tcps_badrst);
707 /* Send challenge ACK. */
708 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
709 log(LOG_DEBUG, "%s; %s: RST with invalid "
710 " SEQ %u != NXT %u (+WND %u), "
711 "sending challenge ACK\n",
713 th->th_seq, sc->sc_irs + 1, sc->sc_wnd);
714 syncache_respond(sc, m, TH_ACK);
717 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
718 log(LOG_DEBUG, "%s; %s: RST with invalid SEQ %u != "
719 "NXT %u (+WND %u), segment ignored\n",
721 th->th_seq, sc->sc_irs + 1, sc->sc_wnd);
722 TCPSTAT_INC(tcps_badrst);
732 syncache_badack(struct in_conninfo *inc, uint16_t port)
735 struct syncache_head *sch;
737 if (syncache_cookiesonly())
739 sc = syncache_lookup(inc, &sch); /* returns locked sch */
740 SCH_LOCK_ASSERT(sch);
741 if ((sc != NULL) && (sc->sc_port == port)) {
742 syncache_drop(sc, sch);
743 TCPSTAT_INC(tcps_sc_badack);
749 syncache_unreach(struct in_conninfo *inc, tcp_seq th_seq, uint16_t port)
752 struct syncache_head *sch;
754 if (syncache_cookiesonly())
756 sc = syncache_lookup(inc, &sch); /* returns locked sch */
757 SCH_LOCK_ASSERT(sch);
761 /* If the port != sc_port, then it's a bogus ICMP msg */
762 if (port != sc->sc_port)
765 /* If the sequence number != sc_iss, then it's a bogus ICMP msg */
766 if (ntohl(th_seq) != sc->sc_iss)
770 * If we've retransmitted 3 times and this is our second error,
771 * we remove the entry. Otherwise, we allow it to continue on.
772 * This prevents us from incorrectly nuking an entry during a
773 * spurious network outage.
777 if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxmits < 3 + 1) {
778 sc->sc_flags |= SCF_UNREACH;
781 syncache_drop(sc, sch);
782 TCPSTAT_INC(tcps_sc_unreach);
788 * Build a new TCP socket structure from a syncache entry.
790 * On success return the newly created socket with its underlying inp locked.
792 static struct socket *
793 syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
795 struct tcp_function_block *blk;
796 struct inpcb *inp = NULL;
805 * Ok, create the full blown connection, and set things up
806 * as they would have been set up if we had created the
807 * connection when the SYN arrived.
809 if ((so = solisten_clone(lso)) == NULL)
812 mac_socketpeer_set_from_mbuf(m, so);
814 error = in_pcballoc(so, &V_tcbinfo);
820 if ((tp = tcp_newtcpcb(inp)) == NULL) {
826 inp->inp_inc.inc_flags = sc->sc_inc.inc_flags;
828 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
829 inp->inp_vflag &= ~INP_IPV4;
830 inp->inp_vflag |= INP_IPV6;
831 inp->in6p_laddr = sc->sc_inc.inc6_laddr;
833 inp->inp_vflag &= ~INP_IPV6;
834 inp->inp_vflag |= INP_IPV4;
836 inp->inp_ip_ttl = sc->sc_ip_ttl;
837 inp->inp_ip_tos = sc->sc_ip_tos;
838 inp->inp_laddr = sc->sc_inc.inc_laddr;
844 * If there's an mbuf and it has a flowid, then let's initialise the
845 * inp with that particular flowid.
847 if (m != NULL && M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
848 inp->inp_flowid = m->m_pkthdr.flowid;
849 inp->inp_flowtype = M_HASHTYPE_GET(m);
851 inp->inp_numa_domain = m->m_pkthdr.numa_domain;
855 inp->inp_lport = sc->sc_inc.inc_lport;
857 if (inp->inp_vflag & INP_IPV6PROTO) {
858 struct inpcb *oinp = sotoinpcb(lso);
861 * Inherit socket options from the listening socket.
862 * Note that in6p_inputopts are not (and should not be)
863 * copied, since it stores previously received options and is
864 * used to detect if each new option is different than the
865 * previous one and hence should be passed to a user.
866 * If we copied in6p_inputopts, a user would not be able to
867 * receive options just after calling the accept system call.
869 inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
870 if (oinp->in6p_outputopts)
871 inp->in6p_outputopts =
872 ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);
873 inp->in6p_hops = oinp->in6p_hops;
876 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
877 struct in6_addr laddr6;
878 struct sockaddr_in6 sin6;
880 sin6.sin6_family = AF_INET6;
881 sin6.sin6_len = sizeof(sin6);
882 sin6.sin6_addr = sc->sc_inc.inc6_faddr;
883 sin6.sin6_port = sc->sc_inc.inc_fport;
884 sin6.sin6_flowinfo = sin6.sin6_scope_id = 0;
885 laddr6 = inp->in6p_laddr;
886 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
887 inp->in6p_laddr = sc->sc_inc.inc6_laddr;
888 INP_HASH_WLOCK(&V_tcbinfo);
889 error = in6_pcbconnect_mbuf(inp, (struct sockaddr *)&sin6,
890 thread0.td_ucred, m, false);
891 INP_HASH_WUNLOCK(&V_tcbinfo);
893 inp->in6p_laddr = laddr6;
896 /* Override flowlabel from in6_pcbconnect. */
897 inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
898 inp->inp_flow |= sc->sc_flowlabel;
901 #if defined(INET) && defined(INET6)
906 struct in_addr laddr;
907 struct sockaddr_in sin;
909 inp->inp_options = (m) ? ip_srcroute(m) : NULL;
911 if (inp->inp_options == NULL) {
912 inp->inp_options = sc->sc_ipopts;
913 sc->sc_ipopts = NULL;
916 sin.sin_family = AF_INET;
917 sin.sin_len = sizeof(sin);
918 sin.sin_addr = sc->sc_inc.inc_faddr;
919 sin.sin_port = sc->sc_inc.inc_fport;
920 bzero((caddr_t)sin.sin_zero, sizeof(sin.sin_zero));
921 laddr = inp->inp_laddr;
922 if (inp->inp_laddr.s_addr == INADDR_ANY)
923 inp->inp_laddr = sc->sc_inc.inc_laddr;
924 INP_HASH_WLOCK(&V_tcbinfo);
925 error = in_pcbconnect(inp, (struct sockaddr *)&sin,
926 thread0.td_ucred, false);
927 INP_HASH_WUNLOCK(&V_tcbinfo);
929 inp->inp_laddr = laddr;
934 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
935 /* Copy old policy into new socket's. */
936 if (ipsec_copy_pcbpolicy(sotoinpcb(lso), inp) != 0)
937 printf("syncache_socket: could not copy policy\n");
939 tp->t_state = TCPS_SYN_RECEIVED;
940 tp->iss = sc->sc_iss;
941 tp->irs = sc->sc_irs;
942 tp->t_port = sc->sc_port;
945 blk = sototcpcb(lso)->t_fb;
946 if (V_functions_inherit_listen_socket_stack && blk != tp->t_fb) {
948 * Our parent's t_fb was not the default;
949 * we need to release our ref on tp->t_fb and
950 * pick up one on the new entry.
952 struct tcp_function_block *rblk;
954 rblk = find_and_ref_tcp_fb(blk);
955 KASSERT(rblk != NULL,
956 ("cannot find blk %p out of syncache?", blk));
957 if (tp->t_fb->tfb_tcp_fb_fini)
958 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 0);
959 refcount_release(&tp->t_fb->tfb_refcnt);
962 * XXXrrs this is quite dangerous, it is possible
963 * for the new function to fail to init. We also
964 * are not asking if the handoff_is_ok though at
965 * the very start that's probably ok.
967 if (tp->t_fb->tfb_tcp_fb_init) {
968 (*tp->t_fb->tfb_tcp_fb_init)(tp);
971 tp->snd_wl1 = sc->sc_irs;
972 tp->snd_max = tp->iss + 1;
973 tp->snd_nxt = tp->iss + 1;
974 tp->rcv_up = sc->sc_irs + 1;
975 tp->rcv_wnd = sc->sc_wnd;
976 tp->rcv_adv += tp->rcv_wnd;
977 tp->last_ack_sent = tp->rcv_nxt;
979 tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
980 if (sc->sc_flags & SCF_NOOPT)
981 tp->t_flags |= TF_NOOPT;
983 if (sc->sc_flags & SCF_WINSCALE) {
984 tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
985 tp->snd_scale = sc->sc_requested_s_scale;
986 tp->request_r_scale = sc->sc_requested_r_scale;
988 if (sc->sc_flags & SCF_TIMESTAMP) {
989 tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
990 tp->ts_recent = sc->sc_tsreflect;
991 tp->ts_recent_age = tcp_ts_getticks();
992 tp->ts_offset = sc->sc_tsoff;
994 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
995 if (sc->sc_flags & SCF_SIGNATURE)
996 tp->t_flags |= TF_SIGNATURE;
998 if (sc->sc_flags & SCF_SACK)
999 tp->t_flags |= TF_SACK_PERMIT;
1002 tcp_ecn_syncache_socket(tp, sc);
1005 * Set up MSS and get cached values from tcp_hostcache.
1006 * This might overwrite some of the defaults we just set.
1008 tcp_mss(tp, sc->sc_peer_mss);
1011 * If the SYN,ACK was retransmitted, indicate that the CWND should be
1012 * limited to one segment in cc_conn_init().
1013 * NB: sc_rxmits counts all SYN,ACK transmits, not just retransmits.
1015 if (sc->sc_rxmits > 1)
1020 * Allow a TOE driver to install its hooks. Note that we hold the
1021 * pcbinfo lock too and that prevents tcp_usr_accept from accepting a
1022 * new connection before the TOE driver has done its thing.
1024 if (ADDED_BY_TOE(sc)) {
1025 struct toedev *tod = sc->sc_tod;
1027 tod->tod_offload_socket(tod, sc->sc_todctx, so);
1031 * Copy and activate timers.
1033 tp->t_keepinit = sototcpcb(lso)->t_keepinit;
1034 tp->t_keepidle = sototcpcb(lso)->t_keepidle;
1035 tp->t_keepintvl = sototcpcb(lso)->t_keepintvl;
1036 tp->t_keepcnt = sototcpcb(lso)->t_keepcnt;
1037 tcp_timer_activate(tp, TT_KEEP, TP_KEEPINIT(tp));
1039 TCPSTAT_INC(tcps_accepts);
1040 TCP_PROBE6(state__change, NULL, tp, NULL, tp, NULL, TCPS_LISTEN);
1042 if (!solisten_enqueue(so, SS_ISCONNECTED))
1043 tp->t_flags |= TF_INCQUEUE;
1049 * Drop the connection; we will either send a RST or have the peer
1050 * retransmit its SYN again after its RTO and try again.
1052 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
1053 log(LOG_DEBUG, "%s; %s: Socket create failed "
1054 "due to limits or memory shortage\n",
1058 TCPSTAT_INC(tcps_listendrop);
1065 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
1066 log(LOG_DEBUG, "%s; %s: in%s_pcbconnect failed with error %i\n",
1067 s, __func__, (sc->sc_inc.inc_flags & INC_ISIPV6) ? "6" : "",
1071 TCPSTAT_INC(tcps_listendrop);
1076 * This function gets called when we receive an ACK for a
1077 * socket in the LISTEN state. We look up the connection
1078 * in the syncache, and if it's there, we pull it out of
1079 * the cache and turn it into a full-blown connection in
1080 * the SYN-RECEIVED state.
1082 * On syncache_socket() success the newly created socket
1083 * has its underlying inp locked.
1086 syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
1087 struct socket **lsop, struct mbuf *m, uint16_t port)
1089 struct syncache *sc;
1090 struct syncache_head *sch;
1091 struct syncache scs;
1096 KASSERT((tcp_get_flags(th) & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK,
1097 ("%s: can handle only ACK", __func__));
1099 if (syncache_cookiesonly()) {
1101 sch = syncache_hashbucket(inc);
1104 sc = syncache_lookup(inc, &sch); /* returns locked sch */
1106 SCH_LOCK_ASSERT(sch);
1111 * Test code for syncookies comparing the syncache stored
1112 * values with the reconstructed values from the cookie.
1115 syncookie_cmp(inc, sch, sc, th, to, *lsop, port);
1120 * There is no syncache entry, so see if this ACK is
1121 * a returning syncookie. To do this, first:
1122 * A. Check if syncookies are used in case of syncache
1124 * B. See if this socket has had a syncache entry dropped in
1125 * the recent past. We don't want to accept a bogus
1126 * syncookie if we've never received a SYN or accept it
1128 * C. check that the syncookie is valid. If it is, then
1129 * cobble up a fake syncache entry, and return.
1131 if (locked && !V_tcp_syncookies) {
1133 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1134 log(LOG_DEBUG, "%s; %s: Spurious ACK, "
1135 "segment rejected (syncookies disabled)\n",
1139 if (locked && !V_tcp_syncookiesonly &&
1140 sch->sch_last_overflow < time_uptime - SYNCOOKIE_LIFETIME) {
1142 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1143 log(LOG_DEBUG, "%s; %s: Spurious ACK, "
1144 "segment rejected (no syncache entry)\n",
1148 bzero(&scs, sizeof(scs));
1149 sc = syncookie_lookup(inc, sch, &scs, th, to, *lsop, port);
1153 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1154 log(LOG_DEBUG, "%s; %s: Segment failed "
1155 "SYNCOOKIE authentication, segment rejected "
1156 "(probably spoofed)\n", s, __func__);
1159 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1160 /* If received ACK has MD5 signature, check it. */
1161 if ((to->to_flags & TOF_SIGNATURE) != 0 &&
1162 (!TCPMD5_ENABLED() ||
1163 TCPMD5_INPUT(m, th, to->to_signature) != 0)) {
1165 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1166 log(LOG_DEBUG, "%s; %s: Segment rejected, "
1167 "MD5 signature doesn't match.\n",
1171 TCPSTAT_INC(tcps_sig_err_sigopt);
1172 return (-1); /* Do not send RST */
1174 #endif /* TCP_SIGNATURE */
1175 TCPSTATES_INC(TCPS_SYN_RECEIVED);
1177 if (sc->sc_port != port) {
1181 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1183 * If listening socket requested TCP digests, check that
1184 * received ACK has signature and it is correct.
1185 * If not, drop the ACK and leave the sc entry in the cache,
1186 * because SYN was received with correct signature.
1188 if (sc->sc_flags & SCF_SIGNATURE) {
1189 if ((to->to_flags & TOF_SIGNATURE) == 0) {
1191 TCPSTAT_INC(tcps_sig_err_nosigopt);
1193 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1194 log(LOG_DEBUG, "%s; %s: Segment "
1195 "rejected, MD5 signature wasn't "
1196 "provided.\n", s, __func__);
1199 return (-1); /* Do not send RST */
1201 if (!TCPMD5_ENABLED() ||
1202 TCPMD5_INPUT(m, th, to->to_signature) != 0) {
1203 /* Doesn't match or no SA */
1205 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1206 log(LOG_DEBUG, "%s; %s: Segment "
1207 "rejected, MD5 signature doesn't "
1208 "match.\n", s, __func__);
1211 return (-1); /* Do not send RST */
1214 #endif /* TCP_SIGNATURE */
1217 * RFC 7323 PAWS: If we have a timestamp on this segment and
1218 * it's less than ts_recent, drop it.
1219 * XXXMT: RFC 7323 also requires sending an ACK.
1220 * In tcp_input.c this is only done for TCP segments
1221 * with user data, so be consistent here and just drop
1224 if (sc->sc_flags & SCF_TIMESTAMP && to->to_flags & TOF_TS &&
1225 TSTMP_LT(to->to_tsval, sc->sc_tsreflect)) {
1227 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1229 "%s; %s: SEG.TSval %u < TS.Recent %u, "
1230 "segment dropped\n", s, __func__,
1231 to->to_tsval, sc->sc_tsreflect);
1234 return (-1); /* Do not send RST */
1238 * If timestamps were not negotiated during SYN/ACK and a
1239 * segment with a timestamp is received, ignore the
1240 * timestamp and process the packet normally.
1241 * See section 3.2 of RFC 7323.
1243 if (!(sc->sc_flags & SCF_TIMESTAMP) &&
1244 (to->to_flags & TOF_TS)) {
1245 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1246 log(LOG_DEBUG, "%s; %s: Timestamp not "
1247 "expected, segment processed normally\n",
1255 * If timestamps were negotiated during SYN/ACK and a
1256 * segment without a timestamp is received, silently drop
1257 * the segment, unless the missing timestamps are tolerated.
1258 * See section 3.2 of RFC 7323.
1260 if ((sc->sc_flags & SCF_TIMESTAMP) &&
1261 !(to->to_flags & TOF_TS)) {
1262 if (V_tcp_tolerate_missing_ts) {
1263 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1265 "%s; %s: Timestamp missing, "
1266 "segment processed normally\n",
1272 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1274 "%s; %s: Timestamp missing, "
1275 "segment silently dropped\n",
1279 return (-1); /* Do not send RST */
1282 TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
1285 if (ADDED_BY_TOE(sc)) {
1286 struct toedev *tod = sc->sc_tod;
1288 tod->tod_syncache_removed(tod, sc->sc_todctx);
1295 * Segment validation:
1296 * ACK must match our initial sequence number + 1 (the SYN|ACK).
1298 if (th->th_ack != sc->sc_iss + 1) {
1299 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1300 log(LOG_DEBUG, "%s; %s: ACK %u != ISS+1 %u, segment "
1301 "rejected\n", s, __func__, th->th_ack, sc->sc_iss);
1306 * The SEQ must fall in the window starting at the received
1307 * initial receive sequence number + 1 (the SYN).
1309 if (SEQ_LEQ(th->th_seq, sc->sc_irs) ||
1310 SEQ_GT(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
1311 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1312 log(LOG_DEBUG, "%s; %s: SEQ %u != IRS+1 %u, segment "
1313 "rejected\n", s, __func__, th->th_seq, sc->sc_irs);
1317 *lsop = syncache_socket(sc, *lsop, m);
1320 TCPSTAT_INC(tcps_sc_aborted);
1322 TCPSTAT_INC(tcps_sc_completed);
1324 /* how do we find the inp for the new socket? */
1330 TCPSTATES_DEC(TCPS_SYN_RECEIVED);
1340 static struct socket *
1341 syncache_tfo_expand(struct syncache *sc, struct socket *lso, struct mbuf *m,
1342 uint64_t response_cookie)
1346 unsigned int *pending_counter;
1351 pending_counter = intotcpcb(sotoinpcb(lso))->t_tfo_pending;
1352 so = syncache_socket(sc, lso, m);
1354 TCPSTAT_INC(tcps_sc_aborted);
1355 atomic_subtract_int(pending_counter, 1);
1358 inp = sotoinpcb(so);
1359 tp = intotcpcb(inp);
1360 tp->t_flags |= TF_FASTOPEN;
1361 tp->t_tfo_cookie.server = response_cookie;
1362 tp->snd_max = tp->iss;
1363 tp->snd_nxt = tp->iss;
1364 tp->t_tfo_pending = pending_counter;
1365 TCPSTATES_INC(TCPS_SYN_RECEIVED);
1366 TCPSTAT_INC(tcps_sc_completed);
1373 * Given a LISTEN socket and an inbound SYN request, add
1374 * this to the syn cache, and send back a segment:
1375 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
1378 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
1379 * Doing so would require that we hold onto the data and deliver it
1380 * to the application. However, if we are the target of a SYN-flood
1381 * DoS attack, an attacker could send data which would eventually
1382 * consume all available buffer space if it were ACKed. By not ACKing
1383 * the data, we avoid this DoS scenario.
1385 * The exception to the above is when a SYN with a valid TCP Fast Open (TFO)
1386 * cookie is processed and a new socket is created. In this case, any data
1387 * accompanying the SYN will be queued to the socket by tcp_input() and will
1388 * be ACKed either when the application sends response data or the delayed
1389 * ACK timer expires, whichever comes first.
1392 syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
1393 struct inpcb *inp, struct socket *so, struct mbuf *m, void *tod,
1394 void *todctx, uint8_t iptos, uint16_t port)
1397 struct socket *rv = NULL;
1398 struct syncache *sc = NULL;
1399 struct syncache_head *sch;
1400 struct mbuf *ipopts = NULL;
1402 int win, ip_ttl, ip_tos;
1405 int autoflowlabel = 0;
1408 struct label *maclabel;
1410 struct syncache scs;
1412 uint64_t tfo_response_cookie;
1413 unsigned int *tfo_pending = NULL;
1414 int tfo_cookie_valid = 0;
1415 int tfo_response_cookie_valid = 0;
1418 INP_RLOCK_ASSERT(inp); /* listen socket */
1419 KASSERT((tcp_get_flags(th) & (TH_RST|TH_ACK|TH_SYN)) == TH_SYN,
1420 ("%s: unexpected tcp flags", __func__));
1423 * Combine all so/tp operations very early to drop the INP lock as
1426 KASSERT(SOLISTENING(so), ("%s: %p not listening", __func__, so));
1428 cred = V_tcp_syncache.see_other ? NULL : crhold(so->so_cred);
1431 if (inc->inc_flags & INC_ISIPV6) {
1432 if (inp->inp_flags & IN6P_AUTOFLOWLABEL) {
1435 ip_ttl = in6_selecthlim(inp, NULL);
1436 if ((inp->in6p_outputopts == NULL) ||
1437 (inp->in6p_outputopts->ip6po_tclass == -1)) {
1440 ip_tos = inp->in6p_outputopts->ip6po_tclass;
1444 #if defined(INET6) && defined(INET)
1449 ip_ttl = inp->inp_ip_ttl;
1450 ip_tos = inp->inp_ip_tos;
1453 win = so->sol_sbrcv_hiwat;
1454 ltflags = (tp->t_flags & (TF_NOOPT | TF_SIGNATURE));
1456 if (V_tcp_fastopen_server_enable && IS_FASTOPEN(tp->t_flags) &&
1457 (tp->t_tfo_pending != NULL) &&
1458 (to->to_flags & TOF_FASTOPEN)) {
1460 * Limit the number of pending TFO connections to
1461 * approximately half of the queue limit. This prevents TFO
1462 * SYN floods from starving the service by filling the
1463 * listen queue with bogus TFO connections.
1465 if (atomic_fetchadd_int(tp->t_tfo_pending, 1) <=
1466 (so->sol_qlimit / 2)) {
1469 result = tcp_fastopen_check_cookie(inc,
1470 to->to_tfo_cookie, to->to_tfo_len,
1471 &tfo_response_cookie);
1472 tfo_cookie_valid = (result > 0);
1473 tfo_response_cookie_valid = (result >= 0);
1477 * Remember the TFO pending counter as it will have to be
1478 * decremented below if we don't make it to syncache_tfo_expand().
1480 tfo_pending = tp->t_tfo_pending;
1484 if (mac_syncache_init(&maclabel) != 0) {
1488 mac_syncache_create(maclabel, inp);
1490 if (!tfo_cookie_valid)
1494 * Remember the IP options, if any.
1497 if (!(inc->inc_flags & INC_ISIPV6))
1500 ipopts = (m) ? ip_srcroute(m) : NULL;
1505 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1507 * When the socket is TCP-MD5 enabled, check that:
1508 * - a signed packet is valid
1509 * - a non-signed packet does not have a security association
1511 * If a signed packet fails validation or a non-signed packet has a
1512 * security association, the packet will be dropped.
1514 if (ltflags & TF_SIGNATURE) {
1515 if (to->to_flags & TOF_SIGNATURE) {
1516 if (!TCPMD5_ENABLED() ||
1517 TCPMD5_INPUT(m, th, to->to_signature) != 0)
1520 if (TCPMD5_ENABLED() &&
1521 TCPMD5_INPUT(m, NULL, NULL) != ENOENT)
1524 } else if (to->to_flags & TOF_SIGNATURE)
1526 #endif /* TCP_SIGNATURE */
1528 * See if we already have an entry for this connection.
1529 * If we do, resend the SYN,ACK, and reset the retransmit timer.
1531 * XXX: should the syncache be re-initialized with the contents
1532 * of the new SYN here (which may have different options?)
1534 * XXX: We do not check the sequence number to see if this is a
1535 * real retransmit or a new connection attempt. The question is
1536 * how to handle such a case; either ignore it as spoofed, or
1537 * drop the current entry and create a new one?
1539 if (syncache_cookiesonly()) {
1541 sch = syncache_hashbucket(inc);
1544 sc = syncache_lookup(inc, &sch); /* returns locked sch */
1546 SCH_LOCK_ASSERT(sch);
1549 if (tfo_cookie_valid)
1551 TCPSTAT_INC(tcps_sc_dupsyn);
1554 * If we were remembering a previous source route,
1555 * forget it and use the new one we've been given.
1558 (void) m_free(sc->sc_ipopts);
1559 sc->sc_ipopts = ipopts;
1562 * Update timestamp if present.
1564 if ((sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS))
1565 sc->sc_tsreflect = to->to_tsval;
1567 sc->sc_flags &= ~SCF_TIMESTAMP;
1569 * Disable ECN if needed.
1571 if ((sc->sc_flags & SCF_ECN) &&
1572 ((tcp_get_flags(th) & (TH_ECE|TH_CWR)) != (TH_ECE|TH_CWR))) {
1573 sc->sc_flags &= ~SCF_ECN;
1577 * Since we have already unconditionally allocated label
1578 * storage, free it up. The syncache entry will already
1579 * have an initialized label we can use.
1581 mac_syncache_destroy(&maclabel);
1583 TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
1584 /* Retransmit SYN|ACK and reset retransmit count. */
1585 if ((s = tcp_log_addrs(&sc->sc_inc, th, NULL, NULL))) {
1586 log(LOG_DEBUG, "%s; %s: Received duplicate SYN, "
1587 "resetting timer and retransmitting SYN|ACK\n",
1591 if (syncache_respond(sc, m, TH_SYN|TH_ACK) == 0) {
1593 syncache_timeout(sc, sch, 1);
1594 TCPSTAT_INC(tcps_sndacks);
1595 TCPSTAT_INC(tcps_sndtotal);
1601 if (tfo_cookie_valid) {
1602 bzero(&scs, sizeof(scs));
1608 * Skip allocating a syncache entry if we are just going to discard
1612 bzero(&scs, sizeof(scs));
1615 sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
1618 * The zone allocator couldn't provide more entries.
1619 * Treat this as if the cache was full; drop the oldest
1620 * entry and insert the new one.
1622 TCPSTAT_INC(tcps_sc_zonefail);
1623 if ((sc = TAILQ_LAST(&sch->sch_bucket, sch_head)) != NULL) {
1624 sch->sch_last_overflow = time_uptime;
1625 syncache_drop(sc, sch);
1626 syncache_pause(inc);
1628 sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
1630 if (V_tcp_syncookies) {
1631 bzero(&scs, sizeof(scs));
1635 ("%s: bucket unexpectedly unlocked",
1639 (void) m_free(ipopts);
1646 if (!tfo_cookie_valid && tfo_response_cookie_valid)
1647 sc->sc_tfo_cookie = &tfo_response_cookie;
1650 * Fill in the syncache values.
1653 sc->sc_label = maclabel;
1658 sc->sc_ipopts = ipopts;
1659 bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
1660 sc->sc_ip_tos = ip_tos;
1661 sc->sc_ip_ttl = ip_ttl;
1664 sc->sc_todctx = todctx;
1666 sc->sc_irs = th->th_seq;
1668 sc->sc_flowlabel = 0;
1671 * Initial receive window: clip sbspace to [0 .. TCP_MAXWIN].
1672 * win was derived from socket earlier in the function.
1675 win = imin(win, TCP_MAXWIN);
1678 if (V_tcp_do_rfc1323 &&
1679 !(ltflags & TF_NOOPT)) {
1681 * A timestamp received in a SYN makes
1682 * it ok to send timestamp requests and replies.
1684 if (to->to_flags & TOF_TS) {
1685 sc->sc_tsreflect = to->to_tsval;
1686 sc->sc_flags |= SCF_TIMESTAMP;
1687 sc->sc_tsoff = tcp_new_ts_offset(inc);
1689 if (to->to_flags & TOF_SCALE) {
1693 * Pick the smallest possible scaling factor that
1694 * will still allow us to scale up to sb_max, aka
1695 * kern.ipc.maxsockbuf.
1697 * We do this because there are broken firewalls that
1698 * will corrupt the window scale option, leading to
1699 * the other endpoint believing that our advertised
1700 * window is unscaled. At scale factors larger than
1701 * 5 the unscaled window will drop below 1500 bytes,
1702 * leading to serious problems when traversing these
1705 * With the default maxsockbuf of 256K, a scale factor
1706 * of 3 will be chosen by this algorithm. Those who
1707 * choose a larger maxsockbuf should watch out
1708 * for the compatibility problems mentioned above.
1710 * RFC1323: The Window field in a SYN (i.e., a <SYN>
1711 * or <SYN,ACK>) segment itself is never scaled.
1713 while (wscale < TCP_MAX_WINSHIFT &&
1714 (TCP_MAXWIN << wscale) < sb_max)
1716 sc->sc_requested_r_scale = wscale;
1717 sc->sc_requested_s_scale = to->to_wscale;
1718 sc->sc_flags |= SCF_WINSCALE;
1721 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1723 * If incoming packet has an MD5 signature, flag this in the
1724 * syncache so that syncache_respond() will do the right thing
1727 if (to->to_flags & TOF_SIGNATURE)
1728 sc->sc_flags |= SCF_SIGNATURE;
1729 #endif /* TCP_SIGNATURE */
1730 if (to->to_flags & TOF_SACKPERM)
1731 sc->sc_flags |= SCF_SACK;
1732 if (to->to_flags & TOF_MSS)
1733 sc->sc_peer_mss = to->to_mss; /* peer mss may be zero */
1734 if (ltflags & TF_NOOPT)
1735 sc->sc_flags |= SCF_NOOPT;
1738 sc->sc_flags |= tcp_ecn_syncache_add(tcp_get_flags(th), iptos);
1740 if (V_tcp_syncookies)
1741 sc->sc_iss = syncookie_generate(sch, sc);
1743 sc->sc_iss = arc4random();
1745 if (autoflowlabel) {
1746 if (V_tcp_syncookies)
1747 sc->sc_flowlabel = sc->sc_iss;
1749 sc->sc_flowlabel = ip6_randomflowlabel();
1750 sc->sc_flowlabel = htonl(sc->sc_flowlabel) & IPV6_FLOWLABEL_MASK;
1756 if (tfo_cookie_valid) {
1757 rv = syncache_tfo_expand(sc, so, m, tfo_response_cookie);
1758 /* INP_RUNLOCK(inp) will be performed by the caller */
1762 TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
1764 * Do a standard 3-way handshake.
1766 if (syncache_respond(sc, m, TH_SYN|TH_ACK) == 0) {
1767 if (V_tcp_syncookies && V_tcp_syncookiesonly && sc != &scs)
1769 else if (sc != &scs)
1770 syncache_insert(sc, sch); /* locks and unlocks sch */
1771 TCPSTAT_INC(tcps_sndacks);
1772 TCPSTAT_INC(tcps_sndtotal);
1776 TCPSTAT_INC(tcps_sc_dropped);
1781 TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
1786 * If tfo_pending is not NULL here, then a TFO SYN that did not
1787 * result in a new socket was processed and the associated pending
1788 * counter has not yet been decremented. All such TFO processing paths
1789 * transit this point.
1791 if (tfo_pending != NULL)
1792 tcp_fastopen_decrement_counter(tfo_pending);
1799 mac_syncache_destroy(&maclabel);
1805 * Send SYN|ACK or ACK to the peer. Either in response to a peer's segment,
1806 * i.e. m0 != NULL, or upon 3WHS ACK timeout, i.e. m0 == NULL.
1809 syncache_respond(struct syncache *sc, const struct mbuf *m0, int flags)
1811 struct ip *ip = NULL;
1813 struct tcphdr *th = NULL;
1814 struct udphdr *udp = NULL;
1815 int optlen, error = 0; /* Make compiler happy */
1816 u_int16_t hlen, tlen, mssopt, ulen;
1819 struct ip6_hdr *ip6 = NULL;
1826 (sc->sc_inc.inc_flags & INC_ISIPV6) ? sizeof(struct ip6_hdr) :
1829 tlen = hlen + sizeof(struct tcphdr);
1831 tlen += sizeof(struct udphdr);
1833 /* Determine the MSS we advertise to the other end of the connection. */
1834 mssopt = tcp_mssopt(&sc->sc_inc);
1836 mssopt -= V_tcp_udp_tunneling_overhead;
1837 mssopt = max(mssopt, V_tcp_minmss);
1839 /* XXX: Assume that the entire packet will fit in a header mbuf. */
1840 KASSERT(max_linkhdr + tlen + TCP_MAXOLEN <= MHLEN,
1841 ("syncache: mbuf too small: hlen %u, sc_port %u, max_linkhdr %d + "
1842 "tlen %d + TCP_MAXOLEN %ju <= MHLEN %d", hlen, sc->sc_port,
1843 max_linkhdr, tlen, (uintmax_t)TCP_MAXOLEN, MHLEN));
1845 /* Create the IP+TCP header from scratch. */
1846 m = m_gethdr(M_NOWAIT, MT_DATA);
1850 mac_syncache_create_mbuf(sc->sc_label, m);
1852 m->m_data += max_linkhdr;
1854 m->m_pkthdr.len = tlen;
1855 m->m_pkthdr.rcvif = NULL;
1858 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
1859 ip6 = mtod(m, struct ip6_hdr *);
1860 ip6->ip6_vfc = IPV6_VERSION;
1861 ip6->ip6_src = sc->sc_inc.inc6_laddr;
1862 ip6->ip6_dst = sc->sc_inc.inc6_faddr;
1863 ip6->ip6_plen = htons(tlen - hlen);
1864 /* ip6_hlim is set after checksum */
1865 /* Zero out traffic class and flow label. */
1866 ip6->ip6_flow &= ~IPV6_FLOWINFO_MASK;
1867 ip6->ip6_flow |= sc->sc_flowlabel;
1868 if (sc->sc_port != 0) {
1869 ip6->ip6_nxt = IPPROTO_UDP;
1870 udp = (struct udphdr *)(ip6 + 1);
1871 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
1872 udp->uh_dport = sc->sc_port;
1873 ulen = (tlen - sizeof(struct ip6_hdr));
1874 th = (struct tcphdr *)(udp + 1);
1876 ip6->ip6_nxt = IPPROTO_TCP;
1877 th = (struct tcphdr *)(ip6 + 1);
1879 ip6->ip6_flow |= htonl(sc->sc_ip_tos << 20);
1882 #if defined(INET6) && defined(INET)
1887 ip = mtod(m, struct ip *);
1888 ip->ip_v = IPVERSION;
1889 ip->ip_hl = sizeof(struct ip) >> 2;
1890 ip->ip_len = htons(tlen);
1894 ip->ip_src = sc->sc_inc.inc_laddr;
1895 ip->ip_dst = sc->sc_inc.inc_faddr;
1896 ip->ip_ttl = sc->sc_ip_ttl;
1897 ip->ip_tos = sc->sc_ip_tos;
1900 * See if we should do MTU discovery. Route lookups are
1901 * expensive, so we will only unset the DF bit if:
1903 * 1) path_mtu_discovery is disabled
1904 * 2) the SCF_UNREACH flag has been set
1906 if (V_path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
1907 ip->ip_off |= htons(IP_DF);
1908 if (sc->sc_port == 0) {
1909 ip->ip_p = IPPROTO_TCP;
1910 th = (struct tcphdr *)(ip + 1);
1912 ip->ip_p = IPPROTO_UDP;
1913 udp = (struct udphdr *)(ip + 1);
1914 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
1915 udp->uh_dport = sc->sc_port;
1916 ulen = (tlen - sizeof(struct ip));
1917 th = (struct tcphdr *)(udp + 1);
1921 th->th_sport = sc->sc_inc.inc_lport;
1922 th->th_dport = sc->sc_inc.inc_fport;
1925 th->th_seq = htonl(sc->sc_iss);
1927 th->th_seq = htonl(sc->sc_iss + 1);
1928 th->th_ack = htonl(sc->sc_irs + 1);
1929 th->th_off = sizeof(struct tcphdr) >> 2;
1930 th->th_win = htons(sc->sc_wnd);
1933 flags = tcp_ecn_syncache_respond(flags, sc);
1934 tcp_set_flags(th, flags);
1936 /* Tack on the TCP options. */
1937 if ((sc->sc_flags & SCF_NOOPT) == 0) {
1940 if (flags & TH_SYN) {
1942 to.to_flags = TOF_MSS;
1943 if (sc->sc_flags & SCF_WINSCALE) {
1944 to.to_wscale = sc->sc_requested_r_scale;
1945 to.to_flags |= TOF_SCALE;
1947 if (sc->sc_flags & SCF_SACK)
1948 to.to_flags |= TOF_SACKPERM;
1949 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1950 if (sc->sc_flags & SCF_SIGNATURE)
1951 to.to_flags |= TOF_SIGNATURE;
1953 if (sc->sc_tfo_cookie) {
1954 to.to_flags |= TOF_FASTOPEN;
1955 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
1956 to.to_tfo_cookie = sc->sc_tfo_cookie;
1957 /* don't send cookie again when retransmitting response */
1958 sc->sc_tfo_cookie = NULL;
1961 if (sc->sc_flags & SCF_TIMESTAMP) {
1962 to.to_tsval = sc->sc_tsoff + tcp_ts_getticks();
1963 to.to_tsecr = sc->sc_tsreflect;
1964 to.to_flags |= TOF_TS;
1966 optlen = tcp_addoptions(&to, (u_char *)(th + 1));
1968 /* Adjust headers by option size. */
1969 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
1971 m->m_pkthdr.len += optlen;
1973 if (sc->sc_inc.inc_flags & INC_ISIPV6)
1974 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) + optlen);
1977 ip->ip_len = htons(ntohs(ip->ip_len) + optlen);
1978 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1979 if (sc->sc_flags & SCF_SIGNATURE) {
1980 KASSERT(to.to_flags & TOF_SIGNATURE,
1981 ("tcp_addoptions() didn't set tcp_signature"));
1983 /* NOTE: to.to_signature is inside of mbuf */
1984 if (!TCPMD5_ENABLED() ||
1985 TCPMD5_OUTPUT(m, th, to.to_signature) != 0) {
1996 udp->uh_ulen = htons(ulen);
1998 M_SETFIB(m, sc->sc_inc.inc_fibnum);
2000 * If we have the peer's SYN and it has a flowid, then let's assign it to
2001 * our SYN|ACK. ip6_output() and ip_output() will not assign flowid
2002 * to SYN|ACK due to lack of inp here.
2004 if (m0 != NULL && M_HASHTYPE_GET(m0) != M_HASHTYPE_NONE) {
2005 m->m_pkthdr.flowid = m0->m_pkthdr.flowid;
2006 M_HASHTYPE_SET(m, M_HASHTYPE_GET(m0));
2009 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
2011 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
2012 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
2013 udp->uh_sum = in6_cksum_pseudo(ip6, ulen,
2015 th->th_sum = htons(0);
2017 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
2018 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
2019 th->th_sum = in6_cksum_pseudo(ip6, tlen + optlen - hlen,
2022 ip6->ip6_hlim = sc->sc_ip_ttl;
2024 if (ADDED_BY_TOE(sc)) {
2025 struct toedev *tod = sc->sc_tod;
2027 error = tod->tod_syncache_respond(tod, sc->sc_todctx, m);
2032 TCP_PROBE5(send, NULL, NULL, ip6, NULL, th);
2033 error = ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
2036 #if defined(INET6) && defined(INET)
2042 m->m_pkthdr.csum_flags = CSUM_UDP;
2043 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
2044 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
2045 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
2046 th->th_sum = htons(0);
2048 m->m_pkthdr.csum_flags = CSUM_TCP;
2049 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
2050 th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
2051 htons(tlen + optlen - hlen + IPPROTO_TCP));
2054 if (ADDED_BY_TOE(sc)) {
2055 struct toedev *tod = sc->sc_tod;
2057 error = tod->tod_syncache_respond(tod, sc->sc_todctx, m);
2062 TCP_PROBE5(send, NULL, NULL, ip, NULL, th);
2063 error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, NULL);
2070 * The purpose of syncookies is to handle spoofed SYN flooding DoS attacks
2071 * that exceed the capacity of the syncache by avoiding the storage of any
2072 * of the SYNs we receive. Syncookies defend against blind SYN flooding
2073 * attacks where the attacker does not have access to our responses.
2075 * Syncookies encode and include all necessary information about the
2076 * connection setup within the SYN|ACK that we send back. That way we
2077 * can avoid keeping any local state until the ACK to our SYN|ACK returns
2078 * (if ever). Normally the syncache and syncookies are running in parallel
2079 * with the latter taking over when the former is exhausted. When a matching
2080 * syncache entry is found, the syncookie is ignored.
2082 * The only reliable information persisting the 3WHS is our initial sequence
2083 * number ISS of 32 bits. Syncookies embed a cryptographically sufficiently
2084 * strong hash (MAC) value and a few bits of TCP SYN options in the ISS
2085 * of our SYN|ACK. The MAC can be recomputed when the ACK to our SYN|ACK
2086 * returns and signifies a legitimate connection if it matches the ACK.
2088 * The available space of 32 bits to store the hash and to encode the SYN
2089 * option information is very tight and we should have at least 24 bits for
2090 * the MAC to keep the number of guesses required by blind spoofing reasonably high.
2092 * SYN option information we have to encode to fully restore a connection:
2093 * MSS: is important to choose an optimal segment size to avoid IP level
2094 * fragmentation along the path. The common MSS values can be encoded
2095 * in a 3-bit table. Uncommon values are captured by the next lower value
2096 * in the table leading to a slight increase in packetization overhead.
2097 * WSCALE: is necessary to allow large windows to be used for high delay-
2098 * bandwidth product links. Not scaling the window when it was initially
2099 * negotiated is bad for performance as lack of scaling further decreases
2100 * the apparent available send window. We only need to encode the WSCALE
2101 * we received from the remote end. Our end can be recalculated at any
2102 * time. The common WSCALE values can be encoded in a 3-bit table.
2103 * Uncommon values are captured by the next lower value in the table
2104 * making us under-estimate the available window size, halving our
2105 * theoretically possible maximum throughput for that connection.
2106 * SACK: Greatly assists in packet loss recovery and requires 1 bit.
2107 * TIMESTAMP and SIGNATURE are not encoded because they are permanent options
2108 * that are included in all segments on a connection. We enable them when
2111 * Security of syncookies and attack vectors:
2113 * The MAC is computed over (faddr||laddr||fport||lport||irs||flags||secmod)
2114 * together with the global secret to make it unique per connection attempt.
2115 * Thus any change of any of those parameters results in a different MAC output
2116 * in an unpredictable way unless a collision is encountered. 24 bits of the
2117 * MAC are embedded into the ISS.
2119 * To prevent replay attacks two rotating global secrets are updated with a
2120 * new random value every 15 seconds. The life-time of a syncookie is thus
2121 * 15-30 seconds.
2123 * Vector 1: Attacking the secret. This requires finding a weakness in the
2124 * MAC itself or the way it is used here. The attacker can do a chosen plain
2125 * text attack by varying and testing all the parameters under his control.
2126 * The strength depends on the size and randomness of the secret, and the
2127 * cryptographic security of the MAC function. Due to the constant updating
2128 * of the secret the attacker has at most 29.999 seconds to find the secret
2129 * and launch spoofed connections. After that he has to start all over again.
2131 * Vector 2: Collision attack on the MAC of a single ACK. With a 24 bit MAC
2132 * size an average of 4,823 attempts are required for a 50% chance of success
2133 * to spoof a single syncookie (birthday collision paradox). However the
2134 * attacker is blind and doesn't know if one of his attempts succeeded unless
2135 * he has a side channel to infer success from. A single connection setup
2136 * success probability of 90% requires 8,790 packets; 99.99% requires 17,578 packets.
2137 * This many attempts are required for each blind spoofed connection. For
2138 * every additional spoofed connection he has to launch another N attempts.
2139 * Thus for a sustained rate of 100 spoofed connections per second approximately
2140 * 1,800,000 packets per second would have to be sent.
2142 * NB: The MAC function should be fast so that it doesn't become a CPU
2143 * exhaustion attack vector itself.
2146 * RFC4987 TCP SYN Flooding Attacks and Common Mitigations
2147 * SYN cookies were first proposed by cryptographer Dan J. Bernstein in 1996
2148 * http://cr.yp.to/syncookies.html (overview)
2149 * http://cr.yp.to/syncookies/archive (details)
2152 * Schematic construction of a syncookie enabled Initial Sequence Number:
2154 * 12345678901234567890123456789012
2155 * |xxxxxxxxxxxxxxxxxxxxxxxxWWWMMMSP|
2157 * x 24 MAC (truncated)
2158 * W 3 Send Window Scale index
2159 * M 3 MSS index
2160 * S 1 SACK permitted
2161 * P 1 Odd/even secret
2165 * Distribution and probability of certain MSS values. Those in between are
2166 * rounded down to the next lower one.
2167 * [An Analysis of TCP Maximum Segment Sizes, S. Alcock and R. Nelson, 2011]
2168 * .2% .3% 5% 7% 7% 20% 15% 45%
2170 static int tcp_sc_msstab[] = { 216, 536, 1200, 1360, 1400, 1440, 1452, 1460 };
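/*
 * Editor's sketch, not part of the original file: how a peer MSS is
 * reduced to the 3-bit index stored in the cookie.  Values that fall
 * between table entries are captured by the next lower entry; anything
 * below 216 maps to index 0.  The helper name is hypothetical and the
 * block is compiled out.
 */
#if 0
static u_int
syncookie_mss_to_idx(uint16_t peer_mss)
{
	u_int i;

	/* Walk down from the largest entry to the first one that fits. */
	for (i = nitems(tcp_sc_msstab) - 1;
	    tcp_sc_msstab[i] > peer_mss && i > 0;
	    i--)
		;
	return (i);	/* e.g. peer_mss 1380 -> index 3 (MSS 1360) */
}
#endif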
2173 * Distribution and probability of certain WSCALE values. We have to map the
2174 * (send) window scale (shift) option with a range of 0-14 from 4 bits into 3
2175 * bits based on prevalence of certain values. Values for which we don't have
2176 * an exact match are rounded down to the next lower one, letting us under-estimate
2177 * the true available window. At the moment this would happen only for the
2178 * very uncommon values 3, 5 and those above 8 (more than 16MB socket buffer
2179 * and window size). The absence of the WSCALE option (no scaling in either
2180 * direction) is encoded with index zero.
2181 * [WSCALE values histograms, Allman, 2012]
2182 * X 10 10 35 5 6 14 10% by host
2183 * X 11 4 5 5 18 49 3% by connections
2185 static int tcp_sc_wstab[] = { 0, 0, 1, 2, 4, 6, 7, 8 };
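/*
 * Editor's sketch, not part of the original file: the same rounding
 * applied to the received window scale.  A shift of 5 has no exact
 * table entry and is captured by index 4 (shift 4), halving the window
 * we assume the peer can use.  Hypothetical helper, compiled out.
 */
#if 0
static u_int
syncookie_wscale_to_idx(u_int wscale)
{
	u_int i;

	for (i = nitems(tcp_sc_wstab) - 1;
	    tcp_sc_wstab[i] > wscale && i > 0;
	    i--)
		;
	return (i);	/* e.g. wscale 5 -> index 4 (shift 4) */
}
#endif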
2188 * Compute the MAC for the SYN cookie. SIPHASH-2-4 is chosen for its speed
2189 * and good cryptographic properties.
2192 syncookie_mac(struct in_conninfo *inc, tcp_seq irs, uint8_t flags,
2193 uint8_t *secbits, uintptr_t secmod)
2196 uint32_t siphash[2];
2198 SipHash24_Init(&ctx);
2199 SipHash_SetKey(&ctx, secbits);
2200 switch (inc->inc_flags & INC_ISIPV6) {
2203 SipHash_Update(&ctx, &inc->inc_faddr, sizeof(inc->inc_faddr));
2204 SipHash_Update(&ctx, &inc->inc_laddr, sizeof(inc->inc_laddr));
2209 SipHash_Update(&ctx, &inc->inc6_faddr, sizeof(inc->inc6_faddr));
2210 SipHash_Update(&ctx, &inc->inc6_laddr, sizeof(inc->inc6_laddr));
2214 SipHash_Update(&ctx, &inc->inc_fport, sizeof(inc->inc_fport));
2215 SipHash_Update(&ctx, &inc->inc_lport, sizeof(inc->inc_lport));
2216 SipHash_Update(&ctx, &irs, sizeof(irs));
2217 SipHash_Update(&ctx, &flags, sizeof(flags));
2218 SipHash_Update(&ctx, &secmod, sizeof(secmod));
2219 SipHash_Final((u_int8_t *)&siphash, &ctx);
2221 return (siphash[0] ^ siphash[1]);
2225 syncookie_generate(struct syncache_head *sch, struct syncache *sc)
2227 u_int i, secbit, wscale;
2230 union syncookie cookie;
2234 /* Map our computed MSS into the 3-bit index. */
2235 for (i = nitems(tcp_sc_msstab) - 1;
2236 tcp_sc_msstab[i] > sc->sc_peer_mss && i > 0;
2239 cookie.flags.mss_idx = i;
2242 * Map the send window scale into the 3-bit index but only if
2243 * the wscale option was received.
2245 if (sc->sc_flags & SCF_WINSCALE) {
2246 wscale = sc->sc_requested_s_scale;
2247 for (i = nitems(tcp_sc_wstab) - 1;
2248 tcp_sc_wstab[i] > wscale && i > 0;
2251 cookie.flags.wscale_idx = i;
2254 /* Can we do SACK? */
2255 if (sc->sc_flags & SCF_SACK)
2256 cookie.flags.sack_ok = 1;
2258 /* Which of the two secrets to use. */
2259 secbit = V_tcp_syncache.secret.oddeven & 0x1;
2260 cookie.flags.odd_even = secbit;
2262 secbits = V_tcp_syncache.secret.key[secbit];
2263 hash = syncookie_mac(&sc->sc_inc, sc->sc_irs, cookie.cookie, secbits,
2267 * Put the flags into the hash and XOR them to get better ISS number
2268 * variance. This doesn't enhance the cryptographic strength and is
2269 * done to prevent the 8 cookie bits from showing up directly on the
2270 * wire.
2273 iss |= cookie.cookie ^ (hash >> 24);
2275 TCPSTAT_INC(tcps_sc_sendcookie);
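/*
 * Editor's sketch, not part of the original file: round-tripping the
 * 8 cookie flag bits through the ISS as constructed above and decoded
 * again in syncookie_lookup().  "hash" stands for the 32-bit folded
 * SipHash output and "flags" for cookie.cookie; the function itself is
 * hypothetical and compiled out.
 */
#if 0
static void
syncookie_iss_roundtrip(uint32_t hash, uint8_t flags)
{
	uint32_t iss, ack;
	uint8_t decoded;

	/* Encode: top 24 MAC bits, flags XORed into the low byte. */
	iss = hash & ~0xff;
	iss |= flags ^ (hash >> 24);

	/* The peer echoes iss + 1; syncookie_lookup() subtracts 1 again. */
	ack = iss;

	/* Decode: XOR with the top byte recovers the original flags. */
	decoded = (ack & 0xff) ^ (ack >> 24);
	KASSERT(decoded == flags, ("syncookie flag round-trip failed"));
}
#endif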
2279 static struct syncache *
2280 syncookie_lookup(struct in_conninfo *inc, struct syncache_head *sch,
2281 struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
2282 struct socket *lso, uint16_t port)
2287 int wnd, wscale = 0;
2288 union syncookie cookie;
2291 * Pull information out of SYN-ACK/ACK and revert sequence number
2292 * advances.
2294 ack = th->th_ack - 1;
2295 seq = th->th_seq - 1;
2298 * Unpack the flags containing enough information to restore the
2299 * connection.
2301 cookie.cookie = (ack & 0xff) ^ (ack >> 24);
2303 /* Which of the two secrets to use. */
2304 secbits = V_tcp_syncache.secret.key[cookie.flags.odd_even];
2306 hash = syncookie_mac(inc, seq, cookie.cookie, secbits, (uintptr_t)sch);
2308 /* The recomputed hash matches the ACK if this was a genuine cookie. */
2309 if ((ack & ~0xff) != (hash & ~0xff))
2312 /* Fill in the syncache values. */
2314 bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
2315 sc->sc_ipopts = NULL;
2320 switch (inc->inc_flags & INC_ISIPV6) {
2323 sc->sc_ip_ttl = sotoinpcb(lso)->inp_ip_ttl;
2324 sc->sc_ip_tos = sotoinpcb(lso)->inp_ip_tos;
2329 if (sotoinpcb(lso)->inp_flags & IN6P_AUTOFLOWLABEL)
2331 htonl(sc->sc_iss) & IPV6_FLOWLABEL_MASK;
2336 sc->sc_peer_mss = tcp_sc_msstab[cookie.flags.mss_idx];
2338 /* We can simply recompute the receive window scale we sent earlier. */
2339 while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < sb_max)
2342 /* Only use wscale if it was enabled in the original SYN. */
2343 if (cookie.flags.wscale_idx > 0) {
2344 sc->sc_requested_r_scale = wscale;
2345 sc->sc_requested_s_scale = tcp_sc_wstab[cookie.flags.wscale_idx];
2346 sc->sc_flags |= SCF_WINSCALE;
2349 wnd = lso->sol_sbrcv_hiwat;
2351 wnd = imin(wnd, TCP_MAXWIN);
2354 if (cookie.flags.sack_ok)
2355 sc->sc_flags |= SCF_SACK;
2357 if (to->to_flags & TOF_TS) {
2358 sc->sc_flags |= SCF_TIMESTAMP;
2359 sc->sc_tsreflect = to->to_tsval;
2360 sc->sc_tsoff = tcp_new_ts_offset(inc);
2363 if (to->to_flags & TOF_SIGNATURE)
2364 sc->sc_flags |= SCF_SIGNATURE;
2370 TCPSTAT_INC(tcps_sc_recvcookie);
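/*
 * Editor's sketch, not part of the original file: the receive window
 * scale we advertised is not stored in the cookie; it is recomputed
 * from sb_max exactly as in the loop above.  With the stock 2MB sb_max
 * (an assumption for this example) the loop settles on a shift of 6.
 * Hypothetical helper, compiled out.
 */
#if 0
static int
syncookie_own_wscale(u_long sbmax)
{
	int wscale = 0;

	while (wscale < TCP_MAX_WINSHIFT &&
	    ((u_long)TCP_MAXWIN << wscale) < sbmax)
		wscale++;
	return (wscale);
}
#endif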
2376 syncookie_cmp(struct in_conninfo *inc, struct syncache_head *sch,
2377 struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
2378 struct socket *lso, uint16_t port)
2380 struct syncache scs, *scx;
2383 bzero(&scs, sizeof(scs));
2384 scx = syncookie_lookup(inc, sch, &scs, th, to, lso, port);
2386 if ((s = tcp_log_addrs(inc, th, NULL, NULL)) == NULL)
2390 if (sc->sc_peer_mss != scx->sc_peer_mss)
2391 log(LOG_DEBUG, "%s; %s: mss different %i vs %i\n",
2392 s, __func__, sc->sc_peer_mss, scx->sc_peer_mss);
2394 if (sc->sc_requested_r_scale != scx->sc_requested_r_scale)
2395 log(LOG_DEBUG, "%s; %s: rwscale different %i vs %i\n",
2396 s, __func__, sc->sc_requested_r_scale,
2397 scx->sc_requested_r_scale);
2399 if (sc->sc_requested_s_scale != scx->sc_requested_s_scale)
2400 log(LOG_DEBUG, "%s; %s: swscale different %i vs %i\n",
2401 s, __func__, sc->sc_requested_s_scale,
2402 scx->sc_requested_s_scale);
2404 if ((sc->sc_flags & SCF_SACK) != (scx->sc_flags & SCF_SACK))
2405 log(LOG_DEBUG, "%s; %s: SACK different\n", s, __func__);
2412 #endif /* INVARIANTS */
2415 syncookie_reseed(void *arg)
2417 struct tcp_syncache *sc = arg;
2422 * Reseeding the secret doesn't have to be protected by a lock.
2423 * It only must be ensured that the new random values are visible
2424 * to all CPUs in an SMP environment. The atomic with release
2425 * semantics ensures that.
2427 secbit = (sc->secret.oddeven & 0x1) ? 0 : 1;
2428 secbits = sc->secret.key[secbit];
2429 arc4rand(secbits, SYNCOOKIE_SECRET_SIZE, 0);
2430 atomic_add_rel_int(&sc->secret.oddeven, 1);
2432 /* Reschedule ourself. */
2433 callout_schedule(&sc->secret.reseed, SYNCOOKIE_LIFETIME * hz);
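/*
 * Editor's sketch, not part of the original file: with two secrets and
 * a reseed every SYNCOOKIE_LIFETIME seconds, the secret a cookie was
 * minted under survives exactly one further reseed, so a cookie stays
 * verifiable for one to two reseed periods.  "minted" and "current" are
 * snapshots of the oddeven counter; hypothetical helper, compiled out.
 */
#if 0
static bool
syncookie_secret_alive(u_int minted_oddeven, u_int current_oddeven)
{
	/* The slot used at mint time is overwritten on the second reseed. */
	return (current_oddeven - minted_oddeven <= 1);
}
#endif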
2437 * We have overflowed a bucket. Let's pause dealing with the syncache.
2438 * This function will increment the bucketoverflow statistics appropriately
2439 * (once per pause when pausing is enabled; otherwise, once per overflow).
2442 syncache_pause(struct in_conninfo *inc)
2448 * 2. Add sysctl read here so we don't get the benefit of this
2449 * change without the new sysctl.
2453 * Try an unlocked read. If we already know that another thread
2454 * has activated the feature, there is no need to proceed.
2456 if (V_tcp_syncache.paused)
2459 /* Are cookies enabled? If not, we can't pause. */
2460 if (!V_tcp_syncookies) {
2461 TCPSTAT_INC(tcps_sc_bucketoverflow);
2466 * We may be the first thread to find an overflow. Get the lock
2467 * and evaluate if we need to take action.
2469 mtx_lock(&V_tcp_syncache.pause_mtx);
2470 if (V_tcp_syncache.paused) {
2471 mtx_unlock(&V_tcp_syncache.pause_mtx);
2475 /* Activate protection. */
2476 V_tcp_syncache.paused = true;
2477 TCPSTAT_INC(tcps_sc_bucketoverflow);
2480 * Determine the last backoff time. If we are seeing a renewed
2481 * attack within that same time after last reactivating the syncache,
2482 * consider it an extension of the same attack.
2484 delta = TCP_SYNCACHE_PAUSE_TIME << V_tcp_syncache.pause_backoff;
2485 if (V_tcp_syncache.pause_until + delta - time_uptime > 0) {
2486 if (V_tcp_syncache.pause_backoff < TCP_SYNCACHE_MAX_BACKOFF) {
2488 V_tcp_syncache.pause_backoff++;
2491 delta = TCP_SYNCACHE_PAUSE_TIME;
2492 V_tcp_syncache.pause_backoff = 0;
2495 /* Log a warning, including IP addresses, if able. */
2497 s = tcp_log_addrs(inc, NULL, NULL, NULL);
2499 s = (const char *)NULL;
2500 log(LOG_WARNING, "TCP syncache overflow detected; using syncookies for "
2501 "the next %lld seconds%s%s%s\n", (long long)delta,
2502 (s != NULL) ? " (last SYN: " : "", (s != NULL) ? s : "",
2503 (s != NULL) ? ")" : "");
2504 free(__DECONST(void *, s), M_TCPLOG);
2506 /* Use the calculated delta to set a new pause time. */
2507 V_tcp_syncache.pause_until = time_uptime + delta;
2508 callout_reset(&V_tcp_syncache.pause_co, delta * hz, syncache_unpause,
2510 mtx_unlock(&V_tcp_syncache.pause_mtx);
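/*
 * Editor's sketch, not part of the original file: the pause interval
 * computed above grows by a factor of two each time the attack resumes
 * within the previous backoff window, up to TCP_SYNCACHE_MAX_BACKOFF
 * doublings of TCP_SYNCACHE_PAUSE_TIME.  Hypothetical helper, compiled
 * out.
 */
#if 0
static time_t
syncache_pause_delta(u_int backoff)
{
	if (backoff > TCP_SYNCACHE_MAX_BACKOFF)
		backoff = TCP_SYNCACHE_MAX_BACKOFF;
	return ((time_t)TCP_SYNCACHE_PAUSE_TIME << backoff);
}
#endif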
2513 /* Evaluate whether we need to unpause. */
2515 syncache_unpause(void *arg)
2517 struct tcp_syncache *sc;
2521 mtx_assert(&sc->pause_mtx, MA_OWNED | MA_NOTRECURSED);
2522 callout_deactivate(&sc->pause_co);
2525 * Check to make sure we are not running early. If the pause
2526 * time has expired, then deactivate the protection.
2528 if ((delta = sc->pause_until - time_uptime) > 0)
2529 callout_schedule(&sc->pause_co, delta * hz);
2535 * Exports the syncache entries to userland so that netstat can display
2536 * them alongside the other sockets. This function is intended to be
2537 * called only from tcp_pcblist.
2539 * Due to concurrency on an active system, the number of pcbs exported
2540 * may have no relation to max_pcbs. max_pcbs merely indicates the
2541 * amount of space the caller allocated for this function to use.
2544 syncache_pcblist(struct sysctl_req *req)
2547 struct syncache *sc;
2548 struct syncache_head *sch;
2551 bzero(&xt, sizeof(xt));
2552 xt.xt_len = sizeof(xt);
2553 xt.t_state = TCPS_SYN_RECEIVED;
2554 xt.xt_inp.xi_socket.xso_protocol = IPPROTO_TCP;
2555 xt.xt_inp.xi_socket.xso_len = sizeof (struct xsocket);
2556 xt.xt_inp.xi_socket.so_type = SOCK_STREAM;
2557 xt.xt_inp.xi_socket.so_state = SS_ISCONNECTING;
2559 for (i = 0; i < V_tcp_syncache.hashsize; i++) {
2560 sch = &V_tcp_syncache.hashbase[i];
2562 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
2563 if (sc->sc_cred != NULL &&
2564 cr_cansee(req->td->td_ucred, sc->sc_cred) != 0)
2566 if (sc->sc_inc.inc_flags & INC_ISIPV6)
2567 xt.xt_inp.inp_vflag = INP_IPV6;
2569 xt.xt_inp.inp_vflag = INP_IPV4;
2570 xt.xt_encaps_port = sc->sc_port;
2571 bcopy(&sc->sc_inc, &xt.xt_inp.inp_inc,
2572 sizeof (struct in_conninfo));
2573 error = SYSCTL_OUT(req, &xt, sizeof xt);