2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2001 McAfee, Inc.
5 * Copyright (c) 2006,2013 Andre Oppermann, Internet Business Solutions AG
8 * This software was developed for the FreeBSD Project by Jonathan Lemon
9 * and McAfee Research, the Security Research Division of McAfee, Inc. under
10 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
11 * DARPA CHATS research program. [2001 McAfee, Inc.]
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
39 #include "opt_inet6.h"
40 #include "opt_ipsec.h"
41 #include "opt_pcbgroup.h"
43 #include <sys/param.h>
44 #include <sys/systm.h>
46 #include <sys/refcount.h>
47 #include <sys/kernel.h>
48 #include <sys/sysctl.h>
49 #include <sys/limits.h>
51 #include <sys/mutex.h>
52 #include <sys/malloc.h>
54 #include <sys/proc.h> /* for proc0 declaration */
55 #include <sys/random.h>
56 #include <sys/socket.h>
57 #include <sys/socketvar.h>
58 #include <sys/syslog.h>
59 #include <sys/ucred.h>
62 #include <crypto/siphash/siphash.h>
67 #include <net/if_var.h>
68 #include <net/route.h>
71 #include <netinet/in.h>
72 #include <netinet/in_kdtrace.h>
73 #include <netinet/in_systm.h>
74 #include <netinet/ip.h>
75 #include <netinet/in_var.h>
76 #include <netinet/in_pcb.h>
77 #include <netinet/ip_var.h>
78 #include <netinet/ip_options.h>
80 #include <netinet/ip6.h>
81 #include <netinet/icmp6.h>
82 #include <netinet6/nd6.h>
83 #include <netinet6/ip6_var.h>
84 #include <netinet6/in6_pcb.h>
86 #include <netinet/tcp.h>
87 #include <netinet/tcp_fastopen.h>
88 #include <netinet/tcp_fsm.h>
89 #include <netinet/tcp_seq.h>
90 #include <netinet/tcp_timer.h>
91 #include <netinet/tcp_var.h>
92 #include <netinet/tcp_syncache.h>
94 #include <netinet6/tcp6_var.h>
97 #include <netinet/toecore.h>
100 #include <netipsec/ipsec_support.h>
102 #include <machine/in_cksum.h>
104 #include <security/mac/mac_framework.h>
106 VNET_DEFINE_STATIC(int, tcp_syncookies) = 1;
107 #define V_tcp_syncookies VNET(tcp_syncookies)
108 SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_VNET | CTLFLAG_RW,
109 &VNET_NAME(tcp_syncookies), 0,
110 "Use TCP SYN cookies if the syncache overflows");
112 VNET_DEFINE_STATIC(int, tcp_syncookiesonly) = 0;
113 #define V_tcp_syncookiesonly VNET(tcp_syncookiesonly)
114 SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies_only, CTLFLAG_VNET | CTLFLAG_RW,
115 &VNET_NAME(tcp_syncookiesonly), 0,
116 "Use only TCP SYN cookies");
118 VNET_DEFINE_STATIC(int, functions_inherit_listen_socket_stack) = 1;
119 #define V_functions_inherit_listen_socket_stack \
120 VNET(functions_inherit_listen_socket_stack)
121 SYSCTL_INT(_net_inet_tcp, OID_AUTO, functions_inherit_listen_socket_stack,
122 CTLFLAG_VNET | CTLFLAG_RW,
123 &VNET_NAME(functions_inherit_listen_socket_stack), 0,
124 "Inherit listen socket's stack");
127 #define ADDED_BY_TOE(sc) ((sc)->sc_tod != NULL)
130 static void syncache_drop(struct syncache *, struct syncache_head *);
131 static void syncache_free(struct syncache *);
132 static void syncache_insert(struct syncache *, struct syncache_head *);
133 static int syncache_respond(struct syncache *, struct syncache_head *,
134 const struct mbuf *, int);
135 static struct socket *syncache_socket(struct syncache *, struct socket *,
137 static void syncache_timeout(struct syncache *sc, struct syncache_head *sch,
139 static void syncache_timer(void *);
141 static uint32_t syncookie_mac(struct in_conninfo *, tcp_seq, uint8_t,
142 uint8_t *, uintptr_t);
143 static tcp_seq syncookie_generate(struct syncache_head *, struct syncache *);
144 static struct syncache
145 *syncookie_lookup(struct in_conninfo *, struct syncache_head *,
146 struct syncache *, struct tcphdr *, struct tcpopt *,
148 static void syncookie_reseed(void *);
150 static int syncookie_cmp(struct in_conninfo *inc, struct syncache_head *sch,
151 struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
156 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
157 * 3 retransmits corresponds to a timeout with default values of
158 * TCPTV_RTOBASE * ( 1 +
159 * tcp_syn_backoff[1] +
160 * tcp_syn_backoff[2] +
161 * tcp_syn_backoff[3]) + 3 * tcp_rexmit_slop,
162 * 3000 ms * (1 + 1 + 1 + 1) + 3 * 200 ms = 12600 ms,
163 * the odds are that the user has given up attempting to connect by then.
165 #define SYNCACHE_MAXREXMTS 3
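/*
 * Worked example of the schedule implied by the values above (a sketch
 * using the stock defaults, not normative): the initial SYN|ACK is sent
 * at t = 0 s, retransmits follow at roughly t = 3, 6 and 9 s
 * (TCPTV_RTOBASE with a backoff factor of 1 each), and the entry is
 * dropped around t = 12.6 s once the last timeout plus the 3 * 200 ms of
 * slop from the computation above has elapsed.
 */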
167 /* Arbitrary values */
168 #define TCP_SYNCACHE_HASHSIZE 512
169 #define TCP_SYNCACHE_BUCKETLIMIT 30
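/* With these defaults the derived cache_limit below is 512 * 30 = 15360. */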
171 VNET_DEFINE_STATIC(struct tcp_syncache, tcp_syncache);
172 #define V_tcp_syncache VNET(tcp_syncache)
174 static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0,
177 SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
178 &VNET_NAME(tcp_syncache.bucket_limit), 0,
179 "Per-bucket hash limit for syncache");
181 SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
182 &VNET_NAME(tcp_syncache.cache_limit), 0,
183 "Overall entry limit for syncache");
185 SYSCTL_UMA_CUR(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_VNET,
186 &VNET_NAME(tcp_syncache.zone), "Current number of entries in syncache");
188 SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
189 &VNET_NAME(tcp_syncache.hashsize), 0,
190 "Size of TCP syncache hashtable");
193 sysctl_net_inet_tcp_syncache_rexmtlimit_check(SYSCTL_HANDLER_ARGS)
198 new = V_tcp_syncache.rexmt_limit;
199 error = sysctl_handle_int(oidp, &new, 0, req);
200 if ((error == 0) && (req->newptr != NULL)) {
201 if (new > TCP_MAXRXTSHIFT)
204 V_tcp_syncache.rexmt_limit = new;
209 SYSCTL_PROC(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit,
210 CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW,
211 &VNET_NAME(tcp_syncache.rexmt_limit), 0,
212 sysctl_net_inet_tcp_syncache_rexmtlimit_check, "UI",
213 "Limit on SYN/ACK retransmissions");
215 VNET_DEFINE(int, tcp_sc_rst_sock_fail) = 1;
216 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rst_on_sock_fail,
217 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_sc_rst_sock_fail), 0,
218 "Send reset on socket allocation failure");
220 static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");
222 #define SCH_LOCK(sch) mtx_lock(&(sch)->sch_mtx)
223 #define SCH_UNLOCK(sch) mtx_unlock(&(sch)->sch_mtx)
224 #define SCH_LOCK_ASSERT(sch) mtx_assert(&(sch)->sch_mtx, MA_OWNED)
227 * Requires the syncache entry to be already removed from the bucket list.
230 syncache_free(struct syncache *sc)
234 (void) m_free(sc->sc_ipopts);
238 mac_syncache_destroy(&sc->sc_label);
241 uma_zfree(V_tcp_syncache.zone, sc);
249 V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
250 V_tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
251 V_tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
252 V_tcp_syncache.hash_secret = arc4random();
254 TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
255 &V_tcp_syncache.hashsize);
256 TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
257 &V_tcp_syncache.bucket_limit);
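/*
 * The hash size must be a power of two: bucket selection is done as
 * "hash & hashmask" with hashmask = hashsize - 1, which only picks a
 * valid bucket when hashsize is a power of two.
 */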
258 if (!powerof2(V_tcp_syncache.hashsize) ||
259 V_tcp_syncache.hashsize == 0) {
260 printf("WARNING: syncache hash size is not a power of 2.\n");
261 V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
263 V_tcp_syncache.hashmask = V_tcp_syncache.hashsize - 1;
266 V_tcp_syncache.cache_limit =
267 V_tcp_syncache.hashsize * V_tcp_syncache.bucket_limit;
268 TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
269 &V_tcp_syncache.cache_limit);
271 /* Allocate the hash table. */
272 V_tcp_syncache.hashbase = malloc(V_tcp_syncache.hashsize *
273 sizeof(struct syncache_head), M_SYNCACHE, M_WAITOK | M_ZERO);
276 V_tcp_syncache.vnet = curvnet;
279 /* Initialize the hash buckets. */
280 for (i = 0; i < V_tcp_syncache.hashsize; i++) {
281 TAILQ_INIT(&V_tcp_syncache.hashbase[i].sch_bucket);
282 mtx_init(&V_tcp_syncache.hashbase[i].sch_mtx, "tcp_sc_head",
284 callout_init_mtx(&V_tcp_syncache.hashbase[i].sch_timer,
285 &V_tcp_syncache.hashbase[i].sch_mtx, 0);
286 V_tcp_syncache.hashbase[i].sch_length = 0;
287 V_tcp_syncache.hashbase[i].sch_sc = &V_tcp_syncache;
288 V_tcp_syncache.hashbase[i].sch_last_overflow =
289 -(SYNCOOKIE_LIFETIME + 1);
292 /* Create the syncache entry zone. */
293 V_tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
294 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
295 V_tcp_syncache.cache_limit = uma_zone_set_max(V_tcp_syncache.zone,
296 V_tcp_syncache.cache_limit);
298 /* Start the SYN cookie reseeder callout. */
299 callout_init(&V_tcp_syncache.secret.reseed, 1);
300 arc4rand(V_tcp_syncache.secret.key[0], SYNCOOKIE_SECRET_SIZE, 0);
301 arc4rand(V_tcp_syncache.secret.key[1], SYNCOOKIE_SECRET_SIZE, 0);
302 callout_reset(&V_tcp_syncache.secret.reseed, SYNCOOKIE_LIFETIME * hz,
303 syncookie_reseed, &V_tcp_syncache);
308 syncache_destroy(void)
310 struct syncache_head *sch;
311 struct syncache *sc, *nsc;
315 * Stop the re-seed timer before freeing resources. No need to
316 * possibly schedule it another time.
318 callout_drain(&V_tcp_syncache.secret.reseed);
320 /* Cleanup hash buckets: stop timers, free entries, destroy locks. */
321 for (i = 0; i < V_tcp_syncache.hashsize; i++) {
323 sch = &V_tcp_syncache.hashbase[i];
324 callout_drain(&sch->sch_timer);
327 TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc)
328 syncache_drop(sc, sch);
330 KASSERT(TAILQ_EMPTY(&sch->sch_bucket),
331 ("%s: sch->sch_bucket not empty", __func__));
332 KASSERT(sch->sch_length == 0, ("%s: sch->sch_length %d not 0",
333 __func__, sch->sch_length));
334 mtx_destroy(&sch->sch_mtx);
337 KASSERT(uma_zone_get_cur(V_tcp_syncache.zone) == 0,
338 ("%s: cache_count not 0", __func__));
340 /* Free the allocated global resources. */
341 uma_zdestroy(V_tcp_syncache.zone);
342 free(V_tcp_syncache.hashbase, M_SYNCACHE);
347 * Inserts a syncache entry into the specified bucket row.
348 * Locks and unlocks the syncache_head autonomously.
351 syncache_insert(struct syncache *sc, struct syncache_head *sch)
353 struct syncache *sc2;
358 * Make sure that we don't overflow the per-bucket limit.
359 * If the bucket is full, toss the oldest element.
361 if (sch->sch_length >= V_tcp_syncache.bucket_limit) {
362 KASSERT(!TAILQ_EMPTY(&sch->sch_bucket),
363 ("sch->sch_length incorrect"));
364 sc2 = TAILQ_LAST(&sch->sch_bucket, sch_head);
365 sch->sch_last_overflow = time_uptime;
366 syncache_drop(sc2, sch);
367 TCPSTAT_INC(tcps_sc_bucketoverflow);
370 /* Put it into the bucket. */
371 TAILQ_INSERT_HEAD(&sch->sch_bucket, sc, sc_hash);
375 if (ADDED_BY_TOE(sc)) {
376 struct toedev *tod = sc->sc_tod;
378 tod->tod_syncache_added(tod, sc->sc_todctx);
382 /* Reinitialize the bucket row's timer. */
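/*
 * If this is the only entry, the previously stored deadline is stale;
 * prime sch_nextc with a far-future value and let syncache_timeout()
 * pull it back to this entry's retransmit deadline.
 */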
383 if (sch->sch_length == 1)
384 sch->sch_nextc = ticks + INT_MAX;
385 syncache_timeout(sc, sch, 1);
389 TCPSTATES_INC(TCPS_SYN_RECEIVED);
390 TCPSTAT_INC(tcps_sc_added);
394 * Remove and free entry from syncache bucket row.
395 * Expects locked syncache head.
398 syncache_drop(struct syncache *sc, struct syncache_head *sch)
401 SCH_LOCK_ASSERT(sch);
403 TCPSTATES_DEC(TCPS_SYN_RECEIVED);
404 TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
408 if (ADDED_BY_TOE(sc)) {
409 struct toedev *tod = sc->sc_tod;
411 tod->tod_syncache_removed(tod, sc->sc_todctx);
419 * Engage/reengage timer on bucket row.
422 syncache_timeout(struct syncache *sc, struct syncache_head *sch, int docallout)
426 if (sc->sc_rxmits == 0)
427 rexmt = TCPTV_RTOBASE;
429 TCPT_RANGESET(rexmt, TCPTV_RTOBASE * tcp_syn_backoff[sc->sc_rxmits],
430 tcp_rexmit_min, TCPTV_REXMTMAX);
431 sc->sc_rxttime = ticks + rexmt;
433 if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc)) {
434 sch->sch_nextc = sc->sc_rxttime;
436 callout_reset(&sch->sch_timer, sch->sch_nextc - ticks,
437 syncache_timer, (void *)sch);
442 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
443 * If we have retransmitted an entry the maximum number of times, expire it.
444 * One separate timer for each bucket row.
447 syncache_timer(void *xsch)
449 struct syncache_head *sch = (struct syncache_head *)xsch;
450 struct syncache *sc, *nsc;
454 CURVNET_SET(sch->sch_sc->vnet);
456 /* NB: syncache_head has already been locked by the callout. */
457 SCH_LOCK_ASSERT(sch);
460 * In the following cycle we may remove some entries and/or
461 * advance some timeouts, so re-initialize the bucket timer.
463 sch->sch_nextc = tick + INT_MAX;
465 TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc) {
467 * We do not check if the listen socket still exists
468 * and accept the case where the listen socket may be
469 * gone by the time we resend the SYN/ACK. We do
470 * not expect this to happen often. If it does,
471 * then the RST will be sent by the time the remote
472 * host does the SYN/ACK->ACK.
474 if (TSTMP_GT(sc->sc_rxttime, tick)) {
475 if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc))
476 sch->sch_nextc = sc->sc_rxttime;
479 if (sc->sc_rxmits > V_tcp_syncache.rexmt_limit) {
480 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
481 log(LOG_DEBUG, "%s; %s: Retransmits exhausted, "
482 "giving up and removing syncache entry\n",
486 syncache_drop(sc, sch);
487 TCPSTAT_INC(tcps_sc_stale);
490 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
491 log(LOG_DEBUG, "%s; %s: Response timeout, "
492 "retransmitting (%u) SYN|ACK\n",
493 s, __func__, sc->sc_rxmits);
497 syncache_respond(sc, sch, NULL, TH_SYN|TH_ACK);
498 TCPSTAT_INC(tcps_sc_retransmitted);
499 syncache_timeout(sc, sch, 0);
501 if (!TAILQ_EMPTY(&(sch)->sch_bucket))
502 callout_reset(&(sch)->sch_timer, (sch)->sch_nextc - tick,
503 syncache_timer, (void *)(sch));
508 * Find an entry in the syncache.
509 * Always returns with the syncache_head locked, plus a matching entry or NULL.
511 static struct syncache *
512 syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
515 struct syncache_head *sch;
519 * The hash is built on foreign port + local port + foreign address.
520 * We rely on the fact that struct in_conninfo starts with 16 bits
521 * of foreign port, then 16 bits of local port, followed by 128
522 * bits of foreign address. In the case of an IPv4 address, the first 3
523 * 32-bit words of the address are always zeroes.
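 * (16 + 16 + 128 bits = 160 bits, i.e. the five 32-bit words passed to
 * the hash function below.)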
525 hash = jenkins_hash32((uint32_t *)&inc->inc_ie, 5,
526 V_tcp_syncache.hash_secret) & V_tcp_syncache.hashmask;
528 sch = &V_tcp_syncache.hashbase[hash];
532 /* Walk the bucket row to find a matching entry. */
533 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash)
534 if (bcmp(&inc->inc_ie, &sc->sc_inc.inc_ie,
535 sizeof(struct in_endpoints)) == 0)
538 return (sc); /* Always returns with locked sch. */
542 * This function is called when we get a RST for a
543 * non-existent connection, so that we can see if the
544 * connection is in the syn cache. If it is, zap it.
545 * If required send a challenge ACK.
548 syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th, struct mbuf *m)
551 struct syncache_head *sch;
554 sc = syncache_lookup(inc, &sch); /* returns locked sch */
555 SCH_LOCK_ASSERT(sch);
558 * Any RST to our SYN|ACK must not carry ACK, SYN or FIN flags.
559 * See RFC 793 page 65, section SEGMENT ARRIVES.
561 if (th->th_flags & (TH_ACK|TH_SYN|TH_FIN)) {
562 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
563 log(LOG_DEBUG, "%s; %s: Spurious RST with ACK, SYN or "
564 "FIN flag set, segment ignored\n", s, __func__);
565 TCPSTAT_INC(tcps_badrst);
570 * No corresponding connection was found in syncache.
571 * If syncookies are enabled and possibly exclusively
572 * used, or we are under memory pressure, a valid RST
573 * may not find a syncache entry. In that case we're
574 * done and no SYN|ACK retransmissions will happen.
575 * Otherwise the RST was misdirected or spoofed.
578 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
579 log(LOG_DEBUG, "%s; %s: Spurious RST without matching "
580 "syncache entry (possibly syncookie only), "
581 "segment ignored\n", s, __func__);
582 TCPSTAT_INC(tcps_badrst);
587 * If the RST bit is set, check the sequence number to see
588 * if this is a valid reset segment.
591 * In all states except SYN-SENT, all reset (RST) segments
592 * are validated by checking their SEQ-fields. A reset is
593 * valid if its sequence number is in the window.
596 * There are four cases for the acceptability test for an incoming
599 * Segment Receive Test
601 * ------- ------- -------------------------------------------
602 * 0 0 SEG.SEQ = RCV.NXT
603 * 0 >0 RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
604 * >0 0 not acceptable
605 * >0 >0 RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
606 * or RCV.NXT =< SEG.SEQ+SEG.LEN-1 < RCV.NXT+RCV.WND
608 * Note that when receiving a SYN segment in the LISTEN state,
609 * IRS is set to SEG.SEQ and RCV.NXT is set to SEG.SEQ+1, as
610 * described in RFC 793, page 66.
612 if ((SEQ_GEQ(th->th_seq, sc->sc_irs + 1) &&
613 SEQ_LT(th->th_seq, sc->sc_irs + 1 + sc->sc_wnd)) ||
614 (sc->sc_wnd == 0 && th->th_seq == sc->sc_irs + 1)) {
615 if (V_tcp_insecure_rst ||
616 th->th_seq == sc->sc_irs + 1) {
617 syncache_drop(sc, sch);
618 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
620 "%s; %s: Our SYN|ACK was rejected, "
621 "connection attempt aborted by remote "
624 TCPSTAT_INC(tcps_sc_reset);
626 TCPSTAT_INC(tcps_badrst);
627 /* Send challenge ACK. */
628 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
629 log(LOG_DEBUG, "%s; %s: RST with invalid "
630 " SEQ %u != NXT %u (+WND %u), "
631 "sending challenge ACK\n",
633 th->th_seq, sc->sc_irs + 1, sc->sc_wnd);
634 syncache_respond(sc, sch, m, TH_ACK);
637 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
638 log(LOG_DEBUG, "%s; %s: RST with invalid SEQ %u != "
639 "NXT %u (+WND %u), segment ignored\n",
641 th->th_seq, sc->sc_irs + 1, sc->sc_wnd);
642 TCPSTAT_INC(tcps_badrst);
652 syncache_badack(struct in_conninfo *inc)
655 struct syncache_head *sch;
657 sc = syncache_lookup(inc, &sch); /* returns locked sch */
658 SCH_LOCK_ASSERT(sch);
660 syncache_drop(sc, sch);
661 TCPSTAT_INC(tcps_sc_badack);
667 syncache_unreach(struct in_conninfo *inc, tcp_seq th_seq)
670 struct syncache_head *sch;
672 sc = syncache_lookup(inc, &sch); /* returns locked sch */
673 SCH_LOCK_ASSERT(sch);
677 /* If the sequence number != sc_iss, then it's a bogus ICMP msg */
678 if (ntohl(th_seq) != sc->sc_iss)
682 * If we've retransmitted 3 times and this is our second error,
683 * we remove the entry. Otherwise, we allow it to continue on.
684 * This prevents us from incorrectly nuking an entry during a
685 * spurious network outage.
689 if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxmits < 3 + 1) {
690 sc->sc_flags |= SCF_UNREACH;
693 syncache_drop(sc, sch);
694 TCPSTAT_INC(tcps_sc_unreach);
700 * Build a new TCP socket structure from a syncache entry.
702 * On success return the newly created socket with its underlying inp locked.
704 static struct socket *
705 syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
707 struct tcp_function_block *blk;
708 struct inpcb *inp = NULL;
714 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
717 * Ok, create the full blown connection, and set things up
718 * as they would have been set up if we had created the
719 * connection when the SYN arrived. If we can't create
720 * the connection, abort it.
722 so = sonewconn(lso, 0);
725 * Drop the connection; we will either send a RST or
726 * have the peer retransmit its SYN again after its
729 TCPSTAT_INC(tcps_listendrop);
730 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
731 log(LOG_DEBUG, "%s; %s: Socket create failed "
732 "due to limits or memory shortage\n",
739 mac_socketpeer_set_from_mbuf(m, so);
743 inp->inp_inc.inc_fibnum = so->so_fibnum;
746 * Exclusive pcbinfo lock is not required in syncache socket case even
747 * if two inpcb locks can be acquired simultaneously:
748 * - the inpcb in LISTEN state,
749 * - the newly created inp.
751 * In this case, an inp cannot at the same time be in the LISTEN state
752 * and be newly created by an accept() call.
754 INP_HASH_WLOCK(&V_tcbinfo);
756 /* Insert new socket into PCB hash list. */
757 inp->inp_inc.inc_flags = sc->sc_inc.inc_flags;
759 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
760 inp->inp_vflag &= ~INP_IPV4;
761 inp->inp_vflag |= INP_IPV6;
762 inp->in6p_laddr = sc->sc_inc.inc6_laddr;
764 inp->inp_vflag &= ~INP_IPV6;
765 inp->inp_vflag |= INP_IPV4;
767 inp->inp_laddr = sc->sc_inc.inc_laddr;
773 * If there's an mbuf and it has a flowid, then let's initialise the
774 * inp with that particular flowid.
776 if (m != NULL && M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
777 inp->inp_flowid = m->m_pkthdr.flowid;
778 inp->inp_flowtype = M_HASHTYPE_GET(m);
782 * Install in the reservation hash table for now, but don't yet
783 * install a connection group since the full 4-tuple isn't yet configured.
786 inp->inp_lport = sc->sc_inc.inc_lport;
787 if ((error = in_pcbinshash_nopcbgroup(inp)) != 0) {
789 * Undo the assignments above if we failed to
790 * put the PCB on the hash lists.
793 if (sc->sc_inc.inc_flags & INC_ISIPV6)
794 inp->in6p_laddr = in6addr_any;
797 inp->inp_laddr.s_addr = INADDR_ANY;
799 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
800 log(LOG_DEBUG, "%s; %s: in_pcbinshash failed "
805 INP_HASH_WUNLOCK(&V_tcbinfo);
809 if (inp->inp_vflag & INP_IPV6PROTO) {
810 struct inpcb *oinp = sotoinpcb(lso);
813 * Inherit socket options from the listening socket.
814 * Note that in6p_inputopts are not (and should not be)
815 * copied, since it stores previously received options and is
816 * used to detect if each new option is different from the
817 * previous one and hence should be passed to a user.
818 * If we copied in6p_inputopts, a user would not be able to
819 * receive options just after calling the accept system call.
821 inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
822 if (oinp->in6p_outputopts)
823 inp->in6p_outputopts =
824 ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);
827 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
828 struct in6_addr laddr6;
829 struct sockaddr_in6 sin6;
831 sin6.sin6_family = AF_INET6;
832 sin6.sin6_len = sizeof(sin6);
833 sin6.sin6_addr = sc->sc_inc.inc6_faddr;
834 sin6.sin6_port = sc->sc_inc.inc_fport;
835 sin6.sin6_flowinfo = sin6.sin6_scope_id = 0;
836 laddr6 = inp->in6p_laddr;
837 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
838 inp->in6p_laddr = sc->sc_inc.inc6_laddr;
839 if ((error = in6_pcbconnect_mbuf(inp, (struct sockaddr *)&sin6,
840 thread0.td_ucred, m)) != 0) {
841 inp->in6p_laddr = laddr6;
842 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
843 log(LOG_DEBUG, "%s; %s: in6_pcbconnect failed "
848 INP_HASH_WUNLOCK(&V_tcbinfo);
851 /* Override flowlabel from in6_pcbconnect. */
852 inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
853 inp->inp_flow |= sc->sc_flowlabel;
856 #if defined(INET) && defined(INET6)
861 struct in_addr laddr;
862 struct sockaddr_in sin;
864 inp->inp_options = (m) ? ip_srcroute(m) : NULL;
866 if (inp->inp_options == NULL) {
867 inp->inp_options = sc->sc_ipopts;
868 sc->sc_ipopts = NULL;
871 sin.sin_family = AF_INET;
872 sin.sin_len = sizeof(sin);
873 sin.sin_addr = sc->sc_inc.inc_faddr;
874 sin.sin_port = sc->sc_inc.inc_fport;
875 bzero((caddr_t)sin.sin_zero, sizeof(sin.sin_zero));
876 laddr = inp->inp_laddr;
877 if (inp->inp_laddr.s_addr == INADDR_ANY)
878 inp->inp_laddr = sc->sc_inc.inc_laddr;
879 if ((error = in_pcbconnect_mbuf(inp, (struct sockaddr *)&sin,
880 thread0.td_ucred, m)) != 0) {
881 inp->inp_laddr = laddr;
882 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
883 log(LOG_DEBUG, "%s; %s: in_pcbconnect failed "
888 INP_HASH_WUNLOCK(&V_tcbinfo);
893 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
894 /* Copy old policy into new socket's. */
895 if (ipsec_copy_pcbpolicy(sotoinpcb(lso), inp) != 0)
896 printf("syncache_socket: could not copy policy\n");
898 INP_HASH_WUNLOCK(&V_tcbinfo);
900 tcp_state_change(tp, TCPS_SYN_RECEIVED);
901 tp->iss = sc->sc_iss;
902 tp->irs = sc->sc_irs;
905 blk = sototcpcb(lso)->t_fb;
906 if (V_functions_inherit_listen_socket_stack && blk != tp->t_fb) {
908 * Our parent's t_fb was not the default,
909 * we need to release our ref on tp->t_fb and
910 * pickup one on the new entry.
912 struct tcp_function_block *rblk;
914 rblk = find_and_ref_tcp_fb(blk);
915 KASSERT(rblk != NULL,
916 ("cannot find blk %p out of syncache?", blk));
917 if (tp->t_fb->tfb_tcp_fb_fini)
918 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 0);
919 refcount_release(&tp->t_fb->tfb_refcnt);
922 * XXXrrs this is quite dangerous, it is possible
923 * for the new function to fail to init. We also
924 * are not asking if the handoff_is_ok though at
925 * the very start that's probably ok.
927 if (tp->t_fb->tfb_tcp_fb_init) {
928 (*tp->t_fb->tfb_tcp_fb_init)(tp);
931 tp->snd_wl1 = sc->sc_irs;
932 tp->snd_max = tp->iss + 1;
933 tp->snd_nxt = tp->iss + 1;
934 tp->rcv_up = sc->sc_irs + 1;
935 tp->rcv_wnd = sc->sc_wnd;
936 tp->rcv_adv += tp->rcv_wnd;
937 tp->last_ack_sent = tp->rcv_nxt;
939 tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
940 if (sc->sc_flags & SCF_NOOPT)
941 tp->t_flags |= TF_NOOPT;
943 if (sc->sc_flags & SCF_WINSCALE) {
944 tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
945 tp->snd_scale = sc->sc_requested_s_scale;
946 tp->request_r_scale = sc->sc_requested_r_scale;
948 if (sc->sc_flags & SCF_TIMESTAMP) {
949 tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
950 tp->ts_recent = sc->sc_tsreflect;
951 tp->ts_recent_age = tcp_ts_getticks();
952 tp->ts_offset = sc->sc_tsoff;
954 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
955 if (sc->sc_flags & SCF_SIGNATURE)
956 tp->t_flags |= TF_SIGNATURE;
958 if (sc->sc_flags & SCF_SACK)
959 tp->t_flags |= TF_SACK_PERMIT;
962 if (sc->sc_flags & SCF_ECN)
963 tp->t_flags |= TF_ECN_PERMIT;
966 * Set up MSS and get cached values from tcp_hostcache.
967 * This might overwrite some of the defaults we just set.
969 tcp_mss(tp, sc->sc_peer_mss);
972 * If the SYN,ACK was retransmitted, indicate that the CWND is to be
973 * limited to one segment in cc_conn_init().
974 * NB: sc_rxmits counts all SYN,ACK transmits, not just retransmits.
976 if (sc->sc_rxmits > 1)
981 * Allow a TOE driver to install its hooks. Note that we hold the
982 * pcbinfo lock too and that prevents tcp_usr_accept from accepting a
983 * new connection before the TOE driver has done its thing.
985 if (ADDED_BY_TOE(sc)) {
986 struct toedev *tod = sc->sc_tod;
988 tod->tod_offload_socket(tod, sc->sc_todctx, so);
992 * Copy and activate timers.
994 tp->t_keepinit = sototcpcb(lso)->t_keepinit;
995 tp->t_keepidle = sototcpcb(lso)->t_keepidle;
996 tp->t_keepintvl = sototcpcb(lso)->t_keepintvl;
997 tp->t_keepcnt = sototcpcb(lso)->t_keepcnt;
998 tcp_timer_activate(tp, TT_KEEP, TP_KEEPINIT(tp));
1000 TCPSTAT_INC(tcps_accepts);
1012 * This function gets called when we receive an ACK for a
1013 * socket in the LISTEN state. We look up the connection
1014 * in the syncache, and if it's there, we pull it out of
1015 * the cache and turn it into a full-blown connection in
1016 * the SYN-RECEIVED state.
1018 * On syncache_socket() success the newly created socket
1019 * has its underlying inp locked.
1022 syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
1023 struct socket **lsop, struct mbuf *m)
1025 struct syncache *sc;
1026 struct syncache_head *sch;
1027 struct syncache scs;
1031 * Global TCP locks are held because we manipulate the PCB lists
1032 * and create a new socket.
1034 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1035 KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK,
1036 ("%s: can handle only ACK", __func__));
1038 sc = syncache_lookup(inc, &sch); /* returns locked sch */
1039 SCH_LOCK_ASSERT(sch);
1043 * Test code for syncookies comparing the syncache stored
1044 * values with the reconstructed values from the cookie.
1047 syncookie_cmp(inc, sch, sc, th, to, *lsop);
1052 * There is no syncache entry, so see if this ACK is
1053 * a returning syncookie. To do this, first:
1054 * A. Check if syncookies are used in case of syncache overflows.
1056 * B. See if this socket has had a syncache entry dropped in
1057 * the recent past. We don't want to accept a bogus
1058 * syncookie if we've never received a SYN or accept it twice.
1060 * C. check that the syncookie is valid. If it is, then
1061 * cobble up a fake syncache entry, and return.
1063 if (!V_tcp_syncookies) {
1065 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1066 log(LOG_DEBUG, "%s; %s: Spurious ACK, "
1067 "segment rejected (syncookies disabled)\n",
1071 if (!V_tcp_syncookiesonly &&
1072 sch->sch_last_overflow < time_uptime - SYNCOOKIE_LIFETIME) {
1074 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1075 log(LOG_DEBUG, "%s; %s: Spurious ACK, "
1076 "segment rejected (no syncache entry)\n",
1080 bzero(&scs, sizeof(scs));
1081 sc = syncookie_lookup(inc, sch, &scs, th, to, *lsop);
1084 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1085 log(LOG_DEBUG, "%s; %s: Segment failed "
1086 "SYNCOOKIE authentication, segment rejected "
1087 "(probably spoofed)\n", s, __func__);
1090 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1091 /* If received ACK has MD5 signature, check it. */
1092 if ((to->to_flags & TOF_SIGNATURE) != 0 &&
1093 (!TCPMD5_ENABLED() ||
1094 TCPMD5_INPUT(m, th, to->to_signature) != 0)) {
1096 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1097 log(LOG_DEBUG, "%s; %s: Segment rejected, "
1098 "MD5 signature doesn't match.\n",
1102 TCPSTAT_INC(tcps_sig_err_sigopt);
1103 return (-1); /* Do not send RST */
1105 #endif /* TCP_SIGNATURE */
1107 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1109 * If listening socket requested TCP digests, check that
1110 * received ACK has signature and it is correct.
1111 * If not, drop the ACK and leave the sc entry in the cache,
1112 * because SYN was received with correct signature.
1114 if (sc->sc_flags & SCF_SIGNATURE) {
1115 if ((to->to_flags & TOF_SIGNATURE) == 0) {
1117 TCPSTAT_INC(tcps_sig_err_nosigopt);
1119 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1120 log(LOG_DEBUG, "%s; %s: Segment "
1121 "rejected, MD5 signature wasn't "
1122 "provided.\n", s, __func__);
1125 return (-1); /* Do not send RST */
1127 if (!TCPMD5_ENABLED() ||
1128 TCPMD5_INPUT(m, th, to->to_signature) != 0) {
1129 /* Doesn't match or no SA */
1131 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1132 log(LOG_DEBUG, "%s; %s: Segment "
1133 "rejected, MD5 signature doesn't "
1134 "match.\n", s, __func__);
1137 return (-1); /* Do not send RST */
1140 #endif /* TCP_SIGNATURE */
1142 * Pull out the entry to unlock the bucket row.
1144 * NOTE: We must decrease TCPS_SYN_RECEIVED count here, not
1145 * tcp_state_change(). The tcpcb is not existent at this
1146 * moment. A new one will be allocated via syncache_socket->
1147 * sonewconn->tcp_usr_attach in TCPS_CLOSED state, then
1148 * syncache_socket() will change it to TCPS_SYN_RECEIVED.
1150 TCPSTATES_DEC(TCPS_SYN_RECEIVED);
1151 TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
1154 if (ADDED_BY_TOE(sc)) {
1155 struct toedev *tod = sc->sc_tod;
1157 tod->tod_syncache_removed(tod, sc->sc_todctx);
1164 * Segment validation:
1165 * ACK must match our initial sequence number + 1 (the SYN|ACK).
1167 if (th->th_ack != sc->sc_iss + 1) {
1168 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1169 log(LOG_DEBUG, "%s; %s: ACK %u != ISS+1 %u, segment "
1170 "rejected\n", s, __func__, th->th_ack, sc->sc_iss);
1175 * The SEQ must fall in the window starting at the received
1176 * initial receive sequence number + 1 (the SYN).
1178 if (SEQ_LEQ(th->th_seq, sc->sc_irs) ||
1179 SEQ_GT(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
1180 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1181 log(LOG_DEBUG, "%s; %s: SEQ %u != IRS+1 %u, segment "
1182 "rejected\n", s, __func__, th->th_seq, sc->sc_irs);
1187 * If timestamps were not negotiated during SYN/ACK they
1188 * must not appear on any segment during this session.
1190 if (!(sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS)) {
1191 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1192 log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
1193 "segment rejected\n", s, __func__);
1198 * If timestamps were negotiated during SYN/ACK they should
1199 * appear on every segment during this session.
1200 * XXXAO: This is only informal as there have been unverified
1201 * reports of non-compliant stacks.
1203 if ((sc->sc_flags & SCF_TIMESTAMP) && !(to->to_flags & TOF_TS)) {
1204 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1205 log(LOG_DEBUG, "%s; %s: Timestamp missing, "
1206 "no action\n", s, __func__);
1212 *lsop = syncache_socket(sc, *lsop, m);
1215 TCPSTAT_INC(tcps_sc_aborted);
1217 TCPSTAT_INC(tcps_sc_completed);
1219 /* how do we find the inp for the new socket? */
1224 if (sc != NULL && sc != &scs)
1233 syncache_tfo_expand(struct syncache *sc, struct socket **lsop, struct mbuf *m,
1234 uint64_t response_cookie)
1238 unsigned int *pending_counter;
1241 * Global TCP locks are held because we manipulate the PCB lists
1242 * and create a new socket.
1244 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1246 pending_counter = intotcpcb(sotoinpcb(*lsop))->t_tfo_pending;
1247 *lsop = syncache_socket(sc, *lsop, m);
1248 if (*lsop == NULL) {
1249 TCPSTAT_INC(tcps_sc_aborted);
1250 atomic_subtract_int(pending_counter, 1);
1252 soisconnected(*lsop);
1253 inp = sotoinpcb(*lsop);
1254 tp = intotcpcb(inp);
1255 tp->t_flags |= TF_FASTOPEN;
1256 tp->t_tfo_cookie.server = response_cookie;
1257 tp->snd_max = tp->iss;
1258 tp->snd_nxt = tp->iss;
1259 tp->t_tfo_pending = pending_counter;
1260 TCPSTAT_INC(tcps_sc_completed);
1265 * Given a LISTEN socket and an inbound SYN request, add
1266 * this to the syn cache, and send back a segment:
1267 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
1270 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
1271 * Doing so would require that we hold onto the data and deliver it
1272 * to the application. However, if we are the target of a SYN-flood
1273 * DoS attack, an attacker could send data which would eventually
1274 * consume all available buffer space if it were ACKed. By not ACKing
1275 * the data, we avoid this DoS scenario.
1277 * The exception to the above is when a SYN with a valid TCP Fast Open (TFO)
1278 * cookie is processed and a new socket is created. In this case, any data
1279 * accompanying the SYN will be queued to the socket by tcp_input() and will
1280 * be ACKed either when the application sends response data or the delayed
1281 * ACK timer expires, whichever comes first.
1284 syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
1285 struct inpcb *inp, struct socket **lsop, struct mbuf *m, void *tod,
1290 struct syncache *sc = NULL;
1291 struct syncache_head *sch;
1292 struct mbuf *ipopts = NULL;
1294 int win, ip_ttl, ip_tos;
1298 int autoflowlabel = 0;
1301 struct label *maclabel;
1303 struct syncache scs;
1305 uint64_t tfo_response_cookie;
1306 unsigned int *tfo_pending = NULL;
1307 int tfo_cookie_valid = 0;
1308 int tfo_response_cookie_valid = 0;
1310 INP_WLOCK_ASSERT(inp); /* listen socket */
1311 KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_SYN,
1312 ("%s: unexpected tcp flags", __func__));
1315 * Combine all so/tp operations very early to drop the INP lock as soon as possible.
1319 KASSERT(SOLISTENING(so), ("%s: %p not listening", __func__, so));
1321 cred = crhold(so->so_cred);
1324 if ((inc->inc_flags & INC_ISIPV6) &&
1325 (inp->inp_flags & IN6P_AUTOFLOWLABEL))
1328 ip_ttl = inp->inp_ip_ttl;
1329 ip_tos = inp->inp_ip_tos;
1330 win = so->sol_sbrcv_hiwat;
1331 ltflags = (tp->t_flags & (TF_NOOPT | TF_SIGNATURE));
1333 if (V_tcp_fastopen_server_enable && IS_FASTOPEN(tp->t_flags) &&
1334 (tp->t_tfo_pending != NULL) &&
1335 (to->to_flags & TOF_FASTOPEN)) {
1337 * Limit the number of pending TFO connections to
1338 * approximately half of the queue limit. This prevents TFO
1339 * SYN floods from starving the service by filling the
1340 * listen queue with bogus TFO connections.
1342 if (atomic_fetchadd_int(tp->t_tfo_pending, 1) <=
1343 (so->sol_qlimit / 2)) {
1346 result = tcp_fastopen_check_cookie(inc,
1347 to->to_tfo_cookie, to->to_tfo_len,
1348 &tfo_response_cookie);
1349 tfo_cookie_valid = (result > 0);
1350 tfo_response_cookie_valid = (result >= 0);
1354 * Remember the TFO pending counter as it will have to be
1355 * decremented below if we don't make it to syncache_tfo_expand().
1357 tfo_pending = tp->t_tfo_pending;
1360 /* By the time we drop the lock these should no longer be used. */
1365 if (mac_syncache_init(&maclabel) != 0) {
1369 mac_syncache_create(maclabel, inp);
1371 if (!tfo_cookie_valid)
1375 * Remember the IP options, if any.
1378 if (!(inc->inc_flags & INC_ISIPV6))
1381 ipopts = (m) ? ip_srcroute(m) : NULL;
1386 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1388 * If listening socket requested TCP digests, check that received
1389 * SYN has signature and it is correct. If signature doesn't match
1390 * or TCP_SIGNATURE support isn't enabled, drop the packet.
1392 if (ltflags & TF_SIGNATURE) {
1393 if ((to->to_flags & TOF_SIGNATURE) == 0) {
1394 TCPSTAT_INC(tcps_sig_err_nosigopt);
1397 if (!TCPMD5_ENABLED() ||
1398 TCPMD5_INPUT(m, th, to->to_signature) != 0)
1401 #endif /* TCP_SIGNATURE */
1403 * See if we already have an entry for this connection.
1404 * If we do, resend the SYN,ACK, and reset the retransmit timer.
1406 * XXX: should the syncache be re-initialized with the contents
1407 * of the new SYN here (which may have different options?)
1409 * XXX: We do not check the sequence number to see if this is a
1410 * real retransmit or a new connection attempt. The question is
1411 * how to handle such a case; either ignore it as spoofed, or
1412 * drop the current entry and create a new one?
1414 sc = syncache_lookup(inc, &sch); /* returns locked entry */
1415 SCH_LOCK_ASSERT(sch);
1417 if (tfo_cookie_valid)
1419 TCPSTAT_INC(tcps_sc_dupsyn);
1422 * If we were remembering a previous source route,
1423 * forget it and use the new one we've been given.
1426 (void) m_free(sc->sc_ipopts);
1427 sc->sc_ipopts = ipopts;
1430 * Update timestamp if present.
1432 if ((sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS))
1433 sc->sc_tsreflect = to->to_tsval;
1435 sc->sc_flags &= ~SCF_TIMESTAMP;
1438 * Since we have already unconditionally allocated label
1439 * storage, free it up. The syncache entry will already
1440 * have an initialized label we can use.
1442 mac_syncache_destroy(&maclabel);
1444 TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
1445 /* Retransmit SYN|ACK and reset retransmit count. */
1446 if ((s = tcp_log_addrs(&sc->sc_inc, th, NULL, NULL))) {
1447 log(LOG_DEBUG, "%s; %s: Received duplicate SYN, "
1448 "resetting timer and retransmitting SYN|ACK\n",
1452 if (syncache_respond(sc, sch, m, TH_SYN|TH_ACK) == 0) {
1454 syncache_timeout(sc, sch, 1);
1455 TCPSTAT_INC(tcps_sndacks);
1456 TCPSTAT_INC(tcps_sndtotal);
1462 if (tfo_cookie_valid) {
1463 bzero(&scs, sizeof(scs));
1468 sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
1471 * The zone allocator couldn't provide more entries.
1472 * Treat this as if the cache was full; drop the oldest
1473 * entry and insert the new one.
1475 TCPSTAT_INC(tcps_sc_zonefail);
1476 if ((sc = TAILQ_LAST(&sch->sch_bucket, sch_head)) != NULL) {
1477 sch->sch_last_overflow = time_uptime;
1478 syncache_drop(sc, sch);
1480 sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
1482 if (V_tcp_syncookies) {
1483 bzero(&scs, sizeof(scs));
1488 (void) m_free(ipopts);
1495 if (!tfo_cookie_valid && tfo_response_cookie_valid)
1496 sc->sc_tfo_cookie = &tfo_response_cookie;
1499 * Fill in the syncache values.
1502 sc->sc_label = maclabel;
1506 sc->sc_ipopts = ipopts;
1507 bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
1509 if (!(inc->inc_flags & INC_ISIPV6))
1512 sc->sc_ip_tos = ip_tos;
1513 sc->sc_ip_ttl = ip_ttl;
1517 sc->sc_todctx = todctx;
1519 sc->sc_irs = th->th_seq;
1520 sc->sc_iss = arc4random();
1522 sc->sc_flowlabel = 0;
1525 * Initial receive window: clip sbspace to [0 .. TCP_MAXWIN].
1526 * win was derived from socket earlier in the function.
1529 win = imin(win, TCP_MAXWIN);
1532 if (V_tcp_do_rfc1323) {
1534 * A timestamp received in a SYN makes
1535 * it ok to send timestamp requests and replies.
1537 if (to->to_flags & TOF_TS) {
1538 sc->sc_tsreflect = to->to_tsval;
1539 sc->sc_flags |= SCF_TIMESTAMP;
1540 sc->sc_tsoff = tcp_new_ts_offset(inc);
1542 if (to->to_flags & TOF_SCALE) {
1546 * Pick the smallest possible scaling factor that
1547 * will still allow us to scale up to sb_max, aka
1548 * kern.ipc.maxsockbuf.
1550 * We do this because there are broken firewalls that
1551 * will corrupt the window scale option, leading to
1552 * the other endpoint believing that our advertised
1553 * window is unscaled. At scale factors larger than
1554 * 5 the unscaled window will drop below 1500 bytes,
1555 * leading to serious problems when traversing these firewalls.
1558 * With the default maxsockbuf of 256K, a scale factor
1559 * of 3 will be chosen by this algorithm. Those who
1560 * choose a larger maxsockbuf should watch out
1561 * for the compatibility problems mentioned above.
1563 * RFC1323: The Window field in a SYN (i.e., a <SYN>
1564 * or <SYN,ACK>) segment itself is never scaled.
1566 while (wscale < TCP_MAX_WINSHIFT &&
1567 (TCP_MAXWIN << wscale) < sb_max)
1569 sc->sc_requested_r_scale = wscale;
1570 sc->sc_requested_s_scale = to->to_wscale;
1571 sc->sc_flags |= SCF_WINSCALE;
1574 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1576 * If listening socket requested TCP digests, flag this in the
1577 * syncache so that syncache_respond() will do the right thing
1580 if (ltflags & TF_SIGNATURE)
1581 sc->sc_flags |= SCF_SIGNATURE;
1582 #endif /* TCP_SIGNATURE */
1583 if (to->to_flags & TOF_SACKPERM)
1584 sc->sc_flags |= SCF_SACK;
1585 if (to->to_flags & TOF_MSS)
1586 sc->sc_peer_mss = to->to_mss; /* peer mss may be zero */
1587 if (ltflags & TF_NOOPT)
1588 sc->sc_flags |= SCF_NOOPT;
1589 if ((th->th_flags & (TH_ECE|TH_CWR)) && V_tcp_do_ecn)
1590 sc->sc_flags |= SCF_ECN;
1592 if (V_tcp_syncookies)
1593 sc->sc_iss = syncookie_generate(sch, sc);
1595 if (autoflowlabel) {
1596 if (V_tcp_syncookies)
1597 sc->sc_flowlabel = sc->sc_iss;
1599 sc->sc_flowlabel = ip6_randomflowlabel();
1600 sc->sc_flowlabel = htonl(sc->sc_flowlabel) & IPV6_FLOWLABEL_MASK;
1605 if (tfo_cookie_valid) {
1606 syncache_tfo_expand(sc, lsop, m, tfo_response_cookie);
1607 /* INP_WUNLOCK(inp) will be performed by the caller */
1612 TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
1614 * Do a standard 3-way handshake.
1616 if (syncache_respond(sc, sch, m, TH_SYN|TH_ACK) == 0) {
1617 if (V_tcp_syncookies && V_tcp_syncookiesonly && sc != &scs)
1619 else if (sc != &scs)
1620 syncache_insert(sc, sch); /* locks and unlocks sch */
1621 TCPSTAT_INC(tcps_sndacks);
1622 TCPSTAT_INC(tcps_sndtotal);
1626 TCPSTAT_INC(tcps_sc_dropped);
1631 TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
1638 * If tfo_pending is not NULL here, then a TFO SYN that did not
1639 * result in a new socket was processed and the associated pending
1640 * counter has not yet been decremented. All such TFO processing paths
1641 * transit this point.
1643 if (tfo_pending != NULL)
1644 tcp_fastopen_decrement_counter(tfo_pending);
1651 mac_syncache_destroy(&maclabel);
1657 * Send SYN|ACK or ACK to the peer. Either in response to a peer's segment,
1658 * i.e. m0 != NULL, or upon 3WHS ACK timeout, i.e. m0 == NULL.
1661 syncache_respond(struct syncache *sc, struct syncache_head *sch,
1662 const struct mbuf *m0, int flags)
1664 struct ip *ip = NULL;
1666 struct tcphdr *th = NULL;
1667 int optlen, error = 0; /* Make compiler happy */
1668 u_int16_t hlen, tlen, mssopt;
1671 struct ip6_hdr *ip6 = NULL;
1675 (sc->sc_inc.inc_flags & INC_ISIPV6) ? sizeof(struct ip6_hdr) :
1678 tlen = hlen + sizeof(struct tcphdr);
1680 /* Determine the MSS we advertise to the other end of the connection. */
1681 mssopt = max(tcp_mssopt(&sc->sc_inc), V_tcp_minmss);
1683 /* XXX: Assume that the entire packet will fit in a header mbuf. */
1684 KASSERT(max_linkhdr + tlen + TCP_MAXOLEN <= MHLEN,
1685 ("syncache: mbuf too small"));
1687 /* Create the IP+TCP header from scratch. */
1688 m = m_gethdr(M_NOWAIT, MT_DATA);
1692 mac_syncache_create_mbuf(sc->sc_label, m);
1694 m->m_data += max_linkhdr;
1696 m->m_pkthdr.len = tlen;
1697 m->m_pkthdr.rcvif = NULL;
1700 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
1701 ip6 = mtod(m, struct ip6_hdr *);
1702 ip6->ip6_vfc = IPV6_VERSION;
1703 ip6->ip6_nxt = IPPROTO_TCP;
1704 ip6->ip6_src = sc->sc_inc.inc6_laddr;
1705 ip6->ip6_dst = sc->sc_inc.inc6_faddr;
1706 ip6->ip6_plen = htons(tlen - hlen);
1707 /* ip6_hlim is set after checksum */
1708 ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
1709 ip6->ip6_flow |= sc->sc_flowlabel;
1711 th = (struct tcphdr *)(ip6 + 1);
1714 #if defined(INET6) && defined(INET)
1719 ip = mtod(m, struct ip *);
1720 ip->ip_v = IPVERSION;
1721 ip->ip_hl = sizeof(struct ip) >> 2;
1722 ip->ip_len = htons(tlen);
1726 ip->ip_p = IPPROTO_TCP;
1727 ip->ip_src = sc->sc_inc.inc_laddr;
1728 ip->ip_dst = sc->sc_inc.inc_faddr;
1729 ip->ip_ttl = sc->sc_ip_ttl;
1730 ip->ip_tos = sc->sc_ip_tos;
1733 * See if we should do MTU discovery. Route lookups are
1734 * expensive, so we will only unset the DF bit if:
1736 * 1) path_mtu_discovery is disabled
1737 * 2) the SCF_UNREACH flag has been set
1739 if (V_path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
1740 ip->ip_off |= htons(IP_DF);
1742 th = (struct tcphdr *)(ip + 1);
1745 th->th_sport = sc->sc_inc.inc_lport;
1746 th->th_dport = sc->sc_inc.inc_fport;
1749 th->th_seq = htonl(sc->sc_iss);
1751 th->th_seq = htonl(sc->sc_iss + 1);
1752 th->th_ack = htonl(sc->sc_irs + 1);
1753 th->th_off = sizeof(struct tcphdr) >> 2;
1755 th->th_flags = flags;
1756 th->th_win = htons(sc->sc_wnd);
1759 if ((flags & TH_SYN) && (sc->sc_flags & SCF_ECN)) {
1760 th->th_flags |= TH_ECE;
1761 TCPSTAT_INC(tcps_ecn_shs);
1764 /* Tack on the TCP options. */
1765 if ((sc->sc_flags & SCF_NOOPT) == 0) {
1768 if (flags & TH_SYN) {
1770 to.to_flags = TOF_MSS;
1771 if (sc->sc_flags & SCF_WINSCALE) {
1772 to.to_wscale = sc->sc_requested_r_scale;
1773 to.to_flags |= TOF_SCALE;
1775 if (sc->sc_flags & SCF_SACK)
1776 to.to_flags |= TOF_SACKPERM;
1777 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1778 if (sc->sc_flags & SCF_SIGNATURE)
1779 to.to_flags |= TOF_SIGNATURE;
1781 if (sc->sc_tfo_cookie) {
1782 to.to_flags |= TOF_FASTOPEN;
1783 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
1784 to.to_tfo_cookie = sc->sc_tfo_cookie;
1785 /* don't send cookie again when retransmitting response */
1786 sc->sc_tfo_cookie = NULL;
1789 if (sc->sc_flags & SCF_TIMESTAMP) {
1790 to.to_tsval = sc->sc_tsoff + tcp_ts_getticks();
1791 to.to_tsecr = sc->sc_tsreflect;
1792 to.to_flags |= TOF_TS;
1794 optlen = tcp_addoptions(&to, (u_char *)(th + 1));
1796 /* Adjust headers by option size. */
1797 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
1799 m->m_pkthdr.len += optlen;
1801 if (sc->sc_inc.inc_flags & INC_ISIPV6)
1802 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) + optlen);
1805 ip->ip_len = htons(ntohs(ip->ip_len) + optlen);
1806 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1807 if (sc->sc_flags & SCF_SIGNATURE) {
1808 KASSERT(to.to_flags & TOF_SIGNATURE,
1809 ("tcp_addoptions() didn't set tcp_signature"));
1811 /* NOTE: to.to_signature is inside of mbuf */
1812 if (!TCPMD5_ENABLED() ||
1813 TCPMD5_OUTPUT(m, th, to.to_signature) != 0) {
1822 M_SETFIB(m, sc->sc_inc.inc_fibnum);
1823 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
1825 * If we have peer's SYN and it has a flowid, then let's assign it to
1826 * our SYN|ACK. ip6_output() and ip_output() will not assign flowid
1827 * to SYN|ACK due to lack of inp here.
1829 if (m0 != NULL && M_HASHTYPE_GET(m0) != M_HASHTYPE_NONE) {
1830 m->m_pkthdr.flowid = m0->m_pkthdr.flowid;
1831 M_HASHTYPE_SET(m, M_HASHTYPE_GET(m0));
1834 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
1835 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
1836 th->th_sum = in6_cksum_pseudo(ip6, tlen + optlen - hlen,
1838 ip6->ip6_hlim = in6_selecthlim(NULL, NULL);
1840 if (ADDED_BY_TOE(sc)) {
1841 struct toedev *tod = sc->sc_tod;
1843 error = tod->tod_syncache_respond(tod, sc->sc_todctx, m);
1848 TCP_PROBE5(send, NULL, NULL, ip6, NULL, th);
1849 error = ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
1852 #if defined(INET6) && defined(INET)
1857 m->m_pkthdr.csum_flags = CSUM_TCP;
1858 th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1859 htons(tlen + optlen - hlen + IPPROTO_TCP));
1861 if (ADDED_BY_TOE(sc)) {
1862 struct toedev *tod = sc->sc_tod;
1864 error = tod->tod_syncache_respond(tod, sc->sc_todctx, m);
1869 TCP_PROBE5(send, NULL, NULL, ip, NULL, th);
1870 error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, NULL);
1877 * The purpose of syncookies is to handle spoofed SYN flooding DoS attacks
1878 * that exceed the capacity of the syncache by avoiding the storage of any
1879 * of the SYNs we receive. Syncookies defend against blind SYN flooding
1880 * attacks where the attacker does not have access to our responses.
1882 * Syncookies encode and include all necessary information about the
1883 * connection setup within the SYN|ACK that we send back. That way we
1884 * can avoid keeping any local state until the ACK to our SYN|ACK returns
1885 * (if ever). Normally the syncache and syncookies are running in parallel
1886 * with the latter taking over when the former is exhausted. When a matching
1887 * syncache entry is found, the syncookie is ignored.
1889 * The only reliable information that persists across the 3WHS is our 32-bit
1890 * initial sequence number (ISS). Syncookies embed a sufficiently strong
1891 * cryptographic hash (MAC) value and a few bits of TCP SYN options in the ISS
1892 * of our SYN|ACK. The MAC can be recomputed when the ACK to our SYN|ACK
1893 * returns and signifies a legitimate connection if it matches the ACK.
1895 * The available space of 32 bits to store the hash and to encode the SYN
1896 * option information is very tight and we should have at least 24 bits for
1897 * the MAC to keep the number of guesses by blind spoofing reasonably high.
1899 * SYN option information we have to encode to fully restore a connection:
1900 * MSS: is important to choose an optimal segment size to avoid IP level
1901 * fragmentation along the path. The common MSS values can be encoded
1902 * in a 3-bit table. Uncommon values are captured by the next lower value
1903 * in the table leading to a slight increase in packetization overhead.
1904 * WSCALE: is necessary to allow large windows to be used for high delay-
1905 * bandwidth product links. Not scaling the window when it was initially
1906 * negotiated is bad for performance as lack of scaling further decreases
1907 * the apparent available send window. We only need to encode the WSCALE
1908 * we received from the remote end. Our end can be recalculated at any
1909 * time. The common WSCALE values can be encoded in a 3-bit table.
1910 * Uncommon values are captured by the next lower value in the table
1911 * making us under-estimate the available window size halving our
1912 * theoretically possible maximum throughput for that connection.
1913 * SACK: Greatly assists in packet loss recovery and requires 1 bit.
1914 * TIMESTAMP and SIGNATURE are not encoded because they are permanent options
1915 * that are included in all segments on a connection. We enable them when
1918 * Security of syncookies and attack vectors:
1920 * The MAC is computed over (faddr||laddr||fport||lport||irs||flags||secmod)
1921 * together with the global secret to make it unique per connection attempt.
1922 * Thus any change of any of those parameters results in a different MAC output
1923 * in an unpredictable way unless a collision is encountered. 24 bits of the
1924 * MAC are embedded into the ISS.
1926 * To prevent replay attacks two rotating global secrets are updated with a
1927 * new random value every 15 seconds. The life-time of a syncookie is thus 15-30 seconds.
1930 * Vector 1: Attacking the secret. This requires finding a weakness in the
1931 * MAC itself or the way it is used here. The attacker can do a chosen plain
1932 * text attack by varying and testing all the parameters under his control.
1933 * The strength depends on the size and randomness of the secret, and the
1934 * cryptographic security of the MAC function. Due to the constant updating
1935 * of the secret the attacker has at most 29.999 seconds to find the secret
1936 * and launch spoofed connections. After that he has to start all over again.
1938 * Vector 2: Collision attack on the MAC of a single ACK. With a 24 bit MAC
1939 * size an average of 4,823 attempts are required for a 50% chance of success
1940 * to spoof a single syncookie (birthday collision paradox). However the
1941 * attacker is blind and doesn't know if one of his attempts succeeded unless
1942 * he has a side channel to infer success from. A single connection setup
1943 * success average of 90% requires 8,790 packets, 99.99% requires 17,578 packets.
1944 * This many attempts are required for each blindly spoofed connection. For
1945 * every additional spoofed connection he has to launch another N attempts.
1946 * Thus for a sustained rate of 100 spoofed connections per second approximately
1947 * 1,800,000 packets per second would have to be sent.
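 * (These packet counts follow from the birthday bound n ~= sqrt(2 * 2^24 *
 * ln(1/(1-p))): p = 0.5 yields ~4,823 attempts, p = 0.9 yields ~8,790 and
 * p = 0.9999 yields ~17,578.)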
1949 * NB: The MAC function should be fast so that it doesn't become a CPU
1950 * exhaustion attack vector itself.
1953 * RFC4987 TCP SYN Flooding Attacks and Common Mitigations
1954 * SYN cookies were first proposed by cryptographer Dan J. Bernstein in 1996
1955 * http://cr.yp.to/syncookies.html (overview)
1956 * http://cr.yp.to/syncookies/archive (details)
1959 * Schematic construction of a syncookie enabled Initial Sequence Number:
1961 * 12345678901234567890123456789012
1962 * |xxxxxxxxxxxxxxxxxxxxxxxxWWWMMMSP|
1964 * x 24 MAC (truncated)
1965 * W 3 Send Window Scale index
1966 * M 3 MSS index
1967 * S 1 SACK permitted
1968 * P 1 Odd/even secret
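 *
 * Round-trip sketch (illustrative): on generation the top 24 bits of the
 * ISS carry the truncated MAC and the low 8 bits carry the flags XORed
 * with the MAC's top byte:
 *	iss = (hash & ~0xff) | (flags ^ (hash >> 24));
 * When the ACK returns, ack - 1 recovers the ISS, whose top byte still
 * equals hash >> 24, so
 *	(ack & 0xff) ^ (ack >> 24) == flags
 * restores the flags byte; the MAC is then recomputed and compared
 * against the top 24 bits (see syncookie_generate()/syncookie_lookup()).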
1972 * Distribution and probability of certain MSS values. Those in between are
1973 * rounded down to the next lower one.
1974 * [An Analysis of TCP Maximum Segment Sizes, S. Alcock and R. Nelson, 2011]
1975 * .2% .3% 5% 7% 7% 20% 15% 45%
1977 static int tcp_sc_msstab[] = { 216, 536, 1200, 1360, 1400, 1440, 1452, 1460 };
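/*
 * Example: a peer MSS of 1300 has no exact entry in the table above and
 * is encoded as index 2 (1200); the restored connection then simply uses
 * slightly smaller segments than the peer offered.
 */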
1980 * Distribution and probability of certain WSCALE values. We have to map the
1981 * (send) window scale (shift) option with a range of 0-14 from 4 bits into 3
1982 * bits based on prevalence of certain values. Values without an exact match
1983 * are rounded down to the next lower one, letting us under-estimate the
1984 * true available window. At the moment this would happen only for the
1985 * very uncommon values 3, 5 and those above 8 (more than 16MB socket buffer
1986 * and window size). The absence of the WSCALE option (no scaling in either
1987 * direction) is encoded with index zero.
1988 * [WSCALE values histograms, Allman, 2012]
1989 * X 10 10 35 5 6 14 10% by host
1990 * X 11 4 5 5 18 49 3% by connections
1992 static int tcp_sc_wstab[] = { 0, 0, 1, 2, 4, 6, 7, 8 };
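/*
 * Example: a received send wscale of 5 has no exact entry in the table
 * above and is encoded as index 4 (wscale 4), under-estimating the
 * peer's advertised receive window by half.
 */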
1995 * Compute the MAC for the SYN cookie. SIPHASH-2-4 is chosen for its speed
1996 * and good cryptographic properties.
1999 syncookie_mac(struct in_conninfo *inc, tcp_seq irs, uint8_t flags,
2000 uint8_t *secbits, uintptr_t secmod)
2003 uint32_t siphash[2];
2005 SipHash24_Init(&ctx);
2006 SipHash_SetKey(&ctx, secbits);
2007 switch (inc->inc_flags & INC_ISIPV6) {
2010 SipHash_Update(&ctx, &inc->inc_faddr, sizeof(inc->inc_faddr));
2011 SipHash_Update(&ctx, &inc->inc_laddr, sizeof(inc->inc_laddr));
2016 SipHash_Update(&ctx, &inc->inc6_faddr, sizeof(inc->inc6_faddr));
2017 SipHash_Update(&ctx, &inc->inc6_laddr, sizeof(inc->inc6_laddr));
2021 SipHash_Update(&ctx, &inc->inc_fport, sizeof(inc->inc_fport));
2022 SipHash_Update(&ctx, &inc->inc_lport, sizeof(inc->inc_lport));
2023 SipHash_Update(&ctx, &irs, sizeof(irs));
2024 SipHash_Update(&ctx, &flags, sizeof(flags));
2025 SipHash_Update(&ctx, &secmod, sizeof(secmod));
2026 SipHash_Final((u_int8_t *)&siphash, &ctx);
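/* Fold the 64-bit SipHash output down to a 32-bit value. */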
2028 return (siphash[0] ^ siphash[1]);
2032 syncookie_generate(struct syncache_head *sch, struct syncache *sc)
2034 u_int i, secbit, wscale;
2037 union syncookie cookie;
2039 SCH_LOCK_ASSERT(sch);
2043 /* Map our computed MSS into the 3-bit index. */
2044 for (i = nitems(tcp_sc_msstab) - 1;
2045 tcp_sc_msstab[i] > sc->sc_peer_mss && i > 0;
2048 cookie.flags.mss_idx = i;
2051 * Map the send window scale into the 3-bit index but only if
2052 * the wscale option was received.
2054 if (sc->sc_flags & SCF_WINSCALE) {
2055 wscale = sc->sc_requested_s_scale;
2056 for (i = nitems(tcp_sc_wstab) - 1;
2057 tcp_sc_wstab[i] > wscale && i > 0;
2060 cookie.flags.wscale_idx = i;
2063 /* Can we do SACK? */
2064 if (sc->sc_flags & SCF_SACK)
2065 cookie.flags.sack_ok = 1;
2067 /* Which of the two secrets to use. */
2068 secbit = sch->sch_sc->secret.oddeven & 0x1;
2069 cookie.flags.odd_even = secbit;
2071 secbits = sch->sch_sc->secret.key[secbit];
2072 hash = syncookie_mac(&sc->sc_inc, sc->sc_irs, cookie.cookie, secbits,
2076 * Put the flags into the hash and XOR them to get better ISS number
2077 * variance. This doesn't enhance the cryptographic strength and is
2078 * done to prevent the 8 cookie bits from showing up directly on the wire.
2082 iss |= cookie.cookie ^ (hash >> 24);
2084 TCPSTAT_INC(tcps_sc_sendcookie);
2088 static struct syncache *
2089 syncookie_lookup(struct in_conninfo *inc, struct syncache_head *sch,
2090 struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
2096 int wnd, wscale = 0;
2097 union syncookie cookie;
2099 SCH_LOCK_ASSERT(sch);
2102 * Pull information out of SYN-ACK/ACK and revert sequence number advances.
2105 ack = th->th_ack - 1;
2106 seq = th->th_seq - 1;
2109 * Unpack the flags containing enough information to restore the connection.
2112 cookie.cookie = (ack & 0xff) ^ (ack >> 24);
2114 /* Which of the two secrets to use. */
2115 secbits = sch->sch_sc->secret.key[cookie.flags.odd_even];
2117 hash = syncookie_mac(inc, seq, cookie.cookie, secbits, (uintptr_t)sch);
2119 /* The recomputed hash matches the ACK if this was a genuine cookie. */
2120 if ((ack & ~0xff) != (hash & ~0xff))
2123 /* Fill in the syncache values. */
2125 bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
2126 sc->sc_ipopts = NULL;
2131 switch (inc->inc_flags & INC_ISIPV6) {
2134 sc->sc_ip_ttl = sotoinpcb(lso)->inp_ip_ttl;
2135 sc->sc_ip_tos = sotoinpcb(lso)->inp_ip_tos;
2140 if (sotoinpcb(lso)->inp_flags & IN6P_AUTOFLOWLABEL)
2141 sc->sc_flowlabel = sc->sc_iss & IPV6_FLOWLABEL_MASK;
2146 sc->sc_peer_mss = tcp_sc_msstab[cookie.flags.mss_idx];
2148 /* We can simply recompute the receive window scale we sent earlier. */
2149 while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < sb_max)
2152 /* Only use wscale if it was enabled in the original SYN. */
2153 if (cookie.flags.wscale_idx > 0) {
2154 sc->sc_requested_r_scale = wscale;
2155 sc->sc_requested_s_scale = tcp_sc_wstab[cookie.flags.wscale_idx];
2156 sc->sc_flags |= SCF_WINSCALE;
2159 wnd = lso->sol_sbrcv_hiwat;
2161 wnd = imin(wnd, TCP_MAXWIN);
2164 if (cookie.flags.sack_ok)
2165 sc->sc_flags |= SCF_SACK;
2167 if (to->to_flags & TOF_TS) {
2168 sc->sc_flags |= SCF_TIMESTAMP;
2169 sc->sc_tsreflect = to->to_tsval;
2170 sc->sc_tsoff = tcp_new_ts_offset(inc);
2173 if (to->to_flags & TOF_SIGNATURE)
2174 sc->sc_flags |= SCF_SIGNATURE;
2178 TCPSTAT_INC(tcps_sc_recvcookie);
2184 syncookie_cmp(struct in_conninfo *inc, struct syncache_head *sch,
2185 struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
2188 struct syncache scs, *scx;
2191 bzero(&scs, sizeof(scs));
2192 scx = syncookie_lookup(inc, sch, &scs, th, to, lso);
2194 if ((s = tcp_log_addrs(inc, th, NULL, NULL)) == NULL)
2198 if (sc->sc_peer_mss != scx->sc_peer_mss)
2199 log(LOG_DEBUG, "%s; %s: mss different %i vs %i\n",
2200 s, __func__, sc->sc_peer_mss, scx->sc_peer_mss);
2202 if (sc->sc_requested_r_scale != scx->sc_requested_r_scale)
2203 log(LOG_DEBUG, "%s; %s: rwscale different %i vs %i\n",
2204 s, __func__, sc->sc_requested_r_scale,
2205 scx->sc_requested_r_scale);
2207 if (sc->sc_requested_s_scale != scx->sc_requested_s_scale)
2208 log(LOG_DEBUG, "%s; %s: swscale different %i vs %i\n",
2209 s, __func__, sc->sc_requested_s_scale,
2210 scx->sc_requested_s_scale);
2212 if ((sc->sc_flags & SCF_SACK) != (scx->sc_flags & SCF_SACK))
2213 log(LOG_DEBUG, "%s; %s: SACK different\n", s, __func__);
2220 #endif /* INVARIANTS */
2223 syncookie_reseed(void *arg)
2225 struct tcp_syncache *sc = arg;
2230 * Reseeding the secret doesn't have to be protected by a lock.
2231 * It must only be ensured that the new random values are visible
2232 * to all CPUs in an SMP environment. The atomic with release
2233 * semantics ensures that.
2235 secbit = (sc->secret.oddeven & 0x1) ? 0 : 1;
2236 secbits = sc->secret.key[secbit];
2237 arc4rand(secbits, SYNCOOKIE_SECRET_SIZE, 0);
2238 atomic_add_rel_int(&sc->secret.oddeven, 1);
2240 /* Reschedule ourself. */
2241 callout_schedule(&sc->secret.reseed, SYNCOOKIE_LIFETIME * hz);
2245 * Exports the syncache entries to userland so that netstat can display
2246 * them alongside the other sockets. This function is intended to be
2247 * called only from tcp_pcblist.
2249 * Due to concurrency on an active system, the number of pcbs exported
2250 * may have no relation to max_pcbs. max_pcbs merely indicates the
2251 * amount of space the caller allocated for this function to use.
2254 syncache_pcblist(struct sysctl_req *req, int max_pcbs, int *pcbs_exported)
2257 struct syncache *sc;
2258 struct syncache_head *sch;
2259 int count, error, i;
2261 for (count = 0, error = 0, i = 0; i < V_tcp_syncache.hashsize; i++) {
2262 sch = &V_tcp_syncache.hashbase[i];
2264 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
2265 if (count >= max_pcbs) {
2269 if (cr_cansee(req->td->td_ucred, sc->sc_cred) != 0)
2271 bzero(&xt, sizeof(xt));
2272 xt.xt_len = sizeof(xt);
2273 if (sc->sc_inc.inc_flags & INC_ISIPV6)
2274 xt.xt_inp.inp_vflag = INP_IPV6;
2276 xt.xt_inp.inp_vflag = INP_IPV4;
2277 bcopy(&sc->sc_inc, &xt.xt_inp.inp_inc,
2278 sizeof (struct in_conninfo));
2279 xt.t_state = TCPS_SYN_RECEIVED;
2280 xt.xt_inp.xi_socket.xso_protocol = IPPROTO_TCP;
2281 xt.xt_inp.xi_socket.xso_len = sizeof (struct xsocket);
2282 xt.xt_inp.xi_socket.so_type = SOCK_STREAM;
2283 xt.xt_inp.xi_socket.so_state = SS_ISCONNECTING;
2284 error = SYSCTL_OUT(req, &xt, sizeof xt);
2294 *pcbs_exported = count;