 * Copyright (c) 2001 McAfee, Inc.
 * Copyright (c) 2006 Andre Oppermann, Internet Business Solutions AG
 * This software was developed for the FreeBSD Project by Jonathan Lemon
 * and McAfee Research, the Security Research Division of McAfee, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_tcp_sack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/random.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>

#include <netinet/tcp.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_debug.h>
#include <netinet6/tcp6_var.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#endif /* IPSEC */
#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#include <netipsec/key.h>
#endif /* FAST_IPSEC */

#include <machine/in_cksum.h>
static int tcp_syncookies = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
    &tcp_syncookies, 0,
    "Use TCP SYN cookies if the syncache overflows");
struct syncache {
	TAILQ_ENTRY(syncache)	sc_hash;
	struct		in_conninfo sc_inc;	/* addresses */
	u_long		sc_rxttime;		/* retransmit time */
	u_int16_t	sc_rxmits;		/* retransmit counter */

	u_int32_t	sc_tsrecent;		/* timestamp from peer's SYN */
	u_int32_t	sc_flowlabel;		/* IPv6 flowlabel */
	tcp_seq		sc_irs;			/* seq from peer */
	tcp_seq		sc_iss;			/* our ISS */
	struct		mbuf *sc_ipopts;	/* source route */

	u_int16_t	sc_peer_mss;		/* peer's MSS */
	u_int16_t	sc_wnd;			/* advertised window */
	u_int8_t	sc_ip_ttl;		/* IPv4 TTL */
	u_int8_t	sc_ip_tos;		/* IPv4 TOS */
	u_int8_t	sc_requested_s_scale:4,
			sc_request_r_scale:4;
	u_int8_t	sc_flags;
#define SCF_NOOPT	0x01			/* no TCP options */
#define SCF_WINSCALE	0x02			/* negotiated window scaling */
#define SCF_TIMESTAMP	0x04			/* negotiated timestamps */
#define SCF_UNREACH	0x10			/* icmp unreachable received */
#define SCF_SIGNATURE	0x20			/* send MD5 digests */
#define SCF_SACK	0x80			/* send SACK option */
};
struct syncache_head {
	struct mtx	sch_mtx;
	TAILQ_HEAD(sch_head, syncache)	sch_bucket;
	struct callout	sch_timer;
	int		sch_nextc;
	u_int		sch_length;
};
static void	 syncache_drop(struct syncache *, struct syncache_head *);
static void	 syncache_free(struct syncache *);
static void	 syncache_insert(struct syncache *, struct syncache_head *);
static struct syncache *syncache_lookup(struct in_conninfo *, struct syncache_head **);
static int	 syncache_respond(struct syncache *, struct mbuf *);
static struct socket *syncache_socket(struct syncache *, struct socket *,
		    struct mbuf *);
static void	 syncache_timer(void *);
static void	 syncookie_init(void);
static u_int32_t syncookie_generate(struct syncache *, u_int32_t *);
static struct syncache
		*syncookie_lookup(struct in_conninfo *, struct tcphdr *,
		    struct socket *);
/*
 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
 * 3 retransmits correspond to a timeout of (1 + 2 + 4 + 8 == 15) seconds,
 * and the odds are that the user has given up attempting to connect by then.
 */
#define SYNCACHE_MAXREXMTS		3
/* Arbitrary values */
#define TCP_SYNCACHE_HASHSIZE		512
#define TCP_SYNCACHE_BUCKETLIMIT	30
struct tcp_syncache {
	struct	syncache_head *hashbase;
	uma_zone_t zone;
	u_int	hashsize;
	u_int	hashmask;
	u_int	bucket_limit;
	u_int	cache_count;		/* XXX: unprotected */
	u_int	cache_limit;
	u_int	rexmt_limit;
	u_int	hash_secret;
};
static struct tcp_syncache tcp_syncache;
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RDTUN,
    &tcp_syncache.bucket_limit, 0, "Per-bucket hash limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RDTUN,
    &tcp_syncache.cache_limit, 0, "Overall entry limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_RD,
    &tcp_syncache.cache_count, 0, "Current number of entries in syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RDTUN,
    &tcp_syncache.hashsize, 0, "Size of TCP syncache hashtable");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
    &tcp_syncache.rexmt_limit, 0, "Limit on SYN/ACK retransmissions");

static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");
#define SYNCACHE_HASH(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc_faddr.s_addr ^					\
	  ((inc)->inc_faddr.s_addr >> 16) ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define SYNCACHE_HASH6(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc6_faddr.s6_addr32[0] ^				\
	  (inc)->inc6_faddr.s6_addr32[3] ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)
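
/*
 * Illustrative sketch (not part of the original file): how a SYN's
 * connection tuple maps to a bucket row.  With the default hashsize
 * of 512, hashmask is 0x1ff, so only the low 9 bits of the mix
 * survive.  Assumes a populated "struct in_conninfo *inc".
 */
#if 0
	struct syncache_head *sch;

	sch = &tcp_syncache.hashbase[
	    SYNCACHE_HASH(inc, tcp_syncache.hashmask)];	/* IPv4 tuple */
#endif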
#define ENDPTS_EQ(a, b) (						\
	(a)->ie_fport == (b)->ie_fport &&				\
	(a)->ie_lport == (b)->ie_lport &&				\
	(a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr &&			\
	(a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr			\
)

#define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0)
#define SYNCACHE_TIMEOUT(sc, sch, co) do {				\
	(sc)->sc_rxmits++;						\
	(sc)->sc_rxttime = ticks +					\
	    TCPTV_RTOBASE * tcp_backoff[(sc)->sc_rxmits - 1];		\
	if ((sch)->sch_nextc > (sc)->sc_rxttime)			\
		(sch)->sch_nextc = (sc)->sc_rxttime;			\
	if (!TAILQ_EMPTY(&(sch)->sch_bucket) && !(co))			\
		callout_reset(&(sch)->sch_timer,			\
		    (sch)->sch_nextc - ticks,				\
		    syncache_timer, (void *)(sch));			\
} while (0)
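
/*
 * Illustrative sketch (not part of the original file): the retransmit
 * schedule SYNCACHE_TIMEOUT produces.  tcp_backoff[] doubles the base
 * RTO per attempt, so assuming TCPTV_RTOBASE is one second the SYN,ACK
 * is resent roughly 1, 2, 4 and 8 seconds after each previous attempt
 * until SYNCACHE_MAXREXMTS is exceeded.  A nonzero third argument
 * suppresses the callout_reset() for callers that arm the bucket
 * timer themselves.
 */
#if 0
	sc->sc_rxmits = 0;
	SYNCACHE_TIMEOUT(sc, sch, 0);	/* 1st attempt: TCPTV_RTOBASE * 1 */
	SYNCACHE_TIMEOUT(sc, sch, 0);	/* 2nd attempt: TCPTV_RTOBASE * 2 */
#endif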
#define SCH_LOCK(sch)		mtx_lock(&(sch)->sch_mtx)
#define SCH_UNLOCK(sch)		mtx_unlock(&(sch)->sch_mtx)
#define SCH_LOCK_ASSERT(sch)	mtx_assert(&(sch)->sch_mtx, MA_OWNED)
/*
 * Requires the syncache entry to be already removed from the bucket list.
 */
static void
syncache_free(struct syncache *sc)
{
	if (sc->sc_ipopts)
		(void) m_free(sc->sc_ipopts);
	uma_zfree(tcp_syncache.zone, sc);
}
	tcp_syncache.cache_count = 0;
	tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
	tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
	tcp_syncache.hash_secret = arc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
	    &tcp_syncache.hashsize);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
	    &tcp_syncache.bucket_limit);
	if (!powerof2(tcp_syncache.hashsize) || tcp_syncache.hashsize == 0) {
		printf("WARNING: syncache hash size is not a power of 2.\n");
		tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	}
	tcp_syncache.hashmask = tcp_syncache.hashsize - 1;

	tcp_syncache.cache_limit =
	    tcp_syncache.hashsize * tcp_syncache.bucket_limit;
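	/*
	 * Worked example (not part of the original file): with the
	 * defaults above this is 512 buckets * 30 entries per bucket,
	 * i.e. a cache_limit of 15360 entries, unless overridden by the
	 * net.inet.tcp.syncache.cachelimit tunable fetched below.
	 */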
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
	    &tcp_syncache.cache_limit);

	/* Allocate the hash table. */
	MALLOC(tcp_syncache.hashbase, struct syncache_head *,
	    tcp_syncache.hashsize * sizeof(struct syncache_head),
	    M_SYNCACHE, M_WAITOK);

	/* Initialize the hash buckets. */
	for (i = 0; i < tcp_syncache.hashsize; i++) {
		TAILQ_INIT(&tcp_syncache.hashbase[i].sch_bucket);
		mtx_init(&tcp_syncache.hashbase[i].sch_mtx, "tcp_sc_head",
		    NULL, MTX_DEF);
		callout_init_mtx(&tcp_syncache.hashbase[i].sch_timer,
		    &tcp_syncache.hashbase[i].sch_mtx, 0);
		tcp_syncache.hashbase[i].sch_length = 0;
	}

	/* Create the syncache entry zone. */
	tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_max(tcp_syncache.zone, tcp_syncache.cache_limit);
/*
 * Inserts a syncache entry into the specified bucket row.
 * Locks and unlocks the syncache_head autonomously.
 */
static void
syncache_insert(struct syncache *sc, struct syncache_head *sch)
{
	struct syncache *sc2;

	SCH_LOCK(sch);

	/*
	 * Make sure that we don't overflow the per-bucket limit.
	 * If the bucket is full, toss the oldest element.
	 */
	if (sch->sch_length >= tcp_syncache.bucket_limit) {
		KASSERT(!TAILQ_EMPTY(&sch->sch_bucket),
		    ("sch->sch_length incorrect"));
		sc2 = TAILQ_LAST(&sch->sch_bucket, sch_head);
		syncache_drop(sc2, sch);
		tcpstat.tcps_sc_bucketoverflow++;
	}

	/* Put it into the bucket. */
	TAILQ_INSERT_HEAD(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length++;

	/* Reinitialize the bucket row's timer. */
	SYNCACHE_TIMEOUT(sc, sch, 1);

	SCH_UNLOCK(sch);

	tcp_syncache.cache_count++;
	tcpstat.tcps_sc_added++;
}
/*
 * Remove and free entry from syncache bucket row.
 * Expects locked syncache head.
 */
static void
syncache_drop(struct syncache *sc, struct syncache_head *sch)
{
	SCH_LOCK_ASSERT(sch);

	TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length--;
	syncache_free(sc);
	tcp_syncache.cache_count--;
}
/*
 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
 * If we have retransmitted an entry the maximum number of times, expire it.
 * One separate timer for each bucket row.
 */
static void
syncache_timer(void *xsch)
{
	struct syncache_head *sch = (struct syncache_head *)xsch;
	struct syncache *sc, *nsc;

	/* NB: syncache_head has already been locked by the callout. */
	SCH_LOCK_ASSERT(sch);

	TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc) {
		/*
		 * We do not check if the listen socket still exists
		 * and accept the case where the listen socket may be
		 * gone by the time we resend the SYN/ACK.  We do
		 * not expect this to happen often.  If it does,
		 * then the RST will be sent by the time the remote
		 * host does the SYN/ACK->ACK.
		 */
		if (sc->sc_rxttime >= ticks) {
			/* Entry is not yet due; update the wakeup time. */
			if (sc->sc_rxttime < sch->sch_nextc)
				sch->sch_nextc = sc->sc_rxttime;
			continue;
		}

		if (sc->sc_rxmits > tcp_syncache.rexmt_limit) {
			syncache_drop(sc, sch);
			tcpstat.tcps_sc_stale++;
			continue;
		}

		(void) syncache_respond(sc, NULL);
		tcpstat.tcps_sc_retransmitted++;
		SYNCACHE_TIMEOUT(sc, sch, 0);
	}
	if (!TAILQ_EMPTY(&(sch)->sch_bucket))
		callout_reset(&(sch)->sch_timer, (sch)->sch_nextc - ticks,
		    syncache_timer, (void *)(sch));
}
/*
 * Find an entry in the syncache.
 * Always returns with a locked syncache_head, plus a matching entry or NULL.
 */
static struct syncache *
syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
{
	struct syncache *sc;
	struct syncache_head *sch;

#ifdef INET6
	if (inc->inc_isipv6) {
		sch = &tcp_syncache.hashbase[
		    SYNCACHE_HASH6(inc, tcp_syncache.hashmask)];
		*schp = sch;
		SCH_LOCK(sch);
		/* Walk the bucket row to find a matching entry. */
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
			if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	} else
#endif
	{
		sch = &tcp_syncache.hashbase[
		    SYNCACHE_HASH(inc, tcp_syncache.hashmask)];
		*schp = sch;
		SCH_LOCK(sch);
		/* Walk the bucket row to find a matching entry. */
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
#ifdef INET6
			if (sc->sc_inc.inc_isipv6)
				continue;
#endif
			if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	}
	SCH_LOCK_ASSERT(*schp);
	return (NULL);			/* always returns with locked sch */
}
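
/*
 * Illustrative sketch (not part of the original file): the locking
 * contract callers of syncache_lookup() follow.  The bucket row comes
 * back locked whether or not an entry was found, so the caller always
 * unlocks it when done.
 */
#if 0
	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);
	if (sc != NULL)
		syncache_drop(sc, sch);		/* e.g. zap a bad entry */
	SCH_UNLOCK(sch);
#endif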
/*
 * This function is called when we get a RST for a
 * non-existent connection, so that we can see if the
 * connection is in the syn cache.  If it is, zap it.
 */
void
syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th)
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);
	if (sc == NULL)
		goto done;

	/*
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 *
	 * In all states except SYN-SENT, all reset (RST) segments
	 * are validated by checking their SEQ-fields.  A reset is
	 * valid if its sequence number is in the window.
	 *
	 * The sequence number in the reset segment is normally an
	 * echo of our outgoing acknowledgement numbers, but some hosts
	 * send a reset with the sequence number at the rightmost edge
	 * of our receive window, and we have to handle this case.
	 */
	if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
	    SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_reset++;
	}
done:
	SCH_UNLOCK(sch);
}
void
syncache_badack(struct in_conninfo *inc)
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);
	if (sc != NULL) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_badack++;
	}
	SCH_UNLOCK(sch);
}
void
syncache_unreach(struct in_conninfo *inc, struct tcphdr *th)
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);
	if (sc == NULL)
		goto done;
	/* If the sequence number != sc_iss, then it's a bogus ICMP msg */
	if (ntohl(th->th_seq) != sc->sc_iss)
		goto done;
	/*
	 * If we've retransmitted 3 times and this is our second error,
	 * we remove the entry.  Otherwise, we allow it to continue on.
	 * This prevents us from incorrectly nuking an entry during a
	 * spurious network outage.
	 */
	if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxmits < 3 + 1) {
		sc->sc_flags |= SCF_UNREACH;
		goto done;
	}
	syncache_drop(sc, sch);
	tcpstat.tcps_sc_unreach++;
done:
	SCH_UNLOCK(sch);
}
/*
 * Build a new TCP socket structure from a syncache entry.
 */
static struct socket *
syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
{
	struct inpcb *inp = NULL;
	struct socket *so;
	struct tcpcb *tp;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	/*
	 * Ok, create the full blown connection, and set things up
	 * as they would have been set up if we had created the
	 * connection when the SYN arrived.  If we can't create
	 * the connection, abort it.
	 */
	so = sonewconn(lso, SS_ISCONNECTED);
	if (so == NULL) {
		/*
		 * Drop the connection; we will send a RST if the peer
		 * retransmits the ACK.
		 */
		tcpstat.tcps_listendrop++;
		goto abort2;
	}
#ifdef MAC
	SOCK_LOCK(so);
	mac_set_socket_peer_from_mbuf(m, so);
	SOCK_UNLOCK(so);
#endif

	inp = sotoinpcb(so);
	INP_LOCK(inp);

	/* Insert new socket into PCB hash list. */
	inp->inp_inc.inc_isipv6 = sc->sc_inc.inc_isipv6;
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		inp->in6p_laddr = sc->sc_inc.inc6_laddr;
	} else {
		inp->inp_vflag &= ~INP_IPV6;
		inp->inp_vflag |= INP_IPV4;
#endif
		inp->inp_laddr = sc->sc_inc.inc_laddr;
#ifdef INET6
	}
#endif
	inp->inp_lport = sc->sc_inc.inc_lport;
	if (in_pcbinshash(inp) != 0) {
		/*
		 * Undo the assignments above if we failed to
		 * put the PCB on the hash lists.
		 */
#ifdef INET6
		if (sc->sc_inc.inc_isipv6)
			inp->in6p_laddr = in6addr_any;
		else
#endif
			inp->inp_laddr.s_addr = INADDR_ANY;
		goto abort;
	}
#ifdef IPSEC
	/* Copy the old policy into the new socket. */
	if (ipsec_copy_pcbpolicy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
		printf("syncache_socket: could not copy policy\n");
#endif
#ifdef FAST_IPSEC
	/* Copy the old policy into the new socket. */
	if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
		printf("syncache_socket: could not copy policy\n");
#endif
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		struct inpcb *oinp = sotoinpcb(lso);
		struct in6_addr laddr6;
		struct sockaddr_in6 sin6;
		/*
		 * Inherit socket options from the listening socket.
		 * Note that in6p_inputopts are not (and should not be)
		 * copied, since it stores previously received options and is
		 * used to detect if each new option is different than the
		 * previous one and hence should be passed to a user.
		 * If we copied in6p_inputopts, a user would not be able to
		 * receive options just after calling the accept system call.
		 */
		inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
		if (oinp->in6p_outputopts)
			inp->in6p_outputopts =
			    ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);

		sin6.sin6_family = AF_INET6;
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_addr = sc->sc_inc.inc6_faddr;
		sin6.sin6_port = sc->sc_inc.inc_fport;
		sin6.sin6_flowinfo = sin6.sin6_scope_id = 0;
		laddr6 = inp->in6p_laddr;
		if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
			inp->in6p_laddr = sc->sc_inc.inc6_laddr;
		if (in6_pcbconnect(inp, (struct sockaddr *)&sin6,
		    thread0.td_ucred)) {
			inp->in6p_laddr = laddr6;
			goto abort;
		}
		/* Override flowlabel from in6_pcbconnect. */
		inp->in6p_flowinfo &= ~IPV6_FLOWLABEL_MASK;
		inp->in6p_flowinfo |= sc->sc_flowlabel;
	} else
#endif
	{
		struct in_addr laddr;
		struct sockaddr_in sin;

		inp->inp_options = ip_srcroute(m);
		if (inp->inp_options == NULL) {
			inp->inp_options = sc->sc_ipopts;
			sc->sc_ipopts = NULL;
		}

		sin.sin_family = AF_INET;
		sin.sin_len = sizeof(sin);
		sin.sin_addr = sc->sc_inc.inc_faddr;
		sin.sin_port = sc->sc_inc.inc_fport;
		bzero((caddr_t)sin.sin_zero, sizeof(sin.sin_zero));
		laddr = inp->inp_laddr;
		if (inp->inp_laddr.s_addr == INADDR_ANY)
			inp->inp_laddr = sc->sc_inc.inc_laddr;
		if (in_pcbconnect(inp, (struct sockaddr *)&sin,
		    thread0.td_ucred)) {
			inp->inp_laddr = laddr;
			goto abort;
		}
	}
	tp = intotcpcb(inp);
	tp->t_state = TCPS_SYN_RECEIVED;
	tp->iss = sc->sc_iss;
	tp->irs = sc->sc_irs;
	tcp_rcvseqinit(tp);
	tcp_sendseqinit(tp);
	tp->snd_wl1 = sc->sc_irs;
	tp->rcv_up = sc->sc_irs + 1;
	tp->rcv_wnd = sc->sc_wnd;
	tp->rcv_adv += tp->rcv_wnd;

	tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
	if (sc->sc_flags & SCF_NOOPT)
		tp->t_flags |= TF_NOOPT;
	if (sc->sc_flags & SCF_WINSCALE) {
		tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
		tp->snd_scale = sc->sc_requested_s_scale;
		tp->request_r_scale = sc->sc_request_r_scale;
	}
	if (sc->sc_flags & SCF_TIMESTAMP) {
		tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
		tp->ts_recent = sc->sc_tsrecent;
		tp->ts_recent_age = ticks;
	}
#ifdef TCP_SIGNATURE
	if (sc->sc_flags & SCF_SIGNATURE)
		tp->t_flags |= TF_SIGNATURE;
#endif
	if (sc->sc_flags & SCF_SACK) {
		tp->t_flags |= TF_SACK_PERMIT;
	}

	/*
	 * Set up MSS and get cached values from tcp_hostcache.
	 * This might overwrite some of the defaults we just set.
	 */
	tcp_mss(tp, sc->sc_peer_mss);

	/*
	 * If the SYN,ACK was retransmitted, reset cwnd to 1 segment.
	 */
	if (sc->sc_rxmits > 1)
		tp->snd_cwnd = tp->t_maxseg;
	callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp);

	INP_UNLOCK(inp);

	tcpstat.tcps_accepts++;
	return (so);

abort:
	INP_UNLOCK(inp);
abort2:
	return (NULL);
}
/*
 * This function gets called when we receive an ACK for a
 * socket in the LISTEN state.  We look up the connection
 * in the syncache, and if it's there, we pull it out of
 * the cache and turn it into a full-blown connection in
 * the SYN-RECEIVED state.
 */
int
syncache_expand(struct in_conninfo *inc, struct tcphdr *th,
    struct socket **lsop, struct mbuf *m)
{
	struct syncache *sc;
	struct syncache_head *sch;
	struct socket *so;

	/*
	 * Global TCP locks are held because we manipulate the PCB lists
	 * and create a new socket.
	 */
	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);
	if (sc == NULL) {
		/*
		 * There is no syncache entry, so see if this ACK is
		 * a returning syncookie.  To do this, first:
		 *  A. See if this socket has had a syncache entry dropped in
		 *     the past.  We don't want to accept a bogus syncookie
		 *     if we've never received a SYN.
		 *  B. Check that the syncookie is valid.  If it is, then
		 *     cobble up a fake syncache entry, and return.
		 */
		sc = syncookie_lookup(inc, th, *lsop);
		tcpstat.tcps_sc_recvcookie++;
	} else {
		/* Pull out the entry to unlock the bucket row. */
		TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
		sch->sch_length--;
	}
	SCH_UNLOCK(sch);

	/*
	 * If seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	if (th->th_ack != sc->sc_iss + 1)
		goto failed;

	so = syncache_socket(sc, *lsop, m);
	if (so == NULL) {
		/* XXXjlemon check this - is this correct? */
		(void) tcp_respond(NULL, m, m, th,
		    th->th_seq + tlen, (tcp_seq)0, TH_RST|TH_ACK);
		m_freem(m);			/* XXX: only needed for above */
		tcpstat.tcps_sc_aborted++;
		syncache_insert(sc, sch);	/* try again later */
	}
	tcpstat.tcps_sc_completed++;
/*
 * Given a LISTEN socket and an inbound SYN request, add
 * this to the syn cache, and send back a segment:
 *	<SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
 * to the source.
 *
 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
 * Doing so would require that we hold onto the data and deliver it
 * to the application.  However, if we are the target of a SYN-flood
 * DoS attack, an attacker could send data which would eventually
 * consume all available buffer space if it were ACKed.  By not ACKing
 * the data, we avoid this DoS scenario.
 */
static void
syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
    struct inpcb *inp, struct socket **lsop, struct mbuf *m)
{
	struct tcpcb *tp;
	struct socket *so;
	struct syncache *sc = NULL;
	struct syncache_head *sch;
	struct mbuf *ipopts = NULL;
	u_int32_t flowtmp;
	int win, sb_hiwat, ip_ttl, ip_tos;
#ifdef INET6
	int autoflowlabel = 0;
#endif

	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	INP_LOCK_ASSERT(inp);			/* listen socket */

	/*
	 * Combine all so/tp operations very early to drop the INP lock as
	 * soon as possible.
	 */
	so = *lsop;
	tp = sototcpcb(so);

#ifdef INET6
	if (inc->inc_isipv6 &&
	    (inp->in6p_flags & IN6P_AUTOFLOWLABEL))
		autoflowlabel = 1;
#endif
	ip_ttl = inp->inp_ip_ttl;
	ip_tos = inp->inp_ip_tos;
	win = sbspace(&so->so_rcv);
	sb_hiwat = so->so_rcv.sb_hiwat;
	if (tp->t_flags & TF_NOOPT)
		sc->sc_flags = SCF_NOOPT;

	INP_INFO_WUNLOCK(&tcbinfo);

	/*
	 * Remember the IP options, if any.
	 */
#ifdef INET6
	if (!inc->inc_isipv6)
#endif
		ipopts = ip_srcroute(m);

	/*
	 * See if we already have an entry for this connection.
	 * If we do, resend the SYN,ACK, and reset the retransmit timer.
	 *
	 * XXX: should the syncache be re-initialized with the contents
	 * of the new SYN here (which may have different options?)
	 */
	sc = syncache_lookup(inc, &sch);	/* returns locked entry */
	SCH_LOCK_ASSERT(sch);
	if (sc != NULL) {
		tcpstat.tcps_sc_dupsyn++;
		/*
		 * If we were remembering a previous source route,
		 * forget it and use the new one we've been given.
		 */
		if (sc->sc_ipopts)
			(void) m_free(sc->sc_ipopts);
		sc->sc_ipopts = ipopts;
		/*
		 * Update timestamp if present.
		 */
		if (sc->sc_flags & SCF_TIMESTAMP)
			sc->sc_tsrecent = to->to_tsval;
		if (syncache_respond(sc, m) == 0) {
			SYNCACHE_TIMEOUT(sc, sch, 1);
			tcpstat.tcps_sndacks++;
			tcpstat.tcps_sndtotal++;
		}
	}
	sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT | M_ZERO);
	if (sc == NULL) {
		/*
		 * The zone allocator couldn't provide more entries.
		 * Treat this as if the cache was full; drop the oldest
		 * entry and insert the new one.
		 */
		tcpstat.tcps_sc_zonefail++;
		sc = TAILQ_LAST(&sch->sch_bucket, sch_head);
		syncache_drop(sc, sch);
		sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT | M_ZERO);
		if (sc == NULL) {
			if (ipopts)
				(void) m_free(ipopts);
		}
	}
	/*
	 * Fill in the syncache values.
	 */
	sc->sc_ipopts = ipopts;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
	}
	sc->sc_ip_tos = ip_tos;
	sc->sc_ip_ttl = ip_ttl;
	sc->sc_irs = th->th_seq;
	sc->sc_peer_mss = (to->to_flags & TOF_MSS) ? to->to_mss : 0;
	sc->sc_flowlabel = 0;
	if (tcp_syncookies) {
		sc->sc_iss = syncookie_generate(sc, &flowtmp);
#ifdef INET6
		if (autoflowlabel)
			sc->sc_flowlabel = flowtmp & IPV6_FLOWLABEL_MASK;
#endif
	} else {
		sc->sc_iss = arc4random();
#ifdef INET6
		if (autoflowlabel)
			sc->sc_flowlabel =
			    (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
#endif
	}
	/*
	 * Initial receive window: clip sbspace to [0 .. TCP_MAXWIN].
	 * win was derived from socket earlier in the function.
	 */
	win = imax(win, 0);
	win = imin(win, TCP_MAXWIN);
	sc->sc_wnd = win;

	if (tcp_do_rfc1323) {
		/*
		 * A timestamp received in a SYN makes
		 * it ok to send timestamp requests and replies.
		 */
		if (to->to_flags & TOF_TS) {
			sc->sc_tsrecent = to->to_tsval;
			sc->sc_flags |= SCF_TIMESTAMP;
		}
		if (to->to_flags & TOF_SCALE) {
			int wscale = 0;

			/* Compute proper scaling value from buffer space. */
			while (wscale < TCP_MAX_WINSHIFT &&
			    (TCP_MAXWIN << wscale) < sb_hiwat)
				wscale++;
			sc->sc_request_r_scale = wscale;
			sc->sc_requested_s_scale = to->to_requested_s_scale;
			sc->sc_flags |= SCF_WINSCALE;
		}
	}
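	/*
	 * Worked example (not part of the original file): with a listen
	 * socket buffer of sb_hiwat = 262144 bytes, the scaling loop above
	 * stops at wscale = 3: TCP_MAXWIN << 2 (262140) is still below
	 * 262144, while TCP_MAXWIN << 3 (524280) is not.
	 */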
#ifdef TCP_SIGNATURE
	/*
	 * If listening socket requested TCP digests, and received SYN
	 * contains the option, flag this in the syncache so that
	 * syncache_respond() will do the right thing with the SYN+ACK.
	 * XXX: Currently we always record the option by default and will
	 * attempt to use it in syncache_respond().
	 */
	if (to->to_flags & TOF_SIGNATURE)
		sc->sc_flags |= SCF_SIGNATURE;
#endif

	if (to->to_flags & TOF_SACK)
		sc->sc_flags |= SCF_SACK;
	/*
	 * Do a standard 3-way handshake.
	 */
	if (syncache_respond(sc, m) == 0) {
		syncache_insert(sc, sch);	/* locks and unlocks sch */
		tcpstat.tcps_sndacks++;
		tcpstat.tcps_sndtotal++;
	} else {
		syncache_free(sc);
		tcpstat.tcps_sc_dropped++;
	}
}
static int
syncache_respond(struct syncache *sc, struct mbuf *m)
{
	u_int8_t *optp;
	int optlen, error;
	u_int16_t tlen, hlen, mssopt;
	struct ip *ip = NULL;
	struct tcphdr *th;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
#endif
#ifdef MAC
	struct inpcb *inp = NULL;
#endif

	hlen =
#ifdef INET6
	    (sc->sc_inc.inc_isipv6) ? sizeof(struct ip6_hdr) :
#endif
	    sizeof(struct ip);

	KASSERT(sc != NULL, ("syncache_respond with NULL syncache pointer"));

	/* Determine MSS we advertise to other end of connection. */
	mssopt = tcp_mssopt(&sc->sc_inc);

	/* Compute the size of the TCP options. */
	if (sc->sc_flags & SCF_NOOPT) {
		optlen = 0;
	} else {
		optlen = TCPOLEN_MAXSEG +
		    ((sc->sc_flags & SCF_WINSCALE) ? 4 : 0) +
		    ((sc->sc_flags & SCF_TIMESTAMP) ? TCPOLEN_TSTAMP_APPA : 0);
#ifdef TCP_SIGNATURE
		if (sc->sc_flags & SCF_SIGNATURE)
			optlen += TCPOLEN_SIGNATURE;
#endif
		if (sc->sc_flags & SCF_SACK)
			optlen += TCPOLEN_SACK_PERMITTED;
		optlen = roundup2(optlen, 4);
	}
	tlen = hlen + sizeof(struct tcphdr) + optlen;
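	/*
	 * Worked example (not part of the original file): a SYN,ACK
	 * carrying MSS, window scale and timestamps uses TCPOLEN_MAXSEG
	 * (4) + 4 + TCPOLEN_TSTAMP_APPA (12) = 20 option bytes, which
	 * roundup2() leaves unchanged, giving a 40 byte TCP header on
	 * top of hlen.
	 */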
	/* XXX: Assume that the entire packet will fit in a header mbuf. */
	KASSERT(max_linkhdr + tlen <= MHLEN, ("syncache: mbuf too small"));
	/* Create the IP+TCP header from scratch. */
	if (m)
		m_freem(m);
	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;

#ifdef MAC
	/*
	 * For MAC look up the inpcb to get access to the label information.
	 * We don't store the inpcb pointer in struct syncache to make locking
	 * less complicated and to save locking operations.  However for MAC
	 * this gives a slight overhead as we have to do a full pcblookup here.
	 */
	INP_INFO_RLOCK(&tcbinfo);
#ifdef INET6 /* && MAC */
	if (sc->sc_inc.inc_isipv6)
		inp = in6_pcblookup_hash(&tcbinfo,
		    &sc->sc_inc.inc6_laddr, sc->sc_inc.inc_lport,
		    &sc->sc_inc.inc6_faddr, sc->sc_inc.inc_fport,
		    0, NULL);
	else
#endif
		inp = in_pcblookup_hash(&tcbinfo,
		    sc->sc_inc.inc_laddr, sc->sc_inc.inc_lport,
		    sc->sc_inc.inc_faddr, sc->sc_inc.inc_fport,
		    0, NULL);
	if (inp == NULL) {
		INP_INFO_RUNLOCK(&tcbinfo);
		m_freem(m);
		return (ESHUTDOWN);
	}
	if (!(inp->inp_socket->so_options & SO_ACCEPTCONN)) {
		/* Listen socket is gone, no longer accepting. */
		INP_INFO_RUNLOCK(&tcbinfo);
		m_freem(m);
		return (ESHUTDOWN);
	}
	mac_create_mbuf_from_inpcb(inp, m);
	INP_INFO_RUNLOCK(&tcbinfo);
#endif /* MAC */
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_src = sc->sc_inc.inc6_laddr;
		ip6->ip6_dst = sc->sc_inc.inc6_faddr;
		ip6->ip6_plen = htons(tlen - hlen);
		/* ip6_hlim is set after checksum */
		ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
		ip6->ip6_flow |= sc->sc_flowlabel;

		th = (struct tcphdr *)(ip6 + 1);
	} else
#endif
	{
		ip = mtod(m, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(struct ip) >> 2;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = sc->sc_inc.inc_laddr;
		ip->ip_dst = sc->sc_inc.inc_faddr;
		ip->ip_ttl = sc->sc_ip_ttl;
		ip->ip_tos = sc->sc_ip_tos;

		/*
		 * See if we should do MTU discovery.  Route lookups are
		 * expensive, so we will only unset the DF bit if:
		 *
		 *	1) path_mtu_discovery is disabled, or
		 *	2) the SCF_UNREACH flag has been set.
		 */
		if (path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
			ip->ip_off |= IP_DF;

		th = (struct tcphdr *)(ip + 1);
	}
	th->th_sport = sc->sc_inc.inc_lport;
	th->th_dport = sc->sc_inc.inc_fport;
	th->th_seq = htonl(sc->sc_iss);
	th->th_ack = htonl(sc->sc_irs + 1);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = TH_SYN|TH_ACK;
	th->th_win = htons(sc->sc_wnd);
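	/*
	 * Note (not part of the original file): this matches the handshake
	 * template <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK> from syncache_add():
	 * we offer our own ISS and acknowledge sc_irs + 1, the +1 covering
	 * the sequence space consumed by the peer's SYN.
	 */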
	/* Tack on the TCP options. */
	optp = (u_int8_t *)(th + 1);
	*optp++ = TCPOPT_MAXSEG;
	*optp++ = TCPOLEN_MAXSEG;
	*optp++ = (mssopt >> 8) & 0xff;
	*optp++ = mssopt & 0xff;

	if (sc->sc_flags & SCF_WINSCALE) {
		*((u_int32_t *)optp) = htonl(TCPOPT_NOP << 24 |
		    TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 |
		    sc->sc_request_r_scale);
		optp += 4;
	}

	if (sc->sc_flags & SCF_TIMESTAMP) {
		u_int32_t *lp = (u_int32_t *)(optp);

		/* Form timestamp option per appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(ticks);
		*lp   = htonl(sc->sc_tsrecent);
		optp += TCPOLEN_TSTAMP_APPA;
	}
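	/*
	 * Note (not part of the original file): TCPOPT_TSTAMP_HDR packs
	 * NOP, NOP, TCPOPT_TIMESTAMP and TCPOLEN_TIMESTAMP into one
	 * 32-bit word, so the three stores above emit the 12 byte
	 * "appendix A" layout: two pad bytes, kind/length, our ticks
	 * value, and the echoed sc_tsrecent.
	 */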
#ifdef TCP_SIGNATURE
	/*
	 * Handle TCP-MD5 passive opener response.
	 */
	if (sc->sc_flags & SCF_SIGNATURE) {
		u_int8_t *bp = optp;
		int i;

		*bp++ = TCPOPT_SIGNATURE;
		*bp++ = TCPOLEN_SIGNATURE;
		for (i = 0; i < TCP_SIGLEN; i++)
			*bp++ = 0;
		tcp_signature_compute(m, sizeof(struct ip), 0, optlen,
		    optp + 2, IPSEC_DIR_OUTBOUND);
		optp += TCPOLEN_SIGNATURE;
	}
#endif /* TCP_SIGNATURE */

	if (sc->sc_flags & SCF_SACK) {
		*optp++ = TCPOPT_SACK_PERMITTED;
		*optp++ = TCPOLEN_SACK_PERMITTED;
	}

	/* Pad TCP options to a 4 byte boundary. */
	{
		int padlen = optlen - (optp - (u_int8_t *)(th + 1));

		while (padlen-- > 0)
			*optp++ = TCPOPT_EOL;
	}
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, tlen - hlen);
		ip6->ip6_hlim = in6_selecthlim(NULL, NULL);
		error = ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
	} else
#endif
	{
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(tlen - hlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, NULL);
	}
	return (error);
}
/*
 * |. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .|
 * |          MD5(laddr,faddr,secret,lport,fport)     |. . . . . . |
 * (A): peer mss index
 *
 * The values below are chosen to minimize the size of the tcp_secret
 * table, as well as providing roughly a 16 second lifetime for the cookie.
 */

#define SYNCOOKIE_WNDBITS	5	/* exposed bits for window indexing */
#define SYNCOOKIE_TIMESHIFT	1	/* scale ticks to window time units */

#define SYNCOOKIE_WNDMASK	((1 << SYNCOOKIE_WNDBITS) - 1)
#define SYNCOOKIE_NSECRETS	(1 << SYNCOOKIE_WNDBITS)
#define SYNCOOKIE_TIMEOUT \
	(hz * (1 << SYNCOOKIE_WNDBITS) / (1 << SYNCOOKIE_TIMESHIFT))
#define SYNCOOKIE_DATAMASK	((3 << SYNCOOKIE_WNDBITS) | SYNCOOKIE_WNDMASK)
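
/*
 * Illustrative sketch (not part of the original file): how the cookie
 * bits defined above are assembled.  The low SYNCOOKIE_WNDBITS bits
 * carry the secret-table index, the next two bits the tcp_msstab[]
 * index, and everything above that comes from the MD5 digest.  The
 * values for mss_idx and idx are hypothetical.
 */
#if 0
	u_int32_t cookie, data;
	int mss_idx = 1, idx = 7;		/* hypothetical inputs */

	data = (mss_idx << SYNCOOKIE_WNDBITS) | idx;
	data ^= sc->sc_irs;			/* fold in the peer's ISS */
	cookie = data ^ (md5_buffer[0] & ~SYNCOOKIE_WNDMASK);
#endif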
#define SYNCOOKIE_RLOCK(ts)		(rw_rlock(&(ts).ts_rwmtx))
#define SYNCOOKIE_RUNLOCK(ts)		(rw_runlock(&(ts).ts_rwmtx))
#define SYNCOOKIE_TRY_UPGRADE(ts)	(rw_try_upgrade(&(ts).ts_rwmtx))
#define SYNCOOKIE_DOWNGRADE(ts)		(rw_downgrade(&(ts).ts_rwmtx))

static struct {
	struct rwlock	ts_rwmtx;
	u_int		ts_expire;	/* ticks */
	u_int32_t	ts_secbits[4];
} tcp_secret[SYNCOOKIE_NSECRETS];

static int tcp_msstab[] = { 0, 536, 1460, 8960 };

static MD5_CTX syn_ctx;

#define MD5Add(v)	MD5Update(&syn_ctx, (u_char *)&v, sizeof(v))

struct md5_add {
	u_int32_t laddr, faddr;
	u_int32_t secbits[4];
	u_int16_t lport, fport;
};

CTASSERT(sizeof(struct md5_add) == 28);
/*
 * Consider the problem of a recreated (and retransmitted) cookie.  If the
 * original SYN was accepted, the connection is established.  The second
 * SYN is inflight, and if it arrives with an ISN that falls within the
 * receive window, the connection is killed.
 *
 * However, cookies have other problems as well, so this case may not be
 * worth handling.
 */
static void
syncookie_init(void)
{
	int idx;

	for (idx = 0; idx < SYNCOOKIE_NSECRETS; idx++) {
		rw_init(&(tcp_secret[idx].ts_rwmtx), "tcp_secret");
	}
}
static u_int32_t
syncookie_generate(struct syncache *sc, u_int32_t *flowid)
{
	u_int32_t md5_buffer[4];
	u_int32_t data;
	int idx, i;
	struct md5_add add;

	idx = ((ticks << SYNCOOKIE_TIMESHIFT) / hz) & SYNCOOKIE_WNDMASK;
	SYNCOOKIE_RLOCK(tcp_secret[idx]);
	if (tcp_secret[idx].ts_expire < ticks &&
	    SYNCOOKIE_TRY_UPGRADE(tcp_secret[idx])) {
		/* need write access */
		for (i = 0; i < 4; i++)
			tcp_secret[idx].ts_secbits[i] = arc4random();
		tcp_secret[idx].ts_expire = ticks + SYNCOOKIE_TIMEOUT;
		SYNCOOKIE_DOWNGRADE(tcp_secret[idx]);
	}
	for (data = sizeof(tcp_msstab) / sizeof(int) - 1; data > 0; data--)
		if (tcp_msstab[data] <= sc->sc_peer_mss)
			break;
	data = (data << SYNCOOKIE_WNDBITS) | idx;
	data ^= sc->sc_irs;			/* peer's iss */
	MD5Init(&syn_ctx);
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		MD5Add(sc->sc_inc.inc6_laddr);
		MD5Add(sc->sc_inc.inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = sc->sc_inc.inc_laddr.s_addr;
		add.faddr = sc->sc_inc.inc_faddr.s_addr;
	}
	add.lport = sc->sc_inc.inc_lport;
	add.fport = sc->sc_inc.inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	SYNCOOKIE_RUNLOCK(tcp_secret[idx]);
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= (md5_buffer[0] & ~SYNCOOKIE_WNDMASK);
	*flowid = md5_buffer[1];
	return (data);
}
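
/*
 * Worked example (not part of the original file): the descending scan
 * over tcp_msstab[] in syncookie_generate() above picks the largest
 * table value not exceeding the peer's advertised MSS.  A peer MSS of
 * 1400 selects index 1 (536), 1460 selects index 2 (1460), 9000
 * selects index 3 (8960), and anything below 536 ends at index 0,
 * i.e. no usable MSS hint.
 */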
static struct syncache *
syncookie_lookup(struct in_conninfo *inc, struct tcphdr *th, struct socket *so)
{
	u_int32_t md5_buffer[4];
	struct syncache *sc;
	u_int32_t data;
	int wnd, idx;
	struct md5_add add;

	data = (th->th_ack - 1) ^ (th->th_seq - 1);	/* remove ISS */
	idx = data & SYNCOOKIE_WNDMASK;
	SYNCOOKIE_RLOCK(tcp_secret[idx]);
	if (tcp_secret[idx].ts_expire < ticks ||
	    sototcpcb(so)->ts_recent + SYNCOOKIE_TIMEOUT < ticks) {
		SYNCOOKIE_RUNLOCK(tcp_secret[idx]);
		return (NULL);
	}
	MD5Init(&syn_ctx);
#ifdef INET6
	if (inc->inc_isipv6) {
		MD5Add(inc->inc6_laddr);
		MD5Add(inc->inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = inc->inc_laddr.s_addr;
		add.faddr = inc->inc_faddr.s_addr;
	}
	add.lport = inc->inc_lport;
	add.fport = inc->inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	SYNCOOKIE_RUNLOCK(tcp_secret[idx]);
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= md5_buffer[0];
	if ((data & ~SYNCOOKIE_DATAMASK) != 0)
		return (NULL);
	data = data >> SYNCOOKIE_WNDBITS;

	sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT | M_ZERO);
	if (sc == NULL)
		return (NULL);

	/*
	 * Fill in the syncache values.
	 * XXX: duplicate code from syncache_add
	 */
	sc->sc_ipopts = NULL;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
		if (sotoinpcb(so)->in6p_flags & IN6P_AUTOFLOWLABEL)
			sc->sc_flowlabel = md5_buffer[1] & IPV6_FLOWLABEL_MASK;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
		sc->sc_ip_ttl = sotoinpcb(so)->inp_ip_ttl;
		sc->sc_ip_tos = sotoinpcb(so)->inp_ip_tos;
	}
	sc->sc_irs = th->th_seq - 1;
	sc->sc_iss = th->th_ack - 1;
	wnd = sbspace(&so->so_rcv);
	wnd = imax(wnd, 0);
	wnd = imin(wnd, TCP_MAXWIN);
	sc->sc_wnd = wnd;
	sc->sc_peer_mss = tcp_msstab[data];
	return (sc);
}