/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ipfw.h"		/* for ipfw_fwd */
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <net/route.h>

#define	TCPSTATES		/* for logging */
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_syncache.h>
#include <netinet/tcp_debug.h>

#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>
static const int tcprexmtthresh = 3;

struct tcpstat tcpstat;
SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
    &tcpstat, tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");

int tcp_log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &tcp_log_in_vain, 0, "Log all incoming TCP segments to closed ports");

static int blackhole = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
    &blackhole, 0, "Do not send RST on segments to closed ports");

int tcp_delack_enabled = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
    &tcp_delack_enabled, 0,
    "Delay ACK to try and piggyback it onto a data packet");

static int drop_synfin = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
    &drop_synfin, 0, "Drop TCP packets with SYN+FIN set");

static int tcp_do_rfc3042 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
    &tcp_do_rfc3042, 0, "Enable RFC 3042 (Limited Transmit)");

static int tcp_do_rfc3390 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    &tcp_do_rfc3390, 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

static int tcp_insecure_rst = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW,
    &tcp_insecure_rst, 0,
    "Follow the old (insecure) criteria for accepting RST packets");

int tcp_do_autorcvbuf = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
    &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing");

int tcp_autorcvbuf_inc = 16*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
    &tcp_autorcvbuf_inc, 0,
    "Increment step size of automatic receive buffer");

int tcp_autorcvbuf_max = 256*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
    &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer");

struct inpcbhead tcb;
#define	tcb6	tcb	/* for KAME src sync over BSD*'s */
struct inpcbinfo tcbinfo;
static void	 tcp_dooptions(struct tcpopt *, u_char *, int, int);
static void	 tcp_do_segment(struct mbuf *, struct tcphdr *,
		     struct socket *, struct tcpcb *, int, int);
static void	 tcp_dropwithreset(struct mbuf *, struct tcphdr *,
		     struct tcpcb *, int, int);
static void	 tcp_pulloutofband(struct socket *,
		     struct tcphdr *, struct mbuf *, int);
static void	 tcp_xmit_timer(struct tcpcb *, int);
static void	 tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
#ifdef INET6
#define	ND6_HINT(tp) \
do { \
	if ((tp) && (tp)->t_inpcb && \
	    ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
		nd6_nud_hint(NULL, NULL, 0); \
} while (0)
#else
#define	ND6_HINT(tp)
#endif
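/*
 * NB: the hint is an upper-layer reachability confirmation for IPv6
 * Neighbor Unreachability Detection (RFC 2461, section 7.3.1): forward
 * progress on a TCP connection lets nd6 suppress a new reachability probe.
 */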
/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 *	- there is no delayed ack timer in progress and
 *	- our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window and
 *		- delayed acks are enabled or
 *		- this is a half-synchronized T/TCP connection.
 */
#define	DELAY_ACK(tp)							\
	((!tcp_timer_active(tp, TT_DELACK) &&				\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
	    (tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
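/*
 * NB: DELAY_ACK() only decides whether delaying is permissible; the
 * caller still sets TF_DELACK or TF_ACKNOW itself (see the header
 * prediction fast path and the SYN_SENT completion below).
 */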
/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
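/*
 * A rough sketch of the locking protocol, as asserted throughout below:
 * tcp_input runs with the global tcbinfo write lock held and acquires the
 * per-connection inpcb lock; tcp_do_segment and the syncache paths are
 * responsible for dropping both before they return.
 */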
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * is there a better place to put this?
	 */
	ia6 = ip6_getdstifaddr(m);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		return (IPPROTO_DONE);
	}

	tcp_input(m, *offp);
	return (IPPROTO_DONE);
}
#endif /* INET6 */
void
tcp_input(struct mbuf *m, int off0)
{
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ipovly *ipov;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int optlen = 0;
	int len, tlen, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
#ifdef IPFIREWALL_FORWARD
	struct m_tag *fwd_tag;
#endif
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#else
	const void *ip6 = NULL;
	const int isipv6 = 0;
#endif
	struct tcpopt to;		/* options in this segment */
	char *s = NULL;			/* address and port logging */

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	to.to_flags = 0;
	tcpstat.tcps_rcvtotal++;

	if (isipv6) {
#ifdef INET6
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */
		ip6 = mtod(m, struct ip6_hdr *);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
			tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
		th = (struct tcphdr *)((caddr_t)ip6 + off0);

		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate unbounded/unconnected pcb,
		 * unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with unspecified IPv6 destination are
		 * already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			goto drop;
		}
#else
		th = NULL;		/* XXX: Avoid compiler warning. */
#endif
	} else {
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m, (struct mbuf *)0);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				tcpstat.tcps_rcvshort++;
				return;
			}
		}
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ip->ip_len;

		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data +
				    ip->ip_len + IPPROTO_TCP));
			th->th_sum ^= 0xffff;
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
		} else {
			/*
			 * Checksum extended TCP header and data.
			 */
			len = sizeof (struct ip) + tlen;
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
			th->th_sum = in_cksum(m, len);
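			/*
			 * NB: the in_cksum() above covers the pseudo-header
			 * too: ih_x1 was zeroed and ih_len set so that the
			 * IP header overlay (struct ipovly) holds exactly
			 * the pseudo-header fields, letting one pass
			 * checksum pseudo-header plus TCP header and data.
			 */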
		}
		if (th->th_sum) {
			tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
		/* Re-initialization for later version check */
		ip->ip_v = IPVERSION;
	}

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = th->th_off << 2;
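	/*
	 * NB: th_off counts 32-bit words, so "off" is now the header
	 * length in bytes (20-60).
	 */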
	if (off < sizeof (struct tcphdr) || off > tlen) {
		tcpstat.tcps_rcvbadoff++;
		goto drop;
	}
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof (struct tcphdr)) {
		if (isipv6) {
#ifdef INET6
			IP6_EXTHDR_CHECK(m, off0, off, );
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
#endif
		} else {
			if (m->m_len < sizeof(struct ip) + off) {
				if ((m = m_pullup(m, sizeof (struct ip) + off))
				    == NULL) {
					tcpstat.tcps_rcvshort++;
					return;
				}
				ip = mtod(m, struct ip *);
				ipov = (struct ipovly *)ip;
				th = (struct tcphdr *)((caddr_t)ip + off0);
			}
		}
		optlen = off - sizeof (struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	th->th_seq = ntohl(th->th_seq);
	th->th_ack = ntohl(th->th_ack);
	th->th_win = ntohs(th->th_win);
	th->th_urp = ntohs(th->th_urp);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
	 */
	drop_hdrlen = off0 + off;
	/*
	 * Locate pcb for segment.
	 */
	INP_INFO_WLOCK(&tcbinfo);
findpcb:
	INP_INFO_WLOCK_ASSERT(&tcbinfo);
#ifdef IPFIREWALL_FORWARD
	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);

	if (fwd_tag != NULL && isipv6 == 0) {	/* IPv6 is not supported yet */
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag + 1);
		/*
		 * Transparently forwarded.  Pretend to be the destination.
		 * Already got one like this?
		 */
		inp = in_pcblookup_hash(&tcbinfo,
		    ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport,
		    0, m->m_pkthdr.rcvif);
		if (inp == NULL) {
			/* It's new.  Try to find the ambushing socket. */
			inp = in_pcblookup_hash(&tcbinfo,
			    ip->ip_src, th->th_sport,
			    next_hop->sin_addr,
			    next_hop->sin_port ?
				ntohs(next_hop->sin_port) :
				th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
		}
		/* Remove the tag from the packet.  We don't need it anymore. */
		m_tag_delete(m, fwd_tag);
	} else
#endif /* IPFIREWALL_FORWARD */
#ifdef INET6
	if (isipv6)
		inp = in6_pcblookup_hash(&tcbinfo,
		    &ip6->ip6_src, th->th_sport,
		    &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WILDCARD, m->m_pkthdr.rcvif);
	else
#endif
		inp = in_pcblookup_hash(&tcbinfo,
		    ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport,
		    INPLOOKUP_WILDCARD, m->m_pkthdr.rcvif);

	/*
	 * If the INPCB does not exist then all data in the incoming
	 * segment is discarded and an appropriate RST is sent back.
	 * XXX MRT Send RST using which routing table?
	 */
	if (inp == NULL) {
		/*
		 * Log communication attempts to ports that are not
		 * in use.
		 */
		if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
		    tcp_log_in_vain == 2) {
			if ((s = tcp_log_addrs(NULL, th, (void *)ip, ip6)))
				log(LOG_INFO, "%s; %s: Connection attempt "
				    "to closed port\n", s, __func__);
		}
		/*
		 * When blackholing do not respond with a RST but
		 * completely ignore the segment and drop it.
		 */
		if ((blackhole == 1 && (thflags & TH_SYN)) ||
		    blackhole == 2)
			goto dropunlock;

		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	INP_WLOCK(inp);

#ifdef IPSEC
#ifdef INET6
	if (isipv6 && ipsec6_in_reject(m, inp)) {
		ipsec6stat.in_polvio++;
		goto dropunlock;
	} else
#endif /* INET6 */
	if (ipsec4_in_reject(m, inp) != 0) {
		ipsec4stat.in_polvio++;
		goto dropunlock;
	}
#endif /* IPSEC */

	/*
	 * Check the minimum TTL for socket.
	 */
	if (inp->inp_ip_minttl != 0) {
#ifdef INET6
		if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
			goto dropunlock;
		else
#endif
		if (inp->inp_ip_minttl > ip->ip_ttl)
			goto dropunlock;
	}

	/*
	 * A previous connection in TIMEWAIT state is supposed to catch
	 * stray or duplicate segments arriving late.  If this segment
	 * was a legitimate new connection attempt the old INPCB gets
	 * removed and we can try again to find a listening socket.
	 */
	if (inp->inp_flags & INP_TIMEWAIT) {
		if (thflags & TH_SYN)
			tcp_dooptions(&to, optp, optlen, TO_SYN);
		/*
		 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
		 */
		if (tcp_twcheck(inp, &to, th, m, tlen))
			goto findpcb;
		INP_INFO_WUNLOCK(&tcbinfo);
		return;
	}
	/*
	 * The TCPCB may no longer exist if the connection is winding
	 * down or it is in the CLOSED state.  Either way we drop the
	 * segment and send an appropriate response.
	 */
	tp = intotcpcb(inp);
	if (tp == NULL || tp->t_state == TCPS_CLOSED) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}

#ifdef MAC
	INP_WLOCK_ASSERT(inp);
	if (mac_check_inpcb_deliver(inp, m))
		goto dropunlock;
#endif
	so = inp->inp_socket;
	KASSERT(so != NULL, ("%s: so == NULL", __func__));
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
		if (isipv6)
			bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
		else
			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif
	/*
	 * When the socket is accepting connections (the INPCB is in LISTEN
	 * state) we look into the SYN cache if this is a new connection
	 * attempt or the completion of a previous one.
	 */
	if (so->so_options & SO_ACCEPTCONN) {
		struct in_conninfo inc;

		KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
		    "tp not listening", __func__));

		bzero(&inc, sizeof(inc));
#ifdef INET6
		if (isipv6) {
			inc.inc_flags |= INC_ISIPV6;
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
		} else
#endif
		{
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;

		/*
		 * Check for an existing connection attempt in syncache if
		 * the flag is only ACK.  A successful lookup creates a new
		 * socket appended to the listen queue in SYN_RECEIVED state.
		 */
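		/*
		 * Roughly three outcomes are possible below: a matching
		 * syncache entry completes the handshake and the new socket
		 * is handed to tcp_do_segment(); a bogus ACK or SYN gets the
		 * segment dropped or answered with a RST; and a valid SYN
		 * (re)creates a syncache entry via syncache_add().
		 */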
		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
			/*
			 * Parse the TCP options here because
			 * syncookies need access to the reflected
			 * timestamp.
			 */
			tcp_dooptions(&to, optp, optlen, 0);
			/*
			 * NB: syncache_expand() doesn't unlock
			 * inp and tcpinfo locks.
			 */
			if (!syncache_expand(&inc, &to, th, &so, m)) {
				/*
				 * No syncache entry or ACK was not
				 * for our SYN/ACK.  Send a RST.
				 * NB: syncache did its own logging
				 * of the failure cause.
				 */
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (so == NULL) {
				/*
				 * We completed the 3-way handshake
				 * but could not allocate a socket
				 * either due to memory shortage,
				 * listen queue length limits or
				 * global socket limits.  Send RST
				 * or wait and have the remote end
				 * retransmit the ACK for another
				 * try.
				 */
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Socket allocation failed due to "
					    "limits or memory shortage, %s\n",
					    s, __func__, (tcp_sc_rst_sock_fail ?
					    "sending RST" : "try again"));
				if (tcp_sc_rst_sock_fail) {
					rstreason = BANDLIM_UNLIMITED;
					goto dropwithreset;
				} else
					goto dropunlock;
			}
			/*
			 * Socket is created in state SYN_RECEIVED.
			 * Unlock the listen socket, lock the newly
			 * created socket and update the tp variable.
			 */
			INP_WUNLOCK(inp);	/* listen socket */
			inp = sotoinpcb(so);
			INP_WLOCK(inp);		/* new connection */
			tp = intotcpcb(inp);
			KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
			    ("%s: new socket not in SYN_RECEIVED state",
			    __func__));
			/*
			 * Process the segment and the data it
			 * contains.  tcp_do_segment() consumes
			 * the mbuf chain and unlocks the inpcb.
			 */
			tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen);
			INP_INFO_UNLOCK_ASSERT(&tcbinfo);
			return;
		}
		/*
		 * Segment flag validation for new connection attempts:
		 *
		 * Our (SYN|ACK) response was rejected.
		 * Check with syncache and remove entry to prevent
		 * retransmits.
		 *
		 * NB: syncache_chkrst does its own logging of failure
		 * causes.
		 */
		if (thflags & TH_RST) {
			syncache_chkrst(&inc, th);
			goto dropunlock;
		}
		/*
		 * We can't do anything without SYN.
		 */
		if ((thflags & TH_SYN) == 0) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN is missing, segment ignored\n",
				    s, __func__);
			tcpstat.tcps_badsyn++;
			goto dropunlock;
		}
		/*
		 * (SYN|ACK) is bogus on a listen socket.
		 */
		if (thflags & TH_ACK) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|ACK invalid, segment rejected\n",
				    s, __func__);
			syncache_badack(&inc);	/* XXX: Not needed! */
			tcpstat.tcps_badsyn++;
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		/*
		 * If the drop_synfin option is enabled, drop all
		 * segments with both the SYN and FIN bits set.
		 * This prevents e.g. nmap from identifying the
		 * TCP port as open.
		 * XXX: Poor reasoning.  nmap has other methods
		 * and is constantly refining its stack detection
		 * strategies.
		 * XXX: This is a violation of the TCP specification
		 * and was used by RFC1644.
		 */
		if ((thflags & TH_FIN) && drop_synfin) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|FIN segment ignored (based on "
				    "sysctl setting)\n", s, __func__);
			tcpstat.tcps_badsyn++;
			goto dropunlock;
		}
		/*
		 * Segment's flags are (SYN) or (SYN|FIN).
		 *
		 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
		 * as they do not affect the state of the TCP FSM.
		 * The data pointed to by TH_URG and th_urp is ignored.
		 */
		KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
		    ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
		KASSERT(thflags & (TH_SYN),
		    ("%s: Listen socket: TH_SYN not set", __func__));
#ifdef INET6
		/*
		 * If deprecated address is forbidden,
		 * we do not accept SYN to deprecated interface
		 * address to prevent any new inbound connection from
		 * getting established.
		 * When we do not accept SYN, we send a TCP RST,
		 * with deprecated source address (instead of dropping
		 * it).  We accept this compromise because it is much
		 * better for the peer to receive a RST, and the RST
		 * will be the final packet of the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping
		 * SYN in this case.
		 * If we decipher RFC2462 5.5.4, it says:
		 *    1. use of deprecated addr with existing
		 *       communication is okay - "SHOULD continue to be
		 *       used"
		 *    2. use of it with new communication:
		 *      (2a) "SHOULD NOT be used if alternate address
		 *           with sufficient scope is available"
		 *      (2b) nothing mentioned otherwise.
		 * Here we fall into (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple descriptions of deprecated address
		 * handling - worse, they are not exactly the same.
		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			if ((ia6 = ip6_getdstifaddr(m)) &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt to deprecated "
					    "IPv6 address rejected\n",
					    s, __func__);
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
		}
#endif
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *   Don't respond if the destination is a link layer
		 *	broadcast according to RFC1122 4.2.3.10, p. 104.
		 *   If it is from this socket it must be forged.
		 *   Don't respond if the source or destination is a
		 *	global or subnet broad- or multicast address.
		 *   Note that it is quite possible to receive unicast
		 *	link-layer packets with a broadcast IP address. Use
		 *	in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST|M_MCAST)) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "Connection attempt from broad- or multicast "
				    "link layer address ignored\n", s, __func__);
			goto dropunlock;
		}
#ifdef INET6
		if (isipv6) {
			if (th->th_dport == th->th_sport &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt to/from self "
					    "ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt from/to multicast "
					    "address ignored\n", s, __func__);
				goto dropunlock;
			}
		} else
#endif
		{
			if (th->th_dport == th->th_sport &&
			    ip->ip_dst.s_addr == ip->ip_src.s_addr) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt from/to self "
					    "ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt from/to broad- "
					    "or multicast address ignored\n",
					    s, __func__);
				goto dropunlock;
			}
		}
		/*
		 * SYN appears to be valid.  Create compressed TCP state
		 * for syncache.
		 */
#ifdef TCPDEBUG
		if (so->so_options & SO_DEBUG)
			tcp_trace(TA_INPUT, ostate, tp,
			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		tcp_dooptions(&to, optp, optlen, TO_SYN);
		syncache_add(&inc, &to, th, inp, &so, m);
		/*
		 * Entry added to syncache and mbuf consumed.
		 * Everything already unlocked by syncache_add().
		 */
		INP_INFO_UNLOCK_ASSERT(&tcbinfo);
		return;
	}

	/*
	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
	 * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
	 * the inpcb, and unlocks pcbinfo.
	 */
	tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen);
	INP_INFO_UNLOCK_ASSERT(&tcbinfo);
	return;

dropwithreset:
	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	INP_INFO_WUNLOCK(&tcbinfo);

	if (inp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(inp);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
	goto drop;

dropunlock:
	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	if (inp != NULL)
		INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&tcbinfo);

drop:
	INP_INFO_UNLOCK_ASSERT(&tcbinfo);
	if (s != NULL)
		free(s, M_TCPLOG);
	if (m != NULL)
		m_freem(m);
}
static void
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen)
{
	int thflags, acked, ourfinisacked, needoutput = 0;
	int headlocked = 1;
	int rstreason, todrop, win;
	u_long tiwin;
	struct tcpopt to;

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
	thflags = th->th_flags;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	INP_WLOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 * XXX: This should be done after segment
	 * validation to ignore broken/spoofed segs.
	 */
	tp->t_rcvtime = ticks;
	if (TCPS_HAVEESTABLISHED(tp->t_state))
		tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);

	/*
	 * Unscale the window into a 32-bit value.
	 * For the SYN_SENT state the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;
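	/*
	 * E.g. with a received window scale of 3, an advertised th_win of
	 * 5840 represents 5840 << 3 = 46720 bytes of receive window.
	 */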
	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ? TO_SYN : 0);

	/*
	 * If echoed timestamp is later than the current time,
	 * fall back to non-RFC1323 RTT calculation.  Normalize
	 * timestamp if syncookies were used when this connection
	 * was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, ticks))
			to.to_tsecr = 0;
	}

	/*
	 * Process options only when we get SYN/ACK back. The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) &&
		    (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_wscale;
		}
		/*
		 * Initial send window.  It will be updated with
		 * the next incoming segment to the scaled value.
		 */
		tp->snd_wnd = th->th_win;
		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = ticks;
		}
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss);
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    (to.to_flags & TOF_SACKPERM) == 0)
			tp->t_flags &= ~TF_SACK_PERMIT;
	}
	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED first, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    th->th_seq == tp->rcv_nxt &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    tp->snd_nxt == tp->snd_max &&
	    tiwin && tiwin == tp->snd_wnd &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    LIST_EMPTY(&tp->t_segq) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent))) {
		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = ticks;
			tp->ts_recent = to.to_tsval;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    tp->snd_cwnd >= tp->snd_wnd &&
			    ((!tcp_do_newreno &&
			      !(tp->t_flags & TF_SACK_PERMIT) &&
			      tp->t_dupacks < tcprexmtthresh) ||
			     ((tcp_do_newreno ||
			       (tp->t_flags & TF_SACK_PERMIT)) &&
			      !IN_FASTRECOVERY(tp) &&
			      (to.to_flags & TOF_SACK) == 0 &&
			      TAILQ_EMPTY(&tp->snd_holes)))) {
				KASSERT(headlocked,
				    ("%s: headlocked", __func__));
				INP_INFO_WUNLOCK(&tcbinfo);
				headlocked = 0;
				/*
				 * This is a pure ack for outstanding data.
				 */
				++tcpstat.tcps_predack;
				/*
				 * "bad retransmit" recovery.
				 */
				if (tp->t_rxtshift == 1 &&
				    ticks < tp->t_badrxtwin) {
					++tcpstat.tcps_sndrexmitbad;
					tp->snd_cwnd = tp->snd_cwnd_prev;
					tp->snd_ssthresh =
					    tp->snd_ssthresh_prev;
					tp->snd_recover = tp->snd_recover_prev;
					if (tp->t_flags & TF_WASFRECOVERY)
						ENTER_FASTRECOVERY(tp);
					tp->snd_nxt = tp->snd_max;
					tp->t_badrxtwin = 0;
				}

				/*
				 * Recalculate the transmit timer / rtt.
				 *
				 * Some boxes send broken timestamp replies
				 * during the SYN+ACK phase, ignore
				 * timestamps of 0 or we could calculate a
				 * huge RTT and blow up the retransmit timer.
				 */
				if ((to.to_flags & TOF_TS) != 0 &&
				    to.to_tsecr) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - to.to_tsecr)
						tp->t_rttlow = ticks - to.to_tsecr;
					tcp_xmit_timer(tp,
					    ticks - to.to_tsecr + 1);
				} else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - tp->t_rtttime)
						tp->t_rttlow = ticks - tp->t_rtttime;
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime);
				}
				tcp_xmit_bandwidth_limit(tp, th->th_ack);
				acked = th->th_ack - tp->snd_una;
				tcpstat.tcps_rcvackpack++;
				tcpstat.tcps_rcvackbyte += acked;
				sbdrop(&so->so_snd, acked);
				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
				    SEQ_LEQ(th->th_ack, tp->snd_recover))
					tp->snd_recover = th->th_ack - 1;
				tp->snd_una = th->th_ack;
				/*
				 * Pull snd_wl2 up to prevent seq wrap relative
				 * to th_ack.
				 */
				tp->snd_wl2 = th->th_ack;
				tp->t_dupacks = 0;
				m_freem(m);
				ND6_HINT(tp); /* Some progress has been made. */

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
#ifdef TCPDEBUG
				if (so->so_options & SO_DEBUG)
					tcp_trace(TA_INPUT, ostate, tp,
					    (void *)tcp_saveipgen,
					    &tcp_savetcp, 0);
#endif
				if (tp->snd_una == tp->snd_max)
					tcp_timer_activate(tp, TT_REXMT, 0);
				else if (!tcp_timer_active(tp, TT_PERSIST))
					tcp_timer_activate(tp, TT_REXMT,
					    tp->t_rxtcur);
				sowwakeup(so);
				if (so->so_snd.sb_cc)
					(void) tcp_output(tp);
				goto check_delack;
			}
		} else if (th->th_ack == tp->snd_una &&
		    tlen <= sbspace(&so->so_rcv)) {
			int newsize = 0;	/* automatic sockbuf scaling */

			KASSERT(headlocked, ("%s: headlocked", __func__));
			INP_INFO_WUNLOCK(&tcbinfo);
			headlocked = 0;
			/*
			 * This is a pure, in-sequence data packet
			 * with nothing on the reassembly queue and
			 * we have enough buffer space to take it.
			 */
			/* Clean receiver SACK report if present */
			if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
				tcp_clean_sackreport(tp);
			++tcpstat.tcps_preddat;
			tp->rcv_nxt += tlen;
			/*
			 * Pull snd_wl1 up to prevent seq wrap relative to
			 * th_seq.
			 */
			tp->snd_wl1 = th->th_seq;
			/*
			 * Pull rcv_up up to prevent seq wrap relative to
			 * rcv_nxt.
			 */
			tp->rcv_up = tp->rcv_nxt;
			tcpstat.tcps_rcvpack++;
			tcpstat.tcps_rcvbyte += tlen;
			ND6_HINT(tp);	/* Some progress has been made */
#ifdef TCPDEBUG
			if (so->so_options & SO_DEBUG)
				tcp_trace(TA_INPUT, ostate, tp,
				    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
			/*
			 * Automatic sizing of receive socket buffer.  Often the send
			 * buffer size is not optimally adjusted to the actual network
			 * conditions at hand (delay bandwidth product).  Setting the
			 * buffer size too small limits throughput on links with high
			 * bandwidth and high delay (e.g. trans-continental/oceanic links).
			 *
			 * On the receive side the socket buffer memory is only rarely
			 * used to any significant extent.  This allows us to be much
			 * more aggressive in scaling the receive socket buffer.  For
			 * the case that the buffer space is actually used to a large
			 * extent and we run out of kernel memory we can simply drop
			 * the new segments; TCP on the sender will just retransmit it
			 * later.  Setting the buffer size too big may only consume too
			 * much kernel memory if the application doesn't read() from
			 * the socket or packet loss or reordering makes use of the
			 * reassembly queue.
			 *
			 * The criteria to step up the receive buffer one notch are:
			 *  1. the number of bytes received during the time it takes
			 *     one timestamp to be reflected back to us (the RTT);
			 *  2. received bytes per RTT is within seven eighths of the
			 *     current socket buffer size;
			 *  3. receive buffer size has not hit maximal automatic size;
			 *
			 * This algorithm does one step per RTT at most and only if
			 * we receive a bulk stream w/o packet losses or reorderings.
			 * Shrinking the buffer during idle times is not necessary as
			 * it doesn't consume any memory when idle.
			 *
			 * TODO: Only step up if the application is actually serving
			 * the buffer to better manage the socket buffer resources.
			 */
			if (tcp_do_autorcvbuf &&
			    to.to_tsecr &&
			    (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
				if (to.to_tsecr > tp->rfbuf_ts &&
				    to.to_tsecr - tp->rfbuf_ts < hz) {
					if (tp->rfbuf_cnt >
					    (so->so_rcv.sb_hiwat / 8 * 7) &&
					    so->so_rcv.sb_hiwat <
					    tcp_autorcvbuf_max) {
						newsize =
						    min(so->so_rcv.sb_hiwat +
						    tcp_autorcvbuf_inc,
						    tcp_autorcvbuf_max);
					}
					/* Start over with next RTT. */
					tp->rfbuf_ts = 0;
					tp->rfbuf_cnt = 0;
				} else {
					tp->rfbuf_cnt += tlen;	/* add up */
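					/*
					 * Worked example, assuming the
					 * defaults above: with sb_hiwat =
					 * 64KB, receiving more than
					 * 7/8 * 64KB = 56KB within one RTT
					 * steps the buffer up by recvbuf_inc
					 * (16KB) to 80KB, until recvbuf_max
					 * (256KB) is reached.
					 */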
				}
			}

			/* Add data to socket buffer. */
			SOCKBUF_LOCK(&so->so_rcv);
			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
				m_freem(m);
			} else {
				/*
				 * Set new socket buffer size.
				 * Give up when limit is reached.
				 */
				if (newsize)
					if (!sbreserve_locked(&so->so_rcv,
					    newsize, so, NULL))
						so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
				m_adj(m, drop_hdrlen);	/* delayed header drop */
				sbappendstream_locked(&so->so_rcv, m);
			}
			/* NB: sorwakeup_locked() does an implicit unlock. */
			sorwakeup_locked(so);
			if (DELAY_ACK(tp)) {
				tp->t_flags |= TF_DELACK;
			} else {
				tp->t_flags |= TF_ACKNOW;
				tcp_output(tp);
			}
			goto check_delack;
		}
	}

	/*
	 * Calculate amount of space in receive window,
	 * and then do TCP input processing.
	 * Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
	win = sbspace(&so->so_rcv);
	if (win < 0)
		win = 0;
	tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));

	/* Reset receive buffer auto scaling when not in bulk receive mode. */
	tp->rfbuf_ts = 0;
	tp->rfbuf_cnt = 0;
	switch (tp->t_state) {

	/*
	 * If the state is SYN_RECEIVED:
	 *	if seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	case TCPS_SYN_RECEIVED:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->snd_una) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		break;

	/*
	 * If the state is SYN_SENT:
	 *	if seg contains an ACK, but not for our SYN, drop the input.
	 *	if seg contains a RST, then drop the connection.
	 *	if seg does not contain SYN, then drop it.
	 * Otherwise this is an acceptable SYN segment
	 *	initialize tp->rcv_nxt and tp->irs
	 *	if seg contains ack then advance tp->snd_una
	 *	if SYN has been acked change to ESTABLISHED else SYN_RCVD state
	 *	arrange for segment to be acked (eventually)
	 *	continue processing rest of data/controls, beginning with URG
	 */
	case TCPS_SYN_SENT:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->iss) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_UNLIMITED;
			goto dropwithreset;
		}
		if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST))
			tp = tcp_drop(tp, ECONNREFUSED);
		if (thflags & TH_RST)
			goto drop;
		if (!(thflags & TH_SYN))
			goto drop;

		tp->irs = th->th_seq;
		tcp_rcvseqinit(tp);
		if (thflags & TH_ACK) {
			tcpstat.tcps_connects++;
			soisconnected(so);
#ifdef MAC
			SOCK_LOCK(so);
			mac_set_socket_peer_from_mbuf(m, so);
			SOCK_UNLOCK(so);
#endif
			/* Do window scaling on this connection? */
			if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
			    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
				tp->rcv_scale = tp->request_r_scale;
			}
			tp->rcv_adv += tp->rcv_wnd;
			tp->snd_una++;		/* SYN is acked */
			/*
			 * If there's data, delay ACK; if there's also a FIN
			 * ACKNOW will be turned on later.
			 */
			if (DELAY_ACK(tp) && tlen != 0)
				tcp_timer_activate(tp, TT_DELACK,
				    tcp_delacktime);
			else
				tp->t_flags |= TF_ACKNOW;
			/*
			 * Received <SYN,ACK> in SYN_SENT[*] state.
			 * Transitions:
			 *	SYN_SENT  --> ESTABLISHED
			 *	SYN_SENT* --> FIN_WAIT_1
			 */
			tp->t_starttime = ticks;
			if (tp->t_flags & TF_NEEDFIN) {
				tp->t_state = TCPS_FIN_WAIT_1;
				tp->t_flags &= ~TF_NEEDFIN;
				thflags &= ~TH_SYN;
			} else {
				tp->t_state = TCPS_ESTABLISHED;
				tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
			}
		} else {
			/*
			 * Received initial SYN in SYN-SENT[*] state =>
			 * simultaneous open.  If segment contains CC option
			 * and there is a cached CC, apply TAO test.
			 * If it succeeds, connection is half-synchronized.
			 * Otherwise, do 3-way handshake:
			 *	SYN-SENT -> SYN-RECEIVED
			 *	SYN-SENT* -> SYN-RECEIVED*
			 * If there was no CC option, clear cached CC value.
			 */
			tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
			tcp_timer_activate(tp, TT_REXMT, 0);
			tp->t_state = TCPS_SYN_RECEIVED;
		}
trimthenstep6:
		KASSERT(headlocked, ("%s: trimthenstep6: head not locked",
		    __func__));
		INP_WLOCK_ASSERT(tp->t_inpcb);

		/*
		 * Advance th->th_seq to correspond to first data byte.
		 * If data, trim to stay within window,
		 * dropping FIN if necessary.
		 */
		th->th_seq++;
		if (tlen > tp->rcv_wnd) {
			todrop = tlen - tp->rcv_wnd;
			m_adj(m, -todrop);
			tlen = tp->rcv_wnd;
			thflags &= ~TH_FIN;
			tcpstat.tcps_rcvpackafterwin++;
			tcpstat.tcps_rcvbyteafterwin += todrop;
		}
		tp->snd_wl1 = th->th_seq - 1;
		tp->rcv_up = th->th_seq;
		/*
		 * Client side of transaction: already sent SYN and data.
		 * If the remote host used T/TCP to validate the SYN,
		 * our data will be ACK'd; if so, enter normal data segment
		 * processing in the middle of step 5, ack processing.
		 * Otherwise, goto step 6.
		 */
		if (thflags & TH_ACK)
			goto process_ACK;
		goto step6;

	/*
	 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
	 *	do normal processing.
	 *
	 * NB: Leftover from RFC1644 T/TCP.  Cases to be reused later.
	 */
	case TCPS_LAST_ACK:
	case TCPS_CLOSING:
		break;			/* continue normal processing */
	}
	/*
	 * States other than LISTEN or SYN_SENT.
	 * First check the RST flag and sequence number since reset segments
	 * are exempt from the timestamp and connection count tests.  This
	 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
	 * below which allowed reset segments in half the sequence space
	 * to fall through and be processed (which gives forged reset
	 * segments with a random sequence number a 50 percent chance of
	 * killing a connection).
	 * Then check timestamp, if present.
	 * Then check the connection count, if present.
	 * Then check that at least some bytes of segment are within
	 * receive window.  If segment begins before rcv_nxt,
	 * drop leading data (and SYN); if nothing left, just ack.
	 */

	/*
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 *
	 * In all states except SYN-SENT, all reset (RST) segments
	 * are validated by checking their SEQ-fields.  A reset is
	 * valid if its sequence number is in the window.
	 * Note: this does not take into account delayed ACKs, so
	 *   we should test against last_ack_sent instead of rcv_nxt.
	 *   The sequence number in the reset segment is normally an
	 *   echo of our outgoing acknowledgement numbers, but some hosts
	 *   send a reset with the sequence number at the rightmost edge
	 *   of our receive window, and we have to handle this case.
	 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
	 *   that brute force RST attacks are possible.  To combat this,
	 *   we use a much stricter check while in the ESTABLISHED state,
	 *   only accepting RSTs where the sequence number is equal to
	 *   last_ack_sent.  In all other states (the states in which a
	 *   RST is more likely), the more permissive check is used.
	 * If we have multiple segments in flight, the initial reset
	 * segment sequence numbers will be to the left of last_ack_sent,
	 * but they will eventually catch up.
	 * In any case, it never made sense to trim reset segments to
	 * fit the receive window since RFC 1122 says:
	 *   4.2.2.12  RST Segment: RFC-793 Section 3.4
	 *
	 *     A TCP SHOULD allow a received RST segment to include data.
	 *
	 *     DISCUSSION
	 *	    It has been suggested that a RST segment could contain
	 *	    ASCII text that encoded and explained the cause of the
	 *	    RST.  No standard has yet been established for such
	 *	    data.
	 *
	 * If the reset segment passes the sequence number test examine
	 * the state:
	 *    SYN_RECEIVED STATE:
	 *	If passive open, return to LISTEN state.
	 *	If active open, inform user that connection was refused.
	 *    ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
	 *	Inform user that connection was reset, and close tcb.
	 *    CLOSING, LAST_ACK STATES:
	 *	Close the tcb.
	 *    TIME_WAIT STATE:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *	RFC 1337.
	 */
	if (thflags & TH_RST) {
		if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
			switch (tp->t_state) {

			case TCPS_SYN_RECEIVED:
				so->so_error = ECONNREFUSED;
				goto close;

			case TCPS_ESTABLISHED:
				if (tcp_insecure_rst == 0 &&
				    !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) &&
				    SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) &&
				    !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
				    SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) {
					tcpstat.tcps_badrst++;
					goto drop;
				}
				/* FALLTHROUGH */
			case TCPS_FIN_WAIT_1:
			case TCPS_FIN_WAIT_2:
			case TCPS_CLOSE_WAIT:
				so->so_error = ECONNRESET;
			close:
				tp->t_state = TCPS_CLOSED;
				tcpstat.tcps_drops++;
				KASSERT(headlocked, ("%s: trimthenstep6: "
				    "tcp_close: head not locked", __func__));
				tp = tcp_close(tp);
				break;

			case TCPS_CLOSING:
			case TCPS_LAST_ACK:
				KASSERT(headlocked, ("%s: trimthenstep6: "
				    "tcp_close.2: head not locked", __func__));
				tp = tcp_close(tp);
				break;
			}
		}
		goto drop;
	}
	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {

		/* Check to see if ts_recent is over 24 days old.  */
		if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) {
			/*
			 * Invalidate ts_recent.  If this segment updates
			 * ts_recent, the age will be reset later and ts_recent
			 * will get a valid value.  If it does not, setting
			 * ts_recent to zero will at least satisfy the
			 * requirement that zero be placed in the timestamp
			 * echo reply when ts_recent isn't valid.  The
			 * age isn't reset until we get a valid ts_recent
			 * because we don't want out-of-order segments to be
			 * dropped when ts_recent is old.
			 */
			tp->ts_recent = 0;
		} else {
			tcpstat.tcps_rcvduppack++;
			tcpstat.tcps_rcvdupbyte += tlen;
			tcpstat.tcps_pawsdrop++;
			if (tlen)
				goto dropafterack;
			goto drop;
		}
	}

	/*
	 * In the SYN-RECEIVED state, validate that the packet belongs to
	 * this connection before trimming the data to fit the receive
	 * window.  Check the sequence number versus IRS since we know
	 * the sequence numbers haven't wrapped.  This is a partial fix
	 * for the "LAND" DoS attack.
	 */
	if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
		rstreason = BANDLIM_RST_OPENPORT;
		goto dropwithreset;
	}
	todrop = tp->rcv_nxt - th->th_seq;
	if (todrop > 0) {
		if (thflags & TH_SYN) {
			thflags &= ~TH_SYN;
			th->th_seq++;
			if (th->th_urp > 1)
				th->th_urp--;
			else
				thflags &= ~TH_URG;
			todrop--;
		}
		/*
		 * Following if statement from Stevens, vol. 2, p. 960.
		 */
		if (todrop > tlen
		    || (todrop == tlen && (thflags & TH_FIN) == 0)) {
			/*
			 * Any valid FIN must be to the left of the window.
			 * At this point the FIN must be a duplicate or out
			 * of sequence; drop it.
			 */
			thflags &= ~TH_FIN;

			/*
			 * Send an ACK to resynchronize and drop any data.
			 * But keep on processing for RST or ACK.
			 */
			tp->t_flags |= TF_ACKNOW;
			todrop = tlen;
			tcpstat.tcps_rcvduppack++;
			tcpstat.tcps_rcvdupbyte += todrop;
		} else {
			tcpstat.tcps_rcvpartduppack++;
			tcpstat.tcps_rcvpartdupbyte += todrop;
		}
		drop_hdrlen += todrop;	/* drop from the top afterwards */
		th->th_seq += todrop;
		tlen -= todrop;
		if (th->th_urp > todrop)
			th->th_urp -= todrop;
		else {
			thflags &= ~TH_URG;
			th->th_urp = 0;
		}
	}
	/*
	 * If new data are received on a connection after the
	 * user processes are gone, then RST the other end.
	 */
	if ((so->so_state & SS_NOFDREF) &&
	    tp->t_state > TCPS_CLOSE_WAIT && tlen) {
		char *s;

		KASSERT(headlocked, ("%s: trimthenstep6: tcp_close.3: head "
		    "not locked", __func__));
		if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data after socket "
			    "was closed, sending RST and removing tcpcb\n",
			    s, __func__, tcpstates[tp->t_state], tlen);
			free(s, M_TCPLOG);
		}
		tp = tcp_close(tp);
		tcpstat.tcps_rcvafterclose++;
		rstreason = BANDLIM_UNLIMITED;
		goto dropwithreset;
	}
	/*
	 * If segment ends after window, drop trailing data
	 * (and PUSH and FIN); if nothing left, just ACK.
	 */
	todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
	if (todrop > 0) {
		tcpstat.tcps_rcvpackafterwin++;
		if (todrop >= tlen) {
			tcpstat.tcps_rcvbyteafterwin += tlen;
			/*
			 * If window is closed can only take segments at
			 * window edge, and have to drop data and PUSH from
			 * incoming segments.  Continue processing, but
			 * remember to ack.  Otherwise, drop segment
			 * and ack.
			 */
			if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
				tp->t_flags |= TF_ACKNOW;
				tcpstat.tcps_rcvwinprobe++;
			} else
				goto dropafterack;
		} else
			tcpstat.tcps_rcvbyteafterwin += todrop;
		m_adj(m, -todrop);
		tlen -= todrop;
		thflags &= ~(TH_PUSH|TH_FIN);
	}
	/*
	 * If last ACK falls within this segment's sequence numbers,
	 * record its timestamp.
	 * NOTE:
	 * 1) That the test incorporates suggestions from the latest
	 *    proposal of the tcplw@cray.com list (Braden 1993/04/26).
	 * 2) That updating only on newer timestamps interferes with
	 *    our earlier PAWS tests, so this check should be solely
	 *    predicated on the sequence space of this segment.
	 * 3) That we modify the segment boundary check to be
	 *        Last.ACK.Sent <= SEG.SEQ + SEG.Len
	 *    instead of RFC1323's
	 *        Last.ACK.Sent < SEG.SEQ + SEG.Len,
	 *    This modified check allows us to overcome RFC1323's
	 *    limitations as described in Stevens TCP/IP Illustrated
	 *    Vol. 2 p.869.  In such cases, we can still calculate the
	 *    RTT correctly when RCV.NXT == Last.ACK.Sent.
	 */
	if ((to.to_flags & TOF_TS) != 0 &&
	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
	    ((thflags & (TH_SYN|TH_FIN)) != 0))) {
		tp->ts_recent_age = ticks;
		tp->ts_recent = to.to_tsval;
	}
	/*
	 * If a SYN is in the window, then this is an
	 * error and we send an RST and drop the connection.
	 */
	if (thflags & TH_SYN) {
		KASSERT(headlocked, ("%s: tcp_drop: trimthenstep6: "
		    "head not locked", __func__));
		tp = tcp_drop(tp, ECONNRESET);
		rstreason = BANDLIM_UNLIMITED;
		goto dropwithreset;
	}

	/*
	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN
	 * flag is on (half-synchronized state), then queue data for
	 * later processing; else drop segment and return.
	 */
	if ((thflags & TH_ACK) == 0) {
		if (tp->t_state == TCPS_SYN_RECEIVED ||
		    (tp->t_flags & TF_NEEDSYN))
			goto step6;
		else if (tp->t_flags & TF_ACKNOW)
			goto dropafterack;
		else
			goto drop;
	}
	/*
	 * Ack processing.
	 */
	switch (tp->t_state) {

	/*
	 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
	 * ESTABLISHED state and continue processing.
	 * The ACK was checked above.
	 */
	case TCPS_SYN_RECEIVED:

		tcpstat.tcps_connects++;
		soisconnected(so);
		/* Do window scaling? */
		if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
		    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
			tp->rcv_scale = tp->request_r_scale;
			tp->snd_wnd = tiwin;
		}
		/*
		 * Make transitions:
		 *	SYN-RECEIVED  -> ESTABLISHED
		 *	SYN-RECEIVED* -> FIN-WAIT-1
		 */
		tp->t_starttime = ticks;
		if (tp->t_flags & TF_NEEDFIN) {
			tp->t_state = TCPS_FIN_WAIT_1;
			tp->t_flags &= ~TF_NEEDFIN;
		} else {
			tp->t_state = TCPS_ESTABLISHED;
			tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
		}
		/*
		 * If segment contains data or ACK, will call tcp_reass()
		 * later; if not, do so now to pass queued data to user.
		 */
		if (tlen == 0 && (thflags & TH_FIN) == 0)
			(void) tcp_reass(tp, (struct tcphdr *)0, 0,
			    (struct mbuf *)0);
		tp->snd_wl1 = th->th_seq - 1;
		/* FALLTHROUGH */
	/*
	 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
	 * ACKs.  If the ack is in the range
	 *	tp->snd_una < th->th_ack <= tp->snd_max
	 * then advance tp->snd_una to th->th_ack and drop
	 * data from the retransmission queue.  If this ACK reflects
	 * more up to date window information we update our window information.
	 */
	case TCPS_ESTABLISHED:
	case TCPS_FIN_WAIT_1:
	case TCPS_FIN_WAIT_2:
	case TCPS_CLOSE_WAIT:
	case TCPS_CLOSING:
	case TCPS_LAST_ACK:
		if (SEQ_GT(th->th_ack, tp->snd_max)) {
			tcpstat.tcps_rcvacktoomuch++;
			goto dropafterack;
		}
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    ((to.to_flags & TOF_SACK) ||
		     !TAILQ_EMPTY(&tp->snd_holes)))
			tcp_sack_doack(tp, &to, th->th_ack);
		if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
			if (tlen == 0 && tiwin == tp->snd_wnd) {
				tcpstat.tcps_rcvdupack++;
				/*
				 * If we have outstanding data (other than
				 * a window probe), this is a completely
				 * duplicate ack (ie, window info didn't
				 * change), the ack is the biggest we've
				 * seen and we've seen exactly our rexmt
				 * threshold of them, assume a packet
				 * has been dropped and retransmit it.
				 * Kludge snd_nxt & the congestion
				 * window so we send only this one
				 * packet.
				 *
				 * We know we're losing at the current
				 * window size so do congestion avoidance
				 * (set ssthresh to half the current window
				 * and pull our congestion window back to
				 * the new ssthresh).
				 *
				 * Dup acks mean that packets have left the
				 * network (they're now cached at the receiver)
				 * so bump cwnd by the amount in the receiver
				 * to keep a constant number of packets in
				 * the network.
				 */
				if (!tcp_timer_active(tp, TT_REXMT) ||
				    th->th_ack != tp->snd_una)
					tp->t_dupacks = 0;
				else if (++tp->t_dupacks > tcprexmtthresh ||
				    ((tcp_do_newreno ||
				      (tp->t_flags & TF_SACK_PERMIT)) &&
				     IN_FASTRECOVERY(tp))) {
					if ((tp->t_flags & TF_SACK_PERMIT) &&
					    IN_FASTRECOVERY(tp)) {
						int awnd;

						/*
						 * Compute the amount of data in flight first.
						 * We can inject new data into the pipe iff
						 * we have less than 1/2 the original window's
						 * worth of data in flight.
						 */
						awnd = (tp->snd_nxt - tp->snd_fack) +
						    tp->sackhint.sack_bytes_rexmit;
						if (awnd < tp->snd_ssthresh) {
							tp->snd_cwnd += tp->t_maxseg;
							if (tp->snd_cwnd > tp->snd_ssthresh)
								tp->snd_cwnd = tp->snd_ssthresh;
						}
					} else
						tp->snd_cwnd += tp->t_maxseg;
					(void) tcp_output(tp);
					goto drop;
				} else if (tp->t_dupacks == tcprexmtthresh) {
					tcp_seq onxt = tp->snd_nxt;

					/*
					 * If we're doing sack, check to
					 * see if we're already in sack
					 * recovery. If we're not doing sack,
					 * check to see if we're in newreno
					 * recovery.
					 */
					if (tp->t_flags & TF_SACK_PERMIT) {
						if (IN_FASTRECOVERY(tp)) {
							tp->t_dupacks = 0;
							break;
						}
					} else if (tcp_do_newreno) {
						if (SEQ_LEQ(th->th_ack,
						    tp->snd_recover)) {
							tp->t_dupacks = 0;
							break;
						}
					}
					win = min(tp->snd_wnd, tp->snd_cwnd) /
					    2 / tp->t_maxseg;
					if (win < 2)
						win = 2;
					tp->snd_ssthresh = win * tp->t_maxseg;
					ENTER_FASTRECOVERY(tp);
					tp->snd_recover = tp->snd_max;
					tcp_timer_activate(tp, TT_REXMT, 0);
					tp->t_rtttime = 0;
					if (tp->t_flags & TF_SACK_PERMIT) {
						tcpstat.tcps_sack_recovery_episode++;
						tp->sack_newdata = tp->snd_nxt;
						tp->snd_cwnd = tp->t_maxseg;
						(void) tcp_output(tp);
						goto drop;
					}
					tp->snd_nxt = th->th_ack;
					tp->snd_cwnd = tp->t_maxseg;
					(void) tcp_output(tp);
					KASSERT(tp->snd_limited <= 2,
					    ("%s: tp->snd_limited too big",
					    __func__));
					tp->snd_cwnd = tp->snd_ssthresh +
					    tp->t_maxseg *
					    (tp->t_dupacks - tp->snd_limited);
					if (SEQ_GT(onxt, tp->snd_nxt))
						tp->snd_nxt = onxt;
					goto drop;
				} else if (tcp_do_rfc3042) {
					u_long oldcwnd = tp->snd_cwnd;
					tcp_seq oldsndmax = tp->snd_max;
					u_int sent;

					KASSERT(tp->t_dupacks == 1 ||
					    tp->t_dupacks == 2,
					    ("%s: dupacks not 1 or 2",
					    __func__));
					if (tp->t_dupacks == 1)
						tp->snd_limited = 0;
					tp->snd_cwnd =
					    (tp->snd_nxt - tp->snd_una) +
					    (tp->t_dupacks - tp->snd_limited) *
					    tp->t_maxseg;
					(void) tcp_output(tp);
					sent = tp->snd_max - oldsndmax;
					if (sent > tp->t_maxseg) {
						KASSERT((tp->t_dupacks == 2 &&
						    tp->snd_limited == 0) ||
						    (sent == tp->t_maxseg + 1 &&
						    tp->t_flags & TF_SENTFIN),
						    ("%s: sent too much",
						    __func__));
						tp->snd_limited = 2;
					} else if (sent > 0)
						++tp->snd_limited;
					tp->snd_cwnd = oldcwnd;
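					/*
					 * NB: RFC 3042 (Limited Transmit): on
					 * the first and second duplicate ACKs
					 * we may send one previously unsent
					 * segment; snd_cwnd is only widened
					 * for the single tcp_output() call
					 * and restored to oldcwnd right
					 * above, so the congestion window
					 * itself is not inflated.
					 */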
					goto drop;
				}
			} else
				tp->t_dupacks = 0;
			break;
		}

		KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
		    ("%s: th_ack <= snd_una", __func__));
		/*
		 * If the congestion window was inflated to account
		 * for the other side's cached packets, retract it.
		 */
		if (tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) {
			if (IN_FASTRECOVERY(tp)) {
				if (SEQ_LT(th->th_ack, tp->snd_recover)) {
					if (tp->t_flags & TF_SACK_PERMIT)
						tcp_sack_partialack(tp, th);
					else
						tcp_newreno_partial_ack(tp, th);
				} else {
					/*
					 * Out of fast recovery.
					 * Window inflation should have left us
					 * with approximately snd_ssthresh
					 * outstanding data.
					 * But in case we would be inclined to
					 * send a burst, better to do it via
					 * the slow start mechanism.
					 */
					if (SEQ_GT(th->th_ack +
					    tp->snd_cwnd,
					    tp->snd_max))
						tp->snd_cwnd = tp->snd_max -
						    th->th_ack +
						    tp->t_maxseg;
					else
						tp->snd_cwnd = tp->snd_ssthresh;
				}
			}
		} else {
			if (tp->t_dupacks >= tcprexmtthresh &&
			    tp->snd_cwnd > tp->snd_ssthresh)
				tp->snd_cwnd = tp->snd_ssthresh;
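			/*
			 * Recovery arithmetic above, roughly: on the third
			 * duplicate ACK ssthresh becomes max(2, min(snd_wnd,
			 * snd_cwnd) / 2 / t_maxseg) segments, the lost
			 * segment is retransmitted with a one-segment cwnd,
			 * and cwnd is then re-inflated by one segment per
			 * further duplicate ACK received.
			 */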
		}
		tp->t_dupacks = 0;
		/*
		 * If we reach this point, ACK is not a duplicate,
		 * i.e., it ACKs something we sent.
		 */
		if (tp->t_flags & TF_NEEDSYN) {
			/*
			 * T/TCP: Connection was half-synchronized, and our
			 * SYN has been ACK'd (so connection is now fully
			 * synchronized).  Go to non-starred state,
			 * increment snd_una for ACK of SYN, and check if
			 * we can do window scaling.
			 */
			tp->t_flags &= ~TF_NEEDSYN;
			tp->snd_una++;
			/* Do window scaling? */
			if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
			    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
				tp->rcv_scale = tp->request_r_scale;
				/* Send window already scaled. */
			}
		}

process_ACK:
		KASSERT(headlocked, ("%s: process_ACK: head not locked",
		    __func__));
		INP_WLOCK_ASSERT(tp->t_inpcb);

		acked = th->th_ack - tp->snd_una;
		tcpstat.tcps_rcvackpack++;
		tcpstat.tcps_rcvackbyte += acked;

		/*
		 * If we just performed our first retransmit, and the ACK
		 * arrives within our recovery window, then it was a mistake
		 * to do the retransmit in the first place.  Recover our
		 * original cwnd and ssthresh, and proceed to transmit where
		 * we left off.
		 */
		if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) {
			++tcpstat.tcps_sndrexmitbad;
			tp->snd_cwnd = tp->snd_cwnd_prev;
			tp->snd_ssthresh = tp->snd_ssthresh_prev;
			tp->snd_recover = tp->snd_recover_prev;
			if (tp->t_flags & TF_WASFRECOVERY)
				ENTER_FASTRECOVERY(tp);
			tp->snd_nxt = tp->snd_max;
			tp->t_badrxtwin = 0;	/* XXX probably not required */
		}
		/*
		 * If we have a timestamp reply, update smoothed
		 * round trip time.  If no timestamp is present but
		 * transmit timer is running and timed sequence
		 * number was acked, update smoothed round trip time.
		 * Since we now have an rtt measurement, cancel the
		 * timer backoff (cf., Phil Karn's retransmit alg.).
		 * Recompute the initial retransmit timer.
		 *
		 * Some boxes send broken timestamp replies
		 * during the SYN+ACK phase, ignore
		 * timestamps of 0 or we could calculate a
		 * huge RTT and blow up the retransmit timer.
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    to.to_tsecr) {
			if (!tp->t_rttlow || tp->t_rttlow > ticks - to.to_tsecr)
				tp->t_rttlow = ticks - to.to_tsecr;
			tcp_xmit_timer(tp, ticks - to.to_tsecr + 1);
		} else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
			if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
				tp->t_rttlow = ticks - tp->t_rtttime;
			tcp_xmit_timer(tp, ticks - tp->t_rtttime);
		}
		tcp_xmit_bandwidth_limit(tp, th->th_ack);

		/*
		 * If all outstanding data is acked, stop retransmit
		 * timer and remember to restart (more output or persist).
		 * If there is more data to be acked, restart retransmit
		 * timer, using current (possibly backed-off) value.
		 */
		if (th->th_ack == tp->snd_max) {
			tcp_timer_activate(tp, TT_REXMT, 0);
			needoutput = 1;
		} else if (!tcp_timer_active(tp, TT_PERSIST))
			tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);

		/*
		 * If no data (only SYN) was ACK'd,
		 * skip rest of ACK processing.
		 */
		if (acked == 0)
			goto step6;
		/*
		 * When new data is acked, open the congestion window.
		 * If the window gives us less than ssthresh packets
		 * in flight, open exponentially (maxseg per packet).
		 * Otherwise open linearly: maxseg per window
		 * (maxseg^2 / cwnd per packet).
		 * If cwnd > maxseg^2, fix the cwnd increment at 1 byte
		 * to avoid capping cwnd (as suggested in RFC2581).
		 */
		if ((!tcp_do_newreno && !(tp->t_flags & TF_SACK_PERMIT)) ||
		    !IN_FASTRECOVERY(tp)) {
			u_int cw = tp->snd_cwnd;
			u_int incr = tp->t_maxseg;

			if (cw > tp->snd_ssthresh)
				incr = max((incr * incr / cw), 1);
			tp->snd_cwnd = min(cw + incr, TCP_MAXWIN << tp->snd_scale);
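			/*
			 * E.g. with t_maxseg = 1460: below ssthresh each ACK
			 * grows cwnd by 1460 bytes (slow start); above it, at
			 * cwnd = 73000 an ACK adds only 1460*1460/73000 = 29
			 * bytes, i.e. about one segment per window
			 * (congestion avoidance).
			 */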
		}
		SOCKBUF_LOCK(&so->so_snd);
		if (acked > so->so_snd.sb_cc) {
			tp->snd_wnd -= so->so_snd.sb_cc;
			sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc);
			ourfinisacked = 1;
		} else {
			sbdrop_locked(&so->so_snd, acked);
			tp->snd_wnd -= acked;
			ourfinisacked = 0;
		}
		/* NB: sowwakeup_locked() does an implicit unlock. */
		sowwakeup_locked(so);
		/* Detect una wraparound. */
		if ((tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) &&
		    !IN_FASTRECOVERY(tp) &&
		    SEQ_GT(tp->snd_una, tp->snd_recover) &&
		    SEQ_LEQ(th->th_ack, tp->snd_recover))
			tp->snd_recover = th->th_ack - 1;
		if ((tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) &&
		    IN_FASTRECOVERY(tp) &&
		    SEQ_GEQ(th->th_ack, tp->snd_recover))
			EXIT_FASTRECOVERY(tp);
		tp->snd_una = th->th_ack;
		if (tp->t_flags & TF_SACK_PERMIT) {
			if (SEQ_GT(tp->snd_una, tp->snd_recover))
				tp->snd_recover = tp->snd_una;
		}
		if (SEQ_LT(tp->snd_nxt, tp->snd_una))
			tp->snd_nxt = tp->snd_una;
2066 switch (tp->t_state) {
2069 * In FIN_WAIT_1 STATE in addition to the processing
2070 * for the ESTABLISHED state if our FIN is now acknowledged
2071 * then enter FIN_WAIT_2.
2073 case TCPS_FIN_WAIT_1:
2074 if (ourfinisacked) {
2076 * If we can't receive any more
2077 * data, then closing user can proceed.
2078 * Starting the timer is contrary to the
2079 * specification, but if we don't get a FIN
2080 * we'll hang forever.
2083 * we should release the tp also, and use a
2086 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2089 soisdisconnected(so);
2090 timeout = (tcp_fast_finwait2_recycle) ?
2091 tcp_finwait2_timeout : tcp_maxidle;
2092 tcp_timer_activate(tp, TT_2MSL, timeout);
2094 tp->t_state = TCPS_FIN_WAIT_2;
2098 /*
2099 * In CLOSING STATE, in addition to the processing for
2100 * the ESTABLISHED state, if the ACK acknowledges our FIN
2101 * then enter the TIME-WAIT state, otherwise ignore
2102 * the segment.
2103 */
2104 case TCPS_CLOSING:
2105 if (ourfinisacked) {
2106 KASSERT(headlocked, ("%s: process_ACK: "
2107 "head not locked", __func__));
2108 tcp_twstart(tp);
2109 INP_INFO_WUNLOCK(&tcbinfo);
2110 headlocked = 0;
2111 m_freem(m);
2112 return;
2113 }
2114 break;
2116 /*
2117 * In LAST_ACK, we may still be waiting for data to drain
2118 * and/or to be acked, as well as for the ack of our FIN.
2119 * If our FIN is now acknowledged, delete the TCB,
2120 * enter the closed state and return.
2121 */
2122 case TCPS_LAST_ACK:
2123 if (ourfinisacked) {
2124 KASSERT(headlocked, ("%s: process_ACK: "
2125 "tcp_close: head not locked", __func__));
2126 tp = tcp_close(tp);
2127 m_freem(m);
2128 return;
2129 }
2130 break;
2131 }

2133 step6:
2134 KASSERT(headlocked, ("%s: step6: head not locked", __func__));
2135 INP_WLOCK_ASSERT(tp->t_inpcb);
2137 /*
2138 * Update window information.
2139 * Don't look at window if no ACK: TACs send garbage on first SYN.
2140 */
2141 if ((thflags & TH_ACK) &&
2142 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
2143 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
2144 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
2145 /* keep track of pure window updates */
2146 if (tlen == 0 &&
2147 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
2148 tcpstat.tcps_rcvwinupd++;
2149 tp->snd_wnd = tiwin;
2150 tp->snd_wl1 = th->th_seq;
2151 tp->snd_wl2 = th->th_ack;
2152 if (tp->snd_wnd > tp->max_sndwnd)
2153 tp->max_sndwnd = tp->snd_wnd;
2154 needoutput = 1;
2155 }
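/*
 * Illustrative note (added): the three-part test above is the window
 * update ordering rule from RFC 793: accept the advertised window only
 * from a segment with a newer seq, or the same seq and a newer ack, or
 * the same seq/ack and a larger window, so a reordered old segment
 * never overwrites newer window information.
 */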
2157 /*
2158 * Process segments with URG.
2159 */
2160 if ((thflags & TH_URG) && th->th_urp &&
2161 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2162 /*
2163 * This is a kludge, but if we receive and accept
2164 * random urgent pointers, we'll crash in
2165 * soreceive. It's hard to imagine someone
2166 * actually wanting to send this much urgent data.
2167 */
2168 SOCKBUF_LOCK(&so->so_rcv);
2169 if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
2170 th->th_urp = 0; /* XXX */
2171 thflags &= ~TH_URG; /* XXX */
2172 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
2173 goto dodata; /* XXX */
2174 }
2175 /*
2176 * If this segment advances the known urgent pointer,
2177 * then mark the data stream. This should not happen
2178 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
2179 * a FIN has been received from the remote side.
2180 * In these states we ignore the URG.
2181 *
2182 * According to RFC961 (Assigned Protocols),
2183 * the urgent pointer points to the last octet
2184 * of urgent data. We continue, however,
2185 * to consider it to indicate the first octet
2186 * of data past the urgent section as the original
2187 * spec states (in one of two places).
2188 */
2189 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
2190 tp->rcv_up = th->th_seq + th->th_urp;
2191 so->so_oobmark = so->so_rcv.sb_cc +
2192 (tp->rcv_up - tp->rcv_nxt) - 1;
2193 if (so->so_oobmark == 0)
2194 so->so_rcv.sb_state |= SBS_RCVATMARK;
2195 sohasoutofband(so);
2196 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
2197 }
2198 SOCKBUF_UNLOCK(&so->so_rcv);
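/*
 * Worked example (added): if rcv_nxt = 1000, the segment announces
 * th_seq + th_urp = 1005 and 20 bytes already sit in so_rcv, then
 * so_oobmark = 20 + (1005 - 1000) - 1 = 24, i.e. the urgent byte is
 * the 25th byte the application will read from this point.
 */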
2199 /*
2200 * Remove out of band data so it doesn't get presented to the user.
2201 * This can happen independent of advancing the URG pointer,
2202 * but if two URG's are pending at once, some out-of-band
2203 * data may creep in... ick.
2204 */
2205 if (th->th_urp <= (u_long)tlen &&
2206 !(so->so_options & SO_OOBINLINE)) {
2207 /* hdr drop is delayed */
2208 tcp_pulloutofband(so, th, m, drop_hdrlen);
2209 }
2210 } else {
2211 /*
2212 * If no out of band data is expected,
2213 * pull receive urgent pointer along
2214 * with the receive window.
2215 */
2216 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
2217 tp->rcv_up = tp->rcv_nxt;
2218 }
2219 dodata: /* XXX */
2220 KASSERT(headlocked, ("%s: dodata: head not locked", __func__));
2221 INP_WLOCK_ASSERT(tp->t_inpcb);
2223 /*
2224 * Process the segment text, merging it into the TCP sequencing queue,
2225 * and arranging for acknowledgment of receipt if necessary.
2226 * This process logically involves adjusting tp->rcv_wnd as data
2227 * is presented to the user (this happens in tcp_usrreq.c,
2228 * case PRU_RCVD). If a FIN has already been received on this
2229 * connection then we just ignore the text.
2230 */
2231 if ((tlen || (thflags & TH_FIN)) &&
2232 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2233 tcp_seq save_start = th->th_seq;
2234 m_adj(m, drop_hdrlen); /* delayed header drop */
2235 /*
2236 * Insert segment which includes th into TCP reassembly queue
2237 * with control block tp. Set thflags to whether reassembly now
2238 * includes a segment with FIN. This handles the common case
2239 * inline (segment is the next to be received on an established
2240 * connection, and the queue is empty), avoiding linkage into
2241 * and removal from the queue and repetition of various
2242 * conversions.
2243 * Set DELACK for segments received in order, but ack
2244 * immediately when segments are out of order (so
2245 * fast retransmit can work).
2246 */
2247 if (th->th_seq == tp->rcv_nxt &&
2248 LIST_EMPTY(&tp->t_segq) &&
2249 TCPS_HAVEESTABLISHED(tp->t_state)) {
2250 if (DELAY_ACK(tp))
2251 tp->t_flags |= TF_DELACK;
2252 else
2253 tp->t_flags |= TF_ACKNOW;
2254 tp->rcv_nxt += tlen;
2255 thflags = th->th_flags & TH_FIN;
2256 tcpstat.tcps_rcvpack++;
2257 tcpstat.tcps_rcvbyte += tlen;
2258 ND6_HINT(tp);
2259 SOCKBUF_LOCK(&so->so_rcv);
2260 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
2261 m_freem(m);
2262 else
2263 sbappendstream_locked(&so->so_rcv, m);
2264 /* NB: sorwakeup_locked() does an implicit unlock. */
2265 sorwakeup_locked(so);
2266 } else {
2267 /*
2268 * XXX: Due to the header drop above "th" is
2269 * theoretically invalid by now. Fortunately
2270 * m_adj() doesn't actually free any mbufs
2271 * when trimming from the head.
2272 */
2273 thflags = tcp_reass(tp, th, &tlen, m);
2274 tp->t_flags |= TF_ACKNOW;
2275 }
2276 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT))
2277 tcp_update_sack_list(tp, save_start, save_start + tlen);
2279 /*
2280 * Note the amount of data that peer has sent into
2281 * our window, in order to estimate the sender's
2282 * buffer size.
2283 */
2285 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
2292 /*
2293 * If FIN is received, ACK the FIN and let the user know
2294 * that the connection is closing.
2295 */
2296 if (thflags & TH_FIN) {
2297 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2298 socantrcvmore(so);
2299 /*
2300 * If connection is half-synchronized
2301 * (i.e., NEEDSYN flag on) then delay ACK,
2302 * so it may be piggybacked when SYN is sent.
2303 * Otherwise, since we received a FIN then no
2304 * more input can be expected, send ACK now.
2305 */
2306 if (tp->t_flags & TF_NEEDSYN)
2307 tp->t_flags |= TF_DELACK;
2308 else
2309 tp->t_flags |= TF_ACKNOW;
2310 tp->rcv_nxt++;
2311 }
2312 switch (tp->t_state) {

2314 /*
2315 * In SYN_RECEIVED and ESTABLISHED STATES
2316 * enter the CLOSE_WAIT state.
2317 */
2318 case TCPS_SYN_RECEIVED:
2319 tp->t_starttime = ticks;
2320 /* FALLTHROUGH */
2321 case TCPS_ESTABLISHED:
2322 tp->t_state = TCPS_CLOSE_WAIT;
2323 break;
2325 /*
2326 * If still in FIN_WAIT_1 STATE, our FIN has not been acked,
2327 * so enter the CLOSING state.
2328 */
2329 case TCPS_FIN_WAIT_1:
2330 tp->t_state = TCPS_CLOSING;
2331 break;
2333 /*
2334 * In FIN_WAIT_2 state enter the TIME_WAIT state,
2335 * starting the time-wait timer, turning off the other
2336 * standard timers.
2337 */
2338 case TCPS_FIN_WAIT_2:
2339 KASSERT(headlocked == 1, ("%s: dodata: "
2340 "TCP_FIN_WAIT_2: head not locked", __func__));
2341 tcp_twstart(tp);
2342 INP_INFO_WUNLOCK(&tcbinfo);
2343 return;
2344 }
2345 }
2346 INP_INFO_WUNLOCK(&tcbinfo);
2347 headlocked = 0;
2348 #ifdef TCPDEBUG
2349 if (so->so_options & SO_DEBUG)
2350 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
2351 &tcp_savetcp, 0);
2352 #endif
2354 /*
2355 * Return any desired output.
2356 */
2357 if (needoutput || (tp->t_flags & TF_ACKNOW))
2358 (void) tcp_output(tp);
2360 check_delack:
2361 KASSERT(headlocked == 0, ("%s: check_delack: head locked",
2362 __func__));
2363 INP_INFO_UNLOCK_ASSERT(&tcbinfo);
2364 INP_WLOCK_ASSERT(tp->t_inpcb);
2365 if (tp->t_flags & TF_DELACK) {
2366 tp->t_flags &= ~TF_DELACK;
2367 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
2368 }
2369 INP_WUNLOCK(tp->t_inpcb);
2370 return;
2372 dropafterack:
2373 KASSERT(headlocked, ("%s: dropafterack: head not locked", __func__));
2374 /*
2375 * Generate an ACK dropping incoming segment if it occupies
2376 * sequence space, where the ACK reflects our state.
2377 *
2378 * We can now skip the test for the RST flag since all
2379 * paths to this code happen after packets containing
2380 * RST have been dropped.
2381 *
2382 * In the SYN-RECEIVED state, don't send an ACK unless the
2383 * segment we received passes the SYN-RECEIVED ACK test.
2384 * If it fails send a RST. This breaks the loop in the
2385 * "LAND" DoS attack, and also prevents an ACK storm
2386 * between two listening ports that have been sent forged
2387 * SYN segments, each with the source address of the other.
2388 */
2389 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
2390 (SEQ_GT(tp->snd_una, th->th_ack) ||
2391 SEQ_GT(th->th_ack, tp->snd_max))) {
2392 rstreason = BANDLIM_RST_OPENPORT;
2393 goto dropwithreset;
2394 }
2395 #ifdef TCPDEBUG
2396 if (so->so_options & SO_DEBUG)
2397 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
2398 &tcp_savetcp, 0);
2399 #endif
2400 KASSERT(headlocked, ("%s: headlocked should be 1", __func__));
2401 INP_INFO_WUNLOCK(&tcbinfo);
2402 tp->t_flags |= TF_ACKNOW;
2403 (void) tcp_output(tp);
2404 INP_WUNLOCK(tp->t_inpcb);
2405 m_freem(m);
2406 return;
2408 dropwithreset:
2409 KASSERT(headlocked, ("%s: dropwithreset: head not locked", __func__));
2410 INP_INFO_WUNLOCK(&tcbinfo);
2411 headlocked = 0;
2412 if (tp != NULL) {
2413 tcp_dropwithreset(m, th, tp, tlen, rstreason);
2414 INP_WUNLOCK(tp->t_inpcb);
2415 } else
2416 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
2417 return;
2419 drop:
2420 /*
2421 * Drop space held by incoming segment and return.
2422 */
2423 #ifdef TCPDEBUG
2424 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
2425 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
2426 &tcp_savetcp, 0);
2427 #endif
2428 if (tp != NULL)
2429 INP_WUNLOCK(tp->t_inpcb);
2430 if (headlocked)
2431 INP_INFO_WUNLOCK(&tcbinfo);
2432 m_freem(m);
2433 return;
2434 }
2435 /*
2436 * Issue RST and make ACK acceptable to originator of segment.
2437 * The mbuf must still include the original packet header.
2438 * tp may be NULL.
2439 */
2440 static void
2441 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
2442 int tlen, int rstreason)
2443 {
2444 struct ip *ip;
2445 #ifdef INET6
2446 struct ip6_hdr *ip6;
2447 #endif

2449 if (tp != NULL) {
2450 INP_WLOCK_ASSERT(tp->t_inpcb);
2451 }
2453 /* Don't bother if destination was broadcast/multicast. */
2454 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
2455 goto drop;
2456 #ifdef INET6
2457 if (mtod(m, struct ip *)->ip_v == 6) {
2458 ip6 = mtod(m, struct ip6_hdr *);
2459 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
2460 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
2461 goto drop;
2462 /* IPv6 anycast check is done at tcp6_input() */
2463 } else
2464 #endif
2465 {
2466 ip = mtod(m, struct ip *);
2467 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
2468 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
2469 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
2470 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
2471 goto drop;
2472 }
2474 /* Perform bandwidth limiting. */
2475 if (badport_bandlim(rstreason) < 0)
2476 goto drop;

2478 /* tcp_respond consumes the mbuf chain. */
2479 if (th->th_flags & TH_ACK) {
2480 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
2481 th->th_ack, TH_RST);
2482 } else {
2483 if (th->th_flags & TH_SYN)
2484 tlen++;
2485 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
2486 (tcp_seq)0, TH_RST|TH_ACK);
2487 }
2488 return;
2489 drop:
2490 m_freem(m);
2491 return;
2492 }
2493 /*
2494 * Parse TCP options and place in tcpopt.
2495 */
2496 static void
2497 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
2498 {
2499 int opt, optlen;

2501 to->to_flags = 0;
2502 for (; cnt > 0; cnt -= optlen, cp += optlen) {
2503 opt = cp[0];
2504 if (opt == TCPOPT_EOL)
2505 break;
2506 if (opt == TCPOPT_NOP)
2507 optlen = 1;
2508 else {
2509 if (cnt < 2)
2510 break;
2511 optlen = cp[1];
2512 if (optlen < 2 || optlen > cnt)
2513 break;
2514 }
2515 switch (opt) {
2516 case TCPOPT_MAXSEG:
2517 if (optlen != TCPOLEN_MAXSEG)
2518 continue;
2519 if (!(flags & TO_SYN))
2520 continue;
2521 to->to_flags |= TOF_MSS;
2522 bcopy((char *)cp + 2,
2523 (char *)&to->to_mss, sizeof(to->to_mss));
2524 to->to_mss = ntohs(to->to_mss);
2525 break;
2526 case TCPOPT_WINDOW:
2527 if (optlen != TCPOLEN_WINDOW)
2528 continue;
2529 if (!(flags & TO_SYN))
2530 continue;
2531 to->to_flags |= TOF_SCALE;
2532 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
2533 break;
2534 case TCPOPT_TIMESTAMP:
2535 if (optlen != TCPOLEN_TIMESTAMP)
2536 continue;
2537 to->to_flags |= TOF_TS;
2538 bcopy((char *)cp + 2,
2539 (char *)&to->to_tsval, sizeof(to->to_tsval));
2540 to->to_tsval = ntohl(to->to_tsval);
2541 bcopy((char *)cp + 6,
2542 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
2543 to->to_tsecr = ntohl(to->to_tsecr);
2544 break;
2545 #ifdef TCP_SIGNATURE
2546 /*
2547 * XXX In order to reply to a host which has set the
2548 * TCP_SIGNATURE option in its initial SYN, we have to
2549 * record the fact that the option was observed here
2550 * for the syncache code to perform the correct response.
2551 */
2552 case TCPOPT_SIGNATURE:
2553 if (optlen != TCPOLEN_SIGNATURE)
2554 continue;
2555 to->to_flags |= TOF_SIGNATURE;
2556 to->to_signature = cp + 2;
2557 break;
2558 #endif
2559 case TCPOPT_SACK_PERMITTED:
2560 if (optlen != TCPOLEN_SACK_PERMITTED)
2561 continue;
2562 if (!(flags & TO_SYN))
2563 continue;
2564 if (!tcp_do_sack)
2565 continue;
2566 to->to_flags |= TOF_SACKPERM;
2567 break;
2568 case TCPOPT_SACK:
2569 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
2570 continue;
2571 if (flags & TO_SYN)
2572 continue;
2573 to->to_flags |= TOF_SACK;
2574 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
2575 to->to_sacks = cp + 2;
2576 tcpstat.tcps_sack_rcv_blocks++;
2577 break;
2578 default:
2579 continue;
2580 }
2581 }
2582 }
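#if 0
/*
 * Usage sketch (added; not part of the original file).  A caller that
 * has located the option bytes of a SYN segment could decode them as
 * below; "optp" and "optlen" are hypothetical names for values the
 * real callers derive from the TCP header length.
 */
	struct tcpopt to;

	tcp_dooptions(&to, optp, optlen, TO_SYN);
	if (to.to_flags & TOF_MSS)
		log(LOG_DEBUG, "peer offered MSS %u\n", to.to_mss);
	if (to.to_flags & TOF_SCALE)
		log(LOG_DEBUG, "peer window shift %u\n", to.to_wscale);
#endif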
2584 /*
2585 * Pull out of band byte out of a segment so
2586 * it doesn't appear in the user's data queue.
2587 * It is still reflected in the segment length for
2588 * sequencing purposes.
2589 */
2590 static void
2591 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
2592 int off)
2593 {
2594 int cnt = off + th->th_urp - 1;

2596 while (cnt >= 0) {
2597 if (m->m_len > cnt) {
2598 char *cp = mtod(m, caddr_t) + cnt;
2599 struct tcpcb *tp = sototcpcb(so);

2601 INP_WLOCK_ASSERT(tp->t_inpcb);

2603 tp->t_iobc = *cp;
2604 tp->t_oobflags |= TCPOOB_HAVEDATA;
2605 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
2606 m->m_len--;
2607 if (m->m_flags & M_PKTHDR)
2608 m->m_pkthdr.len--;
2609 return;
2610 }
2611 cnt -= m->m_len;
2612 m = m->m_next;
2613 if (m == NULL)
2614 break;
2615 }
2616 panic("tcp_pulloutofband");
2617 }
2619 /*
2620 * Collect new round-trip time estimate
2621 * and update averages and current timeout.
2622 */
2623 static void
2624 tcp_xmit_timer(struct tcpcb *tp, int rtt)
2625 {
2626 int delta;

2628 INP_WLOCK_ASSERT(tp->t_inpcb);

2630 tcpstat.tcps_rttupdated++;
2631 tp->t_rttupdated++;
2632 if (tp->t_srtt != 0) {
2633 /*
2634 * srtt is stored as fixed point with 5 bits after the
2635 * binary point (i.e., scaled by 32). The following magic
2636 * is equivalent to the smoothing algorithm in rfc793 with
2637 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
2638 * point). Adjust rtt to origin 0.
2639 */
2640 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
2641 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));

2643 if ((tp->t_srtt += delta) <= 0)
2644 tp->t_srtt = 1;
2646 /*
2647 * We accumulate a smoothed rtt variance (actually, a
2648 * smoothed mean difference), then set the retransmit
2649 * timer to smoothed rtt + 4 times the smoothed variance.
2650 * rttvar is stored as fixed point with 4 bits after the
2651 * binary point (scaled by 16). The following is
2652 * equivalent to rfc793 smoothing with an alpha of .75
2653 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
2654 * rfc793's wired-in beta.
2655 */
2656 if (delta < 0)
2657 delta = -delta;
2658 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
2659 if ((tp->t_rttvar += delta) <= 0)
2660 tp->t_rttvar = 1;
2661 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
2662 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2663 } else {
2664 /*
2665 * No rtt measurement yet - use the unsmoothed rtt.
2666 * Set the variance to half the rtt (so our first
2667 * retransmit happens at 3*rtt).
2668 */
2669 tp->t_srtt = rtt << TCP_RTT_SHIFT;
2670 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
2671 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2672 }
2673 tp->t_rtttime = 0;
2674 tp->t_rxtshift = 0;

2676 /*
2677 * the retransmit should happen at rtt + 4 * rttvar.
2678 * Because of the way we do the smoothing, srtt and rttvar
2679 * will each average +1/2 tick of bias. When we compute
2680 * the retransmit timer, we want 1/2 tick of rounding and
2681 * 1 extra tick because of +-1/2 tick uncertainty in the
2682 * firing of the timer. The bias will give us exactly the
2683 * 1.5 tick we need. But, because the bias is
2684 * statistical, we have to test that we don't drop below
2685 * the minimum feasible timer (which is 2 ticks).
2686 */
2687 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
2688 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
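/*
 * Worked example (added): feeding rtt = 9 ticks into an empty
 * estimator takes the else-branch above: t_srtt = 9 << 5 = 288
 * (9 ticks scaled by 32) and t_rttvar = 9 << 3 = 72 (4.5 ticks scaled
 * by 16), so the first REXMT value is about srtt + 4 * rttvar =
 * 9 + 18 = 27 ticks, i.e. 3 * rtt as the comment promises.
 */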
2690 /*
2691 * We received an ack for a packet that wasn't retransmitted;
2692 * it is probably safe to discard any error indications we've
2693 * received recently. This isn't quite right, but close enough
2694 * for now (a route might have failed after we sent a segment,
2695 * and the return path might not be symmetrical).
2696 */
2697 tp->t_softerror = 0;
2698 }
2700 /*
2701 * Determine a reasonable value for maxseg size.
2702 * If the route is known, check route for mtu.
2703 * If none, use an mss that can be handled on the outgoing
2704 * interface without forcing IP to fragment; if bigger than
2705 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
2706 * to utilize large mbufs. If no route is found, route has no mtu,
2707 * or the destination isn't local, use a default, hopefully conservative
2708 * size (usually 512 or the default IP max size, but no more than the mtu
2709 * of the interface), as we can't discover anything about intervening
2710 * gateways or networks. We also initialize the congestion/slow start
2711 * window to be a single segment if the destination isn't local.
2712 * While looking at the routing entry, we also initialize other path-dependent
2713 * parameters from pre-set or cached values in the routing entry.
2714 *
2715 * Also take into account the space needed for options that we
2716 * send regularly. Make maxseg shorter by that amount to assure
2717 * that we can send maxseg amount of data even when the options
2718 * are present. Store the upper limit of the length of options plus
2719 * data in maxopd.
2720 *
2721 * In case of T/TCP, we call this routine during implicit connection
2722 * setup as well (offer = -1), to initialize maxseg from the cached
2723 * MSS of our peer.
2724 *
2725 * NOTE that this routine is only called when we process an incoming
2726 * segment. Outgoing SYN/ACK MSS settings are handled in tcp_mssopt().
2727 */
2728 void
2729 tcp_mss(struct tcpcb *tp, int offer)
2730 {
2731 int rtt, mss;
2732 u_long bufsize;
2733 u_long maxmtu;
2734 struct inpcb *inp = tp->t_inpcb;
2735 struct socket *so;
2736 struct hc_metrics_lite metrics;
2737 int origoffer = offer;
2738 int mtuflags = 0;
2739 #ifdef INET6
2740 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
2741 size_t min_protoh = isipv6 ?
2742 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
2743 sizeof (struct tcpiphdr);
2744 #else
2745 const size_t min_protoh = sizeof(struct tcpiphdr);
2746 #endif

2748 INP_WLOCK_ASSERT(tp->t_inpcb);
2750 /* Initialize. */
2751 #ifdef INET6
2752 if (isipv6) {
2753 maxmtu = tcp_maxmtu6(&inp->inp_inc, &mtuflags);
2754 tp->t_maxopd = tp->t_maxseg = tcp_v6mssdflt;
2755 } else
2756 #endif
2757 {
2758 maxmtu = tcp_maxmtu(&inp->inp_inc, &mtuflags);
2759 tp->t_maxopd = tp->t_maxseg = tcp_mssdflt;
2760 }
2762 /*
2763 * No route to sender, stay with default mss and return.
2764 */
2765 if (maxmtu == 0)
2766 return;

2768 /* What have we got? */
2769 switch (offer) {
2770 case 0:
2771 /*
2772 * Offer == 0 means that there was no MSS on the SYN
2773 * segment, in this case we use tcp_mssdflt as
2774 * already assigned to t_maxopd above.
2775 */
2776 offer = tp->t_maxopd;
2777 break;

2779 case -1:
2780 /*
2781 * Offer == -1 means that we didn't receive SYN yet.
2782 */
2783 /* FALLTHROUGH */

2785 default:
2786 /*
2787 * Prevent DoS attack with too small MSS. Round up
2788 * to at least minmss.
2789 */
2790 offer = max(offer, tcp_minmss);
2791 /*
2792 * Sanity check: make sure that maxopd will be large
2793 * enough to allow some data on segments even if all
2794 * the option space is used (40 bytes). Otherwise
2795 * funny things may happen in tcp_output.
2796 */
2797 offer = max(offer, 64);
2798 }
2800 /*
2801 * rmx information is now retrieved from tcp_hostcache.
2802 */
2803 tcp_hc_get(&inp->inp_inc, &metrics);

2805 /*
2806 * If there's a discovered mtu in the tcp hostcache, use it;
2807 * else, use the link mtu.
2808 */
2809 if (metrics.rmx_mtu)
2810 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
2811 else {
2812 #ifdef INET6
2813 if (isipv6) {
2814 mss = maxmtu - min_protoh;
2815 if (!path_mtu_discovery &&
2816 !in6_localaddr(&inp->in6p_faddr))
2817 mss = min(mss, tcp_v6mssdflt);
2818 } else
2819 #endif
2820 {
2821 mss = maxmtu - min_protoh;
2822 if (!path_mtu_discovery &&
2823 !in_localaddr(inp->inp_faddr))
2824 mss = min(mss, tcp_mssdflt);
2825 }
2826 }
2827 mss = min(mss, offer);
2829 /*
2830 * maxopd stores the maximum length of data AND options
2831 * in a segment; maxseg is the amount of data in a normal
2832 * segment. We need to store this value (maxopd) apart
2833 * from maxseg, because now every segment carries options
2834 * and thus we normally have somewhat less data in segments.
2835 */
2836 tp->t_maxopd = mss;

2838 /*
2839 * origoffer==-1 indicates that no segments were received yet.
2840 * In this case we just guess.
2841 */
2842 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
2843 (origoffer == -1 ||
2844 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
2845 mss -= TCPOLEN_TSTAMP_APPA;

2847 #if (MCLBYTES & (MCLBYTES - 1)) == 0
2848 if (mss > MCLBYTES)
2849 mss &= ~(MCLBYTES-1);
2850 #else
2851 if (mss > MCLBYTES)
2852 mss = mss / MCLBYTES * MCLBYTES;
2853 #endif
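/*
 * Worked example (added): on an Ethernet path maxmtu = 1500 and the
 * IPv4 min_protoh = 40, so mss starts at 1460; if both sides use
 * timestamps, subtracting TCPOLEN_TSTAMP_APPA (12) leaves 1448.  With
 * MCLBYTES = 2048 no rounding occurs, since mss < MCLBYTES.
 */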
2856 /*
2857 * If there's a pipesize, change the socket buffer to that size,
2858 * don't change if sb_hiwat is different than default (then it
2859 * has been changed on purpose with setsockopt).
2860 * Make the socket buffers an integral number of mss units;
2861 * if the mss is larger than the socket buffer, decrease the mss.
2862 */
2863 so = inp->inp_socket;
2864 SOCKBUF_LOCK(&so->so_snd);
2865 if ((so->so_snd.sb_hiwat == tcp_sendspace) && metrics.rmx_sendpipe)
2866 bufsize = metrics.rmx_sendpipe;
2867 else
2868 bufsize = so->so_snd.sb_hiwat;
2869 if (bufsize < mss)
2870 mss = bufsize;
2871 else {
2872 bufsize = roundup(bufsize, mss);
2873 if (bufsize > sb_max)
2874 bufsize = sb_max;
2875 if (bufsize > so->so_snd.sb_hiwat)
2876 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
2877 }
2878 SOCKBUF_UNLOCK(&so->so_snd);
2879 tp->t_maxseg = mss;
2881 SOCKBUF_LOCK(&so->so_rcv);
2882 if ((so->so_rcv.sb_hiwat == tcp_recvspace) && metrics.rmx_recvpipe)
2883 bufsize = metrics.rmx_recvpipe;
2884 else
2885 bufsize = so->so_rcv.sb_hiwat;
2886 if (bufsize > mss) {
2887 bufsize = roundup(bufsize, mss);
2888 if (bufsize > sb_max)
2889 bufsize = sb_max;
2890 if (bufsize > so->so_rcv.sb_hiwat)
2891 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
2892 }
2893 SOCKBUF_UNLOCK(&so->so_rcv);
2894 /*
2895 * While we're here, check the others too.
2896 */
2897 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
2898 tp->t_srtt = rtt;
2899 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
2900 tcpstat.tcps_usedrtt++;
2901 if (metrics.rmx_rttvar) {
2902 tp->t_rttvar = metrics.rmx_rttvar;
2903 tcpstat.tcps_usedrttvar++;
2904 } else {
2905 /* default variation is +- 1 rtt */
2906 tp->t_rttvar =
2907 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
2908 }
2909 TCPT_RANGESET(tp->t_rxtcur,
2910 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
2911 tp->t_rttmin, TCPTV_REXMTMAX);
2912 }
2913 if (metrics.rmx_ssthresh) {
2914 /*
2915 * There's some sort of gateway or interface
2916 * buffer limit on the path. Use this to set
2917 * the slow start threshold, but set the
2918 * threshold to no less than 2*mss.
2919 */
2920 tp->snd_ssthresh = max(2 * mss, metrics.rmx_ssthresh);
2921 tcpstat.tcps_usedssthresh++;
2922 }
2923 if (metrics.rmx_bandwidth)
2924 tp->snd_bandwidth = metrics.rmx_bandwidth;
2926 /*
2927 * Set the slow-start flight size depending on whether this
2928 * is a local network or not.
2929 *
2930 * Extend this so we cache the cwnd too and retrieve it here.
2931 * Make cwnd even bigger than RFC3390 suggests but only if we
2932 * have previous experience with the remote host. Be careful
2933 * not to make cwnd bigger than the remote receive window or our
2934 * own send socket buffer. Maybe put some additional upper bound
2935 * on the retrieved cwnd. Should do incremental updates to
2936 * hostcache when cwnd collapses so the next connection doesn't
2937 * overload the path again.
2938 *
2939 * RFC3390 says only do this if SYN or SYN/ACK didn't get lost.
2940 * We currently check only in syncache_socket for that.
2941 */
2942 #define TCP_METRICS_CWND
2943 #ifdef TCP_METRICS_CWND
2944 if (metrics.rmx_cwnd)
2945 tp->snd_cwnd = max(mss,
2946 min(metrics.rmx_cwnd / 2,
2947 min(tp->snd_wnd, so->so_snd.sb_hiwat)));
2948 else
2949 #endif
2950 if (tcp_do_rfc3390)
2951 tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380));
2952 #ifdef INET6
2953 else if ((isipv6 && in6_localaddr(&inp->in6p_faddr)) ||
2954 (!isipv6 && in_localaddr(inp->inp_faddr)))
2955 #else
2956 else if (in_localaddr(inp->inp_faddr))
2957 #endif
2958 tp->snd_cwnd = mss * ss_fltsz_local;
2959 else
2960 tp->snd_cwnd = mss * ss_fltsz;
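/*
 * Worked example (added): with mss = 1460 the RFC3390 branch above
 * gives min(4 * 1460, max(2 * 1460, 4380)) = 4380 bytes, an initial
 * window of 3 segments; with mss = 500 it gives min(2000, 4380) =
 * 2000 bytes, i.e. 4 segments.
 */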
2962 /* Check the interface for TSO capabilities. */
2963 if (mtuflags & CSUM_TSO)
2964 tp->t_flags |= TF_TSO;
2965 }
2967 /*
2968 * Determine the MSS option to send on an outgoing SYN.
2969 */
2970 int
2971 tcp_mssopt(struct in_conninfo *inc)
2972 {
2973 int mss = 0;
2974 u_long maxmtu = 0;
2975 u_long thcmtu = 0;
2976 size_t min_protoh;

2978 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));

2980 #ifdef INET6
2981 if (inc->inc_flags & INC_ISIPV6) {
2982 mss = tcp_v6mssdflt;
2983 maxmtu = tcp_maxmtu6(inc, NULL);
2984 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
2985 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
2986 } else
2987 #endif
2988 {
2989 mss = tcp_mssdflt;
2990 maxmtu = tcp_maxmtu(inc, NULL);
2991 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
2992 min_protoh = sizeof(struct tcpiphdr);
2993 }
2994 if (maxmtu && thcmtu)
2995 mss = min(maxmtu, thcmtu) - min_protoh;
2996 else if (maxmtu || thcmtu)
2997 mss = max(maxmtu, thcmtu) - min_protoh;

2999 return (mss);
3000 }
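/*
 * Worked example (added): for an IPv4 peer on a 1500-byte MTU link
 * with no hostcache entry (thcmtu = 0), the else-if branch yields
 * mss = max(1500, 0) - 40 = 1460, the classic Ethernet value
 * advertised in the SYN.
 */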
3003 /*
3004 * When a partial ack arrives, force the retransmission of the
3005 * next unacknowledged segment. Do not clear tp->t_dupacks.
3006 * By setting snd_nxt to th_ack, this forces the retransmission timer
3007 * to be started again.
3008 */
3009 static void
3010 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
3011 {
3012 tcp_seq onxt = tp->snd_nxt;
3013 u_long ocwnd = tp->snd_cwnd;

3015 INP_WLOCK_ASSERT(tp->t_inpcb);
3017 tcp_timer_activate(tp, TT_REXMT, 0);
3018 tp->t_rtttime = 0;
3019 tp->snd_nxt = th->th_ack;
3020 /*
3021 * Set snd_cwnd to one segment beyond acknowledged offset.
3022 * (tp->snd_una has not yet been updated when this function is called.)
3023 */
3024 tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una);
3025 tp->t_flags |= TF_ACKNOW;
3026 (void) tcp_output(tp);
3027 tp->snd_cwnd = ocwnd;
3028 if (SEQ_GT(onxt, tp->snd_nxt))
3029 tp->snd_nxt = onxt;
3030 /*
3031 * Partial window deflation. Relies on fact that tp->snd_una
3032 * not updated yet.
3033 */
3034 if (tp->snd_cwnd > th->th_ack - tp->snd_una)
3035 tp->snd_cwnd -= th->th_ack - tp->snd_una;
3036 else
3037 tp->snd_cwnd = 0;
3038 tp->snd_cwnd += tp->t_maxseg;
3039 }
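/*
 * Worked example (added): suppose snd_una = 1000, th_ack = 3000 and
 * t_maxseg = 1460.  cwnd is clamped to 1460 + 2000 = 3460 bytes just
 * long enough for tcp_output() to retransmit one segment at the hole,
 * then restored; the deflation above subtracts the 2000 newly acked
 * bytes and adds back one maxseg, per the NewReno partial ack rule.
 */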