2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
3 * The Regents of the University of California. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95
33 #include "opt_ipfw.h" /* for ipfw_fwd */
35 #include "opt_inet6.h"
36 #include "opt_ipsec.h"
38 #include "opt_tcpdebug.h"
39 #include "opt_tcp_sack.h"
41 #include <sys/param.h>
42 #include <sys/kernel.h>
43 #include <sys/malloc.h>
45 #include <sys/proc.h> /* for proc0 declaration */
46 #include <sys/protosw.h>
47 #include <sys/signalvar.h>
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/sysctl.h>
51 #include <sys/syslog.h>
52 #include <sys/systm.h>
54 #include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */
59 #include <net/route.h>
61 #include <netinet/in.h>
62 #include <netinet/in_pcb.h>
63 #include <netinet/in_systm.h>
64 #include <netinet/in_var.h>
65 #include <netinet/ip.h>
66 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
67 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
68 #include <netinet/ip_var.h>
69 #include <netinet/ip_options.h>
70 #include <netinet/ip6.h>
71 #include <netinet/icmp6.h>
72 #include <netinet6/in6_pcb.h>
73 #include <netinet6/ip6_var.h>
74 #include <netinet6/nd6.h>
75 #include <netinet/tcp.h>
76 #include <netinet/tcp_fsm.h>
77 #include <netinet/tcp_seq.h>
78 #include <netinet/tcp_timer.h>
79 #include <netinet/tcp_var.h>
80 #include <netinet6/tcp6_var.h>
81 #include <netinet/tcpip.h>
83 #include <netinet/tcp_debug.h>
87 #include <netipsec/ipsec.h>
88 #include <netipsec/ipsec6.h>
92 #include <netinet6/ipsec.h>
93 #include <netinet6/ipsec6.h>
94 #include <netkey/key.h>
97 #include <machine/in_cksum.h>
99 #include <security/mac/mac_framework.h>
101 static const int tcprexmtthresh = 3;
103 struct tcpstat tcpstat;
104 SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
105 &tcpstat , tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
107 static int tcp_log_in_vain = 0;
108 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
109 &tcp_log_in_vain, 0, "Log all incoming TCP segments to closed ports");
111 static int blackhole = 0;
112 SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
113 &blackhole, 0, "Do not send RST on segments to closed ports");
115 int tcp_delack_enabled = 1;
116 SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
117 &tcp_delack_enabled, 0,
118 "Delay ACK to try and piggyback it onto a data packet");
120 static int drop_synfin = 0;
121 SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
122 &drop_synfin, 0, "Drop TCP packets with SYN+FIN set");
124 static int tcp_do_rfc3042 = 1;
125 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
126 &tcp_do_rfc3042, 0, "Enable RFC 3042 (Limited Transmit)");
128 static int tcp_do_rfc3390 = 1;
129 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
131 "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");
133 static int tcp_insecure_rst = 0;
134 SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW,
135 &tcp_insecure_rst, 0,
136 "Follow the old (insecure) criteria for accepting RST packets");
138 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0,
139 "TCP Segment Reassembly Queue");
141 static int tcp_reass_maxseg = 0;
142 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RDTUN,
143 &tcp_reass_maxseg, 0,
144 "Global maximum number of TCP Segments in Reassembly Queue");
146 int tcp_reass_qsize = 0;
147 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD,
149 "Global number of TCP Segments currently in Reassembly Queue");
151 static int tcp_reass_maxqlen = 48;
152 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxqlen, CTLFLAG_RW,
153 &tcp_reass_maxqlen, 0,
154 "Maximum number of TCP Segments per individual Reassembly Queue");
156 static int tcp_reass_overflows = 0;
157 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows, CTLFLAG_RD,
158 &tcp_reass_overflows, 0,
159 "Global number of TCP Segment Reassembly Queue Overflows");
161 int tcp_do_autorcvbuf = 1;
162 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
163 &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing");
165 int tcp_autorcvbuf_inc = 16*1024;
166 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
167 &tcp_autorcvbuf_inc, 0,
168 "Incrementor step size of automatic receive buffer");
170 int tcp_autorcvbuf_max = 256*1024;
171 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
172 &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer");
174 struct inpcbhead tcb;
175 #define tcb6 tcb /* for KAME src sync over BSD*'s */
176 struct inpcbinfo tcbinfo;
177 struct mtx *tcbinfo_mtx;
179 static void tcp_dooptions(struct tcpopt *, u_char *, int, int);
180 static int tcp_do_segment(struct mbuf *, struct tcphdr *,
181 struct socket *, struct tcpcb *, int, int);
182 static void tcp_dropwithreset(struct mbuf *, struct tcphdr *,
183 struct tcpcb *, int, int);
184 static void tcp_pulloutofband(struct socket *,
185 struct tcphdr *, struct mbuf *, int);
186 static int tcp_reass(struct tcpcb *, struct tcphdr *, int *,
188 static void tcp_xmit_timer(struct tcpcb *, int);
189 static void tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
190 static int tcp_timewait(struct inpcb *, struct tcpopt *,
191 struct tcphdr *, struct mbuf *, int);
193 /* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
195 #define ND6_HINT(tp) \
197 if ((tp) && (tp)->t_inpcb && \
198 ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
199 nd6_nud_hint(NULL, NULL, 0); \
206 * Indicate whether this ack should be delayed. We can delay the ack if
207 * - there is no delayed ack timer in progress and
208 * - our last ack wasn't a 0-sized window. We never want to delay
209 * the ack that opens up a 0-sized window and
210 * - delayed acks are enabled or
211 * - this is a half-synchronized T/TCP connection.
213 #define DELAY_ACK(tp) \
214 ((!callout_active(tp->tt_delack) && \
215 (tp->t_flags & TF_RXWIN0SENT) == 0) && \
216 (tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
218 /* Initialize TCP reassembly queue */
220 tcp_reass_zone_change(void *tag)
223 tcp_reass_maxseg = nmbclusters / 16;
224 uma_zone_set_max(tcp_reass_zone, tcp_reass_maxseg);
227 uma_zone_t tcp_reass_zone;
231 tcp_reass_maxseg = nmbclusters / 16;
232 TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
234 tcp_reass_zone = uma_zcreate("tcpreass", sizeof (struct tseg_qent),
235 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
236 uma_zone_set_max(tcp_reass_zone, tcp_reass_maxseg);
237 EVENTHANDLER_REGISTER(nmbclusters_change,
238 tcp_reass_zone_change, NULL, EVENTHANDLER_PRI_ANY);
242 tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
245 struct tseg_qent *p = NULL;
246 struct tseg_qent *nq;
247 struct tseg_qent *te = NULL;
248 struct socket *so = tp->t_inpcb->inp_socket;
251 INP_LOCK_ASSERT(tp->t_inpcb);
254 * XXX: tcp_reass() is rather inefficient with its data structures
255 * and should be rewritten (see NetBSD for optimizations). While
256 * doing that it should move to its own file tcp_reass.c.
260 * Call with th==NULL after become established to
261 * force pre-ESTABLISHED data up to user socket.
267 * Limit the number of segments in the reassembly queue to prevent
268 * holding on to too many segments (and thus running out of mbufs).
269 * Make sure to let the missing segment through which caused this
270 * queue. Always keep one global queue entry spare to be able to
271 * process the missing segment.
273 if (th->th_seq != tp->rcv_nxt &&
274 (tcp_reass_qsize + 1 >= tcp_reass_maxseg ||
275 tp->t_segqlen >= tcp_reass_maxqlen)) {
276 tcp_reass_overflows++;
277 tcpstat.tcps_rcvmemdrop++;
284 * Allocate a new queue entry. If we can't, or hit the zone limit
287 te = uma_zalloc(tcp_reass_zone, M_NOWAIT);
289 tcpstat.tcps_rcvmemdrop++;
298 * Find a segment which begins after this one does.
300 LIST_FOREACH(q, &tp->t_segq, tqe_q) {
301 if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
307 * If there is a preceding segment, it may provide some of
308 * our data already. If so, drop the data from the incoming
309 * segment. If it provides all of our data, drop us.
313 /* conversion to int (in i) handles seq wraparound */
314 i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
317 tcpstat.tcps_rcvduppack++;
318 tcpstat.tcps_rcvdupbyte += *tlenp;
320 uma_zfree(tcp_reass_zone, te);
324 * Try to present any queued data
325 * at the left window edge to the user.
326 * This is needed after the 3-WHS
329 goto present; /* ??? */
336 tcpstat.tcps_rcvoopack++;
337 tcpstat.tcps_rcvoobyte += *tlenp;
340 * While we overlap succeeding segments trim them or,
341 * if they are completely covered, dequeue them.
344 int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
347 if (i < q->tqe_len) {
348 q->tqe_th->th_seq += i;
354 nq = LIST_NEXT(q, tqe_q);
355 LIST_REMOVE(q, tqe_q);
357 uma_zfree(tcp_reass_zone, q);
363 /* Insert the new segment queue entry into place. */
366 te->tqe_len = *tlenp;
369 LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
371 LIST_INSERT_AFTER(p, te, tqe_q);
376 * Present data to user, advancing rcv_nxt through
377 * completed sequence space.
379 if (!TCPS_HAVEESTABLISHED(tp->t_state))
381 q = LIST_FIRST(&tp->t_segq);
382 if (!q || q->tqe_th->th_seq != tp->rcv_nxt)
384 SOCKBUF_LOCK(&so->so_rcv);
386 tp->rcv_nxt += q->tqe_len;
387 flags = q->tqe_th->th_flags & TH_FIN;
388 nq = LIST_NEXT(q, tqe_q);
389 LIST_REMOVE(q, tqe_q);
390 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
393 sbappendstream_locked(&so->so_rcv, q->tqe_m);
394 uma_zfree(tcp_reass_zone, q);
398 } while (q && q->tqe_th->th_seq == tp->rcv_nxt);
400 sorwakeup_locked(so);
405 * TCP input routine, follows pages 65-76 of the
406 * protocol specification dated September, 1981 very closely.
410 tcp6_input(struct mbuf **mp, int *offp, int proto)
412 struct mbuf *m = *mp;
413 struct in6_ifaddr *ia6;
415 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);
418 * draft-itojun-ipv6-tcp-to-anycast
419 * better place to put this in?
421 ia6 = ip6_getdstifaddr(m);
422 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
425 ip6 = mtod(m, struct ip6_hdr *);
426 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
427 (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
437 tcp_input(struct mbuf *m, int off0)
440 struct ip *ip = NULL;
442 struct inpcb *inp = NULL;
443 struct tcpcb *tp = NULL;
444 struct socket *so = NULL;
450 int rstreason = 0; /* For badport_bandlim accounting purposes */
451 #ifdef IPFIREWALL_FORWARD
452 struct m_tag *fwd_tag;
455 struct ip6_hdr *ip6 = NULL;
457 char ip6buf[INET6_ADDRSTRLEN];
459 const int isipv6 = 0;
461 struct tcpopt to; /* options in this segment */
465 * The size of tcp_saveipgen must be the size of the max ip header,
468 u_char tcp_saveipgen[IP6_HDR_LEN];
469 struct tcphdr tcp_savetcp;
474 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
478 tcpstat.tcps_rcvtotal++;
482 /* IP6_EXTHDR_CHECK() is already done at tcp6_input() */
483 ip6 = mtod(m, struct ip6_hdr *);
484 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
485 if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
486 tcpstat.tcps_rcvbadsum++;
489 th = (struct tcphdr *)((caddr_t)ip6 + off0);
492 * Be proactive about unspecified IPv6 address in source.
493 * As we use all-zero to indicate unbounded/unconnected pcb,
494 * unspecified IPv6 address can be used to confuse us.
496 * Note that packets with unspecified IPv6 destination is
497 * already dropped in ip6_input.
499 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
504 th = NULL; /* XXX: avoid compiler warning */
508 * Get IP and TCP header together in first mbuf.
509 * Note: IP leaves IP header in first mbuf.
511 if (off0 > sizeof (struct ip)) {
512 ip_stripoptions(m, (struct mbuf *)0);
513 off0 = sizeof(struct ip);
515 if (m->m_len < sizeof (struct tcpiphdr)) {
516 if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
518 tcpstat.tcps_rcvshort++;
522 ip = mtod(m, struct ip *);
523 ipov = (struct ipovly *)ip;
524 th = (struct tcphdr *)((caddr_t)ip + off0);
527 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
528 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
529 th->th_sum = m->m_pkthdr.csum_data;
531 th->th_sum = in_pseudo(ip->ip_src.s_addr,
533 htonl(m->m_pkthdr.csum_data +
536 th->th_sum ^= 0xffff;
538 ipov->ih_len = (u_short)tlen;
539 ipov->ih_len = htons(ipov->ih_len);
543 * Checksum extended TCP header and data.
545 len = sizeof (struct ip) + tlen;
546 bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
547 ipov->ih_len = (u_short)tlen;
548 ipov->ih_len = htons(ipov->ih_len);
549 th->th_sum = in_cksum(m, len);
552 tcpstat.tcps_rcvbadsum++;
555 /* Re-initialization for later version check */
556 ip->ip_v = IPVERSION;
560 * Check that TCP offset makes sense,
561 * pull out TCP options and adjust length. XXX
563 off = th->th_off << 2;
564 if (off < sizeof (struct tcphdr) || off > tlen) {
565 tcpstat.tcps_rcvbadoff++;
568 tlen -= off; /* tlen is used instead of ti->ti_len */
569 if (off > sizeof (struct tcphdr)) {
572 IP6_EXTHDR_CHECK(m, off0, off, );
573 ip6 = mtod(m, struct ip6_hdr *);
574 th = (struct tcphdr *)((caddr_t)ip6 + off0);
577 if (m->m_len < sizeof(struct ip) + off) {
578 if ((m = m_pullup(m, sizeof (struct ip) + off))
580 tcpstat.tcps_rcvshort++;
583 ip = mtod(m, struct ip *);
584 ipov = (struct ipovly *)ip;
585 th = (struct tcphdr *)((caddr_t)ip + off0);
588 optlen = off - sizeof (struct tcphdr);
589 optp = (u_char *)(th + 1);
591 thflags = th->th_flags;
594 * If the drop_synfin option is enabled, drop all packets with
595 * both the SYN and FIN bits set. This prevents e.g. nmap from
596 * identifying the TCP/IP stack.
598 * This is a violation of the TCP specification.
600 if (drop_synfin && (thflags & (TH_SYN|TH_FIN)) == (TH_SYN|TH_FIN))
604 * Convert TCP protocol specific fields to host format.
606 th->th_seq = ntohl(th->th_seq);
607 th->th_ack = ntohl(th->th_ack);
608 th->th_win = ntohs(th->th_win);
609 th->th_urp = ntohs(th->th_urp);
612 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
614 drop_hdrlen = off0 + off;
617 * Locate pcb for segment.
619 INP_INFO_WLOCK(&tcbinfo);
621 INP_INFO_WLOCK_ASSERT(&tcbinfo);
622 #ifdef IPFIREWALL_FORWARD
623 /* Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. */
624 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
626 if (fwd_tag != NULL && isipv6 == 0) { /* IPv6 support is not yet */
627 struct sockaddr_in *next_hop;
629 next_hop = (struct sockaddr_in *)(fwd_tag+1);
631 * Transparently forwarded. Pretend to be the destination.
632 * already got one like this?
634 inp = in_pcblookup_hash(&tcbinfo,
635 ip->ip_src, th->th_sport,
636 ip->ip_dst, th->th_dport,
637 0, m->m_pkthdr.rcvif);
639 /* It's new. Try to find the ambushing socket. */
640 inp = in_pcblookup_hash(&tcbinfo,
641 ip->ip_src, th->th_sport,
644 ntohs(next_hop->sin_port) :
649 /* Remove the tag from the packet. We don't need it anymore. */
650 m_tag_delete(m, fwd_tag);
652 #endif /* IPFIREWALL_FORWARD */
656 inp = in6_pcblookup_hash(&tcbinfo,
657 &ip6->ip6_src, th->th_sport,
658 &ip6->ip6_dst, th->th_dport,
663 inp = in_pcblookup_hash(&tcbinfo,
664 ip->ip_src, th->th_sport,
665 ip->ip_dst, th->th_dport,
670 #if defined(IPSEC) || defined(FAST_IPSEC)
672 if (isipv6 && inp != NULL && ipsec6_in_reject(m, inp)) {
674 ipsec6stat.in_polvio++;
679 if (inp != NULL && ipsec4_in_reject(m, inp)) {
681 ipsecstat.in_polvio++;
685 #endif /*IPSEC || FAST_IPSEC*/
688 * If the INPCB does not exist then all data in the incoming
689 * segment is discarded and an appropriate RST is sent back.
693 * Log communication attempts to ports that are not
696 if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
697 tcp_log_in_vain == 2) {
699 char dbuf[4*sizeof "123"], sbuf[4*sizeof "123"];
701 char dbuf[INET6_ADDRSTRLEN+2], sbuf[INET6_ADDRSTRLEN+2];
705 ip6_sprintf(ip6buf, &ip6->ip6_dst));
709 ip6_sprintf(ip6buf, &ip6->ip6_src));
714 strcpy(dbuf, inet_ntoa(ip->ip_dst));
715 strcpy(sbuf, inet_ntoa(ip->ip_src));
718 "Connection attempt to TCP %s:%d "
719 "from %s:%d flags:0x%02x\n",
720 dbuf, ntohs(th->th_dport), sbuf,
721 ntohs(th->th_sport), thflags);
724 * When blackholing do not respond with a RST but
725 * completely ignore the segment and drop it.
727 if ((blackhole == 1 && (thflags & TH_SYN)) ||
731 rstreason = BANDLIM_RST_CLOSEDPORT;
736 /* Check the minimum TTL for socket. */
737 if (inp->inp_ip_minttl != 0) {
739 if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
743 if (inp->inp_ip_minttl > ip->ip_ttl)
748 * A previous connection in TIMEWAIT state is supposed to catch
749 * stray or duplicate segments arriving late. If this segment
750 * was a legitimate new connection attempt the old INPCB gets
751 * removed and we can try again to find a listening socket.
753 if (inp->inp_vflag & INP_TIMEWAIT) {
754 if (thflags & TH_SYN)
755 tcp_dooptions(&to, optp, optlen, TO_SYN);
756 if (tcp_timewait(inp, &to, th, m, tlen))
758 /* tcp_timewait unlocks inp. */
759 INP_INFO_WUNLOCK(&tcbinfo);
763 * The TCPCB may no longer exist if the connection is winding
764 * down or it is in the CLOSED state. Either way we drop the
765 * segment and send an appropriate response.
770 rstreason = BANDLIM_RST_CLOSEDPORT;
773 if (tp->t_state == TCPS_CLOSED)
774 goto dropunlock; /* XXX: dropwithreset??? */
777 INP_LOCK_ASSERT(inp);
778 if (mac_check_inpcb_deliver(inp, m))
781 so = inp->inp_socket;
782 KASSERT(so != NULL, ("%s: so == NULL", __func__));
784 if (so->so_options & SO_DEBUG) {
785 ostate = tp->t_state;
787 bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
789 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
794 * When the socket is accepting connections (the INPCB is in LISTEN
795 * state) we look into the SYN cache if this is a new connection
796 * attempt or the completion of a previous one.
798 if (so->so_options & SO_ACCEPTCONN) {
799 struct in_conninfo inc;
801 bzero(&inc, sizeof(inc));
802 inc.inc_isipv6 = isipv6;
805 inc.inc6_faddr = ip6->ip6_src;
806 inc.inc6_laddr = ip6->ip6_dst;
810 inc.inc_faddr = ip->ip_src;
811 inc.inc_laddr = ip->ip_dst;
813 inc.inc_fport = th->th_sport;
814 inc.inc_lport = th->th_dport;
817 * If the state is LISTEN then ignore segment if it contains
818 * a RST. If the segment contains an ACK then it is bad and
819 * send a RST. If it does not contain a SYN then it is not
820 * interesting; drop it.
822 * If the state is SYN_RECEIVED (syncache) and seg contains
823 * an ACK, but not for our SYN/ACK, send a RST. If the seg
824 * contains a RST, check the sequence number to see if it
825 * is a valid reset segment.
827 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) != TH_SYN) {
828 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
830 * Parse the TCP options here because
831 * syncookies need access to the reflected
834 tcp_dooptions(&to, optp, optlen, 0);
835 if (!syncache_expand(&inc, &to, th, &so, m)) {
837 * No syncache entry, or ACK was not
838 * for our SYN/ACK. Send a RST.
840 tcpstat.tcps_badsyn++;
841 rstreason = BANDLIM_RST_OPENPORT;
846 * Could not complete 3-way handshake,
847 * connection is being closed down, and
848 * syncache has free'd mbuf.
851 INP_INFO_WUNLOCK(&tcbinfo);
855 * Socket is created in state SYN_RECEIVED.
856 * Continue processing segment.
863 * This is what would have happened in
864 * tcp_output() when the SYN,ACK was sent.
866 tp->snd_up = tp->snd_una;
867 tp->snd_max = tp->snd_nxt = tp->iss + 1;
868 tp->last_ack_sent = tp->rcv_nxt;
871 * Process the segment and the data it
872 * contains. tcp_do_segment() consumes
873 * the mbuf chain and unlocks the inpcb.
874 * XXX: The potential return value of
875 * TIME_WAIT nuked is supposed to be
878 if (tcp_do_segment(m, th, so, tp,
880 goto findpcb; /* TIME_WAIT nuked */
883 if (thflags & TH_RST) {
884 syncache_chkrst(&inc, th);
887 if (thflags & TH_ACK) {
888 syncache_badack(&inc);
889 tcpstat.tcps_badsyn++;
890 rstreason = BANDLIM_RST_OPENPORT;
897 * Segment's flags are (SYN) or (SYN|FIN).
901 * If deprecated address is forbidden,
902 * we do not accept SYN to deprecated interface
903 * address to prevent any new inbound connection from
904 * getting established.
905 * When we do not accept SYN, we send a TCP RST,
906 * with deprecated source address (instead of dropping
907 * it). We compromise it as it is much better for peer
908 * to send a RST, and RST will be the final packet
911 * If we do not forbid deprecated addresses, we accept
912 * the SYN packet. RFC2462 does not suggest dropping
914 * If we decipher RFC2462 5.5.4, it says like this:
915 * 1. use of deprecated addr with existing
916 * communication is okay - "SHOULD continue to be
918 * 2. use of it with new communication:
919 * (2a) "SHOULD NOT be used if alternate address
920 * with sufficient scope is available"
921 * (2b) nothing mentioned otherwise.
922 * Here we fall into (2b) case as we have no choice in
923 * our source address selection - we must obey the peer.
925 * The wording in RFC2462 is confusing, and there are
926 * multiple description text for deprecated address
927 * handling - worse, they are not exactly the same.
928 * I believe 5.5.4 is the best one, so we follow 5.5.4.
930 if (isipv6 && !ip6_use_deprecated) {
931 struct in6_ifaddr *ia6;
933 if ((ia6 = ip6_getdstifaddr(m)) &&
934 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
937 rstreason = BANDLIM_RST_OPENPORT;
943 * Basic sanity checks on incoming SYN requests:
945 * Don't bother responding if the destination was a
946 * broadcast according to RFC1122 4.2.3.10, p. 104.
948 * If it is from this socket, drop it, it must be forged.
950 * Note that it is quite possible to receive unicast
951 * link-layer packets with a broadcast IP address. Use
952 * in_broadcast() to find them.
954 if (m->m_flags & (M_BCAST|M_MCAST))
958 if (th->th_dport == th->th_sport &&
959 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src))
961 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
962 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
966 if (th->th_dport == th->th_sport &&
967 ip->ip_dst.s_addr == ip->ip_src.s_addr)
969 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
970 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
971 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
972 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
976 * SYN appears to be valid. Create compressed TCP state
979 if (so->so_qlen <= so->so_qlimit) {
981 if (so->so_options & SO_DEBUG)
982 tcp_trace(TA_INPUT, ostate, tp,
983 (void *)tcp_saveipgen, &tcp_savetcp, 0);
985 tcp_dooptions(&to, optp, optlen, TO_SYN);
986 if (!syncache_add(&inc, &to, th, inp, &so, m))
989 * Entry added to syncache, mbuf used to
990 * send SYN-ACK packet. Everything unlocked
995 /* Catch all. Everthing that makes it down here is junk. */
1000 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or
1001 * later state. tcp_do_segment() always consumes the mbuf chain
1002 * and unlocks the inpcb.
1004 if (tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen))
1005 goto findpcb; /* XXX: TIME_WAIT was nuked. */
1009 tcp_dropwithreset(m, th, tp, tlen, rstreason);
1010 m = NULL; /* mbuf chain got consumed. */
1014 INP_INFO_WUNLOCK(&tcbinfo);
1022 tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
1023 struct tcpcb *tp, int drop_hdrlen, int tlen)
1025 int thflags, acked, ourfinisacked, needoutput = 0;
1027 int rstreason, todrop, win;
1033 * The size of tcp_saveipgen must be the size of the max ip header,
1036 u_char tcp_saveipgen[IP6_HDR_LEN];
1037 struct tcphdr tcp_savetcp;
1040 thflags = th->th_flags;
1042 INP_INFO_WLOCK_ASSERT(&tcbinfo);
1043 INP_LOCK_ASSERT(tp->t_inpcb);
1044 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", __func__));
1047 * Segment received on connection.
1048 * Reset idle time and keep-alive timer.
1050 tp->t_rcvtime = ticks;
1051 if (TCPS_HAVEESTABLISHED(tp->t_state))
1052 callout_reset(tp->tt_keep, tcp_keepidle, tcp_timer_keep, tp);
1055 * Unscale the window into a 32-bit value.
1056 * This value is bogus for the TCPS_SYN_SENT state
1057 * and is overwritten later.
1059 tiwin = th->th_win << tp->snd_scale;
1062 * Parse options on any incoming segment.
1064 tcp_dooptions(&to, (u_char *)(th + 1),
1065 (th->th_off << 2) - sizeof(struct tcphdr),
1066 (thflags & TH_SYN) ? TO_SYN : 0);
1069 * If echoed timestamp is later than the current time,
1070 * fall back to non RFC1323 RTT calculation. Normalize
1071 * timestamp if syncookies were used when this connection
1074 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
1075 to.to_tsecr -= tp->ts_offset;
1076 if (TSTMP_GT(to.to_tsecr, ticks))
1081 * Process options only when we get SYN/ACK back. The SYN case
1082 * for incoming connections is handled in tcp_syncache.
1083 * XXX this is traditional behavior, may need to be cleaned up.
1085 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
1086 if ((to.to_flags & TOF_SCALE) &&
1087 (tp->t_flags & TF_REQ_SCALE)) {
1088 tp->t_flags |= TF_RCVD_SCALE;
1089 tp->snd_scale = to.to_wscale;
1090 tp->snd_wnd = th->th_win << tp->snd_scale;
1091 tiwin = tp->snd_wnd;
1093 if (to.to_flags & TOF_TS) {
1094 tp->t_flags |= TF_RCVD_TSTMP;
1095 tp->ts_recent = to.to_tsval;
1096 tp->ts_recent_age = ticks;
1098 /* Initial send window, already scaled. */
1099 tp->snd_wnd = th->th_win;
1100 if (to.to_flags & TOF_MSS)
1101 tcp_mss(tp, to.to_mss);
1102 if (tp->sack_enable) {
1103 if (!(to.to_flags & TOF_SACKPERM))
1104 tp->sack_enable = 0;
1106 tp->t_flags |= TF_SACK_PERMIT;
1112 * Header prediction: check for the two common cases
1113 * of a uni-directional data xfer. If the packet has
1114 * no control flags, is in-sequence, the window didn't
1115 * change and we're not retransmitting, it's a
1116 * candidate. If the length is zero and the ack moved
1117 * forward, we're the sender side of the xfer. Just
1118 * free the data acked & wake any higher level process
1119 * that was blocked waiting for space. If the length
1120 * is non-zero and the ack didn't move, we're the
1121 * receiver side. If we're getting packets in-order
1122 * (the reassembly queue is empty), add the data to
1123 * the socket buffer and note that we need a delayed ack.
1124 * Make sure that the hidden state-flags are also off.
1125 * Since we check for TCPS_ESTABLISHED above, it can only
1128 if (tp->t_state == TCPS_ESTABLISHED &&
1129 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
1130 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
1131 ((to.to_flags & TOF_TS) == 0 ||
1132 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
1133 th->th_seq == tp->rcv_nxt && tiwin && tiwin == tp->snd_wnd &&
1134 tp->snd_nxt == tp->snd_max) {
1137 * If last ACK falls within this segment's sequence numbers,
1138 * record the timestamp.
1139 * NOTE that the test is modified according to the latest
1140 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1142 if ((to.to_flags & TOF_TS) != 0 &&
1143 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
1144 tp->ts_recent_age = ticks;
1145 tp->ts_recent = to.to_tsval;
1149 if (SEQ_GT(th->th_ack, tp->snd_una) &&
1150 SEQ_LEQ(th->th_ack, tp->snd_max) &&
1151 tp->snd_cwnd >= tp->snd_wnd &&
1152 ((!tcp_do_newreno && !tp->sack_enable &&
1153 tp->t_dupacks < tcprexmtthresh) ||
1154 ((tcp_do_newreno || tp->sack_enable) &&
1155 !IN_FASTRECOVERY(tp) &&
1156 (to.to_flags & TOF_SACK) == 0 &&
1157 TAILQ_EMPTY(&tp->snd_holes)))) {
1159 ("%s: headlocked", __func__));
1160 INP_INFO_WUNLOCK(&tcbinfo);
1163 * this is a pure ack for outstanding data.
1165 ++tcpstat.tcps_predack;
1167 * "bad retransmit" recovery
1169 if (tp->t_rxtshift == 1 &&
1170 ticks < tp->t_badrxtwin) {
1171 ++tcpstat.tcps_sndrexmitbad;
1172 tp->snd_cwnd = tp->snd_cwnd_prev;
1174 tp->snd_ssthresh_prev;
1175 tp->snd_recover = tp->snd_recover_prev;
1176 if (tp->t_flags & TF_WASFRECOVERY)
1177 ENTER_FASTRECOVERY(tp);
1178 tp->snd_nxt = tp->snd_max;
1179 tp->t_badrxtwin = 0;
1183 * Recalculate the transmit timer / rtt.
1185 * Some boxes send broken timestamp replies
1186 * during the SYN+ACK phase, ignore
1187 * timestamps of 0 or we could calculate a
1188 * huge RTT and blow up the retransmit timer.
1190 if ((to.to_flags & TOF_TS) != 0 &&
1192 if (!tp->t_rttlow ||
1193 tp->t_rttlow > ticks - to.to_tsecr)
1194 tp->t_rttlow = ticks - to.to_tsecr;
1196 ticks - to.to_tsecr + 1);
1197 } else if (tp->t_rtttime &&
1198 SEQ_GT(th->th_ack, tp->t_rtseq)) {
1199 if (!tp->t_rttlow ||
1200 tp->t_rttlow > ticks - tp->t_rtttime)
1201 tp->t_rttlow = ticks - tp->t_rtttime;
1203 ticks - tp->t_rtttime);
1205 tcp_xmit_bandwidth_limit(tp, th->th_ack);
1206 acked = th->th_ack - tp->snd_una;
1207 tcpstat.tcps_rcvackpack++;
1208 tcpstat.tcps_rcvackbyte += acked;
1209 sbdrop(&so->so_snd, acked);
1210 if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
1211 SEQ_LEQ(th->th_ack, tp->snd_recover))
1212 tp->snd_recover = th->th_ack - 1;
1213 tp->snd_una = th->th_ack;
1215 * pull snd_wl2 up to prevent seq wrap relative
1218 tp->snd_wl2 = th->th_ack;
1221 ND6_HINT(tp); /* some progress has been done */
1224 * If all outstanding data are acked, stop
1225 * retransmit timer, otherwise restart timer
1226 * using current (possibly backed-off) value.
1227 * If process is waiting for space,
1228 * wakeup/selwakeup/signal. If data
1229 * are ready to send, let tcp_output
1230 * decide between more output or persist.
1233 if (so->so_options & SO_DEBUG)
1234 tcp_trace(TA_INPUT, ostate, tp,
1235 (void *)tcp_saveipgen,
1239 if (tp->snd_una == tp->snd_max)
1240 callout_stop(tp->tt_rexmt);
1241 else if (!callout_active(tp->tt_persist))
1242 callout_reset(tp->tt_rexmt,
1244 tcp_timer_rexmt, tp);
1247 if (so->so_snd.sb_cc)
1248 (void) tcp_output(tp);
1251 } else if (th->th_ack == tp->snd_una &&
1252 LIST_EMPTY(&tp->t_segq) &&
1253 tlen <= sbspace(&so->so_rcv)) {
1254 int newsize = 0; /* automatic sockbuf scaling */
1256 KASSERT(headlocked, ("%s: headlocked", __func__));
1257 INP_INFO_WUNLOCK(&tcbinfo);
1260 * this is a pure, in-sequence data packet
1261 * with nothing on the reassembly queue and
1262 * we have enough buffer space to take it.
1264 /* Clean receiver SACK report if present */
1265 if (tp->sack_enable && tp->rcv_numsacks)
1266 tcp_clean_sackreport(tp);
1267 ++tcpstat.tcps_preddat;
1268 tp->rcv_nxt += tlen;
1270 * Pull snd_wl1 up to prevent seq wrap relative to
1273 tp->snd_wl1 = th->th_seq;
1275 * Pull rcv_up up to prevent seq wrap relative to
1278 tp->rcv_up = tp->rcv_nxt;
1279 tcpstat.tcps_rcvpack++;
1280 tcpstat.tcps_rcvbyte += tlen;
1281 ND6_HINT(tp); /* some progress has been done */
1283 if (so->so_options & SO_DEBUG)
1284 tcp_trace(TA_INPUT, ostate, tp,
1285 (void *)tcp_saveipgen, &tcp_savetcp, 0);
1288 * Automatic sizing of receive socket buffer. Often the send
1289 * buffer size is not optimally adjusted to the actual network
1290 * conditions at hand (delay bandwidth product). Setting the
1291 * buffer size too small limits throughput on links with high
1292 * bandwidth and high delay (eg. trans-continental/oceanic links).
1294 * On the receive side the socket buffer memory is only rarely
1295 * used to any significant extent. This allows us to be much
1296 * more aggressive in scaling the receive socket buffer. For
1297 * the case that the buffer space is actually used to a large
1298 * extent and we run out of kernel memory we can simply drop
1299 * the new segments; TCP on the sender will just retransmit it
1300 * later. Setting the buffer size too big may only consume too
1301 * much kernel memory if the application doesn't read() from
1302 * the socket or packet loss or reordering makes use of the
1305 * The criteria to step up the receive buffer one notch are:
1306 * 1. the number of bytes received during the time it takes
1307 * one timestamp to be reflected back to us (the RTT);
1308 * 2. received bytes per RTT is within seven eighth of the
1309 * current socket buffer size;
1310 * 3. receive buffer size has not hit maximal automatic size;
1312 * This algorithm does one step per RTT at most and only if
1313 * we receive a bulk stream w/o packet losses or reorderings.
1314 * Shrinking the buffer during idle times is not necessary as
1315 * it doesn't consume any memory when idle.
1317 * TODO: Only step up if the application is actually serving
1318 * the buffer to better manage the socket buffer resources.
1320 if (tcp_do_autorcvbuf &&
1322 (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
1323 if (to.to_tsecr > tp->rfbuf_ts &&
1324 to.to_tsecr - tp->rfbuf_ts < hz) {
1326 (so->so_rcv.sb_hiwat / 8 * 7) &&
1327 so->so_rcv.sb_hiwat <
1328 tcp_autorcvbuf_max) {
1330 min(so->so_rcv.sb_hiwat +
1332 tcp_autorcvbuf_max);
1334 /* Start over with next RTT. */
1338 tp->rfbuf_cnt += tlen; /* add up */
1341 /* Add data to socket buffer. */
1342 SOCKBUF_LOCK(&so->so_rcv);
1343 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1347 * Set new socket buffer size.
1348 * Give up when limit is reached.
1351 if (!sbreserve_locked(&so->so_rcv,
1352 newsize, so, curthread))
1353 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
1354 m_adj(m, drop_hdrlen); /* delayed header drop */
1355 sbappendstream_locked(&so->so_rcv, m);
1357 sorwakeup_locked(so);
1358 if (DELAY_ACK(tp)) {
1359 tp->t_flags |= TF_DELACK;
1361 tp->t_flags |= TF_ACKNOW;
1369 * Calculate amount of space in receive window,
1370 * and then do TCP input processing.
1371 * Receive window is amount of space in rcv queue,
1372 * but not less than advertised window.
1374 win = sbspace(&so->so_rcv);
1377 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
1379 /* Reset receive buffer auto scaling when not in bulk receive mode. */
1383 switch (tp->t_state) {
1386 * If the state is SYN_RECEIVED:
1387 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
1389 case TCPS_SYN_RECEIVED:
1390 if ((thflags & TH_ACK) &&
1391 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
1392 SEQ_GT(th->th_ack, tp->snd_max))) {
1393 rstreason = BANDLIM_RST_OPENPORT;
1399 * If the state is SYN_SENT:
1400 * if seg contains an ACK, but not for our SYN, drop the input.
1401 * if seg contains a RST, then drop the connection.
1402 * if seg does not contain SYN, then drop it.
1403 * Otherwise this is an acceptable SYN segment
1404 * initialize tp->rcv_nxt and tp->irs
1405 * if seg contains ack then advance tp->snd_una
1406 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
1407 * arrange for segment to be acked (eventually)
1408 * continue processing rest of data/controls, beginning with URG
1411 if ((thflags & TH_ACK) &&
1412 (SEQ_LEQ(th->th_ack, tp->iss) ||
1413 SEQ_GT(th->th_ack, tp->snd_max))) {
1414 rstreason = BANDLIM_UNLIMITED;
1417 if (thflags & TH_RST) {
1418 if (thflags & TH_ACK) {
1419 KASSERT(headlocked, ("%s: after_listen: "
1420 "tcp_drop.2: head not locked", __func__));
1421 tp = tcp_drop(tp, ECONNREFUSED);
1425 if ((thflags & TH_SYN) == 0)
1428 tp->irs = th->th_seq;
1430 if (thflags & TH_ACK) {
1431 tcpstat.tcps_connects++;
1435 mac_set_socket_peer_from_mbuf(m, so);
1438 /* Do window scaling on this connection? */
1439 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
1440 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
1441 tp->rcv_scale = tp->request_r_scale;
1443 tp->rcv_adv += tp->rcv_wnd;
1444 tp->snd_una++; /* SYN is acked */
1446 * If there's data, delay ACK; if there's also a FIN
1447 * ACKNOW will be turned on later.
1449 if (DELAY_ACK(tp) && tlen != 0)
1450 callout_reset(tp->tt_delack, tcp_delacktime,
1451 tcp_timer_delack, tp);
1453 tp->t_flags |= TF_ACKNOW;
1455 * Received <SYN,ACK> in SYN_SENT[*] state.
1457 * SYN_SENT --> ESTABLISHED
1458 * SYN_SENT* --> FIN_WAIT_1
1460 tp->t_starttime = ticks;
1461 if (tp->t_flags & TF_NEEDFIN) {
1462 tp->t_state = TCPS_FIN_WAIT_1;
1463 tp->t_flags &= ~TF_NEEDFIN;
1466 tp->t_state = TCPS_ESTABLISHED;
1467 callout_reset(tp->tt_keep, tcp_keepidle,
1468 tcp_timer_keep, tp);
1472 * Received initial SYN in SYN-SENT[*] state =>
1473 * simultaneous open. If segment contains CC option
1474 * and there is a cached CC, apply TAO test.
1475 * If it succeeds, connection is * half-synchronized.
1476 * Otherwise, do 3-way handshake:
1477 * SYN-SENT -> SYN-RECEIVED
1478 * SYN-SENT* -> SYN-RECEIVED*
1479 * If there was no CC option, clear cached CC value.
1481 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
1482 callout_stop(tp->tt_rexmt);
1483 tp->t_state = TCPS_SYN_RECEIVED;
1486 KASSERT(headlocked, ("%s: trimthenstep6: head not locked",
1488 INP_LOCK_ASSERT(tp->t_inpcb);
1491 * Advance th->th_seq to correspond to first data byte.
1492 * If data, trim to stay within window,
1493 * dropping FIN if necessary.
1496 if (tlen > tp->rcv_wnd) {
1497 todrop = tlen - tp->rcv_wnd;
1501 tcpstat.tcps_rcvpackafterwin++;
1502 tcpstat.tcps_rcvbyteafterwin += todrop;
1504 tp->snd_wl1 = th->th_seq - 1;
1505 tp->rcv_up = th->th_seq;
1507 * Client side of transaction: already sent SYN and data.
1508 * If the remote host used T/TCP to validate the SYN,
1509 * our data will be ACK'd; if so, enter normal data segment
1510 * processing in the middle of step 5, ack processing.
1511 * Otherwise, goto step 6.
1513 if (thflags & TH_ACK)
1519 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
1520 * do normal processing.
1522 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later.
1526 case TCPS_TIME_WAIT:
1527 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: timewait",
1529 break; /* continue normal processing */
1533 * States other than LISTEN or SYN_SENT.
1534 * First check the RST flag and sequence number since reset segments
1535 * are exempt from the timestamp and connection count tests. This
1536 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
1537 * below which allowed reset segments in half the sequence space
1538 * to fall though and be processed (which gives forged reset
1539 * segments with a random sequence number a 50 percent chance of
1540 * killing a connection).
1541 * Then check timestamp, if present.
1542 * Then check the connection count, if present.
1543 * Then check that at least some bytes of segment are within
1544 * receive window. If segment begins before rcv_nxt,
1545 * drop leading data (and SYN); if nothing left, just ack.
1548 * If the RST bit is set, check the sequence number to see
1549 * if this is a valid reset segment.
1551 * In all states except SYN-SENT, all reset (RST) segments
1552 * are validated by checking their SEQ-fields. A reset is
1553 * valid if its sequence number is in the window.
1554 * Note: this does not take into account delayed ACKs, so
1555 * we should test against last_ack_sent instead of rcv_nxt.
1556 * The sequence number in the reset segment is normally an
1557 * echo of our outgoing acknowlegement numbers, but some hosts
1558 * send a reset with the sequence number at the rightmost edge
1559 * of our receive window, and we have to handle this case.
1560 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
1561 * that brute force RST attacks are possible. To combat this,
1562 * we use a much stricter check while in the ESTABLISHED state,
1563 * only accepting RSTs where the sequence number is equal to
1564 * last_ack_sent. In all other states (the states in which a
1565 * RST is more likely), the more permissive check is used.
1566 * If we have multiple segments in flight, the intial reset
1567 * segment sequence numbers will be to the left of last_ack_sent,
1568 * but they will eventually catch up.
1569 * In any case, it never made sense to trim reset segments to
1570 * fit the receive window since RFC 1122 says:
1571 * 4.2.2.12 RST Segment: RFC-793 Section 3.4
1573 * A TCP SHOULD allow a received RST segment to include data.
1576 * It has been suggested that a RST segment could contain
1577 * ASCII text that encoded and explained the cause of the
1578 * RST. No standard has yet been established for such
1581 * If the reset segment passes the sequence number test examine
1583 * SYN_RECEIVED STATE:
1584 * If passive open, return to LISTEN state.
1585 * If active open, inform user that connection was refused.
1586 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
1587 * Inform user that connection was reset, and close tcb.
1588 * CLOSING, LAST_ACK STATES:
1591 * Drop the segment - see Stevens, vol. 2, p. 964 and
1594 if (thflags & TH_RST) {
1595 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
1596 SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
1597 switch (tp->t_state) {
1599 case TCPS_SYN_RECEIVED:
1600 so->so_error = ECONNREFUSED;
1603 case TCPS_ESTABLISHED:
1604 if (tcp_insecure_rst == 0 &&
1605 !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) &&
1606 SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) &&
1607 !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
1608 SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) {
1609 tcpstat.tcps_badrst++;
1612 case TCPS_FIN_WAIT_1:
1613 case TCPS_FIN_WAIT_2:
1614 case TCPS_CLOSE_WAIT:
1615 so->so_error = ECONNRESET;
1617 tp->t_state = TCPS_CLOSED;
1618 tcpstat.tcps_drops++;
1619 KASSERT(headlocked, ("%s: trimthenstep6: "
1620 "tcp_close: head not locked", __func__));
1626 KASSERT(headlocked, ("%s: trimthenstep6: "
1627 "tcp_close.2: head not locked", __func__));
1631 case TCPS_TIME_WAIT:
1632 KASSERT(tp->t_state != TCPS_TIME_WAIT,
1633 ("%s: timewait", __func__));
1641 * RFC 1323 PAWS: If we have a timestamp reply on this segment
1642 * and it's less than ts_recent, drop it.
1644 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
1645 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
1647 /* Check to see if ts_recent is over 24 days old. */
1648 if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) {
1650 * Invalidate ts_recent. If this segment updates
1651 * ts_recent, the age will be reset later and ts_recent
1652 * will get a valid value. If it does not, setting
1653 * ts_recent to zero will at least satisfy the
1654 * requirement that zero be placed in the timestamp
1655 * echo reply when ts_recent isn't valid. The
1656 * age isn't reset until we get a valid ts_recent
1657 * because we don't want out-of-order segments to be
1658 * dropped when ts_recent is old.
1662 tcpstat.tcps_rcvduppack++;
1663 tcpstat.tcps_rcvdupbyte += tlen;
1664 tcpstat.tcps_pawsdrop++;
1672 * In the SYN-RECEIVED state, validate that the packet belongs to
1673 * this connection before trimming the data to fit the receive
1674 * window. Check the sequence number versus IRS since we know
1675 * the sequence numbers haven't wrapped. This is a partial fix
1676 * for the "LAND" DoS attack.
1678 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
1679 rstreason = BANDLIM_RST_OPENPORT;
1683 todrop = tp->rcv_nxt - th->th_seq;
1685 if (thflags & TH_SYN) {
1695 * Following if statement from Stevens, vol. 2, p. 960.
1698 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
1700 * Any valid FIN must be to the left of the window.
1701 * At this point the FIN must be a duplicate or out
1702 * of sequence; drop it.
1707 * Send an ACK to resynchronize and drop any data.
1708 * But keep on processing for RST or ACK.
1710 tp->t_flags |= TF_ACKNOW;
1712 tcpstat.tcps_rcvduppack++;
1713 tcpstat.tcps_rcvdupbyte += todrop;
1715 tcpstat.tcps_rcvpartduppack++;
1716 tcpstat.tcps_rcvpartdupbyte += todrop;
1718 drop_hdrlen += todrop; /* drop from the top afterwards */
1719 th->th_seq += todrop;
1721 if (th->th_urp > todrop)
1722 th->th_urp -= todrop;
1730 * If new data are received on a connection after the
1731 * user processes are gone, then RST the other end.
1733 if ((so->so_state & SS_NOFDREF) &&
1734 tp->t_state > TCPS_CLOSE_WAIT && tlen) {
1735 KASSERT(headlocked, ("%s: trimthenstep6: tcp_close.3: head "
1736 "not locked", __func__));
1738 tcpstat.tcps_rcvafterclose++;
1739 rstreason = BANDLIM_UNLIMITED;
1744 * If segment ends after window, drop trailing data
1745 * (and PUSH and FIN); if nothing left, just ACK.
1747 todrop = (th->th_seq+tlen) - (tp->rcv_nxt+tp->rcv_wnd);
1749 tcpstat.tcps_rcvpackafterwin++;
1750 if (todrop >= tlen) {
1751 tcpstat.tcps_rcvbyteafterwin += tlen;
1753 * If a new connection request is received
1754 * while in TIME_WAIT, drop the old connection
1755 * and start over if the sequence numbers
1756 * are above the previous ones.
1758 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: timewait",
1760 if (thflags & TH_SYN &&
1761 tp->t_state == TCPS_TIME_WAIT &&
1762 SEQ_GT(th->th_seq, tp->rcv_nxt)) {
1763 KASSERT(headlocked, ("%s: trimthenstep6: "
1764 "tcp_close.4: head not locked", __func__));
1766 /* XXX: Shouldn't be possible. */
1770 * If window is closed can only take segments at
1771 * window edge, and have to drop data and PUSH from
1772 * incoming segments. Continue processing, but
1773 * remember to ack. Otherwise, drop segment
1776 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
1777 tp->t_flags |= TF_ACKNOW;
1778 tcpstat.tcps_rcvwinprobe++;
1782 tcpstat.tcps_rcvbyteafterwin += todrop;
1785 thflags &= ~(TH_PUSH|TH_FIN);
1789 * If last ACK falls within this segment's sequence numbers,
1790 * record its timestamp.
1792 * 1) That the test incorporates suggestions from the latest
1793 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1794 * 2) That updating only on newer timestamps interferes with
1795 * our earlier PAWS tests, so this check should be solely
1796 * predicated on the sequence space of this segment.
1797 * 3) That we modify the segment boundary check to be
1798 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
1799 * instead of RFC1323's
1800 * Last.ACK.Sent < SEG.SEQ + SEG.Len,
1801 * This modified check allows us to overcome RFC1323's
1802 * limitations as described in Stevens TCP/IP Illustrated
1803 * Vol. 2 p.869. In such cases, we can still calculate the
1804 * RTT correctly when RCV.NXT == Last.ACK.Sent.
1806 if ((to.to_flags & TOF_TS) != 0 &&
1807 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
1808 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
1809 ((thflags & (TH_SYN|TH_FIN)) != 0))) {
1810 tp->ts_recent_age = ticks;
1811 tp->ts_recent = to.to_tsval;
1815 * If a SYN is in the window, then this is an
1816 * error and we send an RST and drop the connection.
1818 if (thflags & TH_SYN) {
1819 KASSERT(headlocked, ("%s: tcp_drop: trimthenstep6: "
1820 "head not locked", __func__));
1821 tp = tcp_drop(tp, ECONNRESET);
1822 rstreason = BANDLIM_UNLIMITED;
1827 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
1828 * flag is on (half-synchronized state), then queue data for
1829 * later processing; else drop segment and return.
1831 if ((thflags & TH_ACK) == 0) {
1832 if (tp->t_state == TCPS_SYN_RECEIVED ||
1833 (tp->t_flags & TF_NEEDSYN))
1835 else if (tp->t_flags & TF_ACKNOW)
1844 switch (tp->t_state) {
1847 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
1848 * ESTABLISHED state and continue processing.
1849 * The ACK was checked above.
1851 case TCPS_SYN_RECEIVED:
1853 tcpstat.tcps_connects++;
1855 /* Do window scaling? */
1856 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
1857 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
1858 tp->rcv_scale = tp->request_r_scale;
1859 tp->snd_wnd = tiwin;
1863 * SYN-RECEIVED -> ESTABLISHED
1864 * SYN-RECEIVED* -> FIN-WAIT-1
1866 tp->t_starttime = ticks;
1867 if (tp->t_flags & TF_NEEDFIN) {
1868 tp->t_state = TCPS_FIN_WAIT_1;
1869 tp->t_flags &= ~TF_NEEDFIN;
1871 tp->t_state = TCPS_ESTABLISHED;
1872 callout_reset(tp->tt_keep, tcp_keepidle,
1873 tcp_timer_keep, tp);
1876 * If segment contains data or ACK, will call tcp_reass()
1877 * later; if not, do so now to pass queued data to user.
1879 if (tlen == 0 && (thflags & TH_FIN) == 0)
1880 (void) tcp_reass(tp, (struct tcphdr *)0, 0,
1882 tp->snd_wl1 = th->th_seq - 1;
1886 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
1887 * ACKs. If the ack is in the range
1888 * tp->snd_una < th->th_ack <= tp->snd_max
1889 * then advance tp->snd_una to th->th_ack and drop
1890 * data from the retransmission queue. If this ACK reflects
1891 * more up to date window information we update our window information.
1893 case TCPS_ESTABLISHED:
1894 case TCPS_FIN_WAIT_1:
1895 case TCPS_FIN_WAIT_2:
1896 case TCPS_CLOSE_WAIT:
1899 case TCPS_TIME_WAIT:
1900 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: timewait",
1902 if (SEQ_GT(th->th_ack, tp->snd_max)) {
1903 tcpstat.tcps_rcvacktoomuch++;
1906 if (tp->sack_enable &&
1907 ((to.to_flags & TOF_SACK) ||
1908 !TAILQ_EMPTY(&tp->snd_holes)))
1909 tcp_sack_doack(tp, &to, th->th_ack);
1910 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
1911 if (tlen == 0 && tiwin == tp->snd_wnd) {
1912 tcpstat.tcps_rcvdupack++;
1914 * If we have outstanding data (other than
1915 * a window probe), this is a completely
1916 * duplicate ack (ie, window info didn't
1917 * change), the ack is the biggest we've
1918 * seen and we've seen exactly our rexmt
1919 * threshhold of them, assume a packet
1920 * has been dropped and retransmit it.
1921 * Kludge snd_nxt & the congestion
1922 * window so we send only this one
1925 * We know we're losing at the current
1926 * window size so do congestion avoidance
1927 * (set ssthresh to half the current window
1928 * and pull our congestion window back to
1929 * the new ssthresh).
1931 * Dup acks mean that packets have left the
1932 * network (they're now cached at the receiver)
1933 * so bump cwnd by the amount in the receiver
1934 * to keep a constant cwnd packets in the
1937 if (!callout_active(tp->tt_rexmt) ||
1938 th->th_ack != tp->snd_una)
1940 else if (++tp->t_dupacks > tcprexmtthresh ||
1941 ((tcp_do_newreno || tp->sack_enable) &&
1942 IN_FASTRECOVERY(tp))) {
1943 if (tp->sack_enable && IN_FASTRECOVERY(tp)) {
1947 * Compute the amount of data in flight first.
1948 * We can inject new data into the pipe iff
1949 * we have less than 1/2 the original window's
1950 * worth of data in flight.
1952 awnd = (tp->snd_nxt - tp->snd_fack) +
1953 tp->sackhint.sack_bytes_rexmit;
1954 if (awnd < tp->snd_ssthresh) {
1955 tp->snd_cwnd += tp->t_maxseg;
1956 if (tp->snd_cwnd > tp->snd_ssthresh)
1957 tp->snd_cwnd = tp->snd_ssthresh;
1960 tp->snd_cwnd += tp->t_maxseg;
1961 (void) tcp_output(tp);
1963 } else if (tp->t_dupacks == tcprexmtthresh) {
1964 tcp_seq onxt = tp->snd_nxt;
1968 * If we're doing sack, check to
1969 * see if we're already in sack
1970 * recovery. If we're not doing sack,
1971 * check to see if we're in newreno
1974 if (tp->sack_enable) {
1975 if (IN_FASTRECOVERY(tp)) {
1979 } else if (tcp_do_newreno) {
1980 if (SEQ_LEQ(th->th_ack,
1986 win = min(tp->snd_wnd, tp->snd_cwnd) /
1990 tp->snd_ssthresh = win * tp->t_maxseg;
1991 ENTER_FASTRECOVERY(tp);
1992 tp->snd_recover = tp->snd_max;
1993 callout_stop(tp->tt_rexmt);
1995 if (tp->sack_enable) {
1996 tcpstat.tcps_sack_recovery_episode++;
1997 tp->sack_newdata = tp->snd_nxt;
1998 tp->snd_cwnd = tp->t_maxseg;
1999 (void) tcp_output(tp);
2002 tp->snd_nxt = th->th_ack;
2003 tp->snd_cwnd = tp->t_maxseg;
2004 (void) tcp_output(tp);
2005 KASSERT(tp->snd_limited <= 2,
2006 ("%s: tp->snd_limited too big",
2008 tp->snd_cwnd = tp->snd_ssthresh +
2010 (tp->t_dupacks - tp->snd_limited);
2011 if (SEQ_GT(onxt, tp->snd_nxt))
2014 } else if (tcp_do_rfc3042) {
2015 u_long oldcwnd = tp->snd_cwnd;
2016 tcp_seq oldsndmax = tp->snd_max;
2019 KASSERT(tp->t_dupacks == 1 ||
2021 ("%s: dupacks not 1 or 2",
2023 if (tp->t_dupacks == 1)
2024 tp->snd_limited = 0;
2026 (tp->snd_nxt - tp->snd_una) +
2027 (tp->t_dupacks - tp->snd_limited) *
2029 (void) tcp_output(tp);
2030 sent = tp->snd_max - oldsndmax;
2031 if (sent > tp->t_maxseg) {
2032 KASSERT((tp->t_dupacks == 2 &&
2033 tp->snd_limited == 0) ||
2034 (sent == tp->t_maxseg + 1 &&
2035 tp->t_flags & TF_SENTFIN),
2036 ("%s: sent too much",
2038 tp->snd_limited = 2;
2039 } else if (sent > 0)
2041 tp->snd_cwnd = oldcwnd;
2049 KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
2050 ("%s: th_ack <= snd_una", __func__));
2053 * If the congestion window was inflated to account
2054 * for the other side's cached packets, retract it.
2056 if (tcp_do_newreno || tp->sack_enable) {
2057 if (IN_FASTRECOVERY(tp)) {
2058 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2059 if (tp->sack_enable)
2060 tcp_sack_partialack(tp, th);
2062 tcp_newreno_partial_ack(tp, th);
2065 * Out of fast recovery.
2066 * Window inflation should have left us
2067 * with approximately snd_ssthresh
2069 * But in case we would be inclined to
2070 * send a burst, better to do it via
2071 * the slow start mechanism.
2073 if (SEQ_GT(th->th_ack +
2076 tp->snd_cwnd = tp->snd_max -
2080 tp->snd_cwnd = tp->snd_ssthresh;
2084 if (tp->t_dupacks >= tcprexmtthresh &&
2085 tp->snd_cwnd > tp->snd_ssthresh)
2086 tp->snd_cwnd = tp->snd_ssthresh;
2090 * If we reach this point, ACK is not a duplicate,
2091 * i.e., it ACKs something we sent.
2093 if (tp->t_flags & TF_NEEDSYN) {
2095 * T/TCP: Connection was half-synchronized, and our
2096 * SYN has been ACK'd (so connection is now fully
2097 * synchronized). Go to non-starred state,
2098 * increment snd_una for ACK of SYN, and check if
2099 * we can do window scaling.
2101 tp->t_flags &= ~TF_NEEDSYN;
2103 /* Do window scaling? */
2104 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2105 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2106 tp->rcv_scale = tp->request_r_scale;
2107 /* Send window already scaled. */
2112 KASSERT(headlocked, ("%s: process_ACK: head not locked",
2114 INP_LOCK_ASSERT(tp->t_inpcb);
2116 acked = th->th_ack - tp->snd_una;
2117 tcpstat.tcps_rcvackpack++;
2118 tcpstat.tcps_rcvackbyte += acked;
2121 * If we just performed our first retransmit, and the ACK
2122 * arrives within our recovery window, then it was a mistake
2123 * to do the retransmit in the first place. Recover our
2124 * original cwnd and ssthresh, and proceed to transmit where
2127 if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) {
2128 ++tcpstat.tcps_sndrexmitbad;
2129 tp->snd_cwnd = tp->snd_cwnd_prev;
2130 tp->snd_ssthresh = tp->snd_ssthresh_prev;
2131 tp->snd_recover = tp->snd_recover_prev;
2132 if (tp->t_flags & TF_WASFRECOVERY)
2133 ENTER_FASTRECOVERY(tp);
2134 tp->snd_nxt = tp->snd_max;
2135 tp->t_badrxtwin = 0; /* XXX probably not required */
2139 * If we have a timestamp reply, update smoothed
2140 * round trip time. If no timestamp is present but
2141 * transmit timer is running and timed sequence
2142 * number was acked, update smoothed round trip time.
2143 * Since we now have an rtt measurement, cancel the
2144 * timer backoff (cf., Phil Karn's retransmit alg.).
2145 * Recompute the initial retransmit timer.
2147 * Some boxes send broken timestamp replies
2148 * during the SYN+ACK phase, ignore
2149 * timestamps of 0 or we could calculate a
2150 * huge RTT and blow up the retransmit timer.
2152 if ((to.to_flags & TOF_TS) != 0 &&
2154 if (!tp->t_rttlow || tp->t_rttlow > ticks - to.to_tsecr)
2155 tp->t_rttlow = ticks - to.to_tsecr;
2156 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1);
2157 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2158 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
2159 tp->t_rttlow = ticks - tp->t_rtttime;
2160 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2162 tcp_xmit_bandwidth_limit(tp, th->th_ack);
2165 * If all outstanding data is acked, stop retransmit
2166 * timer and remember to restart (more output or persist).
2167 * If there is more data to be acked, restart retransmit
2168 * timer, using current (possibly backed-off) value.
2170 if (th->th_ack == tp->snd_max) {
2171 callout_stop(tp->tt_rexmt);
2173 } else if (!callout_active(tp->tt_persist))
2174 callout_reset(tp->tt_rexmt, tp->t_rxtcur,
2175 tcp_timer_rexmt, tp);
2178 * If no data (only SYN) was ACK'd,
2179 * skip rest of ACK processing.
2185 * When new data is acked, open the congestion window.
2186 * If the window gives us less than ssthresh packets
2187 * in flight, open exponentially (maxseg per packet).
2188 * Otherwise open linearly: maxseg per window
2189 * (maxseg^2 / cwnd per packet).
2191 if ((!tcp_do_newreno && !tp->sack_enable) ||
2192 !IN_FASTRECOVERY(tp)) {
2193 u_int cw = tp->snd_cwnd;
2194 u_int incr = tp->t_maxseg;
2195 if (cw > tp->snd_ssthresh)
2196 incr = incr * incr / cw;
2197 tp->snd_cwnd = min(cw+incr, TCP_MAXWIN<<tp->snd_scale);
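/*
 * Illustrative sketch (not part of the original source): the per-ACK
 * growth rule described above, restated with plain integers.  While
 * cwnd is at or below ssthresh, each ACK adds a full maxseg (slow
 * start, exponential growth per RTT); above ssthresh it adds roughly
 * maxseg^2/cwnd, i.e. about one maxseg per window (congestion
 * avoidance).  The function name and standalone types are assumptions
 * made only for this example.
 */
static unsigned int
example_cwnd_after_ack(unsigned int cwnd, unsigned int ssthresh,
    unsigned int maxseg, unsigned int maxwin)
{
	unsigned int incr = maxseg;

	if (cwnd > ssthresh)
		incr = incr * incr / cwnd;	/* ~one maxseg per RTT */
	return (cwnd + incr < maxwin ? cwnd + incr : maxwin);
}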
2199 SOCKBUF_LOCK(&so->so_snd);
2200 if (acked > so->so_snd.sb_cc) {
2201 tp->snd_wnd -= so->so_snd.sb_cc;
2202 sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc);
2205 sbdrop_locked(&so->so_snd, acked);
2206 tp->snd_wnd -= acked;
2209 sowwakeup_locked(so);
2210 /* detect una wraparound */
2211 if ((tcp_do_newreno || tp->sack_enable) &&
2212 !IN_FASTRECOVERY(tp) &&
2213 SEQ_GT(tp->snd_una, tp->snd_recover) &&
2214 SEQ_LEQ(th->th_ack, tp->snd_recover))
2215 tp->snd_recover = th->th_ack - 1;
2216 if ((tcp_do_newreno || tp->sack_enable) &&
2217 IN_FASTRECOVERY(tp) &&
2218 SEQ_GEQ(th->th_ack, tp->snd_recover))
2219 EXIT_FASTRECOVERY(tp);
2220 tp->snd_una = th->th_ack;
2221 if (tp->sack_enable) {
2222 if (SEQ_GT(tp->snd_una, tp->snd_recover))
2223 tp->snd_recover = tp->snd_una;
2225 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2226 tp->snd_nxt = tp->snd_una;
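/*
 * Illustrative sketch (not part of the original source): the
 * wraparound-safe ordering that SEQ_GT()/SEQ_LEQ() above rely on.
 * Casting the 32-bit difference to a signed integer keeps the
 * comparison correct when sequence numbers wrap past 2^32.  The
 * function name is an assumption made only for this example.
 */
static int
example_seq_gt(unsigned int a, unsigned int b)
{
	return ((int)(a - b) > 0);
}
/* Example: example_seq_gt(0x00000005, 0xfffffff0) is 1 across the wrap. */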
2228 switch (tp->t_state) {
2231 * In FIN_WAIT_1 STATE in addition to the processing
2232 * for the ESTABLISHED state if our FIN is now acknowledged
2233 * then enter FIN_WAIT_2.
2235 case TCPS_FIN_WAIT_1:
2236 if (ourfinisacked) {
2238 * If we can't receive any more
2239 * data, then closing user can proceed.
2240 * Starting the timer is contrary to the
2241 * specification, but if we don't get a FIN
2242 * we'll hang forever.
2245 * we should release the tp also, and use a compressed state.
2248 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2251 soisdisconnected(so);
2252 timeout = (tcp_fast_finwait2_recycle) ?
2253 tcp_finwait2_timeout : tcp_maxidle;
2254 callout_reset(tp->tt_2msl, timeout,
2255 tcp_timer_2msl, tp);
2257 tp->t_state = TCPS_FIN_WAIT_2;
2262 * In CLOSING STATE in addition to the processing for
2263 * the ESTABLISHED state if the ACK acknowledges our FIN
2264 * then enter the TIME-WAIT state, otherwise ignore
2268 if (ourfinisacked) {
2269 KASSERT(headlocked, ("%s: process_ACK: "
2270 "head not locked", __func__));
2272 INP_INFO_WUNLOCK(&tcbinfo);
2280 * In LAST_ACK, we may still be waiting for data to drain
2281 * and/or to be acked, as well as for the ack of our FIN.
2282 * If our FIN is now acknowledged, delete the TCB,
2283 * enter the closed state and return.
2286 if (ourfinisacked) {
2287 KASSERT(headlocked, ("%s: process_ACK: "
2288 "tcp_close: head not locked", __func__));
2295 * In TIME_WAIT state the only thing that should arrive
2296 * is a retransmission of the remote FIN. Acknowledge
2297 * it and restart the finack timer.
2299 case TCPS_TIME_WAIT:
2300 KASSERT(tp->t_state != TCPS_TIME_WAIT,
2301 ("%s: timewait", __func__));
2302 callout_reset(tp->tt_2msl, 2 * tcp_msl,
2303 tcp_timer_2msl, tp);
2309 KASSERT(headlocked, ("%s: step6: head not locked", __func__));
2310 INP_LOCK_ASSERT(tp->t_inpcb);
2313 * Update window information.
2314 * Don't look at window if no ACK: TACs send garbage on first SYN.
2316 if ((thflags & TH_ACK) &&
2317 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
2318 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
2319 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
2320 /* keep track of pure window updates */
2322 if (tlen == 0 && tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
2323 tcpstat.tcps_rcvwinupd++;
2324 tp->snd_wnd = tiwin;
2325 tp->snd_wl1 = th->th_seq;
2326 tp->snd_wl2 = th->th_ack;
2327 if (tp->snd_wnd > tp->max_sndwnd)
2328 tp->max_sndwnd = tp->snd_wnd;
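/*
 * Illustrative sketch (not part of the original source): the
 * "is this window update newer?" test applied above.  A segment
 * carries fresher window information if it has a higher sequence
 * number, or the same sequence number and a higher ack, or both equal
 * and a larger advertised window.  The signed-difference compares
 * stand in for the kernel's SEQ_LT(); all names are assumptions made
 * only for this example.
 */
static int
example_window_update_is_newer(unsigned int wl1, unsigned int wl2,
    unsigned int seq, unsigned int ack, unsigned long tiwin,
    unsigned long sndwnd)
{
	if ((int)(wl1 - seq) < 0)			/* SEQ_LT(wl1, seq) */
		return (1);
	if (wl1 == seq && (int)(wl2 - ack) < 0)		/* SEQ_LT(wl2, ack) */
		return (1);
	return (wl1 == seq && wl2 == ack && tiwin > sndwnd);
}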
2333 * Process segments with URG.
2335 if ((thflags & TH_URG) && th->th_urp &&
2336 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2338 * This is a kludge, but if we receive and accept
2339 * random urgent pointers, we'll crash in
2340 * soreceive. It's hard to imagine someone
2341 * actually wanting to send this much urgent data.
2343 SOCKBUF_LOCK(&so->so_rcv);
2344 if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
2345 th->th_urp = 0; /* XXX */
2346 thflags &= ~TH_URG; /* XXX */
2347 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
2348 goto dodata; /* XXX */
2351 * If this segment advances the known urgent pointer,
2352 * then mark the data stream. This should not happen
2353 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
2354 * a FIN has been received from the remote side.
2355 * In these states we ignore the URG.
2357 * According to RFC961 (Assigned Protocols),
2358 * the urgent pointer points to the last octet
2359 * of urgent data. We continue, however,
2360 * to consider it to indicate the first octet
2361 * of data past the urgent section as the original
2362 * spec states (in one of two places).
2364 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
2365 tp->rcv_up = th->th_seq + th->th_urp;
2366 so->so_oobmark = so->so_rcv.sb_cc +
2367 (tp->rcv_up - tp->rcv_nxt) - 1;
2368 if (so->so_oobmark == 0)
2369 so->so_rcv.sb_state |= SBS_RCVATMARK;
2371 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
2373 SOCKBUF_UNLOCK(&so->so_rcv);
2375 * Remove out-of-band data so it doesn't get presented to the user.
2376 * This can happen independent of advancing the URG pointer,
2377 * but if two URG's are pending at once, some out-of-band
2378 * data may creep in... ick.
2380 if (th->th_urp <= (u_long)tlen &&
2381 !(so->so_options & SO_OOBINLINE)) {
2382 /* hdr drop is delayed */
2383 tcp_pulloutofband(so, th, m, drop_hdrlen);
2387 * If no out of band data is expected,
2388 * pull receive urgent pointer along
2389 * with the receive window.
2391 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
2392 tp->rcv_up = tp->rcv_nxt;
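/*
 * Illustrative sketch (not part of the original source): how the
 * out-of-band mark above is derived.  The mark is the byte offset,
 * within the data already buffered plus the in-sequence gap up to the
 * urgent pointer, of the last octet before the urgent data; a result
 * of zero means the next byte the user reads is at the mark
 * (SBS_RCVATMARK).  All names are assumptions made only for this
 * example.
 */
static unsigned long
example_oobmark(unsigned long rcv_buffered, unsigned int rcv_up,
    unsigned int rcv_nxt)
{
	return (rcv_buffered + (rcv_up - rcv_nxt) - 1);
}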
2395 KASSERT(headlocked, ("%s: dodata: head not locked", __func__));
2396 INP_LOCK_ASSERT(tp->t_inpcb);
2399 * Process the segment text, merging it into the TCP sequencing queue,
2400 * and arranging for acknowledgment of receipt if necessary.
2401 * This process logically involves adjusting tp->rcv_wnd as data
2402 * is presented to the user (this happens in tcp_usrreq.c,
2403 * case PRU_RCVD). If a FIN has already been received on this
2404 * connection then we just ignore the text.
2406 if ((tlen || (thflags & TH_FIN)) &&
2407 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2408 tcp_seq save_start = th->th_seq;
2409 tcp_seq save_end = th->th_seq + tlen;
2410 m_adj(m, drop_hdrlen); /* delayed header drop */
2412 * Insert segment which includes th into TCP reassembly queue
2413 * with control block tp. Set thflags to whether reassembly now
2414 * includes a segment with FIN. This handles the common case
2415 * inline (segment is the next to be received on an established
2416 * connection, and the queue is empty), avoiding linkage into
2417 * and removal from the queue and repetition of various conversions.
2419 * Set DELACK for segments received in order, but ack
2420 * immediately when segments are out of order (so
2421 * fast retransmit can work).
2423 if (th->th_seq == tp->rcv_nxt &&
2424 LIST_EMPTY(&tp->t_segq) &&
2425 TCPS_HAVEESTABLISHED(tp->t_state)) {
2427 tp->t_flags |= TF_DELACK;
2429 tp->t_flags |= TF_ACKNOW;
2430 tp->rcv_nxt += tlen;
2431 thflags = th->th_flags & TH_FIN;
2432 tcpstat.tcps_rcvpack++;
2433 tcpstat.tcps_rcvbyte += tlen;
2435 SOCKBUF_LOCK(&so->so_rcv);
2436 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
2439 sbappendstream_locked(&so->so_rcv, m);
2440 sorwakeup_locked(so);
2442 thflags = tcp_reass(tp, th, &tlen, m);
2443 tp->t_flags |= TF_ACKNOW;
2445 if (tlen > 0 && tp->sack_enable)
2446 tcp_update_sack_list(tp, save_start, save_end);
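/*
 * Illustrative sketch (not part of the original source): the in-order
 * test above that chooses between a delayed ACK and an immediate one.
 * Only a segment that starts exactly at rcv_nxt, with an empty
 * reassembly queue, on an established connection may have its ACK
 * delayed; everything else is ACKed at once so the peer's fast
 * retransmit can trigger.  All names are assumptions made only for
 * this example.
 */
static int
example_may_delay_ack(unsigned int th_seq, unsigned int rcv_nxt,
    int reass_queue_empty, int established)
{
	return (th_seq == rcv_nxt && reass_queue_empty && established);
}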
2449 * Note the amount of data that the peer has sent into
2450 * our window, in order to estimate the sender's buffer size.
2454 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
2462 * If FIN is received ACK the FIN and let the user know
2463 * that the connection is closing.
2465 if (thflags & TH_FIN) {
2466 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2469 * If connection is half-synchronized
2470 * (i.e., NEEDSYN flag on) then delay ACK,
2471 * so it may be piggybacked when SYN is sent.
2472 * Otherwise, since we received a FIN, no
2473 * more input can be expected; send ACK now.
2475 if (tp->t_flags & TF_NEEDSYN)
2476 tp->t_flags |= TF_DELACK;
2478 tp->t_flags |= TF_ACKNOW;
2481 switch (tp->t_state) {
2484 * In SYN_RECEIVED and ESTABLISHED STATES
2485 * enter the CLOSE_WAIT state.
2487 case TCPS_SYN_RECEIVED:
2488 tp->t_starttime = ticks;
2490 case TCPS_ESTABLISHED:
2491 tp->t_state = TCPS_CLOSE_WAIT;
2495 * If still in FIN_WAIT_1 STATE FIN has not been acked so
2496 * enter the CLOSING state.
2498 case TCPS_FIN_WAIT_1:
2499 tp->t_state = TCPS_CLOSING;
2503 * In FIN_WAIT_2 state enter the TIME_WAIT state,
2504 * starting the time-wait timer, turning off the other standard timers.
2507 case TCPS_FIN_WAIT_2:
2508 KASSERT(headlocked == 1, ("%s: dodata: "
2509 "TCP_FIN_WAIT_2: head not locked", __func__));
2511 INP_INFO_WUNLOCK(&tcbinfo);
2515 * In TIME_WAIT state restart the 2 MSL time_wait timer.
2517 case TCPS_TIME_WAIT:
2518 KASSERT(tp->t_state != TCPS_TIME_WAIT,
2519 ("%s: timewait", __func__));
2520 callout_reset(tp->tt_2msl, 2 * tcp_msl,
2521 tcp_timer_2msl, tp);
2525 INP_INFO_WUNLOCK(&tcbinfo);
2528 if (so->so_options & SO_DEBUG)
2529 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
2534 * Return any desired output.
2536 if (needoutput || (tp->t_flags & TF_ACKNOW))
2537 (void) tcp_output(tp);
2540 KASSERT(headlocked == 0, ("%s: check_delack: head locked",
2542 INP_LOCK_ASSERT(tp->t_inpcb);
2543 if (tp->t_flags & TF_DELACK) {
2544 tp->t_flags &= ~TF_DELACK;
2545 callout_reset(tp->tt_delack, tcp_delacktime,
2546 tcp_timer_delack, tp);
2548 INP_UNLOCK(tp->t_inpcb);
2552 KASSERT(headlocked, ("%s: dropafterack: head not locked", __func__));
2554 * Generate an ACK dropping incoming segment if it occupies
2555 * sequence space, where the ACK reflects our state.
2557 * We can now skip the test for the RST flag since all
2558 * paths to this code happen after packets containing
2559 * RST have been dropped.
2561 * In the SYN-RECEIVED state, don't send an ACK unless the
2562 * segment we received passes the SYN-RECEIVED ACK test.
2563 * If it fails send a RST. This breaks the loop in the
2564 * "LAND" DoS attack, and also prevents an ACK storm
2565 * between two listening ports that have been sent forged
2566 * SYN segments, each with the source address of the other.
2568 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
2569 (SEQ_GT(tp->snd_una, th->th_ack) ||
2570 SEQ_GT(th->th_ack, tp->snd_max)) ) {
2571 rstreason = BANDLIM_RST_OPENPORT;
2575 if (so->so_options & SO_DEBUG)
2576 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
2579 KASSERT(headlocked, ("%s: headlocked should be 1", __func__));
2580 INP_INFO_WUNLOCK(&tcbinfo);
2581 tp->t_flags |= TF_ACKNOW;
2582 (void) tcp_output(tp);
2583 INP_UNLOCK(tp->t_inpcb);
2588 KASSERT(headlocked, ("%s: dropwithreset: head not locked", __func__));
2590 tcp_dropwithreset(m, th, tp, tlen, rstreason);
2593 INP_UNLOCK(tp->t_inpcb);
2595 INP_INFO_WUNLOCK(&tcbinfo);
2600 * Drop space held by incoming segment and return.
2603 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
2604 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
2608 INP_UNLOCK(tp->t_inpcb);
2610 INP_INFO_WUNLOCK(&tcbinfo);
2617 * Issue RST on TCP segment. The mbuf must still include the original packet header.
2621 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
2622 int tlen, int rstreason)
2626 struct ip6_hdr *ip6;
2630 * Generate a RST, dropping incoming segment.
2631 * Make ACK acceptable to originator of segment.
2632 * Don't bother to respond if destination was broadcast/multicast.
2634 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
2637 if (mtod(m, struct ip *)->ip_v == 6) {
2638 ip6 = mtod(m, struct ip6_hdr *);
2639 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
2640 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
2642 /* IPv6 anycast check is done at tcp6_input() */
2646 ip = mtod(m, struct ip *);
2647 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
2648 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
2649 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
2650 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
2654 /* Perform bandwidth limiting. */
2655 if (badport_bandlim(rstreason) < 0)
2658 /* tcp_respond consumes the mbuf chain. */
2659 if (th->th_flags & TH_ACK) {
2660 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
2661 th->th_ack, TH_RST);
2663 if (th->th_flags & TH_SYN)
2665 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
2666 (tcp_seq)0, TH_RST|TH_ACK);
2675 * Parse TCP options and place in tcpopt.
2678 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
2683 for (; cnt > 0; cnt -= optlen, cp += optlen) {
2685 if (opt == TCPOPT_EOL)
2687 if (opt == TCPOPT_NOP)
2693 if (optlen < 2 || optlen > cnt)
2698 if (optlen != TCPOLEN_MAXSEG)
2700 if (!(flags & TO_SYN))
2702 to->to_flags |= TOF_MSS;
2703 bcopy((char *)cp + 2,
2704 (char *)&to->to_mss, sizeof(to->to_mss));
2705 to->to_mss = ntohs(to->to_mss);
2708 if (optlen != TCPOLEN_WINDOW)
2710 if (!(flags & TO_SYN))
2712 to->to_flags |= TOF_SCALE;
2713 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
2715 case TCPOPT_TIMESTAMP:
2716 if (optlen != TCPOLEN_TIMESTAMP)
2718 to->to_flags |= TOF_TS;
2719 bcopy((char *)cp + 2,
2720 (char *)&to->to_tsval, sizeof(to->to_tsval));
2721 to->to_tsval = ntohl(to->to_tsval);
2722 bcopy((char *)cp + 6,
2723 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
2724 to->to_tsecr = ntohl(to->to_tsecr);
2726 #ifdef TCP_SIGNATURE
2728 * XXX In order to reply to a host which has set the
2729 * TCP_SIGNATURE option in its initial SYN, we have to
2730 * record the fact that the option was observed here
2731 * for the syncache code to perform the correct response.
2733 case TCPOPT_SIGNATURE:
2734 if (optlen != TCPOLEN_SIGNATURE)
2736 to->to_flags |= (TOF_SIGNATURE | TOF_SIGLEN);
2739 case TCPOPT_SACK_PERMITTED:
2740 if (optlen != TCPOLEN_SACK_PERMITTED)
2742 if (!(flags & TO_SYN))
2746 to->to_flags |= TOF_SACKPERM;
2749 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
2751 to->to_flags |= TOF_SACK;
2752 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
2753 to->to_sacks = cp + 2;
2754 tcpstat.tcps_sack_rcv_blocks++;
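/*
 * Illustrative sketch (not part of the original source): the generic
 * kind/length walk that the option switch above relies on.  EOL (0)
 * terminates the list, NOP (1) is a single pad byte, and every other
 * option carries a length byte that covers the kind and length octets
 * themselves.  Function and variable names are assumptions made only
 * for this example.
 */
static void
example_walk_tcp_options(const unsigned char *cp, int cnt)
{
	int opt, optlen;

	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[0];
		if (opt == 0)			/* EOL: end of options */
			break;
		if (opt == 1) {			/* NOP: one byte of padding */
			optlen = 1;
			continue;
		}
		if (cnt < 2)
			break;
		optlen = cp[1];
		if (optlen < 2 || optlen > cnt)	/* malformed length */
			break;
		/* cp[2 .. optlen-1] holds the payload for option 'opt'. */
	}
}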
2763 * Pull out of band byte out of a segment so
2764 * it doesn't appear in the user's data queue.
2765 * It is still reflected in the segment length for
2766 * sequencing purposes.
2769 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
2772 int cnt = off + th->th_urp - 1;
2775 if (m->m_len > cnt) {
2776 char *cp = mtod(m, caddr_t) + cnt;
2777 struct tcpcb *tp = sototcpcb(so);
2780 tp->t_oobflags |= TCPOOB_HAVEDATA;
2781 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
2783 if (m->m_flags & M_PKTHDR)
2792 panic("tcp_pulloutofband");
2796 * Collect new round-trip time estimate
2797 * and update averages and current timeout.
2800 tcp_xmit_timer(struct tcpcb *tp, int rtt)
2804 INP_LOCK_ASSERT(tp->t_inpcb);
2806 tcpstat.tcps_rttupdated++;
2808 if (tp->t_srtt != 0) {
2810 * srtt is stored as fixed point with 5 bits after the
2811 * binary point (i.e., scaled by 32). The following magic
2812 * is equivalent to the smoothing algorithm in rfc793 with
2813 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
2814 * point). Adjust rtt to origin 0.
2816 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
2817 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
2819 if ((tp->t_srtt += delta) <= 0)
2823 * We accumulate a smoothed rtt variance (actually, a
2824 * smoothed mean difference), then set the retransmit
2825 * timer to smoothed rtt + 4 times the smoothed variance.
2826 * rttvar is stored as fixed point with 4 bits after the
2827 * binary point (scaled by 16). The following is
2828 * equivalent to rfc793 smoothing with an alpha of .75
2829 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
2830 * rfc793's wired-in beta.
2834 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
2835 if ((tp->t_rttvar += delta) <= 0)
2837 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
2838 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2841 * No rtt measurement yet - use the unsmoothed rtt.
2842 * Set the variance to half the rtt (so our first
2843 * retransmit happens at 3*rtt).
2845 tp->t_srtt = rtt << TCP_RTT_SHIFT;
2846 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
2847 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2853 * the retransmit should happen at rtt + 4 * rttvar.
2854 * Because of the way we do the smoothing, srtt and rttvar
2855 * will each average +1/2 tick of bias. When we compute
2856 * the retransmit timer, we want 1/2 tick of rounding and
2857 * 1 extra tick because of +-1/2 tick uncertainty in the
2858 * firing of the timer. The bias will give us exactly the
2859 * 1.5 tick we need. But, because the bias is
2860 * statistical, we have to test that we don't drop below
2861 * the minimum feasible timer (which is 2 ticks).
2863 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
2864 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
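/*
 * Illustrative sketch (not part of the original source): the same
 * estimator written without the kernel's fixed-point scaling.  The
 * smoothing gains are 1/8 for srtt and 1/4 for rttvar, and the
 * resulting timer is clamped to [rttmin, rexmtmax], mirroring
 * TCPT_RANGESET() above.  The floating-point types and all names are
 * assumptions made only for readability in this example.
 */
struct example_rtt_state {
	double	srtt;		/* smoothed round-trip time, in ticks */
	double	rttvar;		/* smoothed mean deviation, in ticks */
};

static double
example_xmit_timer(struct example_rtt_state *e, double rtt,
    double rttmin, double rexmtmax)
{
	double delta, rto;

	if (e->srtt != 0.0) {
		delta = rtt - e->srtt;
		e->srtt += delta / 8.0;			/* alpha = 7/8 */
		if (delta < 0.0)
			delta = -delta;
		e->rttvar += (delta - e->rttvar) / 4.0;	/* beta = 3/4 */
	} else {
		/* First sample: var = rtt/2, so the first RTO is ~3*rtt. */
		e->srtt = rtt;
		e->rttvar = rtt / 2.0;
	}
	rto = e->srtt + 4.0 * e->rttvar;
	if (rto < rttmin)
		rto = rttmin;
	if (rto > rexmtmax)
		rto = rexmtmax;
	return (rto);
}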
2867 * We received an ack for a packet that wasn't retransmitted;
2868 * it is probably safe to discard any error indications we've
2869 * received recently. This isn't quite right, but close enough
2870 * for now (a route might have failed after we sent a segment,
2871 * and the return path might not be symmetrical).
2873 tp->t_softerror = 0;
2877 * Determine a reasonable value for maxseg size.
2878 * If the route is known, check route for mtu.
2879 * If none, use an mss that can be handled on the outgoing
2880 * interface without forcing IP to fragment; if bigger than
2881 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
2882 * to utilize large mbufs. If no route is found, route has no mtu,
2883 * or the destination isn't local, use a default, hopefully conservative
2884 * size (usually 512 or the default IP max size, but no more than the mtu
2885 * of the interface), as we can't discover anything about intervening
2886 * gateways or networks. We also initialize the congestion/slow start
2887 * window to be a single segment if the destination isn't local.
2888 * While looking at the routing entry, we also initialize other path-dependent
2889 * parameters from pre-set or cached values in the routing entry.
2891 * Also take into account the space needed for options that we
2892 * send regularly. Make maxseg shorter by that amount to assure
2893 * that we can send maxseg amount of data even when the options
2894 * are present. Store the upper limit of the length of options plus data in maxopd.
2898 * In case of T/TCP, we call this routine during implicit connection
2899 * setup as well (offer = -1), to initialize maxseg from the cached MSS of our peer.
2902 * NOTE that this routine is only called when we process an incoming
2903 * segment. Outgoing SYN/ACK MSS settings are handled in tcp_mssopt().
2906 tcp_mss(struct tcpcb *tp, int offer)
2911 struct inpcb *inp = tp->t_inpcb;
2913 struct hc_metrics_lite metrics;
2914 int origoffer = offer;
2917 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
2918 size_t min_protoh = isipv6 ?
2919 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
2920 sizeof (struct tcpiphdr);
2922 const size_t min_protoh = sizeof(struct tcpiphdr);
2928 maxmtu = tcp_maxmtu6(&inp->inp_inc, &mtuflags);
2929 tp->t_maxopd = tp->t_maxseg = tcp_v6mssdflt;
2933 maxmtu = tcp_maxmtu(&inp->inp_inc, &mtuflags);
2934 tp->t_maxopd = tp->t_maxseg = tcp_mssdflt;
2936 so = inp->inp_socket;
2939 * no route to sender, stay with default mss and return
2944 /* what have we got? */
2948 * Offer == 0 means that there was no MSS on the SYN
2949 * segment, in this case we use tcp_mssdflt.
2953 isipv6 ? tcp_v6mssdflt :
2960 * Offer == -1 means that we didn't receive SYN yet.
2966 * Prevent DoS attack with too small MSS. Round up
2967 * to at least minmss.
2969 offer = max(offer, tcp_minmss);
2971 * Sanity check: make sure that maxopd will be large
2972 * enough to allow some data on segments even if
2973 * all the option space is used (40 bytes). Otherwise
2974 * funny things may happen in tcp_output.
2976 offer = max(offer, 64);
2980 * rmx information is now retrieved from tcp_hostcache
2982 tcp_hc_get(&inp->inp_inc, &metrics);
2985 * if there's a discovered mtu in the tcp hostcache, use it
2986 * else, use the link mtu.
2988 if (metrics.rmx_mtu)
2989 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
2993 mss = maxmtu - min_protoh;
2994 if (!path_mtu_discovery &&
2995 !in6_localaddr(&inp->in6p_faddr))
2996 mss = min(mss, tcp_v6mssdflt);
3000 mss = maxmtu - min_protoh;
3001 if (!path_mtu_discovery &&
3002 !in_localaddr(inp->inp_faddr))
3003 mss = min(mss, tcp_mssdflt);
3006 mss = min(mss, offer);
3009 * maxopd stores the maximum length of data AND options
3010 * in a segment; maxseg is the amount of data in a normal
3011 * segment. We need to store this value (maxopd) apart
3012 * from maxseg, because now every segment carries options
3013 * and thus we normally have somewhat less data in segments.
3018 * origoffer == -1 indicates that no segments were received yet.
3019 * In this case we just guess.
3021 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
3023 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
3024 mss -= TCPOLEN_TSTAMP_APPA;
3027 #if (MCLBYTES & (MCLBYTES - 1)) == 0
3029 mss &= ~(MCLBYTES-1);
3032 mss = mss / MCLBYTES * MCLBYTES;
3037 * If there's a pipesize, change the socket buffer to that size;
3038 * don't change it if sb_hiwat is different from the default
3039 * (then it has been changed on purpose with setsockopt).
3040 * Make the socket buffers an integral number of mss units;
3041 * if the mss is larger than the socket buffer, decrease the mss.
3043 SOCKBUF_LOCK(&so->so_snd);
3044 if ((so->so_snd.sb_hiwat == tcp_sendspace) && metrics.rmx_sendpipe)
3045 bufsize = metrics.rmx_sendpipe;
3047 bufsize = so->so_snd.sb_hiwat;
3051 bufsize = roundup(bufsize, mss);
3052 if (bufsize > sb_max)
3054 if (bufsize > so->so_snd.sb_hiwat)
3055 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
3057 SOCKBUF_UNLOCK(&so->so_snd);
3060 SOCKBUF_LOCK(&so->so_rcv);
3061 if ((so->so_rcv.sb_hiwat == tcp_recvspace) && metrics.rmx_recvpipe)
3062 bufsize = metrics.rmx_recvpipe;
3064 bufsize = so->so_rcv.sb_hiwat;
3065 if (bufsize > mss) {
3066 bufsize = roundup(bufsize, mss);
3067 if (bufsize > sb_max)
3069 if (bufsize > so->so_rcv.sb_hiwat)
3070 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
3072 SOCKBUF_UNLOCK(&so->so_rcv);
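/*
 * Illustrative sketch (not part of the original source): the buffer
 * sizing rule applied to both so_snd and so_rcv above.  When the
 * buffer is at least one mss, it is rounded up to a whole number of
 * mss-sized segments and capped at the global sb_max; when it is
 * smaller than the mss, the code above shrinks the mss instead.  All
 * names are assumptions made only for this example.
 */
static unsigned long
example_round_sockbuf(unsigned long bufsize, unsigned long mss,
    unsigned long sbmax)
{
	if (bufsize > mss) {
		bufsize = ((bufsize + mss - 1) / mss) * mss;	/* roundup */
		if (bufsize > sbmax)
			bufsize = sbmax;
	}
	return (bufsize);
}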
3074 * While we're here, check the others too
3076 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
3078 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
3079 tcpstat.tcps_usedrtt++;
3080 if (metrics.rmx_rttvar) {
3081 tp->t_rttvar = metrics.rmx_rttvar;
3082 tcpstat.tcps_usedrttvar++;
3084 /* default variation is +- 1 rtt */
3086 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
3088 TCPT_RANGESET(tp->t_rxtcur,
3089 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
3090 tp->t_rttmin, TCPTV_REXMTMAX);
3092 if (metrics.rmx_ssthresh) {
3094 * There's some sort of gateway or interface
3095 * buffer limit on the path. Use this to set
3096 * the slow start threshold, but set the
3097 * threshold to no less than 2*mss.
3099 tp->snd_ssthresh = max(2 * mss, metrics.rmx_ssthresh);
3100 tcpstat.tcps_usedssthresh++;
3102 if (metrics.rmx_bandwidth)
3103 tp->snd_bandwidth = metrics.rmx_bandwidth;
3106 * Set the slow-start flight size depending on whether this
3107 * is a local network or not.
3109 * Extend this so we cache the cwnd too and retrieve it here.
3110 * Make cwnd even bigger than RFC3390 suggests but only if we
3111 * have previous experience with the remote host. Be careful
3112 * not to make cwnd bigger than the remote receive window or our own
3113 * send socket buffer. Maybe put some additional upper bound
3114 * on the retrieved cwnd. Should do incremental updates to
3115 * hostcache when cwnd collapses so next connection doesn't
3116 * overload the path again.
3118 * RFC3390 says only do this if SYN or SYN/ACK didn't get lost.
3119 * We currently check only in syncache_socket for that.
3121 #define TCP_METRICS_CWND
3122 #ifdef TCP_METRICS_CWND
3123 if (metrics.rmx_cwnd)
3124 tp->snd_cwnd = max(mss,
3125 min(metrics.rmx_cwnd / 2,
3126 min(tp->snd_wnd, so->so_snd.sb_hiwat)));
3130 tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380));
3132 else if ((isipv6 && in6_localaddr(&inp->in6p_faddr)) ||
3133 (!isipv6 && in_localaddr(inp->inp_faddr)))
3135 else if (in_localaddr(inp->inp_faddr))
3137 tp->snd_cwnd = mss * ss_fltsz_local;
3139 tp->snd_cwnd = mss * ss_fltsz;
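/*
 * Illustrative sketch (not part of the original source): the RFC 3390
 * initial window computed on the branch above, namely
 * min(4 * mss, max(2 * mss, 4380 bytes)).  The function name is an
 * assumption made only for this example.
 */
static unsigned int
example_rfc3390_initial_cwnd(unsigned int mss)
{
	unsigned int hi = 4 * mss;
	unsigned int lo = (2 * mss > 4380) ? 2 * mss : 4380;

	return (hi < lo ? hi : lo);
}
/* For mss = 1460 this yields 4380 (3 segments); for mss = 536, 2144 (4 segments). */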
3141 /* Check the interface for TSO capabilities. */
3142 if (mtuflags & CSUM_TSO)
3143 tp->t_flags |= TF_TSO;
3147 * Determine the MSS option to send on an outgoing SYN.
3150 tcp_mssopt(struct in_conninfo *inc)
3157 int isipv6 = inc->inc_isipv6 ? 1 : 0;
3160 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));
3164 mss = tcp_v6mssdflt;
3165 maxmtu = tcp_maxmtu6(inc, NULL);
3166 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3167 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3172 maxmtu = tcp_maxmtu(inc, NULL);
3173 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3174 min_protoh = sizeof(struct tcpiphdr);
3176 if (maxmtu && thcmtu)
3177 mss = min(maxmtu, thcmtu) - min_protoh;
3178 else if (maxmtu || thcmtu)
3179 mss = max(maxmtu, thcmtu) - min_protoh;
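/*
 * Illustrative sketch (not part of the original source): how the two
 * MTU sources above combine into the advertised MSS.  When both the
 * path MTU and a hostcache MTU are known the smaller wins; when only
 * one is known it is used; otherwise the per-family default already
 * held in mss stands.  The function name is an assumption made only
 * for this example.
 */
static int
example_mss_from_mtus(int mss, int maxmtu, int thcmtu, int min_protoh)
{
	if (maxmtu && thcmtu)
		mss = (maxmtu < thcmtu ? maxmtu : thcmtu) - min_protoh;
	else if (maxmtu || thcmtu)
		mss = (maxmtu > thcmtu ? maxmtu : thcmtu) - min_protoh;
	return (mss);
}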
3186 * When a partial ack arrives, force the retransmission of the
3187 * next unacknowledged segment. Do not clear tp->t_dupacks.
3188 * By setting snd_nxt to th_ack, this forces the retransmission timer to be started again.
3192 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
3194 tcp_seq onxt = tp->snd_nxt;
3195 u_long ocwnd = tp->snd_cwnd;
3197 callout_stop(tp->tt_rexmt);
3199 tp->snd_nxt = th->th_ack;
3201 * Set snd_cwnd to one segment beyond acknowledged offset.
3202 * (tp->snd_una has not yet been updated when this function is called.)
3204 tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una);
3205 tp->t_flags |= TF_ACKNOW;
3206 (void) tcp_output(tp);
3207 tp->snd_cwnd = ocwnd;
3208 if (SEQ_GT(onxt, tp->snd_nxt))
3211 * Partial window deflation. Relies on the fact that tp->snd_una has not been updated yet.
3214 if (tp->snd_cwnd > th->th_ack - tp->snd_una)
3215 tp->snd_cwnd -= th->th_ack - tp->snd_una;
3218 tp->snd_cwnd += tp->t_maxseg;
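/*
 * Illustrative sketch (not part of the original source): the window
 * arithmetic of the partial-ACK handling above, with the timer and
 * socket side effects omitted.  "acked" stands for th->th_ack -
 * snd_una; the window is deflated by the amount newly acknowledged
 * and then one segment is added back to cover the forced
 * retransmission.  All names are assumptions made only for this
 * example.
 */
static unsigned long
example_partial_ack_deflate(unsigned long cwnd, unsigned long acked,
    unsigned long maxseg)
{
	if (cwnd > acked)
		cwnd -= acked;
	else
		cwnd = 0;
	return (cwnd + maxseg);
}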
3222 * Returns 1 if the TIME_WAIT state was killed and we should start over,
3223 * looking for a pcb in the listen state. Returns 0 otherwise.
3226 tcp_timewait(struct inpcb *inp, struct tcpopt *to, struct tcphdr *th,
3227 struct mbuf *m, int tlen)
3233 int isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
3235 const int isipv6 = 0;
3238 /* tcbinfo lock required for tcp_twclose(), tcp_timer_2msl_reset(). */
3239 INP_INFO_WLOCK_ASSERT(&tcbinfo);
3240 INP_LOCK_ASSERT(inp);
3243 * XXXRW: Time wait state for inpcb has been recycled, but inpcb is
3244 * still present. This is undesirable, but temporarily necessary
3245 * until we work out how to handle inpcbs whose timewait state has been recycled.
3252 thflags = th->th_flags;
3255 * NOTE: for FIN_WAIT_2 (to be added later),
3256 * must validate sequence number before accepting RST
3260 * If the segment contains RST:
3261 * Drop the segment - see Stevens, vol. 2, p. 964 and
3264 if (thflags & TH_RST)
3268 /* PAWS not needed at the moment */
3270 * RFC 1323 PAWS: If we have a timestamp reply on this segment
3271 * and it's less than ts_recent, drop it.
3273 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
3274 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
3275 if ((thflags & TH_ACK) == 0)
3280 * ts_recent is never updated because we never accept new segments.
3285 * If a new connection request is received
3286 * while in TIME_WAIT, drop the old connection
3287 * and start over if the sequence numbers
3288 * are above the previous ones.
3290 if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) {
3296 * Drop the segment if it does not contain an ACK.
3298 if ((thflags & TH_ACK) == 0)
3302 * Reset the 2MSL timer if this is a duplicate FIN.
3304 if (thflags & TH_FIN) {
3305 seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
3306 if (seq + 1 == tw->rcv_nxt)
3307 tcp_timer_2msl_reset(tw, 1);
3311 * Acknowledge the segment if it has data or is not a duplicate ACK.
3313 if (thflags != TH_ACK || tlen != 0 ||
3314 th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt)
3315 tcp_twrespond(tw, TH_ACK);
3319 * Generate a RST, dropping incoming segment.
3320 * Make ACK acceptable to originator of segment.
3321 * Don't bother to respond if destination was broadcast/multicast.
3323 if (m->m_flags & (M_BCAST|M_MCAST))
3326 struct ip6_hdr *ip6;
3328 /* IPv6 anycast check is done at tcp6_input() */
3329 ip6 = mtod(m, struct ip6_hdr *);
3330 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3331 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3336 ip = mtod(m, struct ip *);
3337 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3338 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3339 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3340 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3343 if (thflags & TH_ACK) {
3345 mtod(m, void *), th, m, 0, th->th_ack, TH_RST);
3347 seq = th->th_seq + (thflags & TH_SYN ? 1 : 0);
3349 mtod(m, void *), th, m, seq, 0, TH_RST|TH_ACK);