2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
5 * The Regents of the University of California. All rights reserved.
6 * Copyright (c) 2007-2008,2010
7 * Swinburne University of Technology, Melbourne, Australia.
8 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
9 * Copyright (c) 2010 The FreeBSD Foundation
10 * Copyright (c) 2010-2011 Juniper Networks, Inc.
11 * All rights reserved.
13 * Portions of this software were developed at the Centre for Advanced Internet
14 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
15 * James Healy and David Hayes, made possible in part by a grant from the Cisco
16 * University Research Program Fund at Community Foundation Silicon Valley.
18 * Portions of this software were developed at the Centre for Advanced
19 * Internet Architectures, Swinburne University of Technology, Melbourne,
20 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
22 * Portions of this software were developed by Robert N. M. Watson under
23 * contract to Juniper Networks, Inc.
25 * Redistribution and use in source and binary forms, with or without
26 * modification, are permitted provided that the following conditions
27 * are met:
28 * 1. Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * 2. Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in the
32 * documentation and/or other materials provided with the distribution.
33 * 3. Neither the name of the University nor the names of its contributors
34 * may be used to endorse or promote products derived from this software
35 * without specific prior written permission.
37 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
38 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
39 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
40 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
41 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
42 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
43 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
44 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
45 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
46 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
47 * SUCH DAMAGE.
49 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
55 #include "opt_inet.h"
56 #include "opt_inet6.h"
57 #include "opt_ipsec.h"
58 #include "opt_tcpdebug.h"
60 #include <sys/param.h>
61 #include <sys/kernel.h>
63 #include <sys/hhook.h>
65 #include <sys/malloc.h>
66 #include <sys/mbuf.h>
67 #include <sys/proc.h> /* for proc0 declaration */
68 #include <sys/protosw.h>
69 #include <sys/sdt.h>
70 #include <sys/signalvar.h>
71 #include <sys/socket.h>
72 #include <sys/socketvar.h>
73 #include <sys/sysctl.h>
74 #include <sys/syslog.h>
75 #include <sys/systm.h>
77 #include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */
79 #include <vm/uma.h>
81 #include <net/if.h>
82 #include <net/if_var.h>
83 #include <net/route.h>
84 #include <net/vnet.h>
86 #define TCPSTATES /* for logging */
88 #include <netinet/in.h>
89 #include <netinet/in_kdtrace.h>
90 #include <netinet/in_pcb.h>
91 #include <netinet/in_systm.h>
92 #include <netinet/ip.h>
93 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
94 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
95 #include <netinet/ip_var.h>
96 #include <netinet/ip_options.h>
97 #include <netinet/ip6.h>
98 #include <netinet/icmp6.h>
99 #include <netinet6/in6_pcb.h>
100 #include <netinet6/in6_var.h>
101 #include <netinet6/ip6_var.h>
102 #include <netinet6/nd6.h>
104 #include <netinet/tcp_fastopen.h>
106 #include <netinet/tcp.h>
107 #include <netinet/tcp_fsm.h>
108 #include <netinet/tcp_seq.h>
109 #include <netinet/tcp_timer.h>
110 #include <netinet/tcp_var.h>
111 #include <netinet6/tcp6_var.h>
112 #include <netinet/tcpip.h>
113 #include <netinet/cc/cc.h>
114 #ifdef TCPPCAP
115 #include <netinet/tcp_pcap.h>
116 #endif
117 #include <netinet/tcp_syncache.h>
118 #ifdef TCPDEBUG
119 #include <netinet/tcp_debug.h>
120 #endif /* TCPDEBUG */
121 #ifdef TCP_OFFLOAD
122 #include <netinet/tcp_offload.h>
123 #endif
125 #include <netipsec/ipsec_support.h>
127 #include <machine/in_cksum.h>
129 #include <security/mac/mac_framework.h>
131 const int tcprexmtthresh = 3;
133 int tcp_log_in_vain = 0;
134 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
135 &tcp_log_in_vain, 0,
136 "Log all incoming TCP segments to closed ports");
138 VNET_DEFINE(int, blackhole) = 0;
139 #define V_blackhole VNET(blackhole)
140 SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW,
141 &VNET_NAME(blackhole), 0,
142 "Do not send RST on segments to closed ports");
144 VNET_DEFINE(int, tcp_delack_enabled) = 1;
145 SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_VNET | CTLFLAG_RW,
146 &VNET_NAME(tcp_delack_enabled), 0,
147 "Delay ACK to try and piggyback it onto a data packet");
149 VNET_DEFINE(int, drop_synfin) = 0;
150 SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_VNET | CTLFLAG_RW,
151 &VNET_NAME(drop_synfin), 0,
152 "Drop TCP packets with SYN+FIN set");
154 VNET_DEFINE(int, tcp_do_rfc6675_pipe) = 0;
155 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc6675_pipe, CTLFLAG_VNET | CTLFLAG_RW,
156 &VNET_NAME(tcp_do_rfc6675_pipe), 0,
157 "Use calculated pipe/in-flight bytes per RFC 6675");
159 VNET_DEFINE(int, tcp_do_rfc3042) = 1;
160 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_VNET | CTLFLAG_RW,
161 &VNET_NAME(tcp_do_rfc3042), 0,
162 "Enable RFC 3042 (Limited Transmit)");
164 VNET_DEFINE(int, tcp_do_rfc3390) = 1;
165 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_VNET | CTLFLAG_RW,
166 &VNET_NAME(tcp_do_rfc3390), 0,
167 "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");
169 VNET_DEFINE(int, tcp_initcwnd_segments) = 10;
170 SYSCTL_INT(_net_inet_tcp, OID_AUTO, initcwnd_segments,
171 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_initcwnd_segments), 0,
172 "Slow-start flight size (initial congestion window) in number of segments");
174 VNET_DEFINE(int, tcp_do_rfc3465) = 1;
175 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_VNET | CTLFLAG_RW,
176 &VNET_NAME(tcp_do_rfc3465), 0,
177 "Enable RFC 3465 (Appropriate Byte Counting)");
179 VNET_DEFINE(int, tcp_abc_l_var) = 2;
180 SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_VNET | CTLFLAG_RW,
181 &VNET_NAME(tcp_abc_l_var), 2,
182 "Cap the max cwnd increment during slow-start to this number of segments");
184 static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");
186 VNET_DEFINE(int, tcp_do_ecn) = 2;
187 SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
188 &VNET_NAME(tcp_do_ecn), 0,
189 "TCP ECN support");
191 VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
192 SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_VNET | CTLFLAG_RW,
193 &VNET_NAME(tcp_ecn_maxretries), 0,
194 "Max retries before giving up on ECN");
196 VNET_DEFINE(int, tcp_insecure_syn) = 0;
197 SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_syn, CTLFLAG_VNET | CTLFLAG_RW,
198 &VNET_NAME(tcp_insecure_syn), 0,
199 "Follow RFC793 instead of RFC5961 criteria for accepting SYN packets");
201 VNET_DEFINE(int, tcp_insecure_rst) = 0;
202 SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_VNET | CTLFLAG_RW,
203 &VNET_NAME(tcp_insecure_rst), 0,
204 "Follow RFC793 instead of RFC5961 criteria for accepting RST packets");
206 VNET_DEFINE(int, tcp_recvspace) = 1024*64;
207 #define V_tcp_recvspace VNET(tcp_recvspace)
208 SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_VNET | CTLFLAG_RW,
209 &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");
211 VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
212 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
213 &VNET_NAME(tcp_do_autorcvbuf), 0,
214 "Enable automatic receive buffer sizing");
216 VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024;
217 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_VNET | CTLFLAG_RW,
218 &VNET_NAME(tcp_autorcvbuf_inc), 0,
219 "Incrementor step size of automatic receive buffer");
221 VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
222 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
223 &VNET_NAME(tcp_autorcvbuf_max), 0,
224 "Max size of automatic receive buffer");
226 VNET_DEFINE(struct inpcbhead, tcb);
227 #define tcb6 tcb /* for KAME src sync over BSD*'s */
228 VNET_DEFINE(struct inpcbinfo, tcbinfo);
231 * TCP statistics are stored in an array of counter(9)s, whose size matches
232 * the size of struct tcpstat. TCP running connection count is a regular array.
234 VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
235 SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
236 tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
237 VNET_DEFINE(counter_u64_t, tcps_states[TCP_NSTATES]);
238 SYSCTL_COUNTER_U64_ARRAY(_net_inet_tcp, TCPCTL_STATES, states, CTLFLAG_RD |
239 CTLFLAG_VNET, &VNET_NAME(tcps_states)[0], TCP_NSTATES,
240 "TCP connection counts by TCP state");
243 tcp_vnet_init(const void *unused)
246 COUNTER_ARRAY_ALLOC(V_tcps_states, TCP_NSTATES, M_WAITOK);
247 VNET_PCPUSTAT_ALLOC(tcpstat, M_WAITOK);
249 VNET_SYSINIT(tcp_vnet_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
250 tcp_vnet_init, NULL);
254 tcp_vnet_uninit(const void *unused)
257 COUNTER_ARRAY_FREE(V_tcps_states, TCP_NSTATES);
258 VNET_PCPUSTAT_FREE(tcpstat);
260 VNET_SYSUNINIT(tcp_vnet_uninit, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
261 tcp_vnet_uninit, NULL);
265 * Kernel module interface for updating tcpstat. The argument is an index
266 * into tcpstat treated as an array.
269 kmod_tcpstat_inc(int statnum)
272 counter_u64_add(VNET(tcpstat)[statnum], 1);
277 * Wrapper for the TCP established input helper hook.
280 hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
282 struct tcp_hhook_data hhook_data;
284 if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
289 hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
296 * CC wrapper hook functions
299 cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t nsegs,
302 INP_WLOCK_ASSERT(tp->t_inpcb);
304 tp->ccv->nsegs = nsegs;
305 tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
306 if (tp->snd_cwnd <= tp->snd_wnd)
307 tp->ccv->flags |= CCF_CWND_LIMITED;
309 tp->ccv->flags &= ~CCF_CWND_LIMITED;
311 if (type == CC_ACK) {
312 if (tp->snd_cwnd > tp->snd_ssthresh) {
313 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
314 nsegs * V_tcp_abc_l_var * tcp_maxseg(tp));
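/*
 * For illustration: the min() above caps how much of this ACK is
 * credited towards t_bytes_acked. E.g. with abc_l_var = 2 and a
 * 1460-byte MSS, an ACK reported as covering 10 LRO-aggregated
 * segments (nsegs = 10) credits at most 10 * 2 * 1460 = 29200 bytes.
 */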
315 if (tp->t_bytes_acked >= tp->snd_cwnd) {
316 tp->t_bytes_acked -= tp->snd_cwnd;
317 tp->ccv->flags |= CCF_ABC_SENTAWND;
320 tp->ccv->flags &= ~CCF_ABC_SENTAWND;
321 tp->t_bytes_acked = 0;
325 if (CC_ALGO(tp)->ack_received != NULL) {
326 /* XXXLAS: Find a way to live without this */
327 tp->ccv->curack = th->th_ack;
328 CC_ALGO(tp)->ack_received(tp->ccv, type);
333 cc_conn_init(struct tcpcb *tp)
335 struct hc_metrics_lite metrics;
336 struct inpcb *inp = tp->t_inpcb;
340 INP_WLOCK_ASSERT(tp->t_inpcb);
342 tcp_hc_get(&inp->inp_inc, &metrics);
343 maxseg = tcp_maxseg(tp);
345 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
346 tp->t_srtt = rtt;
347 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
348 TCPSTAT_INC(tcps_usedrtt);
349 if (metrics.rmx_rttvar) {
350 tp->t_rttvar = metrics.rmx_rttvar;
351 TCPSTAT_INC(tcps_usedrttvar);
353 /* default variation is +- 1 rtt */
354 tp->t_rttvar =
355 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
357 TCPT_RANGESET(tp->t_rxtcur,
358 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
359 tp->t_rttmin, TCPTV_REXMTMAX);
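/*
 * TCPT_RANGESET() clamps the seeded RTO into [t_rttmin, TCPTV_REXMTMAX],
 * so a stale host cache entry cannot produce an unreasonably small or
 * large initial retransmit timeout.
 */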
361 if (metrics.rmx_ssthresh) {
363 * There's some sort of gateway or interface
364 * buffer limit on the path. Use this to set
365 * the slow start threshold, but set the
366 * threshold to no less than 2*mss.
368 tp->snd_ssthresh = max(2 * maxseg, metrics.rmx_ssthresh);
369 TCPSTAT_INC(tcps_usedssthresh);
373 * Set the initial slow-start flight size.
375 * RFC5681 Section 3.1 specifies the default conservative values.
376 * RFC3390 specifies slightly more aggressive values.
377 * RFC6928 increases it to ten segments.
378 * Support for user specified value for initial flight size.
380 * If a SYN or SYN/ACK was lost and retransmitted, we have to
381 * reduce the initial CWND to one segment as congestion is likely
382 * requiring us to be cautious.
384 if (tp->snd_cwnd == 1)
385 tp->snd_cwnd = maxseg; /* SYN(-ACK) lost */
386 else if (V_tcp_initcwnd_segments)
387 tp->snd_cwnd = min(V_tcp_initcwnd_segments * maxseg,
388 max(2 * maxseg, V_tcp_initcwnd_segments * 1460));
389 else if (V_tcp_do_rfc3390)
390 tp->snd_cwnd = min(4 * maxseg, max(2 * maxseg, 4380));
392 /* Per RFC5681 Section 3.1 */
394 tp->snd_cwnd = 2 * maxseg;
395 else if (maxseg > 1095)
396 tp->snd_cwnd = 3 * maxseg;
398 tp->snd_cwnd = 4 * maxseg;
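/*
 * For illustration, the initcwnd_segments case above: with maxseg = 1460
 * and the default net.inet.tcp.initcwnd_segments = 10, snd_cwnd becomes
 * min(10 * 1460, max(2 * 1460, 10 * 1460)) = 14600 bytes.
 */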
401 if (CC_ALGO(tp)->conn_init != NULL)
402 CC_ALGO(tp)->conn_init(tp->ccv);
406 cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
410 INP_WLOCK_ASSERT(tp->t_inpcb);
414 if (!IN_FASTRECOVERY(tp->t_flags)) {
415 tp->snd_recover = tp->snd_max;
416 if (tp->t_flags & TF_ECN_PERMIT)
417 tp->t_flags |= TF_ECN_SND_CWR;
421 if (!IN_CONGRECOVERY(tp->t_flags)) {
422 TCPSTAT_INC(tcps_ecn_rcwnd);
423 tp->snd_recover = tp->snd_max;
424 if (tp->t_flags & TF_ECN_PERMIT)
425 tp->t_flags |= TF_ECN_SND_CWR;
429 maxseg = tcp_maxseg(tp);
431 tp->t_bytes_acked = 0;
432 EXIT_RECOVERY(tp->t_flags);
433 tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
434 maxseg) * maxseg;
435 tp->snd_cwnd = maxseg;
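/*
 * Illustration of the RTO case above: with snd_wnd = 100000, snd_cwnd =
 * 80000 and maxseg = 1000, ssthresh becomes max(2, 80000 / 2 / 1000) *
 * 1000 = 40000 bytes and cwnd restarts from a single segment.
 */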
438 TCPSTAT_INC(tcps_sndrexmitbad);
439 /* RTO was unnecessary, so reset everything. */
440 tp->snd_cwnd = tp->snd_cwnd_prev;
441 tp->snd_ssthresh = tp->snd_ssthresh_prev;
442 tp->snd_recover = tp->snd_recover_prev;
443 if (tp->t_flags & TF_WASFRECOVERY)
444 ENTER_FASTRECOVERY(tp->t_flags);
445 if (tp->t_flags & TF_WASCRECOVERY)
446 ENTER_CONGRECOVERY(tp->t_flags);
447 tp->snd_nxt = tp->snd_max;
448 tp->t_flags &= ~TF_PREVVALID;
453 if (CC_ALGO(tp)->cong_signal != NULL) {
455 tp->ccv->curack = th->th_ack;
456 CC_ALGO(tp)->cong_signal(tp->ccv, type);
461 cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
463 INP_WLOCK_ASSERT(tp->t_inpcb);
465 /* XXXLAS: KASSERT that we're in recovery? */
467 if (CC_ALGO(tp)->post_recovery != NULL) {
468 tp->ccv->curack = th->th_ack;
469 CC_ALGO(tp)->post_recovery(tp->ccv);
471 /* XXXLAS: EXIT_RECOVERY ? */
472 tp->t_bytes_acked = 0;
476 * Indicate whether this ack should be delayed. We can delay the ack if
477 * following conditions are met:
478 * - There is no delayed ack timer in progress.
479 * - Our last ack wasn't a 0-sized window. We never want to delay
480 * the ack that opens up a 0-sized window.
481 * - LRO wasn't used for this segment. We make sure by checking that the
482 * segment size is not larger than the MSS.
484 #define DELAY_ACK(tp, tlen) \
485 ((!tcp_timer_active(tp, TT_DELACK) && \
486 (tp->t_flags & TF_RXWIN0SENT) == 0) && \
487 (tlen <= tp->t_maxseg) && \
488 (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
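/*
 * Example: a single in-order segment of at most t_maxseg bytes, arriving
 * with no delayed-ACK timer pending and a non-zero last advertised
 * window, qualifies for a delayed ACK (assuming delayed ACKs are
 * enabled, the default); a larger segment, e.g. an LRO aggregate, fails
 * the tlen check and is ACKed immediately.
 */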
491 cc_ecnpkt_handler(struct tcpcb *tp, struct tcphdr *th, uint8_t iptos)
493 INP_WLOCK_ASSERT(tp->t_inpcb);
495 if (CC_ALGO(tp)->ecnpkt_handler != NULL) {
496 switch (iptos & IPTOS_ECN_MASK) {
498 tp->ccv->flags |= CCF_IPHDR_CE;
501 tp->ccv->flags &= ~CCF_IPHDR_CE;
504 tp->ccv->flags &= ~CCF_IPHDR_CE;
508 if (th->th_flags & TH_CWR)
509 tp->ccv->flags |= CCF_TCPHDR_CWR;
511 tp->ccv->flags &= ~CCF_TCPHDR_CWR;
513 if (tp->t_flags & TF_DELACK)
514 tp->ccv->flags |= CCF_DELACK;
516 tp->ccv->flags &= ~CCF_DELACK;
518 CC_ALGO(tp)->ecnpkt_handler(tp->ccv);
520 if (tp->ccv->flags & CCF_ACKNOW)
521 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
526 * TCP input handling is split into multiple parts:
527 * tcp6_input is a thin wrapper around tcp_input for the extended
528 * ip6_protox[] call format in ip6_input
529 * tcp_input handles primary segment validation, inpcb lookup and
530 * SYN processing on listen sockets
531 * tcp_do_segment processes the ACK and text of the segment for
532 * establishing, established and closing connections
536 tcp6_input(struct mbuf **mp, int *offp, int proto)
538 struct mbuf *m = *mp;
539 struct in6_ifaddr *ia6;
542 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);
545 * draft-itojun-ipv6-tcp-to-anycast
546 * better place to put this in?
548 ip6 = mtod(m, struct ip6_hdr *);
549 ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
550 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
553 ifa_free(&ia6->ia_ifa);
554 ip6 = mtod(m, struct ip6_hdr *);
555 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
556 (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
557 return (IPPROTO_DONE);
560 ifa_free(&ia6->ia_ifa);
562 return (tcp_input(mp, offp, proto));
567 tcp_input(struct mbuf **mp, int *offp, int proto)
569 struct mbuf *m = *mp;
570 struct tcphdr *th = NULL;
571 struct ip *ip = NULL;
572 struct inpcb *inp = NULL;
573 struct tcpcb *tp = NULL;
574 struct socket *so = NULL;
584 int rstreason = 0; /* For badport_bandlim accounting purposes */
586 struct m_tag *fwd_tag = NULL;
588 struct ip6_hdr *ip6 = NULL;
591 const void *ip6 = NULL;
593 struct tcpopt to; /* options in this segment */
594 char *s = NULL; /* address and port logging */
598 * The size of tcp_saveipgen must be the size of the max ip header,
599 * now IPv6.
601 u_char tcp_saveipgen[IP6_HDR_LEN];
602 struct tcphdr tcp_savetcp;
607 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
614 TCPSTAT_INC(tcps_rcvtotal);
618 /* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */
620 if (m->m_len < (sizeof(*ip6) + sizeof(*th))) {
621 m = m_pullup(m, sizeof(*ip6) + sizeof(*th));
623 TCPSTAT_INC(tcps_rcvshort);
624 return (IPPROTO_DONE);
628 ip6 = mtod(m, struct ip6_hdr *);
629 th = (struct tcphdr *)((caddr_t)ip6 + off0);
630 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
631 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
632 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
633 th->th_sum = m->m_pkthdr.csum_data;
635 th->th_sum = in6_cksum_pseudo(ip6, tlen,
636 IPPROTO_TCP, m->m_pkthdr.csum_data);
637 th->th_sum ^= 0xffff;
639 th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
641 TCPSTAT_INC(tcps_rcvbadsum);
646 * Be proactive about unspecified IPv6 address in source.
647 * As we use all-zero to indicate unbounded/unconnected pcb,
648 * unspecified IPv6 address can be used to confuse us.
650 * Note that packets with unspecified IPv6 destination are
651 * already dropped in ip6_input.
653 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
657 iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
660 #if defined(INET) && defined(INET6)
666 * Get IP and TCP header together in first mbuf.
667 * Note: IP leaves IP header in first mbuf.
669 if (off0 > sizeof (struct ip)) {
671 off0 = sizeof(struct ip);
673 if (m->m_len < sizeof (struct tcpiphdr)) {
674 if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
676 TCPSTAT_INC(tcps_rcvshort);
677 return (IPPROTO_DONE);
680 ip = mtod(m, struct ip *);
681 th = (struct tcphdr *)((caddr_t)ip + off0);
682 tlen = ntohs(ip->ip_len) - off0;
685 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
686 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
687 th->th_sum = m->m_pkthdr.csum_data;
689 th->th_sum = in_pseudo(ip->ip_src.s_addr,
691 htonl(m->m_pkthdr.csum_data + tlen +
693 th->th_sum ^= 0xffff;
695 struct ipovly *ipov = (struct ipovly *)ip;
698 * Checksum extended TCP header and data.
701 bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
702 ipov->ih_len = htons(tlen);
703 th->th_sum = in_cksum(m, len);
704 /* Reset length for SDT probes. */
705 ip->ip_len = htons(len);
708 /* Re-initialization for later version check */
709 ip->ip_v = IPVERSION;
710 ip->ip_hl = off0 >> 2;
714 TCPSTAT_INC(tcps_rcvbadsum);
721 * Check that TCP offset makes sense,
722 * pull out TCP options and adjust length. XXX
724 off = th->th_off << 2;
725 if (off < sizeof (struct tcphdr) || off > tlen) {
726 TCPSTAT_INC(tcps_rcvbadoff);
729 tlen -= off; /* tlen is used instead of ti->ti_len */
730 if (off > sizeof (struct tcphdr)) {
733 IP6_EXTHDR_CHECK(m, off0, off, IPPROTO_DONE);
734 ip6 = mtod(m, struct ip6_hdr *);
735 th = (struct tcphdr *)((caddr_t)ip6 + off0);
738 #if defined(INET) && defined(INET6)
743 if (m->m_len < sizeof(struct ip) + off) {
744 if ((m = m_pullup(m, sizeof (struct ip) + off))
746 TCPSTAT_INC(tcps_rcvshort);
747 return (IPPROTO_DONE);
749 ip = mtod(m, struct ip *);
750 th = (struct tcphdr *)((caddr_t)ip + off0);
754 optlen = off - sizeof (struct tcphdr);
755 optp = (u_char *)(th + 1);
757 thflags = th->th_flags;
760 * Convert TCP protocol specific fields to host format.
762 tcp_fields_to_host(th);
765 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
767 drop_hdrlen = off0 + off;
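/*
 * For example, a plain 20-byte IPv4 header (off0 = 20) plus a TCP header
 * carrying a 12-byte timestamp option (off = 32) gives drop_hdrlen = 52;
 * that many bytes are later trimmed with m_adj() before the payload is
 * appended to the socket buffer.
 */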
770 * Locate pcb for segment; if we're likely to add or remove a
771 * connection then first acquire pcbinfo lock. There are three cases
772 * where we might discover later we need a write lock despite the
773 * flags: ACKs moving a connection out of the syncache, ACKs for a
774 * connection in TIMEWAIT and SYNs not targeting a listening socket.
776 if ((thflags & (TH_FIN | TH_RST)) != 0) {
777 INP_INFO_RLOCK(&V_tcbinfo);
778 ti_locked = TI_RLOCKED;
780 ti_locked = TI_UNLOCKED;
783 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
787 (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
789 || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
792 #if defined(INET) && !defined(INET6)
793 (m->m_flags & M_IP_NEXTHOP)
796 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
800 if (ti_locked == TI_RLOCKED) {
801 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
803 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
807 if (isipv6 && fwd_tag != NULL) {
808 struct sockaddr_in6 *next_hop6;
810 next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
812 * Transparently forwarded. Pretend to be the destination.
813 * Already got one like this?
815 inp = in6_pcblookup_mbuf(&V_tcbinfo,
816 &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
817 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m);
820 * It's new. Try to find the ambushing socket.
821 * Because we've rewritten the destination address,
822 * any hardware-generated hash is ignored.
824 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
825 th->th_sport, &next_hop6->sin6_addr,
826 next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
827 th->th_dport, INPLOOKUP_WILDCARD |
828 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
831 inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
832 th->th_sport, &ip6->ip6_dst, th->th_dport,
833 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
834 m->m_pkthdr.rcvif, m);
837 #if defined(INET6) && defined(INET)
841 if (fwd_tag != NULL) {
842 struct sockaddr_in *next_hop;
844 next_hop = (struct sockaddr_in *)(fwd_tag+1);
846 * Transparently forwarded. Pretend to be the destination.
847 * already got one like this?
849 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
850 ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB,
851 m->m_pkthdr.rcvif, m);
854 * It's new. Try to find the ambushing socket.
855 * Because we've rewritten the destination address,
856 * any hardware-generated hash is ignored.
858 inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
859 th->th_sport, next_hop->sin_addr,
860 next_hop->sin_port ? ntohs(next_hop->sin_port) :
861 th->th_dport, INPLOOKUP_WILDCARD |
862 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
865 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
866 th->th_sport, ip->ip_dst, th->th_dport,
867 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
868 m->m_pkthdr.rcvif, m);
872 * If the INPCB does not exist then all data in the incoming
873 * segment is discarded and an appropriate RST is sent back.
874 * XXX MRT Send RST using which routing table?
878 * Log communication attempts to ports that are not
881 if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
882 tcp_log_in_vain == 2) {
883 if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
884 log(LOG_INFO, "%s; %s: Connection attempt "
885 "to closed port\n", s, __func__);
888 * When blackholing do not respond with a RST but
889 * completely ignore the segment and drop it.
891 if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
895 rstreason = BANDLIM_RST_CLOSEDPORT;
898 INP_WLOCK_ASSERT(inp);
900 * While waiting for inp lock during the lookup, another thread
901 * can have dropped the inpcb, in which case we need to loop back
902 * and try to find a new inpcb to deliver to.
904 if (inp->inp_flags & INP_DROPPED) {
909 if ((inp->inp_flowtype == M_HASHTYPE_NONE) &&
910 (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) &&
911 ((inp->inp_socket == NULL) ||
912 (inp->inp_socket->so_options & SO_ACCEPTCONN) == 0)) {
913 inp->inp_flowid = m->m_pkthdr.flowid;
914 inp->inp_flowtype = M_HASHTYPE_GET(m);
916 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
918 if (isipv6 && IPSEC_ENABLED(ipv6) &&
919 IPSEC_CHECK_POLICY(ipv6, m, inp) != 0) {
927 if (IPSEC_ENABLED(ipv4) &&
928 IPSEC_CHECK_POLICY(ipv4, m, inp) != 0) {
935 * Check the minimum TTL for socket.
937 if (inp->inp_ip_minttl != 0) {
940 if (inp->inp_ip_minttl > ip6->ip6_hlim)
944 if (inp->inp_ip_minttl > ip->ip_ttl)
949 * A previous connection in TIMEWAIT state is supposed to catch stray
950 * or duplicate segments arriving late. If this segment was a
951 * legitimate new connection attempt, the old INPCB gets removed and
952 * we can try again to find a listening socket.
954 * At this point, due to earlier optimism, we may hold only an inpcb
955 * lock, and not the inpcbinfo write lock. If so, we need to try to
956 * acquire it, or if that fails, acquire a reference on the inpcb,
957 * drop all locks, acquire a global write lock, and then re-acquire
958 * the inpcb lock. We may at that point discover that another thread
959 * has tried to free the inpcb, in which case we need to loop back
960 * and try to find a new inpcb to deliver to.
962 * XXXRW: It may be time to rethink timewait locking.
965 if (inp->inp_flags & INP_TIMEWAIT) {
966 if (ti_locked == TI_UNLOCKED) {
967 if (INP_INFO_TRY_RLOCK(&V_tcbinfo) == 0) {
970 INP_INFO_RLOCK(&V_tcbinfo);
971 ti_locked = TI_RLOCKED;
973 if (in_pcbrele_wlocked(inp)) {
976 } else if (inp->inp_flags & INP_DROPPED) {
982 ti_locked = TI_RLOCKED;
984 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
986 if (thflags & TH_SYN)
987 tcp_dooptions(&to, optp, optlen, TO_SYN);
989 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
991 if (tcp_twcheck(inp, &to, th, m, tlen))
993 INP_INFO_RUNLOCK(&V_tcbinfo);
994 return (IPPROTO_DONE);
997 * The TCPCB may no longer exist if the connection is winding
998 * down or it is in the CLOSED state. Either way we drop the
999 * segment and send an appropriate response.
1001 tp = intotcpcb(inp);
1002 if (tp == NULL || tp->t_state == TCPS_CLOSED) {
1003 rstreason = BANDLIM_RST_CLOSEDPORT;
1008 if (tp->t_flags & TF_TOE) {
1009 tcp_offload_input(tp, m);
1010 m = NULL; /* consumed by the TOE driver */
1016 * We've identified a valid inpcb, but it could be that we need an
1017 * inpcbinfo write lock but don't hold it. In this case, attempt to
1018 * acquire using the same strategy as the TIMEWAIT case above. If we
1019 * relock, we have to jump back to 'relocked' as the connection might
1020 * now be in TIMEWAIT.
1023 if ((thflags & (TH_FIN | TH_RST)) != 0)
1024 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1026 if (!((tp->t_state == TCPS_ESTABLISHED && (thflags & TH_SYN) == 0) ||
1027 (tp->t_state == TCPS_LISTEN && (thflags & TH_SYN) &&
1028 !IS_FASTOPEN(tp->t_flags)))) {
1029 if (ti_locked == TI_UNLOCKED) {
1030 if (INP_INFO_TRY_RLOCK(&V_tcbinfo) == 0) {
1033 INP_INFO_RLOCK(&V_tcbinfo);
1034 ti_locked = TI_RLOCKED;
1036 if (in_pcbrele_wlocked(inp)) {
1039 } else if (inp->inp_flags & INP_DROPPED) {
1046 ti_locked = TI_RLOCKED;
1048 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1052 INP_WLOCK_ASSERT(inp);
1053 if (mac_inpcb_check_deliver(inp, m))
1056 so = inp->inp_socket;
1057 KASSERT(so != NULL, ("%s: so == NULL", __func__));
1059 if (so->so_options & SO_DEBUG) {
1060 ostate = tp->t_state;
1063 bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
1066 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
1069 #endif /* TCPDEBUG */
1071 * When the socket is accepting connections (the INPCB is in LISTEN
1072 * state) we look into the SYN cache if this is a new connection
1073 * attempt or the completion of a previous one.
1075 KASSERT(tp->t_state == TCPS_LISTEN || !(so->so_options & SO_ACCEPTCONN),
1076 ("%s: so accepting but tp %p not listening", __func__, tp));
1077 if (tp->t_state == TCPS_LISTEN && (so->so_options & SO_ACCEPTCONN)) {
1078 struct in_conninfo inc;
1080 bzero(&inc, sizeof(inc));
1083 inc.inc_flags |= INC_ISIPV6;
1084 inc.inc6_faddr = ip6->ip6_src;
1085 inc.inc6_laddr = ip6->ip6_dst;
1089 inc.inc_faddr = ip->ip_src;
1090 inc.inc_laddr = ip->ip_dst;
1092 inc.inc_fport = th->th_sport;
1093 inc.inc_lport = th->th_dport;
1094 inc.inc_fibnum = so->so_fibnum;
1097 * Check for an existing connection attempt in syncache if
1098 * the flag is only ACK. A successful lookup creates a new
1099 * socket appended to the listen queue in SYN_RECEIVED state.
1101 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
1103 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1105 * Parse the TCP options here because
1106 * syncookies need access to the reflected
1109 tcp_dooptions(&to, optp, optlen, 0);
1111 * NB: syncache_expand() doesn't unlock
1112 * inp and tcpinfo locks.
1114 rstreason = syncache_expand(&inc, &to, th, &so, m);
1115 if (rstreason < 0) {
1117 * A failing TCP MD5 signature comparison
1118 * must result in the segment being dropped
1119 * and must not produce any response back
1123 } else if (rstreason == 0) {
1125 * No syncache entry or ACK was not
1126 * for our SYN/ACK. Send a RST.
1127 * NB: syncache did its own logging
1128 * of the failure cause.
1130 rstreason = BANDLIM_RST_OPENPORT;
1138 * We completed the 3-way handshake
1139 * but could not allocate a socket
1140 * either due to memory shortage,
1141 * listen queue length limits or
1142 * global socket limits. Send RST
1143 * or wait and have the remote end
1144 * retransmit the ACK for another
1147 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1148 log(LOG_DEBUG, "%s; %s: Listen socket: "
1149 "Socket allocation failed due to "
1150 "limits or memory shortage, %s\n",
1152 V_tcp_sc_rst_sock_fail ?
1153 "sending RST" : "try again");
1154 if (V_tcp_sc_rst_sock_fail) {
1155 rstreason = BANDLIM_UNLIMITED;
1161 * Socket is created in state SYN_RECEIVED.
1162 * Unlock the listen socket, lock the newly
1163 * created socket and update the tp variable.
1165 INP_WUNLOCK(inp); /* listen socket */
1166 inp = sotoinpcb(so);
1168 * New connection inpcb is already locked by
1169 * syncache_expand().
1171 INP_WLOCK_ASSERT(inp);
1172 tp = intotcpcb(inp);
1173 KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
1174 ("%s: ", __func__));
1176 * Process the segment and the data it
1177 * contains. tcp_do_segment() consumes
1178 * the mbuf chain and unlocks the inpcb.
1180 tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
1182 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1183 return (IPPROTO_DONE);
1186 * Segment flag validation for new connection attempts:
1188 * Our (SYN|ACK) response was rejected.
1189 * Check with syncache and remove entry to prevent
1192 * NB: syncache_chkrst does its own logging of failure
1195 if (thflags & TH_RST) {
1196 syncache_chkrst(&inc, th);
1200 * We can't do anything without SYN.
1202 if ((thflags & TH_SYN) == 0) {
1203 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1204 log(LOG_DEBUG, "%s; %s: Listen socket: "
1205 "SYN is missing, segment ignored\n",
1207 TCPSTAT_INC(tcps_badsyn);
1211 * (SYN|ACK) is bogus on a listen socket.
1213 if (thflags & TH_ACK) {
1214 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1215 log(LOG_DEBUG, "%s; %s: Listen socket: "
1216 "SYN|ACK invalid, segment rejected\n",
1218 syncache_badack(&inc); /* XXX: Not needed! */
1219 TCPSTAT_INC(tcps_badsyn);
1220 rstreason = BANDLIM_RST_OPENPORT;
1224 * If the drop_synfin option is enabled, drop all
1225 * segments with both the SYN and FIN bits set.
1226 * This prevents e.g. nmap from identifying the
1227 * TCP/IP stack.
1228 * XXX: Poor reasoning. nmap has other methods
1229 * and is constantly refining its stack detection
1230 * strategies.
1231 * XXX: This is a violation of the TCP specification
1232 * and was used by RFC1644.
1234 if ((thflags & TH_FIN) && V_drop_synfin) {
1235 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1236 log(LOG_DEBUG, "%s; %s: Listen socket: "
1237 "SYN|FIN segment ignored (based on "
1238 "sysctl setting)\n", s, __func__);
1239 TCPSTAT_INC(tcps_badsyn);
1243 * Segment's flags are (SYN) or (SYN|FIN).
1245 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
1246 * as they do not affect the state of the TCP FSM.
1247 * The data pointed to by TH_URG and th_urp is ignored.
1249 KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
1250 ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
1251 KASSERT(thflags & (TH_SYN),
1252 ("%s: Listen socket: TH_SYN not set", __func__));
1255 * If deprecated address is forbidden,
1256 * we do not accept SYN to deprecated interface
1257 * address to prevent any new inbound connection from
1258 * getting established.
1259 * When we do not accept SYN, we send a TCP RST,
1260 * with deprecated source address (instead of dropping
1261 * it). We compromise it as it is much better for peer
1262 * to send a RST, and RST will be the final packet
1263 * for the exchange.
1265 * If we do not forbid deprecated addresses, we accept
1266 * the SYN packet. RFC2462 does not suggest dropping
1267 * SYN in this case.
1268 * If we decipher RFC2462 5.5.4, it says like this:
1269 * 1. use of deprecated addr with existing
1270 * communication is okay - "SHOULD continue to be
1272 * 2. use of it with new communication:
1273 * (2a) "SHOULD NOT be used if alternate address
1274 * with sufficient scope is available"
1275 * (2b) nothing mentioned otherwise.
1276 * Here we fall into (2b) case as we have no choice in
1277 * our source address selection - we must obey the peer.
1279 * The wording in RFC2462 is confusing, and there are
1280 * multiple description text for deprecated address
1281 * handling - worse, they are not exactly the same.
1282 * I believe 5.5.4 is the best one, so we follow 5.5.4.
1284 if (isipv6 && !V_ip6_use_deprecated) {
1285 struct in6_ifaddr *ia6;
1287 ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
1289 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
1290 ifa_free(&ia6->ia_ifa);
1291 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1292 log(LOG_DEBUG, "%s; %s: Listen socket: "
1293 "Connection attempt to deprecated "
1294 "IPv6 address rejected\n",
1296 rstreason = BANDLIM_RST_OPENPORT;
1300 ifa_free(&ia6->ia_ifa);
1304 * Basic sanity checks on incoming SYN requests:
1305 * Don't respond if the destination is a link layer
1306 * broadcast according to RFC1122 4.2.3.10, p. 104.
1307 * If it is from this socket it must be forged.
1308 * Don't respond if the source or destination is a
1309 * global or subnet broad- or multicast address.
1310 * Note that it is quite possible to receive unicast
1311 * link-layer packets with a broadcast IP address. Use
1312 * in_broadcast() to find them.
1314 if (m->m_flags & (M_BCAST|M_MCAST)) {
1315 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1316 log(LOG_DEBUG, "%s; %s: Listen socket: "
1317 "Connection attempt from broad- or multicast "
1318 "link layer address ignored\n", s, __func__);
1323 if (th->th_dport == th->th_sport &&
1324 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
1325 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1326 log(LOG_DEBUG, "%s; %s: Listen socket: "
1327 "Connection attempt to/from self "
1328 "ignored\n", s, __func__);
1331 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
1332 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
1333 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1334 log(LOG_DEBUG, "%s; %s: Listen socket: "
1335 "Connection attempt from/to multicast "
1336 "address ignored\n", s, __func__);
1341 #if defined(INET) && defined(INET6)
1346 if (th->th_dport == th->th_sport &&
1347 ip->ip_dst.s_addr == ip->ip_src.s_addr) {
1348 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1349 log(LOG_DEBUG, "%s; %s: Listen socket: "
1350 "Connection attempt from/to self "
1351 "ignored\n", s, __func__);
1354 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
1355 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
1356 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
1357 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
1358 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1359 log(LOG_DEBUG, "%s; %s: Listen socket: "
1360 "Connection attempt from/to broad- "
1361 "or multicast address ignored\n",
1368 * SYN appears to be valid. Create compressed TCP state
1372 if (so->so_options & SO_DEBUG)
1373 tcp_trace(TA_INPUT, ostate, tp,
1374 (void *)tcp_saveipgen, &tcp_savetcp, 0);
1376 TCP_PROBE3(debug__input, tp, th, m);
1377 tcp_dooptions(&to, optp, optlen, TO_SYN);
1379 if (syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL))
1380 goto tfo_socket_result;
1382 syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL);
1385 * Entry added to syncache and mbuf consumed.
1386 * Only the listen socket is unlocked by syncache_add().
1388 if (ti_locked == TI_RLOCKED) {
1389 INP_INFO_RUNLOCK(&V_tcbinfo);
1390 ti_locked = TI_UNLOCKED;
1392 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1393 return (IPPROTO_DONE);
1394 } else if (tp->t_state == TCPS_LISTEN) {
1396 * When a listen socket is torn down the SO_ACCEPTCONN
1397 * flag is removed first while connections are drained
1398 * from the accept queue in an unlock/lock cycle of the
1399 * ACCEPT_LOCK, opening a race condition allowing a SYN
1400 * attempt to go through unhandled.
1404 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1405 if (tp->t_flags & TF_SIGNATURE) {
1406 tcp_dooptions(&to, optp, optlen, thflags);
1407 if ((to.to_flags & TOF_SIGNATURE) == 0) {
1408 TCPSTAT_INC(tcps_sig_err_nosigopt);
1411 if (!TCPMD5_ENABLED() ||
1412 TCPMD5_INPUT(m, th, to.to_signature) != 0)
1416 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1419 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
1420 * state. tcp_do_segment() always consumes the mbuf chain, unlocks
1421 * the inpcb, and unlocks pcbinfo.
1423 tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked);
1424 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1425 return (IPPROTO_DONE);
1428 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1430 if (ti_locked == TI_RLOCKED) {
1431 INP_INFO_RUNLOCK(&V_tcbinfo);
1432 ti_locked = TI_UNLOCKED;
1436 KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropwithreset "
1437 "ti_locked: %d", __func__, ti_locked));
1438 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1443 tcp_dropwithreset(m, th, tp, tlen, rstreason);
1446 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
1447 m = NULL; /* mbuf chain got consumed. */
1452 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1454 if (ti_locked == TI_RLOCKED) {
1455 INP_INFO_RUNLOCK(&V_tcbinfo);
1456 ti_locked = TI_UNLOCKED;
1460 KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropunlock "
1461 "ti_locked: %d", __func__, ti_locked));
1462 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1470 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1475 return (IPPROTO_DONE);
1479 * Automatic sizing of receive socket buffer. Often the send
1480 * buffer size is not optimally adjusted to the actual network
1481 * conditions at hand (delay bandwidth product). Setting the
1482 * buffer size too small limits throughput on links with high
1483 * bandwidth and high delay (eg. trans-continental/oceanic links).
1485 * On the receive side the socket buffer memory is only rarely
1486 * used to any significant extent. This allows us to be much
1487 * more aggressive in scaling the receive socket buffer. For
1488 * the case that the buffer space is actually used to a large
1489 * extent and we run out of kernel memory we can simply drop
1490 * the new segments; TCP on the sender will just retransmit it
1491 * later. Setting the buffer size too big may only consume too
1492 * much kernel memory if the application doesn't read() from
1493 * the socket or packet loss or reordering makes use of the
1496 * The criteria to step up the receive buffer one notch are:
1497 * 1. Application has not set receive buffer size with
1498 * SO_RCVBUF. Setting SO_RCVBUF clears SB_AUTOSIZE.
1499 * 2. the number of bytes received during the time it takes
1500 * one timestamp to be reflected back to us (the RTT);
1501 * 3. received bytes per RTT is within seven eighths of the
1502 * current socket buffer size;
1503 * 4. receive buffer size has not hit maximal automatic size;
1505 * This algorithm does one step per RTT at most and only if
1506 * we receive a bulk stream w/o packet losses or reorderings.
1507 * Shrinking the buffer during idle times is not necessary as
1508 * it doesn't consume any memory when idle.
1510 * TODO: Only step up if the application is actually serving
1511 * the buffer to better manage the socket buffer resources.
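/*
 * Illustrative example with the default sysctl values above: at
 * sb_hiwat = 64k the buffer is bumped by recvbuf_inc (16k) to 80k only
 * if more than 7/8 * 65536 = 57344 bytes arrived within the last
 * measured RTT.
 */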
1514 tcp_autorcvbuf(struct mbuf *m, struct tcphdr *th, struct socket *so,
1515 struct tcpcb *tp, int tlen)
1519 if (V_tcp_do_autorcvbuf && (so->so_rcv.sb_flags & SB_AUTOSIZE) &&
1520 tp->t_srtt != 0 && tp->rfbuf_ts != 0 &&
1521 TCP_TS_TO_TICKS(tcp_ts_getticks() - tp->rfbuf_ts) >
1522 (tp->t_srtt >> TCP_RTT_SHIFT)) {
1523 if (tp->rfbuf_cnt > (so->so_rcv.sb_hiwat / 8 * 7) &&
1524 so->so_rcv.sb_hiwat < V_tcp_autorcvbuf_max) {
1525 newsize = min(so->so_rcv.sb_hiwat +
1526 V_tcp_autorcvbuf_inc, V_tcp_autorcvbuf_max);
1528 TCP_PROBE6(receive__autoresize, NULL, tp, m, tp, th, newsize);
1530 /* Start over with next RTT. */
1534 tp->rfbuf_cnt += tlen; /* add up */
1541 tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
1542 struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos,
1545 int thflags, acked, ourfinisacked, needoutput = 0, sack_changed;
1546 int rstreason, todrop, win;
1550 struct in_conninfo *inc;
1559 * The size of tcp_saveipgen must be the size of the max ip header,
1560 * now IPv6.
1562 u_char tcp_saveipgen[IP6_HDR_LEN];
1563 struct tcphdr tcp_savetcp;
1566 thflags = th->th_flags;
1567 inc = &tp->t_inpcb->inp_inc;
1568 tp->sackhint.last_sack_ack = 0;
1570 nsegs = max(1, m->m_pkthdr.lro_nsegs);
1573 * If this is either a state-changing packet or current state isn't
1574 * established, we require a write lock on tcbinfo. Otherwise, we
1575 * allow the tcbinfo to be in either locked or unlocked, as the
1576 * caller may have unnecessarily acquired a write lock due to a race.
1578 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
1579 tp->t_state != TCPS_ESTABLISHED) {
1580 KASSERT(ti_locked == TI_RLOCKED, ("%s ti_locked %d for "
1581 "SYN/FIN/RST/!EST", __func__, ti_locked));
1582 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1585 if (ti_locked == TI_RLOCKED)
1586 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1588 KASSERT(ti_locked == TI_UNLOCKED, ("%s: EST "
1589 "ti_locked: %d", __func__, ti_locked));
1590 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1594 INP_WLOCK_ASSERT(tp->t_inpcb);
1595 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
1597 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
1601 /* Save segment, if requested. */
1602 tcp_pcap_add(th, m, &(tp->t_inpkts));
1605 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
1606 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1607 log(LOG_DEBUG, "%s; %s: "
1608 "SYN|FIN segment ignored (based on "
1609 "sysctl setting)\n", s, __func__);
1616 * If a segment with the ACK-bit set arrives in the SYN-SENT state
1617 * check SEQ.ACK first.
1619 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
1620 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
1621 rstreason = BANDLIM_UNLIMITED;
1626 * Segment received on connection.
1627 * Reset idle time and keep-alive timer.
1628 * XXX: This should be done after segment
1629 * validation to ignore broken/spoofed segs.
1631 tp->t_rcvtime = ticks;
1634 * Scale up the window into a 32-bit value.
1635 * For the SYN_SENT state the scale is zero.
1637 tiwin = th->th_win << tp->snd_scale;
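/*
 * Example: an advertised window of 4000 with snd_scale = 7 gives an
 * effective window of 4000 << 7 = 512000 bytes.
 */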
1640 * TCP ECN processing.
1642 if (tp->t_flags & TF_ECN_PERMIT) {
1643 if (thflags & TH_CWR)
1644 tp->t_flags &= ~TF_ECN_SND_ECE;
1645 switch (iptos & IPTOS_ECN_MASK) {
1647 tp->t_flags |= TF_ECN_SND_ECE;
1648 TCPSTAT_INC(tcps_ecn_ce);
1650 case IPTOS_ECN_ECT0:
1651 TCPSTAT_INC(tcps_ecn_ect0);
1653 case IPTOS_ECN_ECT1:
1654 TCPSTAT_INC(tcps_ecn_ect1);
1658 /* Process a packet differently from RFC3168. */
1659 cc_ecnpkt_handler(tp, th, iptos);
1661 /* Congestion experienced. */
1662 if (thflags & TH_ECE) {
1663 cc_cong_signal(tp, th, CC_ECN);
1668 * Parse options on any incoming segment.
1670 tcp_dooptions(&to, (u_char *)(th + 1),
1671 (th->th_off << 2) - sizeof(struct tcphdr),
1672 (thflags & TH_SYN) ? TO_SYN : 0);
1674 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1675 if ((tp->t_flags & TF_SIGNATURE) != 0 &&
1676 (to.to_flags & TOF_SIGNATURE) == 0) {
1677 TCPSTAT_INC(tcps_sig_err_sigopt);
1678 /* XXX: should drop? */
1682 * If echoed timestamp is later than the current time,
1683 * fall back to non RFC1323 RTT calculation. Normalize
1684 * timestamp if syncookies were used when this connection
1685 * was established.
1687 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
1688 to.to_tsecr -= tp->ts_offset;
1689 if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks()))
1690 to.to_tsecr = 0;
1693 * Process options only when we get SYN/ACK back. The SYN case
1694 * for incoming connections is handled in tcp_syncache.
1695 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
1696 * or <SYN,ACK>) segment itself is never scaled.
1697 * XXX this is traditional behavior, may need to be cleaned up.
1699 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
1700 if ((to.to_flags & TOF_SCALE) &&
1701 (tp->t_flags & TF_REQ_SCALE)) {
1702 tp->t_flags |= TF_RCVD_SCALE;
1703 tp->snd_scale = to.to_wscale;
1706 * Initial send window. It will be updated with
1707 * the next incoming segment to the scaled value.
1709 tp->snd_wnd = th->th_win;
1710 if (to.to_flags & TOF_TS) {
1711 tp->t_flags |= TF_RCVD_TSTMP;
1712 tp->ts_recent = to.to_tsval;
1713 tp->ts_recent_age = tcp_ts_getticks();
1715 if (to.to_flags & TOF_MSS)
1716 tcp_mss(tp, to.to_mss);
1717 if ((tp->t_flags & TF_SACK_PERMIT) &&
1718 (to.to_flags & TOF_SACKPERM) == 0)
1719 tp->t_flags &= ~TF_SACK_PERMIT;
1723 * If timestamps were negotiated during SYN/ACK they should
1724 * appear on every segment during this session and vice versa.
1726 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) {
1727 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1728 log(LOG_DEBUG, "%s; %s: Timestamp missing, "
1729 "no action\n", s, __func__);
1733 if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) {
1734 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1735 log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
1736 "no action\n", s, __func__);
1742 * Header prediction: check for the two common cases
1743 * of a uni-directional data xfer. If the packet has
1744 * no control flags, is in-sequence, the window didn't
1745 * change and we're not retransmitting, it's a
1746 * candidate. If the length is zero and the ack moved
1747 * forward, we're the sender side of the xfer. Just
1748 * free the data acked & wake any higher level process
1749 * that was blocked waiting for space. If the length
1750 * is non-zero and the ack didn't move, we're the
1751 * receiver side. If we're getting packets in-order
1752 * (the reassembly queue is empty), add the data to
1753 * the socket buffer and note that we need a delayed ack.
1754 * Make sure that the hidden state-flags are also off.
1755 * Since we check for TCPS_ESTABLISHED first, it can only
1756 * be TH_NEEDSYN.
1758 if (tp->t_state == TCPS_ESTABLISHED &&
1759 th->th_seq == tp->rcv_nxt &&
1760 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
1761 tp->snd_nxt == tp->snd_max &&
1762 tiwin && tiwin == tp->snd_wnd &&
1763 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
1764 LIST_EMPTY(&tp->t_segq) &&
1765 ((to.to_flags & TOF_TS) == 0 ||
1766 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {
1769 * If last ACK falls within this segment's sequence numbers,
1770 * record the timestamp.
1771 * NOTE that the test is modified according to the latest
1772 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1774 if ((to.to_flags & TOF_TS) != 0 &&
1775 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
1776 tp->ts_recent_age = tcp_ts_getticks();
1777 tp->ts_recent = to.to_tsval;
1781 if (SEQ_GT(th->th_ack, tp->snd_una) &&
1782 SEQ_LEQ(th->th_ack, tp->snd_max) &&
1783 !IN_RECOVERY(tp->t_flags) &&
1784 (to.to_flags & TOF_SACK) == 0 &&
1785 TAILQ_EMPTY(&tp->snd_holes)) {
1787 * This is a pure ack for outstanding data.
1789 if (ti_locked == TI_RLOCKED)
1790 INP_INFO_RUNLOCK(&V_tcbinfo);
1791 ti_locked = TI_UNLOCKED;
1793 TCPSTAT_INC(tcps_predack);
1796 * "bad retransmit" recovery.
1798 if (tp->t_rxtshift == 1 &&
1799 tp->t_flags & TF_PREVVALID &&
1800 (int)(ticks - tp->t_badrxtwin) < 0) {
1801 cc_cong_signal(tp, th, CC_RTO_ERR);
1805 * Recalculate the transmit timer / rtt.
1807 * Some boxes send broken timestamp replies
1808 * during the SYN+ACK phase, ignore
1809 * timestamps of 0 or we could calculate a
1810 * huge RTT and blow up the retransmit timer.
1812 if ((to.to_flags & TOF_TS) != 0 &&
1813 to.to_tsecr) {
1814 uint32_t t;
1816 t = tcp_ts_getticks() - to.to_tsecr;
1817 if (!tp->t_rttlow || tp->t_rttlow > t)
1818 tp->t_rttlow = t;
1819 tcp_xmit_timer(tp,
1820 TCP_TS_TO_TICKS(t) + 1);
1821 } else if (tp->t_rtttime &&
1822 SEQ_GT(th->th_ack, tp->t_rtseq)) {
1823 if (!tp->t_rttlow ||
1824 tp->t_rttlow > ticks - tp->t_rtttime)
1825 tp->t_rttlow = ticks - tp->t_rtttime;
1826 tcp_xmit_timer(tp,
1827 ticks - tp->t_rtttime);
1829 acked = BYTES_THIS_ACK(tp, th);
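/* BYTES_THIS_ACK() is th->th_ack - snd_una: the new bytes covered by this ACK. */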
1832 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
1833 hhook_run_tcp_est_in(tp, th, &to);
1836 TCPSTAT_ADD(tcps_rcvackpack, nsegs);
1837 TCPSTAT_ADD(tcps_rcvackbyte, acked);
1838 sbdrop(&so->so_snd, acked);
1839 if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
1840 SEQ_LEQ(th->th_ack, tp->snd_recover))
1841 tp->snd_recover = th->th_ack - 1;
1844 * Let the congestion control algorithm update
1845 * congestion control related information. This
1846 * typically means increasing the congestion
1849 cc_ack_received(tp, th, nsegs, CC_ACK);
1851 tp->snd_una = th->th_ack;
1853 * Pull snd_wl2 up to prevent seq wrap relative
1856 tp->snd_wl2 = th->th_ack;
1861 * If all outstanding data are acked, stop
1862 * retransmit timer, otherwise restart timer
1863 * using current (possibly backed-off) value.
1864 * If process is waiting for space,
1865 * wakeup/selwakeup/signal. If data
1866 * are ready to send, let tcp_output
1867 * decide between more output or persist.
1870 if (so->so_options & SO_DEBUG)
1871 tcp_trace(TA_INPUT, ostate, tp,
1872 (void *)tcp_saveipgen,
1875 TCP_PROBE3(debug__input, tp, th, m);
1876 if (tp->snd_una == tp->snd_max)
1877 tcp_timer_activate(tp, TT_REXMT, 0);
1878 else if (!tcp_timer_active(tp, TT_PERSIST))
1879 tcp_timer_activate(tp, TT_REXMT,
1882 if (sbavail(&so->so_snd))
1883 (void) tp->t_fb->tfb_tcp_output(tp);
1886 } else if (th->th_ack == tp->snd_una &&
1887 tlen <= sbspace(&so->so_rcv)) {
1888 int newsize = 0; /* automatic sockbuf scaling */
1891 * This is a pure, in-sequence data packet with
1892 * nothing on the reassembly queue and we have enough
1893 * buffer space to take it.
1895 if (ti_locked == TI_RLOCKED)
1896 INP_INFO_RUNLOCK(&V_tcbinfo);
1897 ti_locked = TI_UNLOCKED;
1899 /* Clean receiver SACK report if present */
1900 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
1901 tcp_clean_sackreport(tp);
1902 TCPSTAT_INC(tcps_preddat);
1903 tp->rcv_nxt += tlen;
1905 * Pull snd_wl1 up to prevent seq wrap relative to
1908 tp->snd_wl1 = th->th_seq;
1910 * Pull rcv_up up to prevent seq wrap relative to
1913 tp->rcv_up = tp->rcv_nxt;
1914 TCPSTAT_ADD(tcps_rcvpack, nsegs);
1915 TCPSTAT_ADD(tcps_rcvbyte, tlen);
1917 if (so->so_options & SO_DEBUG)
1918 tcp_trace(TA_INPUT, ostate, tp,
1919 (void *)tcp_saveipgen, &tcp_savetcp, 0);
1921 TCP_PROBE3(debug__input, tp, th, m);
1923 newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
1925 /* Add data to socket buffer. */
1926 SOCKBUF_LOCK(&so->so_rcv);
1927 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1931 * Set new socket buffer size.
1932 * Give up when limit is reached.
1934 if (newsize)
1935 if (!sbreserve_locked(&so->so_rcv,
1936 newsize, so, NULL))
1937 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
1938 m_adj(m, drop_hdrlen); /* delayed header drop */
1939 sbappendstream_locked(&so->so_rcv, m, 0);
1941 /* NB: sorwakeup_locked() does an implicit unlock. */
1942 sorwakeup_locked(so);
1943 if (DELAY_ACK(tp, tlen)) {
1944 tp->t_flags |= TF_DELACK;
1946 tp->t_flags |= TF_ACKNOW;
1947 tp->t_fb->tfb_tcp_output(tp);
1954 * Calculate amount of space in receive window,
1955 * and then do TCP input processing.
1956 * Receive window is amount of space in rcv queue,
1957 * but not less than advertised window.
1959 win = sbspace(&so->so_rcv);
1960 if (win < 0)
1961 win = 0;
1962 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
1964 switch (tp->t_state) {
1967 * If the state is SYN_RECEIVED:
1968 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
1970 case TCPS_SYN_RECEIVED:
1971 if ((thflags & TH_ACK) &&
1972 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
1973 SEQ_GT(th->th_ack, tp->snd_max))) {
1974 rstreason = BANDLIM_RST_OPENPORT;
1978 if (IS_FASTOPEN(tp->t_flags)) {
1980 * When a TFO connection is in SYN_RECEIVED, the
1981 * only valid packets are the initial SYN, a
1982 * retransmit/copy of the initial SYN (possibly with
1983 * a subset of the original data), a valid ACK, a
1984 * FIN, or a RST.
1986 if ((thflags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) {
1987 rstreason = BANDLIM_RST_OPENPORT;
1989 } else if (thflags & TH_SYN) {
1990 /* non-initial SYN is ignored */
1991 if ((tcp_timer_active(tp, TT_DELACK) ||
1992 tcp_timer_active(tp, TT_REXMT)))
1994 } else if (!(thflags & (TH_ACK|TH_FIN|TH_RST))) {
2002 * If the state is SYN_SENT:
2003 * if seg contains a RST with valid ACK (SEQ.ACK has already
2004 * been verified), then drop the connection.
2005 * if seg contains a RST without an ACK, drop the seg.
2006 * if seg does not contain SYN, then drop the seg.
2007 * Otherwise this is an acceptable SYN segment
2008 * initialize tp->rcv_nxt and tp->irs
2009 * if seg contains ack then advance tp->snd_una
2010 * if seg contains an ECE and ECN support is enabled, the stream
2011 * is ECN capable.
2012 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
2013 * arrange for segment to be acked (eventually)
2014 * continue processing rest of data/controls, beginning with URG
2017 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) {
2018 TCP_PROBE5(connect__refused, NULL, tp,
2020 tp = tcp_drop(tp, ECONNREFUSED);
2022 if (thflags & TH_RST)
2024 if (!(thflags & TH_SYN))
2027 tp->irs = th->th_seq;
2029 if (thflags & TH_ACK) {
2030 TCPSTAT_INC(tcps_connects);
2033 mac_socketpeer_set_from_mbuf(m, so);
2035 /* Do window scaling on this connection? */
2036 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2037 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2038 tp->rcv_scale = tp->request_r_scale;
2040 tp->rcv_adv += min(tp->rcv_wnd,
2041 TCP_MAXWIN << tp->rcv_scale);
2042 tp->snd_una++; /* SYN is acked */
2044 * If there's data, delay ACK; if there's also a FIN
2045 * ACKNOW will be turned on later.
2047 if (DELAY_ACK(tp, tlen) && tlen != 0)
2048 tcp_timer_activate(tp, TT_DELACK,
2051 tp->t_flags |= TF_ACKNOW;
2053 if ((thflags & TH_ECE) && V_tcp_do_ecn) {
2054 tp->t_flags |= TF_ECN_PERMIT;
2055 TCPSTAT_INC(tcps_ecn_shs);
2059 * Received <SYN,ACK> in SYN_SENT[*] state.
2061 * SYN_SENT --> ESTABLISHED
2062 * SYN_SENT* --> FIN_WAIT_1
2064 tp->t_starttime = ticks;
2065 if (tp->t_flags & TF_NEEDFIN) {
2066 tcp_state_change(tp, TCPS_FIN_WAIT_1);
2067 tp->t_flags &= ~TF_NEEDFIN;
2070 tcp_state_change(tp, TCPS_ESTABLISHED);
2071 TCP_PROBE5(connect__established, NULL, tp,
2074 tcp_timer_activate(tp, TT_KEEP,
2079 * Received initial SYN in SYN-SENT[*] state =>
2080 * simultaneous open.
2081 * If it succeeds, connection is half-synchronized.
2082 * Otherwise, do 3-way handshake:
2083 * SYN-SENT -> SYN-RECEIVED
2084 * SYN-SENT* -> SYN-RECEIVED*
2086 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
2087 tcp_timer_activate(tp, TT_REXMT, 0);
2088 tcp_state_change(tp, TCPS_SYN_RECEIVED);
2091 KASSERT(ti_locked == TI_RLOCKED, ("%s: trimthenstep6: "
2092 "ti_locked %d", __func__, ti_locked));
2093 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2094 INP_WLOCK_ASSERT(tp->t_inpcb);
2097 * Advance th->th_seq to correspond to first data byte.
2098 * If data, trim to stay within window,
2099 * dropping FIN if necessary.
2102 if (tlen > tp->rcv_wnd) {
2103 todrop = tlen - tp->rcv_wnd;
2107 TCPSTAT_INC(tcps_rcvpackafterwin);
2108 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2110 tp->snd_wl1 = th->th_seq - 1;
2111 tp->rcv_up = th->th_seq;
2113 * Client side of transaction: already sent SYN and data.
2114 * If the remote host used T/TCP to validate the SYN,
2115 * our data will be ACK'd; if so, enter normal data segment
2116 * processing in the middle of step 5, ack processing.
2117 * Otherwise, goto step 6.
2119 if (thflags & TH_ACK)
2125 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
2126 * do normal processing.
2128 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later.
2132 break; /* continue normal processing */
2136 * States other than LISTEN or SYN_SENT.
2137 * First check the RST flag and sequence number since reset segments
2138 * are exempt from the timestamp and connection count tests. This
2139 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
2140 * below which allowed reset segments in half the sequence space
2141 * to fall through and be processed (which gives forged reset
2142 * segments with a random sequence number a 50 percent chance of
2143 * killing a connection).
2144 * Then check timestamp, if present.
2145 * Then check the connection count, if present.
2146 * Then check that at least some bytes of segment are within
2147 * receive window. If segment begins before rcv_nxt,
2148 * drop leading data (and SYN); if nothing left, just ack.
2150 if (thflags & TH_RST) {
2152 * RFC5961 Section 3.2
2154 * - RST drops connection only if SEG.SEQ == RCV.NXT.
2155 * - If RST is in window, we send challenge ACK.
2157 * Note: to take into account delayed ACKs, we should
2158 * test against last_ack_sent instead of rcv_nxt.
2159 * Note 2: we handle special case of closed window, not
2160 * covered by the RFC.
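/*
 * A condensed sketch of the decision below, for orientation only
 * (the code remains authoritative):
 *
 *	if SEG.SEQ is outside [last_ack_sent, last_ack_sent + RCV.WND)
 *	    and is not the zero-window special case: drop silently;
 *	else if SEG.SEQ == last_ack_sent or insecure resets are allowed:
 *	    drop the connection;
 *	else: send a challenge ACK and keep the connection.
 */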
2162 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
2163 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
2164 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) {
2166 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2167 KASSERT(ti_locked == TI_RLOCKED,
2168 ("%s: TH_RST ti_locked %d, th %p tp %p",
2169 __func__, ti_locked, th, tp));
2170 KASSERT(tp->t_state != TCPS_SYN_SENT,
2171 ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p",
2174 if (V_tcp_insecure_rst ||
2175 tp->last_ack_sent == th->th_seq) {
2176 TCPSTAT_INC(tcps_drops);
2177 /* Drop the connection. */
2178 switch (tp->t_state) {
2179 case TCPS_SYN_RECEIVED:
2180 so->so_error = ECONNREFUSED;
2182 case TCPS_ESTABLISHED:
2183 case TCPS_FIN_WAIT_1:
2184 case TCPS_FIN_WAIT_2:
2185 case TCPS_CLOSE_WAIT:
2188 so->so_error = ECONNRESET;
2195 TCPSTAT_INC(tcps_badrst);
2196 /* Send challenge ACK. */
2197 tcp_respond(tp, mtod(m, void *), th, m,
2198 tp->rcv_nxt, tp->snd_nxt, TH_ACK);
2199 tp->last_ack_sent = tp->rcv_nxt;
2207 * RFC5961 Section 4.2
2208 * Send challenge ACK for any SYN in synchronized state.
2210 if ((thflags & TH_SYN) && tp->t_state != TCPS_SYN_SENT &&
2211 tp->t_state != TCPS_SYN_RECEIVED) {
2212 KASSERT(ti_locked == TI_RLOCKED,
2213 ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked));
2214 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2216 TCPSTAT_INC(tcps_badsyn);
2217 if (V_tcp_insecure_syn &&
2218 SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
2219 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
2220 tp = tcp_drop(tp, ECONNRESET);
2221 rstreason = BANDLIM_UNLIMITED;
2223 /* Send challenge ACK. */
2224 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt,
2225 tp->snd_nxt, TH_ACK);
2226 tp->last_ack_sent = tp->rcv_nxt;
2233 * RFC 1323 PAWS: If we have a timestamp reply on this segment
2234 * and it's less than ts_recent, drop it.
2236 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
2237 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
2239 /* Check to see if ts_recent is over 24 days old. */
2240 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) {
2242 * Invalidate ts_recent. If this segment updates
2243 * ts_recent, the age will be reset later and ts_recent
2244 * will get a valid value. If it does not, setting
2245 * ts_recent to zero will at least satisfy the
2246 * requirement that zero be placed in the timestamp
2247 * echo reply when ts_recent isn't valid. The
2248 * age isn't reset until we get a valid ts_recent
2249 * because we don't want out-of-order segments to be
2250 * dropped when ts_recent is old.
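/*
 * The 24-day figure above follows from simple arithmetic, assuming the
 * millisecond-granularity timestamp clock used by tcp_ts_getticks():
 * PAWS comparisons are only meaningful within half of the 32-bit
 * timestamp space, and 2^31 ms is roughly 24.8 days, so anything older
 * than ~24 days can no longer be compared reliably.
 */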
2254 TCPSTAT_INC(tcps_rcvduppack);
2255 TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
2256 TCPSTAT_INC(tcps_pawsdrop);
2264 * In the SYN-RECEIVED state, validate that the packet belongs to
2265 * this connection before trimming the data to fit the receive
2266 * window. Check the sequence number versus IRS since we know
2267 * the sequence numbers haven't wrapped. This is a partial fix
2268 * for the "LAND" DoS attack.
2270 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
2271 rstreason = BANDLIM_RST_OPENPORT;
2275 todrop = tp->rcv_nxt - th->th_seq;
2277 if (thflags & TH_SYN) {
2287 * Following if statement from Stevens, vol. 2, p. 960.
2290 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
2292 * Any valid FIN must be to the left of the window.
2293 * At this point the FIN must be a duplicate or out
2294 * of sequence; drop it.
2299 * Send an ACK to resynchronize and drop any data.
2300 * But keep on processing for RST or ACK.
2302 tp->t_flags |= TF_ACKNOW;
2304 TCPSTAT_INC(tcps_rcvduppack);
2305 TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
2307 TCPSTAT_INC(tcps_rcvpartduppack);
2308 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
2310 drop_hdrlen += todrop; /* drop from the top afterwards */
2311 th->th_seq += todrop;
2313 if (th->th_urp > todrop)
2314 th->th_urp -= todrop;
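/*
 * Worked example of the trimming above, with hypothetical numbers:
 * rcv_nxt = 1000 and a segment covering [900, 1100) gives todrop = 100,
 * so drop_hdrlen grows by 100 (m_adj() discards those bytes later),
 * th_seq advances to 1000, and an urgent offset of, say, 150 shrinks
 * to 50 so it keeps pointing at the same octet.
 */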
2322 * If new data are received on a connection after the
2323 * user processes are gone, then RST the other end.
2325 if ((so->so_state & SS_NOFDREF) &&
2326 tp->t_state > TCPS_CLOSE_WAIT && tlen) {
2327 KASSERT(ti_locked == TI_RLOCKED, ("%s: SS_NOFDREF && "
2328 "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked));
2329 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2331 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
2332 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data "
2333 "after socket was closed, "
2334 "sending RST and removing tcpcb\n",
2335 s, __func__, tcpstates[tp->t_state], tlen);
2339 TCPSTAT_INC(tcps_rcvafterclose);
2340 rstreason = BANDLIM_UNLIMITED;
2345 * If segment ends after window, drop trailing data
2346 * (and PUSH and FIN); if nothing left, just ACK.
2348 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
2350 TCPSTAT_INC(tcps_rcvpackafterwin);
2351 if (todrop >= tlen) {
2352 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
2354 * If the window is closed, we can only take segments at the
2355 * window edge, and have to drop data and PUSH from
2356 * incoming segments. Continue processing, but
2357 * remember to ack. Otherwise, drop the segment and ack.
2360 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
2361 tp->t_flags |= TF_ACKNOW;
2362 TCPSTAT_INC(tcps_rcvwinprobe);
2366 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2369 thflags &= ~(TH_PUSH|TH_FIN);
2373 * If last ACK falls within this segment's sequence numbers,
2374 * record its timestamp.
2376 * 1) That the test incorporates suggestions from the latest
2377 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
2378 * 2) That updating only on newer timestamps interferes with
2379 * our earlier PAWS tests, so this check should be solely
2380 * predicated on the sequence space of this segment.
2381 * 3) That we modify the segment boundary check to be
2382 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
2383 * instead of RFC1323's
2384 * Last.ACK.Sent < SEG.SEQ + SEG.Len,
2385 * This modified check allows us to overcome RFC1323's
2386 * limitations as described in Stevens TCP/IP Illustrated
2387 * Vol. 2 p.869. In such cases, we can still calculate the
2388 * RTT correctly when RCV.NXT == Last.ACK.Sent.
2390 if ((to.to_flags & TOF_TS) != 0 &&
2391 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
2392 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
2393 ((thflags & (TH_SYN|TH_FIN)) != 0))) {
2394 tp->ts_recent_age = tcp_ts_getticks();
2395 tp->ts_recent = to.to_tsval;
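/*
 * Concretely, as an illustration of note 3 above: a pure ACK with
 * SEG.LEN == 0 and SEG.SEQ == Last.ACK.Sent passes the <= form of the
 * test, so its timestamp is recorded and the echoed value can still be
 * used for RTT measurement when RCV.NXT == Last.ACK.Sent, whereas
 * RFC1323's strict < would have skipped it.
 */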
2399 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
2400 * flag is on (half-synchronized state), then queue data for
2401 * later processing; else drop segment and return.
2403 if ((thflags & TH_ACK) == 0) {
2404 if (tp->t_state == TCPS_SYN_RECEIVED ||
2405 (tp->t_flags & TF_NEEDSYN)) {
2407 if (tp->t_state == TCPS_SYN_RECEIVED &&
2408 IS_FASTOPEN(tp->t_flags)) {
2409 tp->snd_wnd = tiwin;
2414 } else if (tp->t_flags & TF_ACKNOW)
2423 switch (tp->t_state) {
2426 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
2427 * ESTABLISHED state and continue processing.
2428 * The ACK was checked above.
2430 case TCPS_SYN_RECEIVED:
2432 TCPSTAT_INC(tcps_connects);
2434 /* Do window scaling? */
2435 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2436 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2437 tp->rcv_scale = tp->request_r_scale;
2438 tp->snd_wnd = tiwin;
2442 * SYN-RECEIVED -> ESTABLISHED
2443 * SYN-RECEIVED* -> FIN-WAIT-1
2445 tp->t_starttime = ticks;
2446 if (tp->t_flags & TF_NEEDFIN) {
2447 tcp_state_change(tp, TCPS_FIN_WAIT_1);
2448 tp->t_flags &= ~TF_NEEDFIN;
2450 tcp_state_change(tp, TCPS_ESTABLISHED);
2451 TCP_PROBE5(accept__established, NULL, tp,
2454 if (tp->t_tfo_pending) {
2455 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
2456 tp->t_tfo_pending = NULL;
2459 * Account for the ACK of our SYN prior to
2460 * regular ACK processing below.
2465 * TFO connections call cc_conn_init() during SYN
2466 * processing. Calling it again here for such
2467 * connections is not harmless as it would undo the
2468 * snd_cwnd reduction that occurs when a TFO SYN|ACK
2471 if (!IS_FASTOPEN(tp->t_flags))
2474 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
2477 * If segment contains data or ACK, will call tcp_reass()
2478 * later; if not, do so now to pass queued data to user.
2480 if (tlen == 0 && (thflags & TH_FIN) == 0)
2481 (void) tcp_reass(tp, (struct tcphdr *)0, 0,
2483 tp->snd_wl1 = th->th_seq - 1;
2487 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
2488 * ACKs. If the ack is in the range
2489 * tp->snd_una < th->th_ack <= tp->snd_max
2490 * then advance tp->snd_una to th->th_ack and drop
2491 * data from the retransmission queue. If this ACK reflects
2492 * more up-to-date window information, we update our window information.
2494 case TCPS_ESTABLISHED:
2495 case TCPS_FIN_WAIT_1:
2496 case TCPS_FIN_WAIT_2:
2497 case TCPS_CLOSE_WAIT:
2500 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2501 TCPSTAT_INC(tcps_rcvacktoomuch);
2504 if ((tp->t_flags & TF_SACK_PERMIT) &&
2505 ((to.to_flags & TOF_SACK) ||
2506 !TAILQ_EMPTY(&tp->snd_holes)))
2507 sack_changed = tcp_sack_doack(tp, &to, th->th_ack);
2510 * Reset the value so that previous (valid) value
2511 * from the last ack with SACK doesn't get used.
2513 tp->sackhint.sacked_bytes = 0;
2516 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
2517 hhook_run_tcp_est_in(tp, th, &to);
2520 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
2523 maxseg = tcp_maxseg(tp);
2525 (tiwin == tp->snd_wnd ||
2526 (tp->t_flags & TF_SACK_PERMIT))) {
2528 * If this is the first time we've seen a
2529 * FIN from the remote, this is not a
2530 * duplicate and it needs to be processed
2531 * normally. This happens during a
2532 * simultaneous close.
2534 if ((thflags & TH_FIN) &&
2535 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2539 TCPSTAT_INC(tcps_rcvdupack);
2541 * If we have outstanding data (other than
2542 * a window probe), this is a completely
2543 * duplicate ack (ie, window info didn't
2544 * change and FIN isn't set),
2545 * the ack is the biggest we've
2546 * seen and we've seen exactly our rexmt
2547 * threshold of them, assume a packet
2548 * has been dropped and retransmit it.
2549 * Kludge snd_nxt & the congestion
2550 * window so we send only this one
2553 * We know we're losing at the current
2554 * window size so do congestion avoidance
2555 * (set ssthresh to half the current window
2556 * and pull our congestion window back to
2557 * the new ssthresh).
2559 * Dup acks mean that packets have left the
2560 * network (they're now cached at the receiver)
2561 * so bump cwnd by the amount in the receiver
2562 * to keep a constant cwnd packets in the
2565 * When using TCP ECN, notify the peer that
2566 * we reduced the cwnd.
2569 * Following 2 kinds of acks should not affect dupack counting: 1) Old acks
2572 * 2) Acks with SACK but without any new SACK
2573 * information in them. These could result from
2574 * any anomaly in the network like a switch
2575 * duplicating packets or a possible DoS attack.
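/*
 * Illustrative walk-through with hypothetical numbers and the default
 * NewReno-style congestion response: maxseg = 1000, cwnd = 10000, ten
 * segments outstanding and the first one lost.  Each later segment
 * elicits a duplicate ACK; on the third (tcprexmtthresh) dupack,
 * cc_cong_signal(CC_NDUPACK) cuts ssthresh to roughly half the window,
 * the missing segment is retransmitted with cwnd briefly forced to one
 * maxseg, and cwnd is then set to ssthresh plus one maxseg per dupack,
 * so each additional dupack lets one more segment leave.
 */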
2577 if (th->th_ack != tp->snd_una ||
2578 ((tp->t_flags & TF_SACK_PERMIT) &&
2581 else if (!tcp_timer_active(tp, TT_REXMT))
2583 else if (++tp->t_dupacks > tcprexmtthresh ||
2584 IN_FASTRECOVERY(tp->t_flags)) {
2585 cc_ack_received(tp, th, nsegs,
2587 if ((tp->t_flags & TF_SACK_PERMIT) &&
2588 IN_FASTRECOVERY(tp->t_flags)) {
2592 * Compute the amount of data in flight first.
2593 * We can inject new data into the pipe iff
2594 * we have less than 1/2 the original window's
2595 * worth of data in flight.
2597 if (V_tcp_do_rfc6675_pipe)
2598 awnd = tcp_compute_pipe(tp);
2600 awnd = (tp->snd_nxt - tp->snd_fack) +
2601 tp->sackhint.sack_bytes_rexmit;
2603 if (awnd < tp->snd_ssthresh) {
2604 tp->snd_cwnd += maxseg;
2605 if (tp->snd_cwnd > tp->snd_ssthresh)
2606 tp->snd_cwnd = tp->snd_ssthresh;
2609 tp->snd_cwnd += maxseg;
2610 (void) tp->t_fb->tfb_tcp_output(tp);
2612 } else if (tp->t_dupacks == tcprexmtthresh) {
2613 tcp_seq onxt = tp->snd_nxt;
2616 * If we're doing sack, check to
2617 * see if we're already in sack
2618 * recovery. If we're not doing sack,
2619 * check to see if we're in newreno recovery.
2622 if (tp->t_flags & TF_SACK_PERMIT) {
2623 if (IN_FASTRECOVERY(tp->t_flags)) {
2628 if (SEQ_LEQ(th->th_ack,
2634 /* Congestion signal before ack. */
2635 cc_cong_signal(tp, th, CC_NDUPACK);
2636 cc_ack_received(tp, th, nsegs,
2638 tcp_timer_activate(tp, TT_REXMT, 0);
2640 if (tp->t_flags & TF_SACK_PERMIT) {
2642 tcps_sack_recovery_episode);
2643 tp->sack_newdata = tp->snd_nxt;
2644 tp->snd_cwnd = maxseg;
2645 (void) tp->t_fb->tfb_tcp_output(tp);
2648 tp->snd_nxt = th->th_ack;
2649 tp->snd_cwnd = maxseg;
2650 (void) tp->t_fb->tfb_tcp_output(tp);
2651 KASSERT(tp->snd_limited <= 2,
2652 ("%s: tp->snd_limited too big",
2654 tp->snd_cwnd = tp->snd_ssthresh +
2656 (tp->t_dupacks - tp->snd_limited);
2657 if (SEQ_GT(onxt, tp->snd_nxt))
2660 } else if (V_tcp_do_rfc3042) {
2662 * Process first and second duplicate
2663 * ACKs. Each indicates a segment
2664 * leaving the network, creating room
2665 * for more. Make sure we can send a
2666 * packet on reception of each duplicate
2667 * ACK by increasing snd_cwnd by one
2668 * segment. Restore the original
2669 * snd_cwnd after packet transmission.
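/*
 * Example of the limited transmit bookkeeping below, with hypothetical
 * numbers: maxseg = 1000 and 5000 bytes outstanding.  The first dupack
 * allows 5000 + 1 * 1000 bytes in flight so one new segment may go
 * out; the second allows 5000 + 2 * 1000 less whatever already left,
 * which snd_limited tracks.  snd_cwnd is restored to its old value
 * right after the tcp_output() call.
 */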
2671 cc_ack_received(tp, th, nsegs,
2673 uint32_t oldcwnd = tp->snd_cwnd;
2674 tcp_seq oldsndmax = tp->snd_max;
2678 KASSERT(tp->t_dupacks == 1 ||
2680 ("%s: dupacks not 1 or 2",
2682 if (tp->t_dupacks == 1)
2683 tp->snd_limited = 0;
2685 (tp->snd_nxt - tp->snd_una) +
2686 (tp->t_dupacks - tp->snd_limited) *
2689 * Only call tcp_output when there
2690 * is new data available to be sent.
2691 * Otherwise we would send pure ACKs.
2693 SOCKBUF_LOCK(&so->so_snd);
2694 avail = sbavail(&so->so_snd) -
2695 (tp->snd_nxt - tp->snd_una);
2696 SOCKBUF_UNLOCK(&so->so_snd);
2698 (void) tp->t_fb->tfb_tcp_output(tp);
2699 sent = tp->snd_max - oldsndmax;
2700 if (sent > maxseg) {
2701 KASSERT((tp->t_dupacks == 2 &&
2702 tp->snd_limited == 0) ||
2703 (sent == maxseg + 1 &&
2704 tp->t_flags & TF_SENTFIN),
2705 ("%s: sent too much",
2707 tp->snd_limited = 2;
2708 } else if (sent > 0)
2710 tp->snd_cwnd = oldcwnd;
2717 * This ack is advancing the left edge, reset the dupack counter.
2722 * If this ack also has new SACK info, increment the
2723 * counter as per rfc6675.
2725 if ((tp->t_flags & TF_SACK_PERMIT) && sack_changed)
2729 KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
2730 ("%s: th_ack <= snd_una", __func__));
2733 * If the congestion window was inflated to account
2734 * for the other side's cached packets, retract it.
2736 if (IN_FASTRECOVERY(tp->t_flags)) {
2737 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2738 if (tp->t_flags & TF_SACK_PERMIT)
2739 tcp_sack_partialack(tp, th);
2741 tcp_newreno_partial_ack(tp, th);
2743 cc_post_recovery(tp, th);
2746 * If we reach this point, ACK is not a duplicate,
2747 * i.e., it ACKs something we sent.
2749 if (tp->t_flags & TF_NEEDSYN) {
2751 * T/TCP: Connection was half-synchronized, and our
2752 * SYN has been ACK'd (so connection is now fully
2753 * synchronized). Go to non-starred state,
2754 * increment snd_una for ACK of SYN, and check if
2755 * we can do window scaling.
2757 tp->t_flags &= ~TF_NEEDSYN;
2759 /* Do window scaling? */
2760 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2761 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2762 tp->rcv_scale = tp->request_r_scale;
2763 /* Send window already scaled. */
2768 INP_WLOCK_ASSERT(tp->t_inpcb);
2770 acked = BYTES_THIS_ACK(tp, th);
2771 KASSERT(acked >= 0, ("%s: acked unexpectedly negative "
2772 "(tp->snd_una=%u, th->th_ack=%u, tp=%p, m=%p)", __func__,
2773 tp->snd_una, th->th_ack, tp, m));
2774 TCPSTAT_ADD(tcps_rcvackpack, nsegs);
2775 TCPSTAT_ADD(tcps_rcvackbyte, acked);
2778 * If we just performed our first retransmit, and the ACK
2779 * arrives within our recovery window, then it was a mistake
2780 * to do the retransmit in the first place. Recover our
2781 * original cwnd and ssthresh, and proceed to transmit where we left off.
2784 if (tp->t_rxtshift == 1 && tp->t_flags & TF_PREVVALID &&
2785 (int)(ticks - tp->t_badrxtwin) < 0)
2786 cc_cong_signal(tp, th, CC_RTO_ERR);
2789 * If we have a timestamp reply, update smoothed
2790 * round trip time. If no timestamp is present but
2791 * transmit timer is running and timed sequence
2792 * number was acked, update smoothed round trip time.
2793 * Since we now have an rtt measurement, cancel the
2794 * timer backoff (cf., Phil Karn's retransmit alg.).
2795 * Recompute the initial retransmit timer.
2797 * Some boxes send broken timestamp replies
2798 * during the SYN+ACK phase, ignore
2799 * timestamps of 0 or we could calculate a
2800 * huge RTT and blow up the retransmit timer.
2802 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) {
2805 t = tcp_ts_getticks() - to.to_tsecr;
2806 if (!tp->t_rttlow || tp->t_rttlow > t)
2808 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1);
2809 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2810 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
2811 tp->t_rttlow = ticks - tp->t_rtttime;
2812 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2816 * If all outstanding data is acked, stop retransmit
2817 * timer and remember to restart (more output or persist).
2818 * If there is more data to be acked, restart retransmit
2819 * timer, using current (possibly backed-off) value.
2821 if (th->th_ack == tp->snd_max) {
2822 tcp_timer_activate(tp, TT_REXMT, 0);
2824 } else if (!tcp_timer_active(tp, TT_PERSIST))
2825 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
2828 * If no data (only SYN) was ACK'd,
2829 * skip rest of ACK processing.
2835 * Let the congestion control algorithm update congestion
2836 * control related information. This typically means increasing
2837 * the congestion window.
2839 cc_ack_received(tp, th, nsegs, CC_ACK);
2841 SOCKBUF_LOCK(&so->so_snd);
2842 if (acked > sbavail(&so->so_snd)) {
2843 if (tp->snd_wnd >= sbavail(&so->so_snd))
2844 tp->snd_wnd -= sbavail(&so->so_snd);
2847 mfree = sbcut_locked(&so->so_snd,
2848 (int)sbavail(&so->so_snd));
2851 mfree = sbcut_locked(&so->so_snd, acked);
2852 if (tp->snd_wnd >= (uint32_t) acked)
2853 tp->snd_wnd -= acked;
2858 /* NB: sowwakeup_locked() does an implicit unlock. */
2859 sowwakeup_locked(so);
2861 /* Detect una wraparound. */
2862 if (!IN_RECOVERY(tp->t_flags) &&
2863 SEQ_GT(tp->snd_una, tp->snd_recover) &&
2864 SEQ_LEQ(th->th_ack, tp->snd_recover))
2865 tp->snd_recover = th->th_ack - 1;
2866 /* XXXLAS: Can this be moved up into cc_post_recovery? */
2867 if (IN_RECOVERY(tp->t_flags) &&
2868 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
2869 EXIT_RECOVERY(tp->t_flags);
2871 tp->snd_una = th->th_ack;
2872 if (tp->t_flags & TF_SACK_PERMIT) {
2873 if (SEQ_GT(tp->snd_una, tp->snd_recover))
2874 tp->snd_recover = tp->snd_una;
2876 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2877 tp->snd_nxt = tp->snd_una;
2879 switch (tp->t_state) {
2882 * In FIN_WAIT_1 STATE in addition to the processing
2883 * for the ESTABLISHED state if our FIN is now acknowledged
2884 * then enter FIN_WAIT_2.
2886 case TCPS_FIN_WAIT_1:
2887 if (ourfinisacked) {
2889 * If we can't receive any more
2890 * data, then closing user can proceed.
2891 * Starting the timer is contrary to the
2892 * specification, but if we don't get a FIN
2893 * we'll hang forever.
2896 * we should release the tp also, and use a compressed state.
2899 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2900 soisdisconnected(so);
2901 tcp_timer_activate(tp, TT_2MSL,
2902 (tcp_fast_finwait2_recycle ?
2903 tcp_finwait2_timeout :
2906 tcp_state_change(tp, TCPS_FIN_WAIT_2);
2911 * In CLOSING STATE in addition to the processing for
2912 * the ESTABLISHED state if the ACK acknowledges our FIN
2913 * then enter the TIME-WAIT state, otherwise ignore the segment.
2917 if (ourfinisacked) {
2918 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2920 INP_INFO_RUNLOCK(&V_tcbinfo);
2927 * In LAST_ACK, we may still be waiting for data to drain
2928 * and/or to be acked, as well as for the ack of our FIN.
2929 * If our FIN is now acknowledged, delete the TCB,
2930 * enter the closed state and return.
2933 if (ourfinisacked) {
2934 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2943 INP_WLOCK_ASSERT(tp->t_inpcb);
2946 * Update window information.
2947 * Don't look at window if no ACK: TACs send garbage on first SYN.
2949 if ((thflags & TH_ACK) &&
2950 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
2951 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
2952 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
2953 /* keep track of pure window updates */
2955 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
2956 TCPSTAT_INC(tcps_rcvwinupd);
2957 tp->snd_wnd = tiwin;
2958 tp->snd_wl1 = th->th_seq;
2959 tp->snd_wl2 = th->th_ack;
2960 if (tp->snd_wnd > tp->max_sndwnd)
2961 tp->max_sndwnd = tp->snd_wnd;
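/*
 * The three-part test above is the usual "newer information only"
 * rule, restated here for clarity: accept the advertised window when
 * the segment carries a new sequence number, or the same sequence
 * number with a new ACK, or the same sequence and ACK numbers but a
 * larger window.  This keeps old, reordered segments from shrinking
 * snd_wnd.
 */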
2966 * Process segments with URG.
2968 if ((thflags & TH_URG) && th->th_urp &&
2969 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2971 * This is a kludge, but if we receive and accept
2972 * random urgent pointers, we'll crash in
2973 * soreceive. It's hard to imagine someone
2974 * actually wanting to send this much urgent data.
2976 SOCKBUF_LOCK(&so->so_rcv);
2977 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) {
2978 th->th_urp = 0; /* XXX */
2979 thflags &= ~TH_URG; /* XXX */
2980 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
2981 goto dodata; /* XXX */
2984 * If this segment advances the known urgent pointer,
2985 * then mark the data stream. This should not happen
2986 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
2987 * a FIN has been received from the remote side.
2988 * In these states we ignore the URG.
2990 * According to RFC961 (Assigned Protocols),
2991 * the urgent pointer points to the last octet
2992 * of urgent data. We continue, however,
2993 * to consider it to indicate the first octet
2994 * of data past the urgent section as the original
2995 * spec states (in one of two places).
2997 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
2998 tp->rcv_up = th->th_seq + th->th_urp;
2999 so->so_oobmark = sbavail(&so->so_rcv) +
3000 (tp->rcv_up - tp->rcv_nxt) - 1;
3001 if (so->so_oobmark == 0)
3002 so->so_rcv.sb_state |= SBS_RCVATMARK;
3004 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
3006 SOCKBUF_UNLOCK(&so->so_rcv);
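/*
 * Small worked example of the oobmark arithmetic above (hypothetical
 * numbers): with 10 bytes already queued in so_rcv, rcv_nxt = 1000 and
 * rcv_up = 1005, so_oobmark = 10 + (1005 - 1000) - 1 = 14, i.e. the
 * urgent octet will be the 15th byte the application reads from here.
 */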
3008 * Remove out of band data so it doesn't get presented to the user.
3009 * This can happen independent of advancing the URG pointer,
3010 * but if two URG's are pending at once, some out-of-band
3011 * data may creep in... ick.
3013 if (th->th_urp <= (uint32_t)tlen &&
3014 !(so->so_options & SO_OOBINLINE)) {
3015 /* hdr drop is delayed */
3016 tcp_pulloutofband(so, th, m, drop_hdrlen);
3020 * If no out of band data is expected,
3021 * pull receive urgent pointer along
3022 * with the receive window.
3024 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
3025 tp->rcv_up = tp->rcv_nxt;
3028 INP_WLOCK_ASSERT(tp->t_inpcb);
3031 * Process the segment text, merging it into the TCP sequencing queue,
3032 * and arranging for acknowledgment of receipt if necessary.
3033 * This process logically involves adjusting tp->rcv_wnd as data
3034 * is presented to the user (this happens in tcp_usrreq.c,
3035 * case PRU_RCVD). If a FIN has already been received on this
3036 * connection then we just ignore the text.
3039 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
3040 IS_FASTOPEN(tp->t_flags));
3042 #define tfo_syn (false)
3044 if ((tlen || (thflags & TH_FIN) || tfo_syn) &&
3045 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3046 tcp_seq save_start = th->th_seq;
3047 m_adj(m, drop_hdrlen); /* delayed header drop */
3049 * Insert segment which includes th into TCP reassembly queue
3050 * with control block tp. Set thflags to whether reassembly now
3051 * includes a segment with FIN. This handles the common case
3052 * inline (segment is the next to be received on an established
3053 * connection, and the queue is empty), avoiding linkage into
3054 * and removal from the queue and repetition of various conversions.
3056 * Set DELACK for segments received in order, but ack
3057 * immediately when segments are out of order (so
3058 * fast retransmit can work).
3060 if (th->th_seq == tp->rcv_nxt &&
3061 LIST_EMPTY(&tp->t_segq) &&
3062 (TCPS_HAVEESTABLISHED(tp->t_state) ||
3064 if (DELAY_ACK(tp, tlen) || tfo_syn)
3065 tp->t_flags |= TF_DELACK;
3067 tp->t_flags |= TF_ACKNOW;
3068 tp->rcv_nxt += tlen;
3069 thflags = th->th_flags & TH_FIN;
3070 TCPSTAT_INC(tcps_rcvpack);
3071 TCPSTAT_ADD(tcps_rcvbyte, tlen);
3072 SOCKBUF_LOCK(&so->so_rcv);
3073 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
3076 sbappendstream_locked(&so->so_rcv, m, 0);
3077 /* NB: sorwakeup_locked() does an implicit unlock. */
3078 sorwakeup_locked(so);
3081 * XXX: Due to the header drop above "th" is
3082 * theoretically invalid by now. Fortunately
3083 * m_adj() doesn't actually free any mbufs
3084 * when trimming from the head.
3086 thflags = tcp_reass(tp, th, &tlen, m);
3087 tp->t_flags |= TF_ACKNOW;
3089 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT))
3090 tcp_update_sack_list(tp, save_start, save_start + tlen);
3093 * Note the amount of data that peer has sent into
3094 * our window, in order to estimate the sender's buffer size.
3098 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
3099 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
3101 len = so->so_rcv.sb_hiwat;
3109 * If FIN is received ACK the FIN and let the user know
3110 * that the connection is closing.
3112 if (thflags & TH_FIN) {
3113 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3116 * If connection is half-synchronized
3117 * (ie NEEDSYN flag on) then delay ACK,
3118 * so it may be piggybacked when SYN is sent.
3119 * Otherwise, since we received a FIN then no
3120 * more input can be expected, send ACK now.
3122 if (tp->t_flags & TF_NEEDSYN)
3123 tp->t_flags |= TF_DELACK;
3125 tp->t_flags |= TF_ACKNOW;
3128 switch (tp->t_state) {
3131 * In SYN_RECEIVED and ESTABLISHED STATES
3132 * enter the CLOSE_WAIT state.
3134 case TCPS_SYN_RECEIVED:
3135 tp->t_starttime = ticks;
3137 case TCPS_ESTABLISHED:
3138 tcp_state_change(tp, TCPS_CLOSE_WAIT);
3142 * If still in FIN_WAIT_1 STATE FIN has not been acked so
3143 * enter the CLOSING state.
3145 case TCPS_FIN_WAIT_1:
3146 tcp_state_change(tp, TCPS_CLOSING);
3150 * In FIN_WAIT_2 state enter the TIME_WAIT state,
3151 * starting the time-wait timer, turning off the other standard timers.
3154 case TCPS_FIN_WAIT_2:
3155 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
3156 KASSERT(ti_locked == TI_RLOCKED, ("%s: dodata "
3157 "TCP_FIN_WAIT_2 ti_locked: %d", __func__,
3161 INP_INFO_RUNLOCK(&V_tcbinfo);
3165 if (ti_locked == TI_RLOCKED)
3166 INP_INFO_RUNLOCK(&V_tcbinfo);
3167 ti_locked = TI_UNLOCKED;
3170 if (so->so_options & SO_DEBUG)
3171 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
3174 TCP_PROBE3(debug__input, tp, th, m);
3177 * Return any desired output.
3179 if (needoutput || (tp->t_flags & TF_ACKNOW))
3180 (void) tp->t_fb->tfb_tcp_output(tp);
3183 KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d",
3184 __func__, ti_locked));
3185 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3186 INP_WLOCK_ASSERT(tp->t_inpcb);
3188 if (tp->t_flags & TF_DELACK) {
3189 tp->t_flags &= ~TF_DELACK;
3190 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
3192 INP_WUNLOCK(tp->t_inpcb);
3197 * Generate an ACK dropping incoming segment if it occupies
3198 * sequence space, where the ACK reflects our state.
3200 * We can now skip the test for the RST flag since all
3201 * paths to this code happen after packets containing
3202 * RST have been dropped.
3204 * In the SYN-RECEIVED state, don't send an ACK unless the
3205 * segment we received passes the SYN-RECEIVED ACK test.
3206 * If it fails send a RST. This breaks the loop in the
3207 * "LAND" DoS attack, and also prevents an ACK storm
3208 * between two listening ports that have been sent forged
3209 * SYN segments, each with the source address of the other.
3211 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
3212 (SEQ_GT(tp->snd_una, th->th_ack) ||
3213 SEQ_GT(th->th_ack, tp->snd_max)) ) {
3214 rstreason = BANDLIM_RST_OPENPORT;
3218 if (so->so_options & SO_DEBUG)
3219 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3222 TCP_PROBE3(debug__input, tp, th, m);
3223 if (ti_locked == TI_RLOCKED)
3224 INP_INFO_RUNLOCK(&V_tcbinfo);
3225 ti_locked = TI_UNLOCKED;
3227 tp->t_flags |= TF_ACKNOW;
3228 (void) tp->t_fb->tfb_tcp_output(tp);
3229 INP_WUNLOCK(tp->t_inpcb);
3234 if (ti_locked == TI_RLOCKED)
3235 INP_INFO_RUNLOCK(&V_tcbinfo);
3236 ti_locked = TI_UNLOCKED;
3239 tcp_dropwithreset(m, th, tp, tlen, rstreason);
3240 INP_WUNLOCK(tp->t_inpcb);
3242 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
3246 if (ti_locked == TI_RLOCKED) {
3247 INP_INFO_RUNLOCK(&V_tcbinfo);
3248 ti_locked = TI_UNLOCKED;
3252 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3256 * Drop space held by incoming segment and return.
3259 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
3260 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3263 TCP_PROBE3(debug__input, tp, th, m);
3265 INP_WUNLOCK(tp->t_inpcb);
3273 * Issue RST and make ACK acceptable to originator of segment.
3274 * The mbuf must still include the original packet header.
3278 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
3279 int tlen, int rstreason)
3285 struct ip6_hdr *ip6;
3289 INP_WLOCK_ASSERT(tp->t_inpcb);
3292 /* Don't bother if destination was broadcast/multicast. */
3293 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
3296 if (mtod(m, struct ip *)->ip_v == 6) {
3297 ip6 = mtod(m, struct ip6_hdr *);
3298 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3299 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3301 /* IPv6 anycast check is done at tcp6_input() */
3304 #if defined(INET) && defined(INET6)
3309 ip = mtod(m, struct ip *);
3310 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3311 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3312 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3313 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3318 /* Perform bandwidth limiting. */
3319 if (badport_bandlim(rstreason) < 0)
3322 /* tcp_respond consumes the mbuf chain. */
3323 if (th->th_flags & TH_ACK) {
3324 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
3325 th->th_ack, TH_RST);
3327 if (th->th_flags & TH_SYN)
3329 if (th->th_flags & TH_FIN)
3331 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
3332 (tcp_seq)0, TH_RST|TH_ACK);
3340 * Parse TCP options and place in tcpopt.
3343 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
3348 for (; cnt > 0; cnt -= optlen, cp += optlen) {
3350 if (opt == TCPOPT_EOL)
3352 if (opt == TCPOPT_NOP)
3358 if (optlen < 2 || optlen > cnt)
3363 if (optlen != TCPOLEN_MAXSEG)
3365 if (!(flags & TO_SYN))
3367 to->to_flags |= TOF_MSS;
3368 bcopy((char *)cp + 2,
3369 (char *)&to->to_mss, sizeof(to->to_mss));
3370 to->to_mss = ntohs(to->to_mss);
3373 if (optlen != TCPOLEN_WINDOW)
3375 if (!(flags & TO_SYN))
3377 to->to_flags |= TOF_SCALE;
3378 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
3380 case TCPOPT_TIMESTAMP:
3381 if (optlen != TCPOLEN_TIMESTAMP)
3383 to->to_flags |= TOF_TS;
3384 bcopy((char *)cp + 2,
3385 (char *)&to->to_tsval, sizeof(to->to_tsval));
3386 to->to_tsval = ntohl(to->to_tsval);
3387 bcopy((char *)cp + 6,
3388 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
3389 to->to_tsecr = ntohl(to->to_tsecr);
3391 case TCPOPT_SIGNATURE:
3393 * In order to reply to a host which has set the
3394 * TCP_SIGNATURE option in its initial SYN, we have
3395 * to record the fact that the option was observed
3396 * here for the syncache code to perform the correct response.
3399 if (optlen != TCPOLEN_SIGNATURE)
3401 to->to_flags |= TOF_SIGNATURE;
3402 to->to_signature = cp + 2;
3404 case TCPOPT_SACK_PERMITTED:
3405 if (optlen != TCPOLEN_SACK_PERMITTED)
3407 if (!(flags & TO_SYN))
3411 to->to_flags |= TOF_SACKPERM;
3414 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
3418 to->to_flags |= TOF_SACK;
3419 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
3420 to->to_sacks = cp + 2;
3421 TCPSTAT_INC(tcps_sack_rcv_blocks);
3424 case TCPOPT_FAST_OPEN:
3425 if ((optlen != TCPOLEN_FAST_OPEN_EMPTY) &&
3426 ((optlen < TCPOLEN_FAST_OPEN_MIN) ||
3427 (optlen > TCPOLEN_FAST_OPEN_MAX)))
3429 if (!(flags & TO_SYN))
3431 if (!V_tcp_fastopen_enabled)
3433 to->to_flags |= TOF_FASTOPEN;
3434 to->to_tfo_len = optlen - 2;
3435 to->to_tfo_cookie = to->to_tfo_len ? cp + 2 : NULL;
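/*
 * For reference, the wire format walked above is the standard
 * kind/length/value list (sketch, not exhaustive): MSS is the bytes
 * { 2, 4, mss_hi, mss_lo }, window scale is { 3, 3, shift },
 * SACK-permitted is { 4, 2 }, and a timestamp option is
 * { 8, 10, tsval(4), tsecr(4) }.  Kind 1 is a one-byte NOP pad and
 * kind 0 ends the list, which is why those two are special-cased
 * before the length checks.
 */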
3445 * Pull out of band byte out of a segment so
3446 * it doesn't appear in the user's data queue.
3447 * It is still reflected in the segment length for
3448 * sequencing purposes.
3451 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
3454 int cnt = off + th->th_urp - 1;
3457 if (m->m_len > cnt) {
3458 char *cp = mtod(m, caddr_t) + cnt;
3459 struct tcpcb *tp = sototcpcb(so);
3461 INP_WLOCK_ASSERT(tp->t_inpcb);
3464 tp->t_oobflags |= TCPOOB_HAVEDATA;
3465 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
3467 if (m->m_flags & M_PKTHDR)
3476 panic("tcp_pulloutofband");
3480 * Collect new round-trip time estimate
3481 * and update averages and current timeout.
3484 tcp_xmit_timer(struct tcpcb *tp, int rtt)
3488 INP_WLOCK_ASSERT(tp->t_inpcb);
3490 TCPSTAT_INC(tcps_rttupdated);
3492 if ((tp->t_srtt != 0) && (tp->t_rxtshift <= TCP_RTT_INVALIDATE)) {
3494 * srtt is stored as fixed point with 5 bits after the
3495 * binary point (i.e., scaled by 32). The following magic
3496 * is equivalent to the smoothing algorithm in rfc793 with
3497 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
3498 * point). Adjust rtt to origin 0.
3500 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
3501 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
3503 if ((tp->t_srtt += delta) <= 0)
3507 * We accumulate a smoothed rtt variance (actually, a
3508 * smoothed mean difference), then set the retransmit
3509 * timer to smoothed rtt + 4 times the smoothed variance.
3510 * rttvar is stored as fixed point with 4 bits after the
3511 * binary point (scaled by 16). The following is
3512 * equivalent to rfc793 smoothing with an alpha of .75
3513 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
3514 * rfc793's wired-in beta.
3518 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
3519 if ((tp->t_rttvar += delta) <= 0)
3521 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
3522 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3525 * No rtt measurement yet - use the unsmoothed rtt.
3526 * Set the variance to half the rtt (so our first
3527 * retransmit happens at 3*rtt).
3529 tp->t_srtt = rtt << TCP_RTT_SHIFT;
3530 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
3531 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3537 * the retransmit should happen at rtt + 4 * rttvar.
3538 * Because of the way we do the smoothing, srtt and rttvar
3539 * will each average +1/2 tick of bias. When we compute
3540 * the retransmit timer, we want 1/2 tick of rounding and
3541 * 1 extra tick because of +-1/2 tick uncertainty in the
3542 * firing of the timer. The bias will give us exactly the
3543 * 1.5 tick we need. But, because the bias is
3544 * statistical, we have to test that we don't drop below
3545 * the minimum feasible timer (which is 2 ticks).
3547 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
3548 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
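/*
 * In plain arithmetic, the fixed-point updates above amount to
 * (a sketch of the intent only, using the pre-update srtt in the
 * second line):
 *
 *	srtt   <- srtt + ((rtt - 1) - srtt) / 8
 *	rttvar <- rttvar + (|(rtt - 1) - srtt| - rttvar) / 4
 *	rxtcur <- clamp(srtt + 4 * rttvar,
 *			max(t_rttmin, rtt + 2), TCPTV_REXMTMAX)
 *
 * with srtt kept scaled by 32 and rttvar by 16 so everything stays in
 * integer arithmetic.
 */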
3551 * We received an ack for a packet that wasn't retransmitted;
3552 * it is probably safe to discard any error indications we've
3553 * received recently. This isn't quite right, but close enough
3554 * for now (a route might have failed after we sent a segment,
3555 * and the return path might not be symmetrical).
3557 tp->t_softerror = 0;
3561 * Determine a reasonable value for maxseg size.
3562 * If the route is known, check route for mtu.
3563 * If none, use an mss that can be handled on the outgoing interface
3564 * without forcing IP to fragment. If no route is found, route has no mtu,
3565 * or the destination isn't local, use a default, hopefully conservative
3566 * size (usually 512 or the default IP max size, but no more than the mtu
3567 * of the interface), as we can't discover anything about intervening
3568 * gateways or networks. We also initialize the congestion/slow start
3569 * window to be a single segment if the destination isn't local.
3570 * While looking at the routing entry, we also initialize other path-dependent
3571 * parameters from pre-set or cached values in the routing entry.
3573 * NOTE that resulting t_maxseg doesn't include space for TCP options or
3574 * IP options, e.g. IPSEC data, since length of this data may vary, and
3575 * thus it is calculated for every segment separately in tcp_output().
3577 * NOTE that this routine is only called when we process an incoming
3578 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS
3579 * settings are handled in tcp_mssopt().
3582 tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer,
3583 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap)
3586 uint32_t maxmtu = 0;
3587 struct inpcb *inp = tp->t_inpcb;
3588 struct hc_metrics_lite metrics;
3590 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
3591 size_t min_protoh = isipv6 ?
3592 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
3593 sizeof (struct tcpiphdr);
3595 const size_t min_protoh = sizeof(struct tcpiphdr);
3598 INP_WLOCK_ASSERT(tp->t_inpcb);
3600 if (mtuoffer != -1) {
3601 KASSERT(offer == -1, ("%s: conflict", __func__));
3602 offer = mtuoffer - min_protoh;
3608 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap);
3609 tp->t_maxseg = V_tcp_v6mssdflt;
3612 #if defined(INET) && defined(INET6)
3617 maxmtu = tcp_maxmtu(&inp->inp_inc, cap);
3618 tp->t_maxseg = V_tcp_mssdflt;
3623 * No route to sender, stay with default mss and return.
3627 * In case we return early we need to initialize metrics
3628 * to a defined state as tcp_hc_get() would do for us
3629 * if there was no cache hit.
3631 if (metricptr != NULL)
3632 bzero(metricptr, sizeof(struct hc_metrics_lite));
3636 /* What have we got? */
3640 * Offer == 0 means that there was no MSS on the SYN
3641 * segment, in this case we use tcp_mssdflt as
3642 * already assigned to t_maxseg above.
3644 offer = tp->t_maxseg;
3649 * Offer == -1 means that we didn't receive SYN yet.
3655 * Prevent DoS attack with too small MSS. Round up
3656 * to at least minmss.
3658 offer = max(offer, V_tcp_minmss);
3662 * rmx information is now retrieved from tcp_hostcache.
3664 tcp_hc_get(&inp->inp_inc, &metrics);
3665 if (metricptr != NULL)
3666 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite));
3669 * If there's a discovered mtu in tcp hostcache, use it.
3670 * Else, use the link mtu.
3672 if (metrics.rmx_mtu)
3673 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
3677 mss = maxmtu - min_protoh;
3678 if (!V_path_mtu_discovery &&
3679 !in6_localaddr(&inp->in6p_faddr))
3680 mss = min(mss, V_tcp_v6mssdflt);
3683 #if defined(INET) && defined(INET6)
3688 mss = maxmtu - min_protoh;
3689 if (!V_path_mtu_discovery &&
3690 !in_localaddr(inp->inp_faddr))
3691 mss = min(mss, V_tcp_mssdflt);
3695 * XXX - The above conditional (mss = maxmtu - min_protoh)
3696 * probably violates the TCP spec.
3697 * The problem is that, since we don't know the
3698 * other end's MSS, we are supposed to use a conservative
3699 * default. But, if we do that, then MTU discovery will
3700 * never actually take place, because the conservative
3701 * default is much less than the MTUs typically seen
3702 * on the Internet today. For the moment, we'll sweep
3703 * this under the carpet.
3705 * The conservative default might not actually be a problem
3706 * if the only case this occurs is when sending an initial
3707 * SYN with options and data to a host we've never talked
3708 * to before. Then, they will reply with an MSS value which
3709 * will get recorded and the new parameters should get
3710 * recomputed. For Further Study.
3713 mss = min(mss, offer);
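/*
 * Typical outcome of the computation above, with illustrative numbers:
 * over a 1500-byte Ethernet MTU, min_protoh is 40 bytes for IPv4
 * (20 IP + 20 TCP) or 60 for IPv6, giving an mss of 1460 or 1440
 * respectively before it is clamped to the peer's offer here and to
 * the option-space sanity check below.
 */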
3716 * Sanity check: make sure that maxseg will be large
3717 * enough to allow some data on segments even if all
3718 * the option space is used (40 bytes). Otherwise
3719 * funny things may happen in tcp_output.
3721 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
3729 tcp_mss(struct tcpcb *tp, int offer)
3735 struct hc_metrics_lite metrics;
3736 struct tcp_ifcap cap;
3738 KASSERT(tp != NULL, ("%s: tp == NULL", __func__));
3740 bzero(&cap, sizeof(cap));
3741 tcp_mss_update(tp, offer, -1, &metrics, &cap);
3747 * If there's a pipesize, change the socket buffer to that size;
3748 * don't change it if sb_hiwat differs from the default (then it
3749 * has been changed on purpose with setsockopt).
3750 * Make the socket buffers an integral number of mss units;
3751 * if the mss is larger than the socket buffer, decrease the mss.
3753 so = inp->inp_socket;
3754 SOCKBUF_LOCK(&so->so_snd);
3755 if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
3756 bufsize = metrics.rmx_sendpipe;
3758 bufsize = so->so_snd.sb_hiwat;
3762 bufsize = roundup(bufsize, mss);
3763 if (bufsize > sb_max)
3765 if (bufsize > so->so_snd.sb_hiwat)
3766 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
3768 SOCKBUF_UNLOCK(&so->so_snd);
3770 * Sanity check: make sure that maxseg will be large
3771 * enough to allow some data on segments even if all
3772 * the option space is used (40 bytes). Otherwise
3773 * funny things may happen in tcp_output.
3775 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
3777 tp->t_maxseg = max(mss, 64);
3779 SOCKBUF_LOCK(&so->so_rcv);
3780 if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
3781 bufsize = metrics.rmx_recvpipe;
3783 bufsize = so->so_rcv.sb_hiwat;
3784 if (bufsize > mss) {
3785 bufsize = roundup(bufsize, mss);
3786 if (bufsize > sb_max)
3788 if (bufsize > so->so_rcv.sb_hiwat)
3789 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
3791 SOCKBUF_UNLOCK(&so->so_rcv);
3793 /* Check the interface for TSO capabilities. */
3794 if (cap.ifcap & CSUM_TSO) {
3795 tp->t_flags |= TF_TSO;
3796 tp->t_tsomax = cap.tsomax;
3797 tp->t_tsomaxsegcount = cap.tsomaxsegcount;
3798 tp->t_tsomaxsegsize = cap.tsomaxsegsize;
3803 * Determine the MSS option to send on an outgoing SYN.
3806 tcp_mssopt(struct in_conninfo *inc)
3809 uint32_t thcmtu = 0;
3810 uint32_t maxmtu = 0;
3813 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));
3816 if (inc->inc_flags & INC_ISIPV6) {
3817 mss = V_tcp_v6mssdflt;
3818 maxmtu = tcp_maxmtu6(inc, NULL);
3819 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3822 #if defined(INET) && defined(INET6)
3827 mss = V_tcp_mssdflt;
3828 maxmtu = tcp_maxmtu(inc, NULL);
3829 min_protoh = sizeof(struct tcpiphdr);
3832 #if defined(INET6) || defined(INET)
3833 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3836 if (maxmtu && thcmtu)
3837 mss = min(maxmtu, thcmtu) - min_protoh;
3838 else if (maxmtu || thcmtu)
3839 mss = max(maxmtu, thcmtu) - min_protoh;
3846 * When a partial ack arrives, force the retransmission of the
3847 * next unacknowledged segment. Do not clear tp->t_dupacks.
3848 * By setting snd_nxt to th_ack, this forces the retransmission timer to be started again.
3852 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
3854 tcp_seq onxt = tp->snd_nxt;
3855 uint32_t ocwnd = tp->snd_cwnd;
3856 u_int maxseg = tcp_maxseg(tp);
3858 INP_WLOCK_ASSERT(tp->t_inpcb);
3860 tcp_timer_activate(tp, TT_REXMT, 0);
3862 tp->snd_nxt = th->th_ack;
3864 * Set snd_cwnd to one segment beyond acknowledged offset.
3865 * (tp->snd_una has not yet been updated when this function is called.)
3867 tp->snd_cwnd = maxseg + BYTES_THIS_ACK(tp, th);
3868 tp->t_flags |= TF_ACKNOW;
3869 (void) tp->t_fb->tfb_tcp_output(tp);
3870 tp->snd_cwnd = ocwnd;
3871 if (SEQ_GT(onxt, tp->snd_nxt))
3874 * Partial window deflation. Relies on the fact that tp->snd_una has not been updated yet.
3877 if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
3878 tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
3881 tp->snd_cwnd += maxseg;
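/*
 * Worked example of the deflation above (hypothetical numbers): with
 * maxseg = 1000, an old cwnd of 10000 and a partial ack covering 3000
 * bytes, the hole is retransmitted with cwnd briefly forced to
 * 1000 + 3000; cwnd is then restored to 10000, deflated by the 3000
 * acked bytes and re-inflated by one maxseg, leaving 8000.
 */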
3885 tcp_compute_pipe(struct tcpcb *tp)
3887 return (tp->snd_max - tp->snd_una +
3888 tp->sackhint.sack_bytes_rexmit -
3889 tp->sackhint.sacked_bytes);
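/*
 * Example of the estimate above (hypothetical numbers): with
 * snd_una = 1000, snd_max = 11000, 2000 bytes reported held by the
 * receiver via SACK and 500 bytes already retransmitted into holes,
 * the pipe is 10000 + 500 - 2000 = 8500 bytes still in flight.
 */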